content: string (lengths 22 to 815k characters)
id: int64 (values 0 to 4.91M)
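As a rough illustration of how rows with this two-column schema (content: string, id: int64) might be iterated, here is a minimal sketch assuming the dump comes from a Hugging Face-style dataset; the dataset identifier below is a placeholder, not the real name.

# Hedged sketch: iterate a (content: str, id: int64) dataset.
# "some-user/some-code-dump" is a placeholder identifier, not the actual dataset name.
from datasets import load_dataset

ds = load_dataset("some-user/some-code-dump", split="train")
for row in ds.select(range(3)):
    # print the row id and the first 80 characters of the flattened source text
    print(row["id"], row["content"][:80])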
def validateFilename(value):
    """Validate filename."""
    if len(value) == 0:
        raise ValueError("Name of SimpleGridDB file must be specified.")
    return value
5,349,900
def readPNM(fd):
    """Reads the PNM file from the filehandle"""
    t = noncomment(fd)
    s = noncomment(fd)
    # P1/P4 bitmaps carry no maxval line, so assume '1' for those formats.
    m = noncomment(fd) if not (t.startswith('P1') or t.startswith('P4')) else '1'
    data = fd.read()
    ls = len(s.split())
    if ls != 2:
        name = "<pipe>" if fd.name == "<fdopen>" else "Filename = {0}".format(fd.name)
        raise IOError("Expected 2 elements from parsing PNM file, got {0}: {1}".format(ls, name))
    xs, ys = s.split()
    width = int(xs)
    height = int(ys)
    m = int(m)
    if m != 255:
        print("Just want 8 bit pgms for now!")
    d = fromstring(data, dtype=uint8)
    d = reshape(d, (height, width))
    return (m, width, height, d)
5,349,901
def test_atomic_decimal_enumeration_4_nistxml_sv_iv_atomic_decimal_enumeration_5_5(mode, save_output, output_format): """ Type atomic/decimal is restricted by facet enumeration. """ assert_bindings( schema="nistData/atomic/decimal/Schema+Instance/NISTSchema-SV-IV-atomic-decimal-enumeration-5.xsd", instance="nistData/atomic/decimal/Schema+Instance/NISTXML-SV-IV-atomic-decimal-enumeration-5-5.xml", class_name="NistschemaSvIvAtomicDecimalEnumeration5", version="1.1", mode=mode, save_output=save_output, output_format=output_format, structure_style="filenames", )
5,349,902
def run(argv=None): """ Pipeline entry point, runs the all the necessary processes """ # Initialize runtime parameters as object pipeline_options = PipelineOptions() # Save main session state so pickled functions and classes # defined in __main__ can be unpickled pipeline_options.view_as(SetupOptions).save_main_session = True # Beginning of the pipeline p = beam.Pipeline(options=pipeline_options) # Runtime Parameters given during template execution bucket_manifest_options = pipeline_options.view_as(BucketManifestOptions) # Get bucket objects blob_list = get_bucket_manifest(bucket_manifest_options.bucket) # pipeline setup lines = p | beam.Create(blob_list) lines | "compute_md5" >> beam.ParDo( ComputeMD5( bucket_manifest_options.project_id, bucket_manifest_options.pub_topic ) ) # Run the pipeline prog = p.run() prog.wait_until_finish()
5,349,903
def gdxfile(rawgdx): """A gdx.File fixture.""" return gdx.File(rawgdx)
5,349,904
def generate_sample( # pylint: disable=too-many-arguments samp: Sample, set_type: str, handler: typing.Callable, input_path: Path, output_path: Path, pre_proc: ProcType, post_proc: ProcType): """Generate JSON for a sample Arguments: samp {Sample} -- sample data set_type {str} -- data set type handler {typing.Callable} -- data processor input_path {Path} -- input path output_path {Path} -- output path pre_proc {ProcType} -- data dict pre-processor post_proc {ProcType} -- output JSON post-processor Raises: Exception -- unhandled error during generation """ try: console.group(samp.name) samp_data, file_path = handler(samp, input_path, output_path) # noqa # type: typing.Tuple[dict, str] # pylint: disable=line-too-long json_data.write(samp_data, file_path, pre_proc, post_proc) console.success( 'saved {} {} to: ./{}', samp.name, set_type, '/'.join(split(r'[/\\]+', str(file_path.absolute()))[-3:])) except (OSError, UserWarning) as err: console.error(err) try: cleanup(file_path) except UnboundLocalError: pass except Exception as err: # pylint: disable=broad-except raise err finally: console.group_end()
5,349,905
def test_children(): """test ExternalLink.__children__()""" node1 = ExternalLink(wraptext("http://example.com/"), brackets=False) node2 = ExternalLink(wraptext("http://example.com/"), wrap([Text("Example"), Text("Page")])) gen1 = node1.__children__() gen2 = node2.__children__() assert node1.url == next(gen1) assert node2.url == next(gen2) assert node2.title == next(gen2) with pytest.raises(StopIteration): next(gen1) with pytest.raises(StopIteration): next(gen2)
5,349,906
def AppBar( absolute: bool = None, app: bool = None, attributes: dict = {}, bottom: bool = None, children: list = [], class_: str = None, clipped_left: bool = None, clipped_right: bool = None, collapse: bool = None, collapse_on_scroll: bool = None, color: str = None, dark: bool = None, dense: bool = None, elevate_on_scroll: bool = None, elevation: typing.Union[float, str] = None, extended: bool = None, extension_height: typing.Union[float, str] = None, fade_img_on_scroll: bool = None, fixed: bool = None, flat: bool = None, floating: bool = None, height: typing.Union[float, str] = None, hide_on_scroll: bool = None, inverted_scroll: bool = None, layout: Union[Dict[str, Any], Element[ipywidgets.widgets.widget_layout.Layout]] = {}, light: bool = None, max_height: typing.Union[float, str] = None, max_width: typing.Union[float, str] = None, min_height: typing.Union[float, str] = None, min_width: typing.Union[float, str] = None, prominent: bool = None, scroll_off_screen: bool = None, scroll_target: str = None, scroll_threshold: typing.Union[str, float] = None, short: bool = None, shrink_on_scroll: bool = None, slot: str = None, src: typing.Union[str, dict] = None, style_: str = None, tag: str = None, tile: bool = None, v_model: Any = "!!disabled!!", v_on: str = None, v_slots: list = [], value: bool = None, width: typing.Union[float, str] = None, on_absolute: typing.Callable[[bool], Any] = None, on_app: typing.Callable[[bool], Any] = None, on_attributes: typing.Callable[[dict], Any] = None, on_bottom: typing.Callable[[bool], Any] = None, on_children: typing.Callable[[list], Any] = None, on_class_: typing.Callable[[str], Any] = None, on_clipped_left: typing.Callable[[bool], Any] = None, on_clipped_right: typing.Callable[[bool], Any] = None, on_collapse: typing.Callable[[bool], Any] = None, on_collapse_on_scroll: typing.Callable[[bool], Any] = None, on_color: typing.Callable[[str], Any] = None, on_dark: typing.Callable[[bool], Any] = None, on_dense: typing.Callable[[bool], Any] = None, on_elevate_on_scroll: typing.Callable[[bool], Any] = None, on_elevation: typing.Callable[[typing.Union[float, str]], Any] = None, on_extended: typing.Callable[[bool], Any] = None, on_extension_height: typing.Callable[[typing.Union[float, str]], Any] = None, on_fade_img_on_scroll: typing.Callable[[bool], Any] = None, on_fixed: typing.Callable[[bool], Any] = None, on_flat: typing.Callable[[bool], Any] = None, on_floating: typing.Callable[[bool], Any] = None, on_height: typing.Callable[[typing.Union[float, str]], Any] = None, on_hide_on_scroll: typing.Callable[[bool], Any] = None, on_inverted_scroll: typing.Callable[[bool], Any] = None, on_layout: typing.Callable[[Union[Dict[str, Any], Element[ipywidgets.widgets.widget_layout.Layout]]], Any] = None, on_light: typing.Callable[[bool], Any] = None, on_max_height: typing.Callable[[typing.Union[float, str]], Any] = None, on_max_width: typing.Callable[[typing.Union[float, str]], Any] = None, on_min_height: typing.Callable[[typing.Union[float, str]], Any] = None, on_min_width: typing.Callable[[typing.Union[float, str]], Any] = None, on_prominent: typing.Callable[[bool], Any] = None, on_scroll_off_screen: typing.Callable[[bool], Any] = None, on_scroll_target: typing.Callable[[str], Any] = None, on_scroll_threshold: typing.Callable[[typing.Union[str, float]], Any] = None, on_short: typing.Callable[[bool], Any] = None, on_shrink_on_scroll: typing.Callable[[bool], Any] = None, on_slot: typing.Callable[[str], Any] = None, on_src: typing.Callable[[typing.Union[str, dict]], Any] = 
None, on_style_: typing.Callable[[str], Any] = None, on_tag: typing.Callable[[str], Any] = None, on_tile: typing.Callable[[bool], Any] = None, on_v_model: typing.Callable[[Any], Any] = None, on_v_on: typing.Callable[[str], Any] = None, on_v_slots: typing.Callable[[list], Any] = None, on_value: typing.Callable[[bool], Any] = None, on_width: typing.Callable[[typing.Union[float, str]], Any] = None, ) -> Element[ipyvuetify.generated.AppBar]: """ """ kwargs: Dict[Any, Any] = without_default(AppBar, locals()) if isinstance(kwargs.get("layout"), dict): kwargs["layout"] = w.Layout(**kwargs["layout"]) widget_cls = ipyvuetify.generated.AppBar comp = react.core.ComponentWidget(widget=widget_cls) return Element(comp, **kwargs)
5,349,907
def check_file(filename):
    """Check if "filename" exists and is a file.

    Returns:
        (file_ok, error_mssg) tuple: file_ok is True if the file exists and is
        a file, and False if filename is None or is not a file; error_mssg is
        an empty string on success, otherwise the corresponding error message.
    """
    file_ok = True
    error_mssg = ""
    if filename is None:
        error_mssg = "Error: file is missing."
        file_ok = False
    elif not os.path.isfile(filename):
        error_mssg = "Error: '" + str(filename) + "' is not a file."
        file_ok = False
    return file_ok, error_mssg
5,349,908
def conjoin(*funcs): """ Creates a function that composes multiple predicate functions into a single predicate that tests whether **all** elements of an object pass each predicate. Args: *funcs (callable): Function(s) to conjoin. Returns: Conjoin: Function(s) wrapped in a :class:`Conjoin` context. Example: >>> conjoiner = conjoin(lambda x: isinstance(x, int), lambda x: x > 3) >>> conjoiner([1, 2, 3]) False >>> conjoiner([1.0, 2, 1]) False >>> conjoiner([4.0, 5, 6]) False >>> conjoiner([4, 5, 6]) True .. versionadded:: 2.0.0 """ return Conjoin(*funcs)
5,349,909
def main(): """ Entry-point for the function. """ conn_obj = connect_to_database_server(DATABASE) if conn_obj == -1: print("Connection to PostgreSQL Database: {} failed.".format(DATABASE)) sys.exit(0) else: conn = conn_obj[0] cur = conn_obj[1] n_g = 3 #2 for bigrams, 3 for trigrams and so on. generate_wc(conn, cur, n_g)
5,349,910
def calculate_file_sha256(file_path): """calculate file sha256 hash code.""" with open(file_path, 'rb') as fp: sha256_cal = hashlib.sha256() sha256_cal.update(fp.read()) return sha256_cal.hexdigest()
5,349,911
def select_columns_by_feature_type(df, unique_value_to_total_value_ratio_threshold=.05, text_unique_threshold=.9, exclude_strings = True, return_dict = False, return_type='categoric'): """ Determine if a column fits into one of the following types: numeric, categoric, datetime, text. set return_type to one of these return_types to return a list of the column names associated. Determination is made based on if a column in the dataframe is continous based on a ratio between the number of unique values in a column and the total number of values Low cardinality values will get cut off if above the specified ratio. Optionally specify return_dict to return a dictionary where values are column names and values are boolean True if categoric and false if continouous Default ratio threshold is .05 'exclude_strings' is True by default (i.e. if a column has string values it will be marked as a categoric column). If looking for columns that may be numeric/continuous but first need to be processed, this can be set to False. Parameters ---------- df : Pandas DataFrame A DataFrame to search columns within unique_value_to_total_value_ratio_threshold : float The maximum ratio of unique values in a column / total observations. Akin to a cardinality ratio. Default is .05, or that anyting with more than 5% of its values being unique will be considered non-categoric. exclude_strings : Boolean Flag to include all columns with any string values as categoric columns. Default is True. return_dict: Boolean Flag to return a dictionary of the form {column: Categoric_Boolean} where the value is True if a column is categoric. Default is False return_categoric: Boolean Flag to return a list of the categoric columns. Default is True. return_numeric: Boolean Flag to return a list of the continuous columns. Default is False Returns ------- Dict/List A list of the column names that are categoric/continuous OR a dictionary with keys of column names and values True if categoric """ if return_type not in ['categoric', 'numeric', 'text', 'datetime']: warnings.warn("'return_type' must be one of: ['categoric', 'numeric', 'text', 'datetime']") from collections import OrderedDict likely_categoric = OrderedDict() for column in df.columns: likely_categoric[column] = 1.*df[column].nunique()/df[column].count() < unique_value_to_total_value_ratio_threshold # Check if any of the values in the column are strings. 
if exclude_strings: # If so, its value should be true to indicate it is categoric if df[column].apply(type).eq(str).any(): likely_categoric[column] = True likely_text = OrderedDict() for column in df.columns: # Check for unique pct above threshold and value is string likely_text[column] = (1.*df[column].nunique()/df[column].count() > text_unique_threshold) #& isinstance(df[column].values[0], str) likely_datetime = [] for dtype in [np.datetime64, 'datetime', 'datetime64', np.timedelta64, 'timedelta', 'timedelta64', 'datetimetz']: # Add any datetime columns found to likely_datetime collection time_cols = df.select_dtypes(include=dtype).columns.values.tolist() # Append if not empty if time_cols: likely_datetime.append(time_cols) likely_datetime = np.array(likely_datetime).flatten().tolist() if return_dict: return likely_categoric if return_type == 'numeric': numeric_cols = [col for col, value in likely_categoric.items() if (not value) & (col not in likely_datetime)] return numeric_cols elif return_type == 'categoric': categoric_cols = [col for col, value in likely_categoric.items() if value] return categoric_cols elif return_type == 'text': text_cols = [col for col, value in likely_text.items() if value] return text_cols elif return_type == 'datetime': return likely_datetime else: print('Please specify valid return option')
5,349,912
def _Counter_random(self, filter=None): """Return a single random elements from the Counter collection, weighted by count.""" return _Counter_randoms(self, 1, filter=filter)[0]
5,349,913
def EnsureAndroidSdkPackagesInstalled(abi): """Return true if at least one package was not already installed.""" abiPackageList = SdkPackagesForAbi(abi) installedSomething = False packages = AndroidListSdk() for package in abiPackageList: installedSomething |= EnsureSdkPackageInstalled(packages, package) return installedSomething
5,349,914
def dataframe_like(value, name, optional=False, strict=False):
    """
    Convert to dataframe or raise if not dataframe_like

    Parameters
    ----------
    value : object
        Value to verify
    name : str
        Variable name for exceptions
    optional : bool
        Flag indicating whether None is allowed
    strict : bool
        If True, then only allow a dataframe. If False, allow types that
        support casting to dataframe.

    Returns
    -------
    converted : dataframe
        value converted to a dataframe
    """
    if optional and value is None:
        return None
    # Accept dicts (castable to dataframe) and dataframes; with strict=True only dataframes pass.
    if not isinstance(value, (dict, pd.DataFrame)) or (
        strict and not isinstance(value, pd.DataFrame)
    ):
        extra_text = "If not None, " if optional else ""
        strict_text = " dataframe" if strict else " dict or dataframe_like"
        msg = "{0}{1} must be a{2}".format(extra_text, name, strict_text)
        raise TypeError(msg)
    return pd.DataFrame(value)
5,349,915
def pcaFunc(z, n_components=100):
    """Run PCA on z and return the projection plus a dataframe of the first three components."""
    pca = PCA(n_components=n_components)
    pca_result = pca.fit_transform(z)
    re = pd.DataFrame()
    re['pca-one'] = pca_result[:, 0]
    re['pca-two'] = pca_result[:, 1]
    re['pca-three'] = pca_result[:, 2]
    # Not printed for now
    # print('Explained variation per principal component: {}'.format(pca.explained_variance_ratio_))
    return pca_result, re
5,349,916
def choose_optimizer(discriminator, generator, netD, netG, lr_d=2e-4, lr_g=2e-3): """ Set optimizers for discriminator and generator :param discriminator: str, name :param generator: str, name :param netD: :param netG: :param lr_d: :param lr_g: :return: optimizerD, optimizerG """ if discriminator == 'Adam': optimizerD = optim.Adam(netD.parameters(), lr=lr_d, betas=(0.5, 0.999)) elif discriminator == 'RMSprop': optimizerD = optim.RMSprop(netD.parameters(), lr=lr_d) elif discriminator == 'SGD': optimizerD = optim.SGD(netD.parameters(), lr=lr_d, momentum=0.9) elif discriminator == 'zoVIA': optimizerD = zoVIA(netD, lr=lr_d) elif discriminator == 'zoESVIA': optimizerD = zoESVIA(netD, lr=lr_d) elif discriminator == 'zoscESVIA': optimizerD = zoscESVIA(netD, lr=lr_d) if generator == 'Adam': optimizerG = optim.Adam(netG.parameters(), lr=lr_g, betas=(0.5, 0.999)) elif generator == 'RMSprop': optimizerG = optim.RMSprop(netG.parameters(), lr=lr_g) elif generator == 'SGD': optimizerG = optim.SGD(netG.parameters(), lr=lr_g, momentum=0.9) elif generator == 'zoVIA': optimizerG = zoVIA(netG, lr=lr_g) elif generator == 'zoESVIA': optimizerG = zoESVIA(netG, lr=lr_g) elif generator == 'zoscESVIA': optimizerG = zoscESVIA(netG, lr=lr_g) print('Discriminator optimizer: {}, lr={}'.format(discriminator, lr_d)) print('Generator optimizer: {}, lr={}'.format(generator, lr_g)) return optimizerD, optimizerG
5,349,917
def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ url = config.get_main_option("sqlalchemy.url") context.configure( url=url, target_metadata=target_metadata, literal_binds=True, dialect_opts={"paramstyle": "named"}, ) with context.begin_transaction(): context.run_migrations()
5,349,918
def prod(*args: int) -> int: """ This function is wrapped and documented in `_polymorphic.prod()`. """ prod_ = 1 for arg in args: prod_ *= arg return prod_
5,349,919
def field_value(field): """ Returns the value for this BoundField, as rendered in widgets. """ if field.form.is_bound: if isinstance(field.field, FileField) and field.data is None: val = field.form.initial.get(field.name, field.field.initial) else: val = field.data else: val = field.form.initial.get(field.name, field.field.initial) if callable(val): val = val() if val is None: val = '' return val
5,349,920
def distances(p): """Compute lengths of shortest paths between all nodes in Pharmacophore. Args: p (Pharmacophore): model to analyse Returns: dist (numpy array): array with distances between all nodes """ if not isinstance(p, Pharmacophore): raise TypeError("Expected Pharmacophore, got %s instead" % type(p).__name__) dist = np.array(p.edges) for i in range(p.numnodes): for j in range(i): if dist[i][j] == 0: dist[i][j] = dist[j][i] = float("inf") for i in range(len(dist)): compute = False for j in range(i): if dist[i][j] == float("inf"): compute = True break if compute: queue = [k for k in range(p.numnodes)] while queue: queue.sort(key=lambda x: dist[i, x]) u = queue[0] del queue[0] for v in np.where(p.edges[u] > 0)[0]: if v in queue: alt = dist[i, u] + p.edges[u, v] if alt < dist[i, v]: dist[i, v] = dist[v, i] = alt return dist
5,349,921
def initial_data(logged_on_user, users_fixture, streams_fixture): """ Response from /register API request. """ return { 'full_name': logged_on_user['full_name'], 'email': logged_on_user['email'], 'user_id': logged_on_user['user_id'], 'realm_name': 'Test Organization Name', 'unsubscribed': [{ 'audible_notifications': False, 'description': 'announce', 'stream_id': 7, 'is_old_stream': True, 'desktop_notifications': False, 'pin_to_top': False, 'stream_weekly_traffic': 0, 'invite_only': False, 'name': 'announce', 'push_notifications': False, 'email_address': '', 'color': '#bfd56f', 'in_home_view': True }], 'result': 'success', 'queue_id': '1522420755:786', 'realm_users': users_fixture, 'cross_realm_bots': [{ 'full_name': 'Notification Bot', 'timezone': '', 'is_bot': True, 'date_joined': '2015-12-28T19:58:29.035543+00:00', 'email': '[email protected]', 'user_id': 5, 'is_admin': False, 'avatar_url': 'dummy_avatar_url' }, { 'full_name': 'Email Gateway', 'timezone': '', 'is_bot': True, 'date_joined': '2015-12-28T19:58:29.037658+00:00', 'email': '[email protected]', 'user_id': 6, 'is_admin': False, 'avatar_url': 'dummy_avatar_url' }, { 'full_name': 'Welcome Bot', 'timezone': '', 'is_bot': True, 'date_joined': '2015-12-28T19:58:29.033231+00:00', 'email': '[email protected]', 'user_id': 4, 'is_admin': False, 'avatar_url': 'dummy_avatar_url' }, { 'full_name': 'Zulip Feedback Bot', 'timezone': '', 'is_bot': True, 'date_joined': '2015-12-28T19:58:28.972281+00:00', 'email': '[email protected]', 'user_id': 1, 'is_admin': False, 'avatar_url': 'dummy_avatar_url' }], 'subscriptions': streams_fixture, 'msg': '', 'max_message_id': 552761, 'never_subscribed': [{ 'invite_only': False, 'description': 'Announcements from the Zulip GCI Mentors', 'stream_id': 87, 'name': 'GCI announce', 'is_old_stream': True, 'stream_weekly_traffic': 0 }, { 'invite_only': False, 'description': 'General discussion', 'stream_id': 74, 'name': 'GCI general', 'is_old_stream': True, 'stream_weekly_traffic': 0 }], 'unread_msgs': { 'pms': [{ 'sender_id': 1, 'unread_message_ids': [1, 2] }, { 'sender_id': 2, 'unread_message_ids': [3] }], 'count': 0, 'mentions': [], 'streams': [{ 'stream_id': 1000, 'topic': 'Some general unread topic', 'unread_message_ids': [4, 5, 6], 'sender_ids': [1, 2] }, { 'stream_id': 99, 'topic': 'Some private unread topic', 'unread_message_ids': [7], 'sender_ids': [1, 2] }], 'huddles': [{ 'user_ids_string': '1001,11,12', 'unread_message_ids': [11, 12, 13] }, { 'user_ids_string': '1001,11,12,13', 'unread_message_ids': [101, 102], }] }, 'presences': { '[email protected]': { 'ZulipElectron': { 'pushable': False, 'client': 'ZulipElectron', 'status': 'idle', 'timestamp': 1522484059 }, 'ZulipMobile': { 'pushable': False, 'client': 'ZulipMobile', 'status': 'idle', 'timestamp': 1522384165 }, 'aggregated': { 'timestamp': 1522484059, 'client': 'ZulipElectron', 'status': 'idle' } }, logged_on_user['email']: { 'website': { 'pushable': True, 'client': 'website', 'status': 'active', 'timestamp': 1522458138 }, 'ZulipMobile': { 'pushable': True, 'client': 'ZulipMobile', 'status': 'active', 'timestamp': 1522480103 }, 'aggregated': { 'timestamp': 1522480103, 'client': 'ZulipMobile', 'status': 'active' } } }, 'twenty_four_hour_time': True, 'last_event_id': -1, 'muted_topics': [], 'realm_user_groups': [], # Deliberately use hard-coded zulip version and feature level to avoid # adding extra tests unnecessarily. 'zulip_version': MINIMUM_SUPPORTED_SERVER_VERSION[0], 'zulip_feature_level': MINIMUM_SUPPORTED_SERVER_VERSION[1], }
5,349,922
def affine_relu_backward(dout, cache): """ Backward pass for the affine-relu convenience layer """ fc_cache, relu_cache = cache da = relu_backward(dout, relu_cache) dx, dw, db = affine_backward(da, fc_cache) return dx, dw, db
5,349,923
def total_equalities_generator(block): """ Generator which returns all equality Constraint components in a model. Args: block : model to be studied Returns: A generator which returns all equality Constraint components block """ for c in activated_block_component_generator(block, ctype=Constraint): if c.upper is not None and c.lower is not None and c.upper == c.lower: yield c
5,349,924
def create_initial_population() -> List[Image]: """ Create population at step 0 """ return [random_image() for _ in range(POP_SIZE)]
5,349,925
def adapt(value: Any, pg_type: str) -> Any: """ Coerces a value with a PG type into its Python equivalent. :param value: Value :param pg_type: Postgres datatype :return: Coerced value. """ if value is None: return None if pg_type in _TYPE_MAP: return _TYPE_MAP[pg_type](value) return value
5,349,926
def createPREMISEventXML(eventType, agentIdentifier, eventDetail, eventOutcome, outcomeDetail=None, eventIdentifier=None, linkObjectList=[], eventDate=None): """ Actually create our PREMIS Event XML """ eventXML = etree.Element(PREMIS + "event", nsmap=PREMIS_NSMAP) eventIDXML = etree.SubElement(eventXML, PREMIS + "eventIdentifier") eventTypeXML = etree.SubElement(eventXML, PREMIS + "eventType") eventTypeXML.text = eventType eventIDTypeXML = etree.SubElement( eventIDXML, PREMIS + "eventIdentifierType" ) eventIDTypeXML.text = \ "http://purl.org/net/untl/vocabularies/identifier-qualifiers/#UUID" eventIDValueXML = etree.SubElement( eventIDXML, PREMIS + "eventIdentifierValue" ) if eventIdentifier: eventIDValueXML.text = eventIdentifier else: eventIDValueXML.text = uuid.uuid4().hex eventDateTimeXML = etree.SubElement(eventXML, PREMIS + "eventDateTime") if eventDate is None: eventDateTimeXML.text = xsDateTime_format(datetime.utcnow()) else: eventDateTimeXML.text = xsDateTime_format(eventDate) eventDetailXML = etree.SubElement(eventXML, PREMIS + "eventDetail") eventDetailXML.text = eventDetail eventOutcomeInfoXML = etree.SubElement( eventXML, PREMIS + "eventOutcomeInformation" ) eventOutcomeXML = etree.SubElement( eventOutcomeInfoXML, PREMIS + "eventOutcome" ) eventOutcomeXML.text = eventOutcome if outcomeDetail: eventOutcomeDetailXML = etree.SubElement( eventOutcomeInfoXML, PREMIS + "eventOutcomeDetail" ) eventOutcomeDetailNoteXML = etree.SubElement( eventOutcomeDetailXML, PREMIS + "eventOutcomeDetailNote" ) eventOutcomeDetailNoteXML.text = outcomeDetail # Assuming it's a list of 3-item tuples here [ ( identifier, type, role) ] linkAgentIDXML = etree.SubElement( eventXML, PREMIS + "linkingAgentIdentifier") linkAgentIDTypeXML = etree.SubElement( linkAgentIDXML, PREMIS + "linkingAgentIdentifierType" ) linkAgentIDTypeXML.text = \ "http://purl.org/net/untl/vocabularies/identifier-qualifiers/#URL" linkAgentIDValueXML = etree.SubElement( linkAgentIDXML, PREMIS + "linkingAgentIdentifierValue" ) linkAgentIDValueXML.text = agentIdentifier linkAgentIDRoleXML = etree.SubElement( linkAgentIDXML, PREMIS + "linkingAgentRole" ) linkAgentIDRoleXML.text = \ "http://purl.org/net/untl/vocabularies/linkingAgentRoles/#executingProgram" for linkObject in linkObjectList: linkObjectIDXML = etree.SubElement( eventXML, PREMIS + "linkingObjectIdentifier" ) linkObjectIDTypeXML = etree.SubElement( linkObjectIDXML, PREMIS + "linkingObjectIdentifierType" ) linkObjectIDTypeXML.text = linkObject[1] linkObjectIDValueXML = etree.SubElement( linkObjectIDXML, PREMIS + "linkingObjectIdentifierValue" ) linkObjectIDValueXML.text = linkObject[0] if linkObject[2]: linkObjectRoleXML = etree.SubElement( linkObjectIDXML, PREMIS + "linkingObjectRole" ) linkObjectRoleXML.text = linkObject[2] return eventXML
5,349,927
def get_primary_key(conn, table, columns): """ attempts to reverse lookup the primary key by querying the table using the first column and iteratively adding the columns that comes after it until the query returns a unique row in the table. :param conn: an SQLite connection object table: a string denoting the table name to query columns: a list containing column names of the table :return: the list of columns which makes up the primary key """ select_row_query = "SELECT * FROM `{}`".format(table) count_row_query = "SELECT COUNT(*) FROM `{}` WHERE `{}`" primary_key = [] row = conn.execute(select_row_query).fetchone() if row is not None: for i, column in enumerate(columns): if i == 0: count_row_query = count_row_query.format(table, column) else: count_row_query += " AND `{}`".format(column) count_row_query += append_eql_condition(row[i]) primary_key.append(column) count = conn.execute(count_row_query).fetchone() if count[0] == 1: return primary_key # if no primary key was found then the primary key is made up of all columns return columns
5,349,928
def redirect_to_docs(): """Redirect to API docs when at site root""" return RedirectResponse('/redoc')
5,349,929
def pytest_generate_tests(metafunc): """ Dynamic test case generation and parameterization for this module. """ if "delimiter" in metafunc.fixturenames: metafunc.parametrize("delimiter", ["\t", ","])
5,349,930
def init_neighbours(key): """ Sets then neighbouring nodes and initializes the edge count to the neighbours to 1 :param key: str - key of node to which we are searching the neighbours :return: dictionary of neighbours with corresponding edge count """ neighbours = {} neighbouring_nodes = graph[key] for node in neighbouring_nodes: if neighbouring_nodes[node] == {}: neighbours[node] = 1 else: neighbours[node] = neighbouring_nodes[node] return neighbours
5,349,931
def computeNumericalGradient(J, theta): """ Compute numgrad = computeNumericalGradient(J, theta) theta: a matrix of parameters J: a function that outputs a real-number and the gradient. Calling y = J(theta)[0] will return the function value at theta. """ # Initialize numgrad with zeros numgrad = np.zeros(theta.shape) ## ---------- YOUR CODE HERE -------------------------------------- # Instructions: # Implement numerical gradient checking, and return the result in numgrad. # You should write code so that numgrad[i][j] is (the numerical approximation to) the # partial derivative of J with respect to theta[i][j], evaluated at theta. # I.e., numgrad[i][j] should be the (approximately) partial derivative of J with # respect to theta[i][j]. # # Hint: You will probably want to compute the elements of numgrad one at a time. # Set Epsilon epsilon = 0.0001 # Outer for loop to check across the x-axis for i in range(theta.shape[0]): # Inner for loop to check across the y-axis for j in range(theta.shape[1]): # Copy current theta value to min theta_min = theta.copy() # Subtract min point by epsilon and store theta_min[i,j] = theta_min[i,j] - epsilon # Not sure cost_min, dW, db = J(theta_min) # Copy current theta for max theta_max = theta.copy() # Add max point by epsilon and store theta_max[i,j] = theta_max[i,j] + epsilon # ? cost_max, dW, db = J(theta_max) # Final Result for gradient k numgrad[i][j] = (cost_max - cost_min) / (2 * epsilon) ## --------------------------------------------------------------- return numgrad
5,349,932
def get_error_msg(handle): """ Get the latest and greatest DTrace error. """ txt = LIBRARY.dtrace_errmsg(handle, LIBRARY.dtrace_errno(handle)) return c_char_p(txt).value
5,349,933
def sigma_hat(frequency, sigma, epsilon=epsilon_0, quasistatic=False): """ conductivity with displacement current contribution .. math:: \hat{\sigma} = \sigma + i \omega \\varepsilon **Required** :param (float, numpy.array) frequency: frequency (Hz) :param float sigma: electrical conductivity (S/m) **Optional** :param float epsilon: dielectric permittivity. Default :math:`\\varepsilon_0` :param bool quasistatic: use the quasi-static assumption? Default: False """ if quasistatic is True: return sigma return sigma + 1j*omega(frequency)*epsilon
5,349,934
def encode_list(key, list_): # type: (str, Iterable) -> Dict[str, str] """ Converts a list into a space-separated string and puts it in a dictionary :param key: Dictionary key to store the list :param list_: A list of objects :return: A dictionary key->string or an empty dictionary """ if not list_: return {} return {key: " ".join(str(i) for i in list_)}
5,349,935
async def delete_relationship(request: web.Request): """ Remove relationships of resource. Uses the :meth:`~aiohttp_json_api.schema.BaseSchema.delete_relationship` method of the schema to update the relationship. :seealso: http://jsonapi.org/format/#crud-updating-relationships """ relation_name = request.match_info['relation'] ctx = JSONAPIContext(request) relation_field = ctx.schema.get_relationship_field(relation_name, source_parameter='URI') resource_id = request.match_info.get('id') validate_uri_resource_id(ctx.schema, resource_id) pagination = None if relation_field.relation is Relation.TO_MANY: pagination_type = relation_field.pagination if pagination_type: pagination = pagination_type(request) data = await request.json() sp = JSONPointer('') field = ctx.schema.get_relationship_field(relation_name) if field.relation is not Relation.TO_MANY: raise RuntimeError('Wrong relationship field.' 'Relation to-many is required.') await ctx.schema.pre_validate_field(field, data, sp) deserialized_data = field.deserialize(ctx.schema, data, sp) resource = await ctx.controller.fetch_resource(resource_id) old_resource, new_resource = \ await ctx.controller.remove_relationship(field, resource, deserialized_data, sp) if old_resource == new_resource: return web.HTTPNoContent() result = ctx.schema.serialize_relationship(relation_name, new_resource, pagination=pagination) return jsonapi_response(result)
5,349,936
def save_obj(obj, saved_name ): """ =============================================================== save_obj(obj, saved_name ) =============================================================== this function is used to save any python object to your hard desk Inputs: ---------- 1-obj: 2-saved_name: ['String'] name of the object Outputs: ---------- the object will be saved to the given path/current working directory with the given name Example: data={"key1":[1,2,3,5],"key2":[6,2,9,7]} save_obj(data,path+'/flow_acc_table') """ with open( saved_name + '.pkl', 'wb') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
5,349,937
def cost_efficiency(radius, height, cost): """Compute and return the cost efficiency of a steel can size. The cost efficiency is the volume of the can divided by its cost. Parameters radius: the radius of the steel can height: the height of the steel can cost: the cost of the steel can Return: the cost efficiency of the steel can """ volume = cylinder_volume(radius, height) efficiency = volume / cost return efficiency
5,349,938
def process_response(): """ Outer scope for processing the response to a request via the '/response' endpoint. Ensure all data is present, request exists in Pending table and then change case status and notify app about the response via webhook. :return: status code, message TODO set up TODO Place response data into a Response object, call the Response Handler """ print('---\nRESPONSE') for key, value in request.args.items(): print(key + ':', value, type(value)) print('---') return jsonify({'status': 200})
5,349,939
def test_point_within_dimensions_border(): """Make sure a point on the non-zero border is rejected as out of bounds""" point = np.array([100, 20]) image_dimensions = np.array([100, 100]) assert not point_within_dimensions(point, image_dimensions)
5,349,940
def cb_xmlrpc_register(args): """ Register as a pyblosxom XML-RPC plugin """ args['methods'].update({'pingback.ping': pingback}) return args
5,349,941
def try_except(method): """ A decorator method to catch Exceptions :param: - `func`: A function to call """ def wrapped(self, *args, **kwargs): try: return method(self, *args, **kwargs) except self.error as error: log_error(error, self.logger, self.error_message) if hasattr(self, 'close'): self.close() return wrapped
5,349,942
def sosfilter_double_c(signal, sos, states=None): """Second order section filter function using cffi, double precision. signal_out, states = sosfilter_c(signal_in, sos, states=None) Parameters ---------- signal : ndarray Signal array of shape (N x 0). sos : ndarray Second order section coefficients array of shape (K*6 x 0). One biquad -> 6 coefficients: ``[b00, b01, b02, a00, a01, a02, ..., b10, bK1 ... , aK2]`` states : ndarray Filter states, initial value can be None. Returns ------- signal : Filtered signal array of shape (N x 0). states : ndarray Filter states, initial value can be None. """ signal_c = ffi.new( 'char[]', np.array(signal, dtype=np.double).flatten().tostring()) sos_c = ffi.new( 'char[]', np.array(sos, dtype=np.double).flatten().tostring()) nsamp = int(len(signal)) ksos = int(sos.size/6) if isinstance(states, type(None)): states = np.zeros(ksos*2).astype(np.double) states_c = ffi.new( 'char[]', np.array(states, dtype=np.double).flatten().tostring()) _c.sosfilter_double(ffi.cast("double*", signal_c), nsamp, ffi.cast("double*", sos_c), ksos, ffi.cast("double*", states_c)) out = np.fromstring( ffi.buffer(signal_c), dtype=np.double, count=nsamp) states = np.fromstring( ffi.buffer(states_c), dtype=np.double, count=len(states)) return out, states
5,349,943
def log_to_csv(queries_info): """ Write to CSV. """ with open('file_name.csv', 'a') as f: writer = csv.writer(f) for line in queries_info: writer.writerow(line)
5,349,944
def setup_logging(name, default_path='graphy/logging.yaml', default_level=logging.INFO): """ Setup logging configuration """ path = files.get_absolute_path(default_path, from_project=True) try: with open(path, 'r') as f: config = yaml.safe_load(f.read()) logging.config.dictConfig(config) coloredlogs.install() except Exception: exc_type, exc_obj, exc_tb = sys.exc_info() file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] print(exc_type, file_name, exc_tb.tb_lineno) logging.basicConfig(level=default_level) coloredlogs.install(level=default_level) return logging.getLogger(name)
5,349,945
def test_STMM_fit(): """Test Fit of STMB""" clf = STMB(typemulticlassifier='ovr',C1=1.0, C2=1.0, maxIter=30, tolSTM=1e-4, penalty = 'l2', dual = True, tol=1e-4,loss = 'squared_hinge', maxIterSVM=100000)
5,349,946
def node_exporter_check(): """ Checks existence & health of node exporter pods """ kube = kube_api() namespaces = kube.list_namespace() ns_names = [] for nspace in namespaces.items: ns_names.append(nspace.metadata.name) result = {'category': 'observability', 'case_name': 'node_exporter_check', 'criteria': 'pass', 'details': [] } status = [] flag = False logger = logging.getLogger(__name__) if 'monitoring' in ns_names: pod_list = kube.list_namespaced_pod('monitoring', watch=False) pods = pod_list.items for pod in pods: if 'node-exporter' in pod.metadata.name: pod_stats = pod_status(logger, pod) if pod_stats['criteria'] == 'fail': pod_stats['logs'] = get_logs(kube, pod) result['criteria'] = 'fail' status.append(pod.metadata.name) status.append(pod_stats) flag = True else: for nspace in namespaces.items: pod_list = kube.list_namespaced_pod(nspace.metadata.name, watch=False) pods = pod_list.items for pod in pods: if 'node-exporter' in pod.metadata.name: pod_stats = pod_status(logger, pod) if pod_stats['criteria'] == 'fail': pod_stats['logs'] = get_logs(kube, pod) result['criteria'] = 'fail' status.append(pod.metadata.name) status.append(pod_stats) flag = True if flag is False: result['criteria'] = 'fail' result['details'].append(status) store_result(logger, result) return result
5,349,947
def create(cmd, resource_group_name=None, workspace_name=None, location=None, storage_account=None, skip_role_assignment=False, provider_sku_list=None): """ Create a new Azure Quantum workspace. """ client = cf_workspaces(cmd.cli_ctx) if not workspace_name: raise RequiredArgumentMissingError("An explicit workspace name is required for this command.") if not storage_account: raise RequiredArgumentMissingError("A quantum workspace requires a valid storage account.") if not location: raise RequiredArgumentMissingError("A location for the new quantum workspace is required.") if provider_sku_list is None: raise RequiredArgumentMissingError("A list of Azure Quantum providers and SKUs is required.") info = WorkspaceInfo(cmd, resource_group_name, workspace_name, location) if not info.resource_group: raise ResourceNotFoundError("Please run 'az quantum workspace set' first to select a default resource group.") quantum_workspace = _get_basic_quantum_workspace(location, info, storage_account) # Until the "--skip-role-assignment" parameter is deprecated, use the old non-ARM code to create a workspace without doing a role assignment if skip_role_assignment: _add_quantum_providers(cmd, quantum_workspace, provider_sku_list) poller = client.begin_create_or_update(info.resource_group, info.name, quantum_workspace, polling=False) while not poller.done(): time.sleep(POLLING_TIME_DURATION) quantum_workspace = poller.result() return quantum_workspace # ARM-template-based code to create an Azure Quantum workspace and make it a "Contributor" to the storage account template_path = os.path.join(os.path.dirname( __file__), 'templates', 'create-workspace-and-assign-role.json') with open(template_path, 'r', encoding='utf8') as template_file_fd: template = json.load(template_file_fd) _add_quantum_providers(cmd, quantum_workspace, provider_sku_list) validated_providers = [] for provider in quantum_workspace.providers: validated_providers.append({"providerId": provider.provider_id, "providerSku": provider.provider_sku}) # Set default storage account parameters in case the storage account does not exist yet storage_account_sku = DEFAULT_STORAGE_SKU storage_account_sku_tier = DEFAULT_STORAGE_SKU_TIER storage_account_kind = DEFAULT_STORAGE_KIND storage_account_location = location # Look for info on existing storage account storage_account_list = list_storage_accounts(cmd, resource_group_name) if storage_account_list: for storage_account_info in storage_account_list: if storage_account_info.name == storage_account: storage_account_sku = storage_account_info.sku.name storage_account_sku_tier = storage_account_info.sku.tier storage_account_kind = storage_account_info.kind storage_account_location = storage_account_info.location break # Validate the storage account SKU tier and kind _validate_storage_account('tier', storage_account_sku_tier, SUPPORTED_STORAGE_SKU_TIERS) _validate_storage_account('kind', storage_account_kind, SUPPORTED_STORAGE_KINDS) parameters = { 'quantumWorkspaceName': workspace_name, 'location': location, 'tags': {}, 'providers': validated_providers, 'storageAccountName': storage_account, 'storageAccountId': _get_storage_account_path(info, storage_account), 'storageAccountLocation': storage_account_location, 'storageAccountSku': storage_account_sku, 'storageAccountKind': storage_account_kind, 'storageAccountDeploymentName': "Microsoft.StorageAccount-" + time.strftime("%d-%b-%Y-%H-%M-%S", time.gmtime()) } parameters = {k: {'value': v} for k, v in parameters.items()} deployment_properties = { 'mode': 
DeploymentMode.incremental, 'template': template, 'parameters': parameters } credentials = _get_data_credentials(cmd.cli_ctx, info.subscription) arm_client = ResourceManagementClient(credentials, info.subscription) # Show the first progress indicator dot before starting ARM template deployment print('.', end='', flush=True) deployment_async_operation = arm_client.deployments.begin_create_or_update( info.resource_group, workspace_name, # Note: This is actually specifying a the deployment name, but workspace_name is used here in test_quantum_workspace.py {'properties': deployment_properties} ) # Show progress indicator dots polling_cycles = 0 while not deployment_async_operation.done(): polling_cycles += 1 if polling_cycles > MAX_POLLS_CREATE_WORKSPACE: print() raise AzureInternalError("Create quantum workspace operation timed out.") print('.', end='', flush=True) time.sleep(POLLING_TIME_DURATION) print() quantum_workspace = deployment_async_operation.result() return quantum_workspace
5,349,948
def functional_domain_min(braf_gene_descr_min, location_descriptor_braf_domain): """Create functional domain test fixture.""" params = { "status": "preserved", "name": "Serine-threonine/tyrosine-protein kinase, catalytic domain", "id": "interpro:IPR001245", "gene_descriptor": braf_gene_descr_min, "location_descriptor": location_descriptor_braf_domain } return FunctionalDomain(**params)
5,349,949
def rsa_encrypt(rsa_key, data): """ rsa_key: 密钥 登录密码加密 """ data = bytes(data, encoding="utf8") encrypt = PKCS1_v1_5.new(RSA.importKey(rsa_key)) Sencrypt = b64encode(encrypt.encrypt(data)) return Sencrypt.decode("utf-8")
5,349,950
def shuffle_and_split_data(data_frame): """ Shuffle and split the data into 2 sets: training and validation. Args: data_frame (pandas.DataFrame): the data to shuffle and split Returns: 2 numpy.ndarray objects -> (train_indices, validation_indices) Each hold the index positions for data in the pandas.DataFrame """ shuffled_indices = numpy.random.permutation(len(data_frame)) train_up_to = int(len(data_frame) * 0.7) train_indices = shuffled_indices[:train_up_to] validation_indices = shuffled_indices[train_up_to:] return train_indices, validation_indices
5,349,951
def test_disable_agent_zero_slots() -> None: """ Start a command, disable the agent it's running on. The command should then be terminated promptly. """ slots = _fetch_slots() assert len(slots) == 1 agent_id = slots[0]["agent_id"] command_id = run_zero_slot_command(sleep=60) # Wait for it to run. wait_for_command_state(command_id, "RUNNING", 30) try: with _disable_agent(agent_id): wait_for_command_state(command_id, "TERMINATED", 5) finally: # Kill the command before failing so it does not linger. command = ["det", "-m", conf.make_master_url(), "command", "kill", command_id] subprocess.check_call(command)
5,349,952
def parse_array_from_string(list_str, dtype=int): """ Create a 1D array from text in string. Args: list_str: input string holding the array elements. Array elements should be contained in brackets [] and seperated by comma. dtype: data type of the array elements. Default is "int" Returns: 1D numpy array """ list_str = list_str.lstrip().rstrip() if not (list_str.startswith('[') and list_str.endswith(']')): msg = 'list_str should start with "[" and end with "]".' raise (SyntaxError(msg)) return np.array(list_str[1:-1].split(','), dtype=dtype)
5,349,953
def download_api_coinslists_handler(bot, job):
    """
    Download the coin list from an aggregator site's API and save it to a local file.

    :param bot: the main telegram bot object
    :type bot: Bot
    :param job: job.context is the name of the aggregator site, sent from the
        job_queue.run_repeating... method
    :type job: Job
    """
    module_logger.info('Start a request to %s API', job.context)
    url = ''
    if job.context == 'coinmarketcap':
        url = COINMARKET_API_URL_COINLIST.format(CMC_API_KEY)
        fileoutputname = FILE_JSON_COINMARKET
    elif job.context == 'cryptocompare':
        url = CRYPTOCOMPARE_API_URL_COINLIST
        fileoutputname = FILE_JSON_CRYPTOCOMPARE
    response = requests.get(url)
    # extract the json from the response into a dict or list
    response_dict_list = response.json()
    if response.status_code == requests.codes.ok:
        # check whether the API response reports an error
        if (('status' in response_dict_list
                and response_dict_list['status']['error_code'] != 0)
                or ('Response' in response_dict_list
                    and response_dict_list['Response'] == 'Error')):
            error_msg = ''
            if job.context == 'coinmarketcap':
                error_msg = response_dict_list['status']['error_message']
            elif job.context == 'cryptocompare':
                error_msg = response_dict_list['Message']
            module_logger.error('%s error message: %s', job.context, error_msg)
        else:
            module_logger.info('Successfully downloaded the coins list from %s', job.context)
            with open(fileoutputname, 'w') as outfile:
                json.dump(response_dict_list, outfile)
            module_logger.info('Successfully saved it to %s', fileoutputname)
            # cache the json in memory as well
            if job.context == 'coinmarketcap':
                jsonfiles.update_cmc_json(response_dict_list)
            elif job.context == 'cryptocompare':
                jsonfiles.update_cc_json(response_dict_list)
    else:
        module_logger.error('%s API did not respond successfully', job.context)
5,349,954
def Maj(x, y, z): """ Majority function: False when majority are False Maj(x, y, z) = (x ∧ y) ⊕ (x ∧ z) ⊕ (y ∧ z) """ return (x & y) ^ (x & z) ^ (y & z)
5,349,955
def get_package_data(): """Load services and conn_states data into memory""" with open(DATA_PKL_FILE, "rb") as f: services, conn_states = pickle.load(f) return services, conn_states
5,349,956
def is_module(module): """Check if a given string is an existing module contained in the ``MODULES_FOLDER`` constant.""" if (os.path.isdir(os.path.join(MODULES_FOLDER, module)) and not module.startswith('_')): return True return False
5,349,957
def _finalize_sv(solution_file, data): """Add output files from TitanCNA calling optional solution. """ out = {"variantcaller": "titancna"} with open(solution_file) as in_handle: solution = dict(zip(in_handle.readline().strip("\r\n").split("\t"), in_handle.readline().strip("\r\n").split("\t"))) if solution.get("path"): out["purity"] = solution["purity"] out["ploidy"] = solution["ploidy"] out["cellular_prevalence"] = [x.strip() for x in solution["cellPrev"].split(",")] base = os.path.basename(solution["path"]) out["plot"] = dict([(n, solution["path"] + ext) for (n, ext) in [("rplots", ".Rplots.pdf"), ("cf", "/%s_CF.pdf" % base), ("cna", "/%s_CNA.pdf" % base), ("loh", "/%s_LOH.pdf" % base)] if os.path.exists(solution["path"] + ext)]) out["subclones"] = "%s.segs.txt" % solution["path"] out["hetsummary"] = solution_file out["vrn_file"] = to_vcf(out["subclones"], "TitanCNA", _get_header, _seg_to_vcf, data) out["lohsummary"] = loh.summary_status(out, data) return out
5,349,958
def list_dumps(dump_list, **kwargs): """Function: list_dumps Description: Lists the dumps under the current repository. Arguments: (input) dump_list -> List of database dumps (input) kwargs: raw -> True|False - Print raw data in JSON format """ dump_list = list(dump_list) if kwargs.get("raw", False): for item in dump_list: gen_libs.print_dict(item, json_fmt=True) else: print("{0:15} {1:30} {2:14} {3:11} {4:11} {5:100}". format("Status", "Start Time", "Shard Success", "Shard Fail", "Shard Total", "Database Dump Name")) for item in dump_list: print("{0:15} {1:30} {2:13} {3:11} {4:12} {5:100}". format( item["state"], item["start_time"], item["shards"]["successful"], item["shards"]["failed"], item["shards"]["total"], item["snapshot"]))
5,349,959
def test_create_jobs_returns_the_job(client): """Return the created job""" response = client.post( '%s/jobs' % BASE_URL, data=json.dumps(DATA), headers=HEADERS) answer = DATA.copy() del answer['type'] assert response.get_json() == answer
5,349,960
def test_parse_args_and_kwargs(): """Parse args and kwargs.""" docstring = """ Arguments: a (str): an argument. *args (str): args arguments. **kwargs (str): kwargs arguments. """ sections, warnings = parse(docstring) assert len(sections) == 1 expected_arguments = {"a": "an argument.", "*args": "args arguments.", "**kwargs": "kwargs arguments."} for argument in sections[0].value: assert argument.name in expected_arguments assert expected_arguments[argument.name] == argument.description assert not warnings
5,349,961
def compare_collision_diagram( path_data, gt_data, sim_data, begin=0, end=0, time_step_sim=0.0005, time_step_gt=0.01): """ Plots collision/gait diagrams. Parameters ---------- path_data: <str> Path to simulation results. sim_data: <str> Behavior from data. Options: 'walking' or 'grooming'. begin: <float> Starting time for initiating the plots. end: <float> Stoping time for finishing the plots. If 0.0, all data is plotted. time_step_sim: <float> Simulation data time step. time_step_gt: <float> Ground truth time step. """ data = {} length_data = 0 if sim_data == 'walking': title_plot = 'Gait diagram' collisions = { 'LF': [], 'LM': [], 'LH': [], 'RF': [], 'RM': [], 'RH': []} collisions_gt = { 'LF': [], 'LM': [], 'LH': [], 'RF': [], 'RM': [], 'RH': []} data_sim = read_ground_contacts(path_data) gt_file_path = os.path.join(gt_data, "ground_truth_contact.pkl") data_gt = np.load(gt_file_path, allow_pickle=True) for leg in collisions.keys(): sum_force = np.sum(np.array(data_sim[leg]), axis=0) segment_force = np.delete(sum_force, 0) collisions[leg].append(segment_force) if length_data == 0: length_data = len(segment_force) for leg in collisions_gt.keys(): sum_force = np.sum(np.array(data_gt[leg]), axis=0) segment_force = np.delete(sum_force, 0) collisions_gt[leg].append(segment_force) if length_data == 0: length_data = len(segment_force) elif sim_data == 'grooming': data = read_collision_forces(path_data) title_plot = 'Collisions diagram' collisions = { 'LAntenna': [], 'LFTibia': [], 'LFTarsus1': [], 'LFTarsus2': [], 'LFTarsus3': [], 'LFTarsus4': [], 'LFTarsus5': [], 'RFTarsus5': [], 'RFTarsus4': [], 'RFTarsus3': [], 'RFTarsus2': [], 'RFTarsus1': [], 'RFTibia': [], 'RAntenna': []} for segment1 in collisions.keys(): seg_forces = [] for segment2, force in data[segment1].items(): seg_forces.append(force) sum_force = np.sum(np.array(seg_forces), axis=0) segment_force = np.delete(sum_force, 0) collisions[segment1].append(segment_force) if length_data == 0: length_data = len(segment_force) if end == 0: end = length_data * time_step_sim steps_sim = 1 / time_step_sim start_sim = int(begin * steps_sim) stop_sim = int(end * steps_sim) steps_gt = 1 / time_step_gt start_gt = int(begin * steps_gt) stop_gt = int(end * steps_gt) fig, axs = plt.subplots(len(collisions.keys()), sharex=True, gridspec_kw={'hspace': 0}) fig.suptitle(title_plot) stance_frames = {} stance_frames_gt = {} for i, (segment, force) in enumerate(collisions.items()): time = np.arange(0, len(force[0]), 1) / steps_sim stance_plot = get_stance_periods(force[0], start_sim, stop_sim) stance_frames[segment] = [] for ind in range(0, len(stance_plot), 2): start_stance = stance_plot[ind] stop_stance = stance_plot[ind + 1] num_steps = int(stop_stance - start_stance) axs[i].fill_between(time[start_stance:stop_stance], 0, 1, facecolor='deepskyblue', alpha=0.5, transform=axs[i].get_xaxis_transform()) stance_frames[segment].extend(np.linspace(start_stance, stop_stance, num_steps, endpoint=False)) axs[i].fill_between(time[start_sim:stance_plot[0]], 0, 1, facecolor='white', alpha=0.5, transform=axs[i].get_xaxis_transform()) axs[i].fill_between(time[stance_plot[-1]:stop_sim], 0, 1, facecolor='white', alpha=0.5, transform=axs[i].get_xaxis_transform()) axs[i].set_yticks((0.5,)) axs[i].set_yticklabels((segment,)) for i, (segment, force) in enumerate(collisions_gt.items()): scale_factor = time_step_gt / time_step_sim stop_time = np.round(len(force[0]) * scale_factor) time = np.arange(0, stop_time, 1) / steps_sim time_gt = np.arange(0, len(force[0]), 1) / steps_gt 
stance_plot = get_stance_periods(force[0], start_gt, stop_gt) stance_frames_gt[segment] = [] for ind in range(0, len(stance_plot), 2): start_stance = int(np.floor(stance_plot[ind] * scale_factor)) stop_stance = int(np.ceil(stance_plot[ind + 1] * scale_factor)) num_steps = int(stop_stance - start_stance) axs[i].fill_between(time[start_stance:stop_stance], 0, 1, facecolor='y', alpha=0.5, transform=axs[i].get_xaxis_transform()) stance_frames_gt[segment].extend(np.linspace(start_stance, stop_stance, num_steps, endpoint=False)) axs[i].fill_between(time_gt[start_gt:stance_plot[0]], 0, 1, facecolor='white', alpha=0.5, transform=axs[i].get_xaxis_transform()) axs[i].fill_between(time_gt[stance_plot[-1]:stop_gt], 0, 1, facecolor='white', alpha=0.5, transform=axs[i].get_xaxis_transform()) axs[i].set_yticks((0.5,)) axs[i].set_yticklabels((segment,)) results = pd.DataFrame() tot_frames = stop_sim - start_sim for leg, frames in stance_frames.items(): tp = np.count_nonzero( np.isin( np.array(frames), np.array( stance_frames_gt[leg]))) fp = len(frames) - tp tp_count_gt = np.count_nonzero( np.isin( np.array( stance_frames_gt[leg]), np.array(frames))) fn = len(stance_frames_gt[leg]) - tp_count_gt tn = tot_frames - tp - fp - fn df_vals = pd.DataFrame([[tp / tot_frames, tn / tot_frames, fp / tot_frames, fn / tot_frames, (tp + tn) / tot_frames]], columns=['True positive', 'True negative', 'False positive', 'False negative', 'Accuracy']) df_vals['Leg'] = leg results = results.append(df_vals, ignore_index=True) #print(leg, [[key, v/tot_frames] for key, v in results[leg].items()]) axs[len(axs) - 1].set_xlabel('Time (s)') if sim_data == 'walking': gt_patch = mpatches.Patch(color='y', alpha=0.5, label='GT-Stance') sim_patch = mpatches.Patch( color='deepskyblue', alpha=0.5, label='NMF-Stance') patches = [gt_patch, sim_patch] elif sim_data == 'grooming': black_patch = mpatches.Patch(color='black', label='Collision') patches = [black_patch] axs[0].legend( handles=patches, loc='upper right', bbox_to_anchor=( 1.1, 1)) print(results) print(np.mean(results['Accuracy'])) fig, ax2 = plt.subplots() ax2.bar(results['Leg'], results['True positive'], label='True positive') ax2.bar( results['Leg'], results['True negative'], bottom=results['True positive'], label='True negative') ax2.bar( results['Leg'], results['False negative'], bottom=results['True positive'] + results['True negative'], label='False negative') ax2.bar( results['Leg'], results['False positive'], bottom=results['True positive'] + results['True negative'] + results['False negative'], label='False positive') ax2.set_xlabel('Leg') ax2.set_ylabel('Percentage') ax2.legend() plt.show()
5,349,962
def reload_county(): """ Return bird species, totals, location to map """ # receive data from drop-down menu ajax request bird = request.args.get("bird") county = request.args.get("county") # get the zoom level of the new chosen county zoomLevel = get_zoom(county) # reset session data from the ajax request session["bird_name"] = bird session["county_name"] = county session["zoom_level"] = zoomLevel # CENTER map; get_county returns long, lat tuple. long_lat = get_county(county) longitude, latitude = long_lat birding_locations = create_geoFeature(bird, county) # send all this information to website using json bird_data = { "longitude": longitude, "latitude": latitude, "mapbox_api_key": mapbox_api_key, "birding_locations": birding_locations, "bird": bird, "county": county, "zoomLevel": zoomLevel} return jsonify(bird_data)
5,349,963
def test_train_pass_3(example_timeseries, example_results, modify_config): """ Correctly run training script with plots """ mod_cfg = {'dataset': {'output': example_results['results_dir']}} with modify_config(example_timeseries['config'], mod_cfg) as cfg: runner = CliRunner() result = runner.invoke( cli, [ '-v', 'train', '--overwrite', '--plot', '--diagnostics', cfg, example_results['classify_config'], example_results['example_classify_pickle'] ] ) assert result.exit_code == 0
5,349,964
def get_conv2d_out_channels(kernel_shape, kernel_layout): """Get conv2d output channels""" kernel_shape = get_const_tuple(kernel_shape) if len(kernel_shape) == 4: idx = kernel_layout.find("O") assert idx >= 0, "Invalid conv2d kernel layout {}".format(kernel_layout) return kernel_shape[idx] if re.match(r"OIHW\d*i\d*o", kernel_layout): return kernel_shape[0] * kernel_shape[5] if re.match(r"OIHW\d*o", kernel_layout): return kernel_shape[0] * kernel_shape[4] raise ValueError("Unknown conv2d kernel layout {}".format(kernel_layout))
5,349,965
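# Hedged usage sketch for get_conv2d_out_channels() above; the shapes and layouts
# are illustrative, and it assumes TVM's get_const_tuple() passes plain Python ints
# through unchanged.
assert get_conv2d_out_channels((64, 3, 7, 7), "OIHW") == 64           # "O" axis at index 0
assert get_conv2d_out_channels((7, 7, 3, 64), "HWIO") == 64           # "O" axis at index 3
assert get_conv2d_out_channels((8, 1, 3, 3, 4, 8), "OIHW4i8o") == 64  # 8 outer * 8 inner o
assert get_conv2d_out_channels((8, 3, 3, 3, 8), "OIHW8o") == 64       # 8 outer * 8 inner o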
def bindparam(key, value=None, type_=None, unique=False, required=False, callable_=None): """Create a bind parameter clause with the given key. :param key: the key for this bind param. Will be used in the generated SQL statement for dialects that use named parameters. This value may be modified when part of a compilation operation, if other :class:`_BindParamClause` objects exist with the same key, or if its length is too long and truncation is required. :param value: Initial value for this bind param. This value may be overridden by the dictionary of parameters sent to statement compilation/execution. :param callable\_: A callable function that takes the place of "value". The function will be called at statement execution time to determine the ultimate value. Used for scenarios where the actual bind value cannot be determined at the point at which the clause construct is created, but embedded bind values are still desirable. :param type\_: A ``TypeEngine`` object that will be used to pre-process the value corresponding to this :class:`_BindParamClause` at execution time. :param unique: if True, the key name of this BindParamClause will be modified if another :class:`_BindParamClause` of the same name already has been located within the containing :class:`.ClauseElement`. :param required: a value is required at execution time. """ if isinstance(key, ColumnClause): return _BindParamClause(key.name, value, type_=key.type, callable_=callable_, unique=unique, required=required) else: return _BindParamClause(key, value, type_=type_, callable_=callable_, unique=unique, required=required)
5,349,966
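# Hedged usage sketch for the bindparam() helper above (an older SQLAlchemy internal);
# the key names and values are made up for illustration.
name_param = bindparam("username", required=True)                 # value supplied at execute time
limit_param = bindparam("max_rows", value=10, unique=True)        # key is uniquified on collision
as_of_param = bindparam("as_of", callable_=lambda: "2020-01-01")  # resolved at execution time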
def set_membership_to_true(apps, schema_editor):
    """Set membership_is_managed to true"""
    Channel = apps.get_model("channels", "Channel")
    # At the point the migration runs, set the flag on every existing channel.
    Channel.objects.all().update(membership_is_managed=True)
5,349,967
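# Hedged sketch of how a data migration like set_membership_to_true() is typically
# wired up; the app label and dependency name here are hypothetical.
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [("channels", "0005_previous_migration")]

    operations = [
        # Reverse with noop so the migration can still be unapplied.
        migrations.RunPython(set_membership_to_true, migrations.RunPython.noop),
    ]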
def create_dictionary(timestamp, original_sentence, sequence_switched, err_message, suggestion_list): """Create Dictionary Function Generates and exports a dictionary object with relevant data for website interaction to take place. """ if len(suggestion_list) != 0: err_message_str = "Possible error: " + err_message + "\n \n" new_dictionary = { "timestamp": timestamp, "original_sentence": original_sentence, "masked_sentence": sequence_switched, "err_message": err_message, "possible_corrections": suggestion_list } return new_dictionary else: return {}
5,349,968
def test_cannot_execute_shell(): """The credential should raise CredentialUnavailableError when the subprocess doesn't start""" with patch(POPEN, Mock(side_effect=OSError)): with pytest.raises(CredentialUnavailableError): AzurePowerShellCredential().get_token("scope")
5,349,969
def benefits(path): """Unemployment of Blue Collar Workers a cross-section from 1972 *number of observations* : 4877 *observation* : individuals *country* : United States A time serie containing : stateur state unemployment rate (in %) statemb state maximum benefit level state state of residence code age age in years tenure years of tenure in job lost joblost a factor with levels (slack\\\_work,position\\\_abolished,seasonal\\\_job\\\_ended,other) nwhite non-white ? school12 more than 12 years of school ? sex a factor with levels (male,female) bluecol blue collar worker ? smsa lives is smsa ? married married ? dkids has kids ? dykids has young kids (0-5 yrs) ? yrdispl year of job displacement (1982=1,..., 1991=10) rr replacement rate head is head of household ? ui applied for (and received) UI benefits ? McCall, B.P. (1995) “The impact of unemployment insurance benefit levels on recipiency”, *Journal of Business and Economic Statistics*, **13**, 189–198. Args: path: str. Path to directory which either stores file or otherwise file will be downloaded and extracted there. Filename is `benefits.csv`. Returns: Tuple of np.ndarray `x_train` with 4877 rows and 18 columns and dictionary `metadata` of column headers (feature names). """ import pandas as pd path = os.path.expanduser(path) filename = 'benefits.csv' if not os.path.exists(os.path.join(path, filename)): url = 'http://dustintran.com/data/r/Ecdat/Benefits.csv' maybe_download_and_extract(path, url, save_file_name='benefits.csv', resume=False) data = pd.read_csv(os.path.join(path, filename), index_col=0, parse_dates=True) x_train = data.values metadata = {'columns': data.columns} return x_train, metadata
5,349,970
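# Hedged usage sketch for benefits() above; the path is illustrative and the first
# call downloads the CSV from the URL hard-coded in the function.
x_train, metadata = benefits("~/data")
print(x_train.shape)                   # (4877, 18) per the docstring
print(list(metadata["columns"])[:4])   # first few variable names (see the docstring)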
def get_L_BB_b2_d_t(L_BB_b2_d, L_dashdash_b2_d_t):
    """Disaggregate the daily values L_BB_b2_d to hourly values, pro rata to L_dashdash_b2_d_t.

    Args:
        L_BB_b2_d: daily load values (one value per day, 365 entries)
        L_dashdash_b2_d_t: hourly load values used as the disaggregation weights (24 * 365 entries)

    Returns:
        ndarray of 24 * 365 hourly values; hours of a day whose daily demand is zero remain zero.
    """
    L_BB_b2_d_t = np.zeros(24 * 365)

    L_BB_b2_d = np.repeat(L_BB_b2_d, 24)
    L_dashdash_b2_d = np.repeat(get_L_dashdash_b2_d(L_dashdash_b2_d_t), 24)

    f = L_dashdash_b2_d > 0
    L_BB_b2_d_t[f] = L_BB_b2_d[f] * L_dashdash_b2_d_t[f] / L_dashdash_b2_d[f]

    return L_BB_b2_d_t
5,349,971
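# Self-contained numerical sketch of the pro-rata disaggregation performed by
# get_L_BB_b2_d_t() above, reduced to a single day of made-up numbers.
import numpy as np

daily_total = 12.0                       # daily load to be distributed
hourly_demand = np.zeros(24)
hourly_demand[6:9] = [2.0, 3.0, 1.0]     # hourly weights for that day (sum = 6)
hourly_load = np.zeros(24)
nonzero = hourly_demand > 0
hourly_load[nonzero] = daily_total * hourly_demand[nonzero] / hourly_demand.sum()
# hourly_load[6:9] == [4., 6., 2.]; hours with zero demand stay zero.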
def _get_tab_counts(business_id_filter, conversation_tab, ru_ref_filter, survey_id):
    """Gets the thread count for the current conversation tab, or, if ru_ref_filter is set,
    the counts for all tabs as well; the value for the 'current' tab is always populated.
    Calls one of two secure message endpoints depending on whether ru_ref_filter is set,
    because the get-all call is more expensive."""
    if ru_ref_filter:
        return message_controllers.get_all_conversation_type_counts(survey_id=survey_id,
                                                                     conversation_tab=conversation_tab,
                                                                     business_id=business_id_filter)

    thread_count = message_controllers.get_conversation_count(survey_id=survey_id,
                                                              business_id=business_id_filter,
                                                              conversation_tab=conversation_tab)
    return {'current': thread_count}
5,349,972
def IsInverseTime(*args):
    """Time delay is inversely adjusted, proportional to the amount of voltage outside the regulating band."""
    # Getter
    if len(args) == 0:
        return lib.RegControls_Get_IsInverseTime() != 0

    # Setter
    Value, = args
    lib.RegControls_Set_IsInverseTime(Value)
5,349,973
def create_userinfo(fname, lname, keypass): """ function to create new user """ new_userinfo = Userinfo(fname, lname, keypass) return new_userinfo
5,349,974
def get_networks() -> Dict[str, SpikingNetwork]: """Get a set of spiking networks to train.""" somatic_spike_fn = get_spike_fn(threshold=15) dendritic_nl_fn = get_default_dendritic_fn( threshold=2, sensitivity=10, gain=1 ) neuron_params = RecurrentNeuronParameters( tau_mem=10e-3, tau_syn=5e-3, backprop_gain=0.5, feedback_strength=15, somatic_spike_fn=somatic_spike_fn, dendritic_spike_fn=dendritic_nl_fn, ) parallel_params = PRCNeuronParameters( tau_mem=10e-3, tau_syn=5e-3, backprop_gain=0.05, feedback_strength=15, somatic_spike_fn=somatic_spike_fn, dend_na_fn=dendritic_nl_fn, dend_ca_fn=get_sigmoid_fn(threshold=4, sensitivity=10, gain=1), dend_nmda_fn=dendritic_nl_fn, tau_dend_na=5e-3, tau_dend_ca=40e-3, tau_dend_nmda=80e-3, ) simple_network_architecture = deepcopy(NETWORK_ARCHITECTURE) simple_network_architecture.weight_scale_by_layer = (3, 7) two_compartment_network_architecture = deepcopy(NETWORK_ARCHITECTURE) two_compartment_network_architecture.weight_scale_by_layer = (0.5, 7) parallel_network_architecture = deepcopy(NETWORK_ARCHITECTURE) parallel_network_architecture.weight_scale_by_layer = (0.02, 7) nets = { 'One compartment': SpikingNetwork( neuron_params, simple_network_architecture ), 'No BAP': TwoCompartmentSpikingNetwork( neuron_params, two_compartment_network_architecture ), 'BAP': RecurrentSpikingNetwork( neuron_params, two_compartment_network_architecture ), 'Parallel subunits, no BAP': ParallelSpikingNetwork( parallel_params, parallel_network_architecture ), 'Parallel subunits + BAP (full PRC model)': PRCSpikingNetwork( parallel_params, parallel_network_architecture ), } return nets
5,349,975
def _load_jupyter_server_extension(server_app):
    """Registers the API handler to receive HTTP requests from the frontend extension.

    Parameters
    ----------
    server_app: jupyterlab.labapp.LabApp
        JupyterLab application instance
    """
    ...
5,349,976
def test_custom_entity_marshaler(): """Test that json marshaler use custom marshaler to marshal an entity. 1. Create a sub-class of JSONMarshaler with redefined create_entity_marshaler factory. 2. Create json marshaler from the sub-class. 3. Marshal an entity. 4. Check that custom marshaler is used to marshal an entity. """ class _CustomEntityMarshaler(JSONMarshaler): create_entity_marshaler = _CustomMarshaler("Custom marshaled entity") marshaled_entity = _CustomEntityMarshaler().marshal_entity(entity=Entity()) assert marshaled_entity == "Custom marshaled entity", "Wrong entity data"
5,349,977
def plot_gain_offsets(dio_cross,dio_chan_per_coarse=8,feedtype='l',ax1=None,ax2=None,legend=True,**kwargs):
    """
    Plots the calculated gain offsets of each coarse channel along with
    the time averaged power spectra of the X and Y feeds
    """
    #Get ON-OFF ND spectra
    Idiff,Qdiff,Udiff,Vdiff,freqs = get_diff(dio_cross,feedtype,**kwargs)

    obs = Waterfall(dio_cross,max_load=150)
    tsamp = obs.header['tsamp']
    data = obs.data
    obs = None
    I,Q,U,V = get_stokes(data,feedtype)

    #Get gain offsets for each coarse channel
    coarse_G = gain_offsets(I,Q,U,V,tsamp,dio_chan_per_coarse,feedtype,**kwargs)
    coarse_freqs = convert_to_coarse(freqs,dio_chan_per_coarse)

    #Get X and Y spectra for the noise diode ON and OFF
    #If using circular feeds these correspond to LL and RR
    XX_OFF,XX_ON = foldcal(np.expand_dims(data[:,0,:],axis=1),tsamp,**kwargs)
    YY_OFF,YY_ON = foldcal(np.expand_dims(data[:,1,:],axis=1),tsamp,**kwargs)

    if ax1==None:
        plt.subplot(211)
    else:
        axG = plt.axes(ax1)
        plt.setp(axG.get_xticklabels(),visible=False)
    plt.plot(coarse_freqs,coarse_G,'ko',markersize=2)
    plt.ylabel(r'$\frac{\Delta G}{2}$',rotation=90)
    if feedtype=='l':
        plt.title('XY Gain Difference')
    if feedtype=='c':
        plt.title('LR Gain Difference')
    plt.grid(True)

    if ax2==None:
        plt.subplot(212)
    else:
        axXY = plt.axes(ax2,sharex=axG)
    if feedtype=='l':
        plt.plot(freqs,XX_OFF,'b-',label='XX')
        plt.plot(freqs,YY_OFF,'r-',label='YY')
    if feedtype=='c':
        plt.plot(freqs,XX_OFF,'b-',label='LL')
        plt.plot(freqs,YY_OFF,'r-',label='RR')
    plt.xlabel('Frequency (MHz)')
    plt.ylabel('Power (Counts)')
    if legend==True:
        plt.legend()
5,349,978
def process_files(pair_path): """ Process all protein (pdb) and ligand (sdf) files in input directory. Args pair_path dir (str): directory containing PDBBind data Returns structure_dict (dict): dictionary containing each structure, keyed by PDB code. Each PDB is a dict containing protein in Biopython format and ligand in RDKit Mol format """ structure_dict = {} pose_path = os.path.join(pair_path, 'ligand_poses') # get starting protein structure pdb_files = fi.find_files(pair_path, 'pdb') for f in tqdm(pdb_files, desc='pdb files'): prot = dt.read_any(f) structure_dict['protein'] = prot # get ligand pose structures lig_files = fi.find_files(pose_path, 'sdf') for f in tqdm(lig_files, desc='ligand files'): structure_dict[fi.get_pdb_name(f)] = get_ligand(f) return structure_dict
5,349,979
def metric_group_max(df, metric_names=None): """Find the step which achieves the highest mean value for a group of metrics.""" # Use METRIC_NAMES defined at the top as default metric_names = metric_names or METRIC_NAMES group_to_metrics = collections.defaultdict(set) for metric in metric_names.values(): group_to_metrics[metric.group].add(metric.name) group_df = pd.DataFrame() for group, metrics in group_to_metrics.items(): if not all(m in df for m in metrics): continue group_df[group] = df[metrics].mean(axis=1) # Need to replace nan with large negative value for idxmax group_max_step = group_df.fillna(-1e9).idxmax(axis=0) metric_max = pd.Series() metric_max_step = pd.Series() for group_name, max_step in group_max_step.iteritems(): for metric in group_to_metrics[group_name]: metric_max[metric] = df[metric][max_step] metric_max_step[metric] = max_step metric_max = metric_max.reindex(df.columns) metric_max_step = metric_max_step.reindex(df.columns) return metric_max, metric_max_step
5,349,980
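# Self-contained sketch of the core idea in metric_group_max() above: average the
# metrics of one group at each step, then pick the step with the best group mean.
# Column names and step indices are made up.
import pandas as pd

df = pd.DataFrame(
    {"glue_cola": [0.10, 0.40, 0.30], "glue_sst2": [0.80, 0.90, 0.85]},
    index=[100, 200, 300],              # checkpoint steps
)
group_mean = df.mean(axis=1)            # mean over the group's metrics at each step
best_step = group_mean.idxmax()         # 200
best_values = df.loc[best_step]         # per-metric values at the chosen step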
def save_output(issues, filename): """Save output to file.""" with open(filename, 'a') as output: for issue in issues: output.write(issue)
5,349,981
def get_settings_value(definitions: Definitions, setting_name: str):
    """Get the Mathics `Settings` value with name "setting_name" from definitions.
    If setting_name is not defined, return None."""
    settings_value = definitions.get_ownvalue(setting_name)
    if settings_value is None:
        return None
    return settings_value.replace.to_python(string_quotes=False)
5,349,982
def show_clusterhost(clusterhost_id): """Get clusterhost.""" data = _get_request_args() return utils.make_json_response( 200, _reformat_host(cluster_api.get_clusterhost( clusterhost_id, user=current_user, **data )) )
5,349,983
def resize3d_cubic(data_in, scale, coordinate_transformation_mode): """Tricubic 3d scaling using python""" dtype = data_in.dtype d, h, w = data_in.shape new_d, new_h, new_w = [int(round(i * s)) for i, s in zip(data_in.shape, scale)] data_out = np.ones((new_d, new_h, new_w)) def _cubic_spline_weights(t, alpha=-0.5): """create cubic spline weights in 1D""" t2 = t * t t3 = t * t * t w1 = alpha * (t3 - 2 * t2 + t) w2 = (alpha + 2) * t3 - (3 + alpha) * t2 + 1 w3 = -(alpha + 2) * t3 + (3 + 2 * alpha) * t2 - alpha * t w4 = -alpha * t3 + alpha * t2 return np.array([w1, w2, w3, w4]) indexes = np.mgrid[-1:3, -1:3, -1:3] def _get_patch(zint, yint, xint): # Get the surrounding values indices = indexes.copy() indices[0] = np.maximum(np.minimum(indexes[0] + zint, d - 1), 0) indices[1] = np.maximum(np.minimum(indexes[1] + yint, h - 1), 0) indices[2] = np.maximum(np.minimum(indexes[2] + xint, w - 1), 0) p = data_in[indices[0], indices[1], indices[2]] return p for m in range(new_d): for j in range(new_h): for k in range(new_w): in_z = get_inx(m, d, new_d, coordinate_transformation_mode) in_y = get_inx(j, h, new_h, coordinate_transformation_mode) in_x = get_inx(k, w, new_w, coordinate_transformation_mode) zint = math.floor(in_z) zfract = in_z - math.floor(in_z) yint = math.floor(in_y) yfract = in_y - math.floor(in_y) xint = math.floor(in_x) xfract = in_x - math.floor(in_x) wz = _cubic_spline_weights(zfract) wy = _cubic_spline_weights(yfract) wx = _cubic_spline_weights(xfract) p = _get_patch(zint, yint, xint) l = np.sum(p * wx, axis=-1) col = np.sum(l * wy, axis=-1) data_out[m, j, k] = np.sum(col * wz) return data_out
5,349,984
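# Standalone copy of the _cubic_spline_weights() helper used above, as a quick
# sanity check of the interpolation weights (alpha = -0.5, Catmull-Rom-like).
import numpy as np

def cubic_spline_weights(t, alpha=-0.5):
    t2, t3 = t * t, t * t * t
    w1 = alpha * (t3 - 2 * t2 + t)
    w2 = (alpha + 2) * t3 - (3 + alpha) * t2 + 1
    w3 = -(alpha + 2) * t3 + (3 + 2 * alpha) * t2 - alpha * t
    w4 = -alpha * t3 + alpha * t2
    return np.array([w1, w2, w3, w4])

print(cubic_spline_weights(0.0))        # [0. 1. 0. 0.] -- reproduces the sample exactly
print(cubic_spline_weights(1.0))        # [0. 0. 1. 0.]
print(cubic_spline_weights(0.5).sum())  # 1.0 -- the four weights always sum to one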
def seasurface_skintemp_correct(*args): """ Description: Wrapper function which by OOI default applies both of the METBK seasurface skin temperature correction algorithms (warmlayer, coolskin in coare35vn). This behavior is set by the global switches JWARMFL=1 and JCOOLFL=1. The switch construction is retained for generality. Most of the METBK L2 data products and 2 of the metadata products require the skin corrections to be applied before their values can be calculated. Warmlayer corrections dsea are added. Coolskin corrections dter and dqer are subtracted. Implemented by: 2014-09-01: Russell Desiderio. Initial code. Usage (command line spaced out for clarity): (usr, tsr, qsr, ut, dter, dqer, tkt, L, zou, zot, zoq, # coare35vn output dt_wrm, tk_pwp, dsea) = # warmlayer output seasurface_skintemp_correct (rain_rate, timestamp, lon, ztmpwat, tC_sea, wnd, zwindsp, tC_air, ztmpair, relhum, zhumair, pr_air, Rshort_down, Rlong_down, lat, zinvpbl, jcool, jwarm) where OUTPUTS (documentation from coare35vn matlab code): usr = friction veclocity that includes gustiness [m/s] tsr = temperature scaling parameter [K] qsr = specific humidity scaling parameter [g/g, I changed this from Edson code] ut = not an output of the original code dter = coolskin temperature depression [degC] dqer = coolskin humidity depression [kg/kg] tkt = coolskin thickness [m] L = Obukhov length scale [m] zou = wind roughness length [m] zot = thermal roughness length [m] zoq = moisture roughness length [m] OUTPUTS (documentation from coare35vnWarm matlab code): dt_wrm = warming across entire warmlayer [degC] tk_pwp = warmlayer thickness [m] dsea = additive warmlayer temperature correction [degC]; (this is warmlayer's key output) INPUTS: rain_rate = rainfall [mm/hr] timestamp = seconds since 01-01-1900 lon = longitude [deg] ztmpwat = depth of bulk sea temperature measurement [m] tC_sea = bulk sea surface temperature [degC] wnd = windspeed relative to current [m/s] zwindsp = height of windspeed measurement[m] tC_air = air temperature [degC] ztmpair = height of air temperature measurement [m] relhum = relative humidity [%] zhumair = height of air humidity measurement [m] pr_air = air pressure [mb] Rshort_down = downwelling shortwave irradiation [W/m^2] Rlong_down = downwelling longwave irradiation [W/m^2] lat = latitude [deg] zinvpbl = inversion height; default is 600m [m] jcool = switch to activate coolskin algorithm (hardwired to 1 = true) jwarm = switch to activate warmlayer algoritgm (hardwired to 1 = true) References: Fairall, C.W., E.F. Bradley, J.S. Godfrey, G.A. Wick, J.B. Edson, and G.S. Young (1996) Cool-skin and warm-layer effects on sea surface temperature. JGR, Vol. 101, No. C1, 1295-1308, 1996. OOI (2014). Data Product Specification for L2 BULKFLX Data Products. Document Control Number 1341-00370. https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI >> Controlled >> 1000 System Level >> 1341-00370_Data_Product_Spec_BULKFLX_OOI.pdf) OOI (2014). 1341-00370_BULKFLX Artifacts. https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI >> REFERENCE >> Data Product Specification Artifacts >> 1341-00370_BULKFLX (Original matlab code). Notes: (1) the jwarm switch selects whether or not the warmlayer code is run. the jcool 'switch' is itself a variable within the (original) coare35vn code; it was used as a multiplicative factor when calculating coolskin corrections, so that when jcool=0, the corrections are set to 0. 
(2) for OOI jwarm and jcool are always 1, because all of the OOI sea temperature measurements are bulk, not skin, measurements. (3) in the more general case, jwarm = jcool always, because: (a) jcool = 1 indicates that the input sea temperature values are bulk measurements, not surface skin measurements made with an infrared thermometer. in this bulk measurement case, both coolskin and warmlayer corrections to the bulk temperature are required to model the skin temperature (jcool = jwarm = 1). (b) jcool = 0 indicates that the input sea temperature values are surface skin temperatures directly measured with an infrared thermometer, and therefore both the coolskin and warmlayer corrections are not to be applied (jcool = jwarm = 0). (4) however, both switches are retained for generality in case this open source code is appropriated and adapted. (plus, the DPS specified archiving the jwarm and jcool switches as metadata). (5) the OOI cyberinfrastructure model originally required that each data product be specifically calculated by one function. This is the main reason that the wrapper function construct is used. In addition, I've chosen to explicitly write out its output tuple arguments for each data product call, so that the dependence of the various data products on these tuple arguments is obvious (underscores are used as placeholders for those arguments not used in any particular function call). In particular, this construct specifically identifies when coolskin and warmlayer temperature corrections have been applied to various variables in the original code. (For example - the latent heat of vaporization for water depends on water temperature, but only the warmlayer correction is used calculate it). """ jwarm = args[-1] # jwarm (and jcool) are scalars if jwarm: (dt_wrm, tk_pwp, dsea) = warmlayer(*args[0:-1]) # does not pass jwarm else: # the tk_pwp parameter is often used as a divisor in warmlayer calculations to # compare the warmlayer depth with the depth of the bulk temperature sensor. # when the warmlayer code is not run, the desired results will be obtained if # dt_warm and dsea are set to 0 where tk_pwp is nonzero so that a divide by # zero error does not result. the value chosen is the default value specified # in the warmlayer code itself. (dt_wrm, tk_pwp, dsea) = (0.0, 19.0, 0.0) # construct tuple containing coolskin input arguments; # add the warmlayer temperature correction to the msrd bulk sea temp. coolskin_args = (args[4]+dsea,) + args[5:-1] # does not pass jwarm # append results of warmlayer calculation to output, # as is also done in original coare35vn warmlayer matlab code. return coare35vn(*coolskin_args) + (dt_wrm, tk_pwp, dsea)
5,349,985
def run_experiment_here( experiment_function, variant=None, exp_id=0, seed=0, use_gpu=True, gpu_id=0, # Logger params: exp_name="default", snapshot_mode='last', snapshot_gap=1, git_infos=None, script_name=None, trial_dir_suffix=None, randomize_seed=False, **setup_logger_kwargs ): """ Run an experiment locally without any serialization. :param experiment_function: Function. `variant` will be passed in as its only argument. :param exp_name: Experiment prefix for the save file. :param variant: Dictionary passed in to `experiment_function`. :param exp_id: Experiment ID. Should be unique across all experiments. Note that one experiment may correspond to multiple seeds,. :param seed: Seed used for this experiment. :param use_gpu: Run with GPU. By default False. :param script_name: Name of the running script :param log_dir: If set, set the log directory to this. Otherwise, the directory will be auto-generated based on the exp_name. :return: """ if variant is None: variant = {} variant['exp_id'] = str(exp_id) if randomize_seed or (seed is None and 'seed' not in variant): seed = random.randint(0, 100000) variant['seed'] = seed reset_execution_environment() actual_log_dir = setup_logger( exp_name=exp_name, variant=variant, exp_id=exp_id, seed=seed, snapshot_mode=snapshot_mode, snapshot_gap=snapshot_gap, git_infos=git_infos, script_name=script_name, trial_dir_suffix=trial_dir_suffix, **setup_logger_kwargs ) set_seed(seed) os.environ['gpu_id'] = str(gpu_id) run_experiment_here_kwargs = dict( variant=variant, exp_id=exp_id, seed=seed, use_gpu=use_gpu, exp_name=exp_name, snapshot_mode=snapshot_mode, snapshot_gap=snapshot_gap, git_infos=git_infos, script_name=script_name, **setup_logger_kwargs ) save_experiment_data( dict( run_experiment_here_kwargs=run_experiment_here_kwargs ), actual_log_dir ) return experiment_function(variant)
5,349,986
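# Hedged usage sketch for run_experiment_here() above; the experiment body and
# hyperparameters are made up, and the call relies on the surrounding utilities
# (setup_logger, set_seed, save_experiment_data, ...) being importable.
def toy_experiment(variant):
    print("training with lr =", variant["lr"], "and seed =", variant["seed"])

run_experiment_here(
    toy_experiment,
    variant={"lr": 3e-4},
    exp_name="toy-run",
    use_gpu=False,
    snapshot_mode="last",
)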
def extract_character_pairs(letter_case, reverse_letter_case): """ Extract character pairs. Check that two unicode value are also a mapping value of each other. :param letter_case: case mappings dictionary which contains the conversions. :param reverse_letter_case: Comparable case mapping table which contains the return direction of the conversion. :return: A table with character pairs. """ character_pairs = [] for letter_id in sorted(letter_case.keys()): if is_bidirectional_conversion(letter_id, letter_case, reverse_letter_case): mapped_value = letter_case[letter_id] character_pairs.extend([letter_id, ord(mapped_value)]) # Remove character pairs from case mapping tables del letter_case[letter_id] del reverse_letter_case[ord(mapped_value)] return character_pairs
5,349,987
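# Hedged usage sketch for extract_character_pairs() above with toy tables. It
# assumes is_bidirectional_conversion() (not shown here) checks that the mapping
# round-trips; the asymmetric "c" -> "K" entry is contrived for illustration.
lower_to_upper = {ord("a"): "A", ord("b"): "B", ord("c"): "K"}
upper_to_lower = {ord("A"): "a", ord("B"): "b", ord("K"): "k"}   # "K" maps back to "k", not "c"

pairs = extract_character_pairs(lower_to_upper, upper_to_lower)
# Expected: pairs == [97, 65, 98, 66]; the non-reciprocal "c" entry stays in both tables.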
def test_to_graph_should_return_link_to_spatial_coverage_with_location_triple() -> None: """It returns a spatial coverage graph isomorphic to spec.""" dataset = Dataset() dataset.identifier = "http://example.com/datasets/1" # Create location: location = Location() location.identifier = "http://example.com/locations/1" location.centroid = "POINT(4.88412 52.37509)" # Add location to dataset: dataset.spatial_coverage = location src = """ @prefix dct: <http://purl.org/dc/terms/> . @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> . @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> . @prefix dcat: <http://www.w3.org/ns/dcat#> . @prefix geosparql: <http://www.opengis.net/ont/geosparql#> . <http://example.com/datasets/1> a dcat:Dataset ; dct:spatial <http://example.com/locations/1> ; . <http://example.com/locations/1> a dct:Location ; dcat:centroid "POINT(4.88412 52.37509)"^^geosparql:asWKT ; . """ g1 = Graph().parse(data=dataset.to_rdf(), format="turtle") g2 = Graph().parse(data=src, format="turtle") _isomorphic = isomorphic(g1, g2) if not _isomorphic: _dump_diff(g1, g2) pass assert _isomorphic
5,349,988
def after_timestep(simulation, is_steady, force_steady=False): """ Move u -> up, up -> upp and prepare for the next time step """ # Stopping criteria for steady state simulations vel_diff = None if is_steady: vel_diff = 0 for d in range(simulation.ndim): u_new = simulation.data['u%d' % d] up = simulation.data['up%d' % d] diff = abs(u_new.vector().get_local() - up.vector().get_local()).max() vel_diff = max(vel_diff, diff) shift_fields(simulation, ['u%d', 'up%d', 'upp%d']) shift_fields(simulation, ['u_conv%d', 'up_conv%d', 'upp_conv%d']) if force_steady: simulation.data['time_coeffs'].assign(dolfin.Constant([0.0, 0.0, 0.0])) else: # Change time coefficient to second order simulation.data['time_coeffs'].assign(dolfin.Constant([3 / 2, -2, 1 / 2])) # Extrapolate the convecting velocity to the next step update_convection(simulation, force_steady=force_steady) return vel_diff
5,349,989
def get_summary_indices(df, on='NOSC'): """ Get the summary stats for the indices: median, mean, std, weighted mean and weighted std """ samples = get_list_samples(df) samples.append(on) t = df[samples] t = t.melt(id_vars=[on], var_name='SampleID', value_name='NormIntensity') t = t[t['NormIntensity'] > 0].reset_index(drop=True) t_agg = t.groupby(['SampleID']).agg({on: ['median', 'mean', 'std']}) t_agg.columns = t_agg.columns.map('_'.join) t_agg = t_agg.reset_index() t_agg[[on + '_w_mean', on + '_w_std']] = '' for sample in t['SampleID'].unique(): # print(sample) temp = t[t['SampleID'] == sample] wdf = DescrStatsW(temp[on], weights=temp['NormIntensity']) t_agg.loc[t_agg['SampleID'] == sample, on + '_w_mean'] = wdf.mean t_agg.loc[t_agg['SampleID'] == sample, on + '_w_std'] = wdf.std return t_agg
5,349,990
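# Standalone sketch of the intensity-weighted statistics computed above, using
# statsmodels' DescrStatsW with made-up NOSC values and intensities.
import numpy as np
from statsmodels.stats.weightstats import DescrStatsW

nosc = np.array([-0.5, 0.0, 0.5])
intensity = np.array([1.0, 1.0, 2.0])       # NormIntensity acts as the weight
wdf = DescrStatsW(nosc, weights=intensity)
print(wdf.mean)   # 0.125 -- intensity-weighted mean
print(wdf.std)    # intensity-weighted standard deviation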
def view_evidence(evidence_id: int): """View a single Evidence model.""" evidence = manager.get_evidence_by_id_or_404(evidence_id) return render_template( 'evidence/evidence.html', evidence=evidence, manager=manager, )
5,349,991
def get_filenames(split, mode, data_dir): """Returns a list of filenames.""" if not split: data_dir = os.path.join(data_dir, 'cifar-10-batches-bin') assert os.path.exists(data_dir), ( 'Run cifar10_download_and_extract.py first to download and extract the ' 'CIFAR-10 data.') if split: if mode == 'train': return [ os.path.join(data_dir, 'train_batch_%d.bin' % i) for i in range(1, _NUM_DATA_FILES + 1)] elif mode == 'valid': return [os.path.join(data_dir, 'valid_batch.bin')] else: return [os.path.join(data_dir, 'test_batch.bin')] else: if mode == 'train': return [ os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in range(1, _NUM_DATA_FILES + 1) ] else: return [os.path.join(data_dir, 'test_batch.bin')]
5,349,992
def rigidBlades(blds, hub=None, r_O=[0,0,0]): """ return a rigid body for the three blades All bodies should be in a similar frame """ blades = blds[0].toRigidBody() for B in blds[1:]: B_rigid = B.toRigidBody() blades = blades.combine(B_rigid, r_O=r_O) blades.name='blades' return blades
5,349,993
def mea_slow(posterior_matrix, shortest_ref_per_event, return_all=False): """Computes the maximum expected accuracy alignment along a reference with given events and probabilities. Computes a very slow but thorough search through the matrix :param posterior_matrix: matrix of posterior probabilities with reference along x axis and events along y :param shortest_ref_per_event: shortest ref position per event :param return_all: return all forward edges """ ref_len = len(posterior_matrix[0]) events_len = len(posterior_matrix) initialize = True forward_edges = list() new_edges = list() # step through all events for event_index in range(events_len): max_prob = 0 if initialize: ref_index = 0 while ref_index < ref_len: # intitialize forward edges with first event alignments # if type(posterior_matrix[ref_index][event_index]) is not int: posterior = posterior_matrix[event_index][ref_index] event_data = [ref_index, event_index, posterior, posterior, None] if 0 < posterior >= max_prob: # print("True", posterior, max_prob) new_edges.append(event_data) max_prob = posterior ref_index += 1 # print("INITIALIZE", new_edges, max_prob) if len(new_edges) != 0: forward_edges = new_edges new_edges = list() initialize = False else: # print(forward_edges) ref_index = 0 top_edge = [] while ref_index < ref_len: posterior = posterior_matrix[event_index][ref_index] if posterior >= max_prob: # no possible connecting edges and is needed for other other events create a new one if ref_index < shortest_ref_per_event[event_index]: top_edge.append([ref_index, event_index, posterior, posterior, None]) max_prob = posterior ref_index += 1 # add top edge if needed if top_edge: new_edges.append(top_edge[-1]) ref_index = 0 while ref_index < ref_len: inxs = [] probs = [] posterior = posterior_matrix[event_index][ref_index] for j, forward_edge in enumerate(forward_edges): if forward_edge[0] < ref_index: # track which probabilities with prev edge inxs.append(j) probs.append(posterior + forward_edge[3]) # if needed, keep edges aligned to ref positions previous than the current ref position elif forward_edge[0] == ref_index: # stay at reference position # add probability of event if we want to promote sideways movement inxs.append(j) probs.append(forward_edge[3]) # add new edge inxs = inxs[::-1] probs = probs[::-1] if len(probs) != 0: if max(probs) > max_prob: connecting_edge = forward_edges[inxs[int(np.argmax(probs))]] new_edges.append([ref_index, event_index, posterior, max(probs), connecting_edge]) max_prob = max(probs) else: if forward_edges[0][0] > ref_index and posterior > max_prob: new_edges.append([ref_index, event_index, posterior, posterior, None]) max_prob = posterior ref_index += 1 # print("END_NEW_EDGES", new_edges) forward_edges = new_edges new_edges = list() # grab and return the highest probability edge if return_all: return forward_edges else: highest_prob = 0 best_forward_edge = 0 for x in forward_edges: if x[3] > highest_prob: highest_prob = x[3] best_forward_edge = x return best_forward_edge
5,349,994
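# Hedged usage sketch for mea_slow() above with a tiny made-up alignment:
# 3 events (rows) by 4 reference positions (columns) of posterior probabilities.
import numpy as np

posteriors = np.array([
    [0.7, 0.1, 0.1, 0.1],
    [0.1, 0.6, 0.2, 0.1],
    [0.1, 0.1, 0.1, 0.7],
])
shortest_ref = [0, 0, 0]                 # no per-event lower bound on the reference index
best_edge = mea_slow(posteriors, shortest_ref)
# best_edge == [ref_index, event_index, posterior, cumulative_probability, previous_edge]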
def predict(cart_tree, feature_set, data_set): """Predict the quality.""" feature_dict = {} for index, feature in enumerate(feature_set): feature_dict[feature] = index results = [] for element in data_set: # Append a tuple. results.append((trace(cart_tree, feature_dict, element), element[-1])) return results
5,349,995
def _generate_residue_name(residue, smiles): """Generates residue name for a particular residue which corresponds to a particular smiles pattern. Where possible (i.e for amino acids and ions) a standard residue name will be returned, otherwise a random name will be used. Parameters ---------- residue: mdtraj.core.topology.Residue The residue to assign the name to. smiles: str The SMILES pattern to generate a resiude name for. """ from mdtraj.core import residue_names from openff.toolkit.topology import Molecule # Define the set of residue names which should be discarded # if randomly generated as they have a reserved meaning. # noinspection PyProtectedMember forbidden_residue_names = [ *residue_names._AMINO_ACID_CODES, *residue_names._SOLVENT_TYPES, *residue_names._WATER_RESIDUES, "ADE", "CYT", "CYX", "DAD", "DGU", "FOR", "GUA", "HID", "HIE", "HIH", "HSD", "HSH", "HSP", "NMA", "THY", "URA", ] amino_residue_mappings = { "C[C@H](N)C(=O)O": "ALA", "N=C(N)NCCC[C@H](N)C(=O)O": "ARG", "NC(=O)C[C@H](N)C(=O)O": "ASN", "N[C@@H](CC(=O)O)C(=O)O": "ASP", "N[C@@H](CS)C(=O)O": "CYS", "N[C@@H](CCC(=O)O)C(=O)O": "GLU", "NC(=O)CC[C@H](N)C(=O)O": "GLN", "NCC(=O)O": "GLY", "N[C@@H](Cc1c[nH]cn1)C(=O)O": "HIS", "CC[C@H](C)[C@H](N)C(=O)O": "ILE", "CC(C)C[C@H](N)C(=O)O": "LEU", "NCCCC[C@H](N)C(=O)O": "LYS", "CSCC[C@H](N)C(=O)O": "MET", "N[C@@H](Cc1ccccc1)C(=O)O": "PHE", "O=C(O)[C@@H]1CCCN1": "PRO", "N[C@@H](CO)C(=O)O": "SER", "C[C@@H](O)[C@H](N)C(=O)O": "THR", "N[C@@H](Cc1c[nH]c2ccccc12)C(=O)O": "TRP", "N[C@@H](Cc1ccc(O)cc1)C(=O)O": "TYR", "CC(C)[C@H](N)C(=O)O": "VAL", } standardized_smiles = Component(smiles=smiles).smiles # Check for amino acids. if standardized_smiles in amino_residue_mappings: residue.name = amino_residue_mappings[standardized_smiles] return # Check for water if standardized_smiles == "O": residue.name = "HOH" # Re-assign the water atom names. These need to be set to get # correct CONECT statements. h_counter = 1 for atom in residue.atoms: if atom.element.symbol == "O": atom.name = "O1" else: atom.name = f"H{h_counter}" h_counter += 1 return # Check for ions openff_molecule = Molecule.from_smiles(smiles, allow_undefined_stereo=True) if openff_molecule.n_atoms == 1: residue.name = _ion_residue_name(openff_molecule) residue.atom(0).name = residue.name return # Randomly generate a name random_residue_name = "".join( [random.choice(string.ascii_uppercase) for _ in range(3)] ) while random_residue_name in forbidden_residue_names: # Re-choose the residue name until we find a safe one. random_residue_name = "".join( [random.choice(string.ascii_uppercase) for _ in range(3)] ) residue.name = random_residue_name # Assign unique atom names. element_counter = defaultdict(int) for atom in residue.atoms: atom.name = f"{atom.element.symbol}{element_counter[atom.element.symbol] + 1}" element_counter[atom.element.symbol] += 1
5,349,996
def test_incorporate_getitem_through_switch(tag): """ test_incorporate_getitem_through_switch """ fns = FnDict() scalar_gt = Primitive('scalar_gt') @fns def before(x, y): def f1(x, y): return x, y def f2(x, y): return y, x return tuple_getitem( switch(scalar_gt(x, 0), f1, f2)(x, y), 0) @fns def after(x, y): def f1(x, y): return x def f2(x, y): return y return switch(scalar_gt(x, 0), f1, f2)(x, y) return fns[tag]
5,349,997
def response_json(status, message, response):
    """
    Helper method that builds the response payload returned by the APIs

    :param status: status of the API call, either true or false
    :param message: user-friendly message
    :param response: data returned by the API
    :return: dict with status, message and response keys
    """
    data = {
        "status": status,
        "message": message,
        "response": response,
    }
    return data
5,349,998
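# Example payload from response_json() above (values are illustrative):
payload = response_json(True, "user created", {"id": 42})
# {'status': True, 'message': 'user created', 'response': {'id': 42}}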
def settings(request):
    """Template context processor that exposes the Bootstrap 3 theme configuration
    (module defaults merged with any per-session overrides) as 'bs3_conf'."""
    from . import conf
    conf = dict(vars(conf))
    # conf.update(ThemeSite.objects.get_theme_conf(request=request, fail=False))
    data = request.session.get('cms_bs3_theme_conf', {})
    conf.update(data)
    return {'bs3_conf': conf}
5,349,999