Columns: content (string, length 0–894k), type (string, 2 classes)
import os import pathlib import urllib import bs4 from .subsearcher import HTMLSubSearcher, SubInfo class SubHDSubSearcher(HTMLSubSearcher): """SubHD 字幕搜索器(https://subhd.tv)""" SUPPORT_LANGUAGES = ['zh_chs', 'zh_cht', 'en', 'zh_en'] SUPPORT_EXTS = ['ass', 'srt'] API_URL = 'https://subhd.tv/search/' API_SUBTITLE_DOWNLOAD = '/ajax/down_ajax' API_SUBTITLE_PREVIEW = '/ajax/file_ajax' _cache = {} shortname = 'subhd' def __init__(self, subfinder, api_urls=None): super(SubHDSubSearcher, self).__init__(subfinder, api_urls=api_urls) self.API_SUBTITLE_DOWNLOAD = self.api_urls.get( 'subhd_api_subtitle_download', self.__class__.API_SUBTITLE_DOWNLOAD ) self.API_SUBTITLE_PREVIEW = self.api_urls.get('subhd_api_subtitle_preview', self.__class__.API_SUBTITLE_PREVIEW) def _parse_search_results_html(self, doc): """parse search result html""" soup = bs4.BeautifulSoup(doc, 'lxml') subinfo_list = [] div_list = soup.select('div.mb-4') if not div_list: return subinfo_list for div in div_list: subinfo = SubInfo() div_title = div.find('div', class_='f12 pt-1') if not div_title: break a = div_title.a # 字幕标题 subinfo['title'] = a.get('title').strip() # 链接 subinfo['link'] = a.get('href').strip() div_format = div_title.find_next_siblings('div', limit=1) if not div_format: break div_format = div_format[0] # 语言 format_str = ' '.join(div_format.strings) for l1, l2 in self.LANGUAGES_MAP.items(): if l1 in format_str: subinfo['languages'].append(l2) # 格式 for ext in self.SUPPORT_EXTS: if ext in format_str or ext.upper() in format_str: subinfo['exts'].append(ext) # 下载次数 div_download = div_format.find_next_siblings('div', class_='pt-3') if not div_download: break div_download = div_download[0] fa_download = div_download.find('i', class_='fa-download') dl_str = fa_download.next_sibling dl_str = dl_str.replace('次', '') subinfo['download_count'] = int(dl_str) subinfo_list.append(subinfo) return subinfo_list def _get_subinfo_list(self, keyword): """return subinfo_list of keyword""" # searching subtitles url = self.API_URL if not url.endswith('/'): url += '/' url += urllib.parse.quote(keyword) res = self.session.get(url) doc = res.text self.referer = res.url subinfo_list = self._parse_search_results_html(doc) for subinfo in subinfo_list: subinfo['link'] = self._join_url(res.url, subinfo['link']) return subinfo_list def _visit_detailpage(self, detailpage_link): download_link = '' res = self.session.get(detailpage_link, headers={'Referer': self.referer}) if not res.ok: return download_link doc = res.text self.referer = res.url soup = bs4.BeautifulSoup(doc, 'lxml') button_download = soup.find('button', id=True, sid=True) if not button_download: return download_link api_subtitle_url = self._join_url(self.referer, self.API_SUBTITLE_DOWNLOAD) params = { 'sub_id': button_download.get('sid'), 'dtoken1': button_download.get('dtoken1'), } res = self.session.post(api_subtitle_url, json=params) if not res.ok: return download_link data = res.json() if data['success']: download_link = data['url'] else: self.subfinder.logger.info('遇到验证码, 尝试通过字幕预览下载, 如果失败请尝试手动下载: {}'.format(detailpage_link)) return download_link def _visit_downloadpage(self, downloadpage_link): pass def _try_preview_subs(self, detailpage_link): subs = [] root = os.path.dirname(self.videofile) api_url = self._join_url(detailpage_link, self.API_SUBTITLE_PREVIEW) res = self.session.get(detailpage_link, headers={'Referer': self.referer}) if not res.ok: return subs doc = res.text self.referer = res.url soup = bs4.BeautifulSoup(doc, 'lxml') a_list = 
soup.select('a[data-target="#fileModal"][data-sid]') if not a_list: return subs files = [] for a in a_list: s = a.string.strip() if s == '预览': sid = a.get('data-sid') fname = a.get('data-fname') ext = pathlib.PurePath(fname).suffix ext = ext[1:] if ext in self.exts: files.append((sid, fname)) for sid, fname in files: params = {'dasid': sid, 'dafname': fname} resp = self.session.post(api_url, data=params) if not resp.ok: continue data = resp.json() if not data['success']: continue filedata = data['filedata'] origin_file = os.path.basename(fname) subname = self._gen_subname(origin_file, self.videofile) subname = os.path.join(root, subname) with open(subname, 'w') as fp: fp.write(filedata) subs.append(subname) return subs def _download_subtitle(self, subinfo): subtitle_download_link = self._visit_detailpage(subinfo['link']) self._debug('subtitle_download_link: {}'.format(subtitle_download_link)) subs = None if not subtitle_download_link: subs = self._try_preview_subs(subinfo['link']) else: filepath = self._download_subs(subtitle_download_link, subinfo['title']) self._debug('filepath: {}'.format(filepath)) subs = self._extract(filepath) self._debug('subs: {}'.format(subs)) return subs
python
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-

import pickle


class MyContainer(object):

    def __init__(self, data):
        self._data = data

    def get_data(self):
        return self._data


d1 = MyContainer([2, 5, 4, 3, [12, 3, 5], 32, {'a': 12, 'b': 43}])

with open('/tmp/pickle_data.dat', "wb") as f:
    p = pickle.Pickler(f, 2)
    p.dump(d1)
python
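A minimal companion sketch, not part of the original row: reading the pickled container back from the file written above. It assumes the file exists and that MyContainer is defined or importable in the process doing the unpickling.

import pickle

with open('/tmp/pickle_data.dat', 'rb') as f:
    restored = pickle.load(f)  # requires MyContainer to be resolvable by name

print(restored.get_data())  # [2, 5, 4, 3, [12, 3, 5], 32, {'a': 12, 'b': 43}]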
import os import urllib import elasticsearch import elasticsearch_dsl import es2json.helperscripts as helperscripts class ESGenerator: """ Main generator Object where other Generators inherit from """ def __init__(self, host='localhost', port=9200, es=None, index=None, type_=None, id_=None, body=None, source=True, excludes=None, includes=None, headless=False, chunksize=1000, timeout=10, verbose=True, slice_=None): """ Construct a new ESGenerator Object. :param host: Elasticsearch host to use, default is localhost :param port: Elasticsearch port to use, default is 9200 :param index: Elasticsearch Index to use, optional, if no parameter given, ESGenerator uses ALL the indices :param es: Don't use the host/port/timeout setting, use your own elasticsearch.Elasticsearch() Object :param type_: Elasticsearch doc_type to use, optional, deprecated after Elasticsearch>=7.0.0 :param body: Query body to use for Elasticsearch, optional :param source: Include the source field in your record, default is True :param excludes: don't include the fields defined by this parameter, optional, must be python list() :param includes: only include the fields defined by this parameter, optional, must be python list() :param headless: don't include the metafields, only the data in the _source field, default is False :param chunksize: pagesize to used, default is 1000 :param timeout: Elasticsearch timeout parameter, default is 10 (seconds) :param verbose: print out progress information on /dev/stderr, default is True, optional :param slice_: only return records defined by a python slice() object free earworm when working with python slices: https://youtu.be/Nlnoa67MUJU """ if es: self.es = es else: if "://" in host: # we don't want the hostname to start with the protocoll host = urllib.parse.urlparse(host).hostname self.es = elasticsearch_dsl.connections.create_connection( host=host, port=port, timeout=timeout, max_retries=10, retry_on_timeout=True, http_compress=True) self.id_ = id_ self.source = source self.chunksize = chunksize self.headless = headless self.index = index self.type_ = type_ self.source_excludes = excludes self.source_includes = includes self.body = body self.verbose = verbose self.slice_ = slice_ def return_doc(self, hit): """ prints out the elasticsearch record defined by user input also rewrites the metadata fields back to NonPythonic Elasticsearch Standard see elasticsearch_dsl.utils.py::ObjectBase(AttrDict)__init__.py :param hit: The hit returned from the elasticsearch_dsl-call, is always """ meta = hit.meta.to_dict() if self.headless and not self.source: return {} if self.headless: return hit.to_dict() else: # collect metadata fields and convert to fields # starting with underscore ("_") for key in elasticsearch_dsl.utils.META_FIELDS: if key in meta: meta["_{}".format(key)] = meta.pop(key) if "doc_type" in meta: meta["_type"] = meta.pop("doc_type") if self.source: meta["_source"] = hit.to_dict() else: meta["_source"] = {} # @BH: necessarry? 
return meta def __enter__(self): """ function needed for with-statement __enter__ only returns the instanced object """ return self def __exit__(self, doc_, value, traceback): """ function needed for with-statement since we don't need to do any cleanup, this function does nothing """ pass def generator(self): """ main generator function which harvests from the Elasticsearch-Cluster after all init and argument stuff is done """ if self.id_: s = elasticsearch_dsl.Document.get(using=self.es, index=self.index, id=self.id_, _source_excludes=self.source_excludes, _source_includes=self.source_includes, _source=self.source) yield self.return_doc(s) return s = elasticsearch_dsl.Search(using=self.es, index=self.index, doc_type=self.type_).source(excludes=self.source_excludes, includes=self.source_includes) if self.body: s = s.update_from_dict(self.body) if self.verbose: hits_total = s.count() if self.slice_: hits = s[self.slice_].execute() else: hits = s.params(scroll='12h', size=self.chunksize).scan() # in scroll context, size = pagesize, still all records will be returned for n, hit in enumerate(hits): yield self.return_doc(hit) if self.verbose and ((n+1) % self.chunksize == 0 or n+1 == hits_total): helperscripts.eprint("{}/{}".format(n+1, hits_total)) class IDFile(ESGenerator): """ wrapper for esgenerator() to submit a list of ids or a file with ids to reduce the searchwindow on """ def __init__(self, idfile, missing_behaviour='print', **kwargs): """ Creates a new IDFile Object :param idfile: the path of the file containing the IDs or an iterable containing the IDs :param missing_behaviour: What should we do with missing IDs? 'print' or 'yield' an dict containing the ID """ super().__init__(**kwargs) self.idfile = idfile # string containing the path to the idfile, or an iterable containing all the IDs self.ids = [] # an iterable containing all the IDs from idfile, going to be reduced during runtime self.missing_behaviour = missing_behaviour # what to do with missing records? print or yield an dict containing the ID? 
default is print self.read_file() def read_file(self): """ determining weather self.idfile is an iterable or a file, harvests the IDs out of it and saves them in a set (for de-duplication) """ ids_set = set() if isinstance(self.idfile, str) and helperscripts.isfile(self.idfile): with open(self.idfile, "r") as inp: for ppn in inp: ids_set.add(ppn.rstrip()) elif helperscripts.isiter(self.idfile) and not isinstance(self.idfile, str) and not helperscripts.isfile(self.idfile): for ppn in self.idfile: ids_set.add(ppn.rstrip()) else: raise AttributeError self.iterable = list(ids_set) self.ids = list(ids_set) def write_file(self, missing): """ writing of idfile for the consume generator, we instance this here to be used in generator() function, even if we don't use it in this parent class at this point we just like to error-print every missing ids """ for item in missing: if self.missing_behaviour == 'print': helperscripts.eprint("ID {} not found".format(item)) elif self.missing_behaviour == 'yield': yield {"_id": item, 'found': False} def generator(self): """ main generator function for IDFile and IDFileConsume searching with an set of IDs can take quite long time better would be to reduce the set of documents to a pure idlist, this is quite fast over mget often, its needed to do it with a search, therefore both ways work """ missing = [] # an iterable containing missing ids while len(self.ids) > 0: if self.body: ms = elasticsearch_dsl.MultiSearch(using=self.es, index=self.index, doc_type=self.type_) # setting up MultiSearch this_iter_ids = self.ids[:self.chunksize] # an ID List per iteration, so we can check if all the IDs of this chunksize are found at the end. for _id in this_iter_ids: # add a search per ID ms = ms.add(elasticsearch_dsl.Search().source(excludes=self.source_excludes, includes=self.source_includes).from_dict(self.body).query("match", _id=_id)) responses = ms.execute() for response in responses: for hit in response: _id = hit.meta.to_dict()["id"] yield self.return_doc(hit) del self.ids[self.ids.index(_id)] del this_iter_ids[this_iter_ids.index(_id)] for _id in this_iter_ids: """ unfortunately MultiSearch doesn't throw an exception for non-Found-IDs, so we have manually check for missing ids so we again iterate over the helper_list with the IDs per chunk size (simply doing self.dis[:self.chunksize] would give us a new set) and we put all the IDs who are still in there in our missing list and delete them from self.ids and this_iter_ids """ missing.append(_id) del self.ids[self.ids.index(_id)] del this_iter_ids[this_iter_ids.index(_id)] else: try: s = elasticsearch_dsl.Document.mget(docs=self.ids[:self.chunksize], using=self.es, index=self.index, _source_excludes=self.source_excludes, _source_includes=self.source_includes, _source=self.source, missing='raise') except elasticsearch.exceptions.NotFoundError as e: for doc in e.info['docs']: # we got some missing ids and harvest the missing ids from the Elasticsearch NotFoundError Exception missing.append(doc['_id']) del self.ids[self.ids.index(doc['_id'])] else: # only gets called if we don't run into an exception for hit in s: _id = hit.meta.to_dict()["id"] yield self.return_doc(hit) del self.ids[self.ids.index(_id)] if not self.ids: """ if we delete the last item from ids, ids turns to None and then the while(len(list())) would throw an exception, since None isn't an iterable """ self.ids = [] for item in self.write_file(missing): yield item class IDFileConsume(IDFile): """ same class like IDFile, but here we overwrite the write_file 
and read_file functions for missing-ID-handling purposes """ def __init__(self, **kwargs): """ Creates a new IDFileConsume Object """ super().__init__(**kwargs) def read_file(self): """ no more iterables here, only files """ ids_set = set() with open(self.idfile, "r") as inp: for ppn in inp: ids_set.add(ppn.rstrip()) self.ids = list(ids_set) def write_file(self, missing): """ overwriting write_file so this outputs a idfile of the consume generator with the missing ids if no IDs are missing, that file gets deleted """ if missing: with open(self.idfile, "w") as outp: for item in missing: print(item, file=outp) if self.missing_behaviour == 'yield': yield {"_id": item, 'found': False} else: # no ids missing in the cluster? alright, we clean up os.remove(self.idfile)
python
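A brief usage sketch for the generator class above, not taken from the source; the host, port, and index name "my-index" are illustrative assumptions.

# Stream every record of an assumed index from a local cluster.
with ESGenerator(host='localhost', port=9200, index='my-index', headless=True) as es:
    for record in es.generator():
        print(record)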
from copy import deepcopy from sqlalchemy import ( Table, Column, Integer, String, DateTime, UniqueConstraint, DECIMAL, LargeBinary, Boolean, ForeignKey, PrimaryKeyConstraint, ) from wt.common import Currency from wt.entities.deliverables import DeliverableStatus from wt.ids import EntityType from wt.entities.issues import IssueType from wt.entities.projects import ProjectStatus from wt.provider.db import METADATA from wt.provider.db._columns import ( ID_COLUMN_TYPE, PROJECT_ID_COLUMN_TYPE, OBJECT_ID_COLUMN_REFERENCE, PARENT_ID_COLUMN_REFERENCE, ) from wt.provider.db._utils import get_enum_length from wt.costs.expenditures import ExpenditureStatus, ExpenditureType FIELD_FILES_TABLE = Table( "field_files", METADATA, Column("id", Integer(), primary_key=True, autoincrement=True), deepcopy(PARENT_ID_COLUMN_REFERENCE), Column("uri", String(2048), index=True, nullable=False), Column("created_on", DateTime(), nullable=False), UniqueConstraint("parent_id", "uri") ) FIELD_LINKS_TABLE = Table( "field_links", METADATA, Column("id", Integer(), primary_key=True, autoincrement=True), deepcopy(PARENT_ID_COLUMN_REFERENCE), Column("uri", String(2048), nullable=False), Column("title", String(126), nullable=False), Column("description", String(4096), nullable=False), Column("created_on", DateTime(), nullable=False), UniqueConstraint("parent_id", "uri") ) FIELD_TASKS_TABLE = Table( "field_tasks", METADATA, Column("id", Integer(), primary_key=True, autoincrement=True), deepcopy(PARENT_ID_COLUMN_REFERENCE), Column("task", String(1024), nullable=False), Column("completed", Boolean(), nullable=False), Column("created_on", DateTime(), nullable=False), UniqueConstraint("parent_id", "task") ) FIELD_TAGS_TABLE = Table( "field_tags", METADATA, Column("id", Integer(), primary_key=True, autoincrement=True), deepcopy(PARENT_ID_COLUMN_REFERENCE), Column("tag", String(50), index=True, nullable=False), Column("created_on", DateTime(), nullable=False), UniqueConstraint("parent_id", "tag") ) DELIVERABLES_TABLE = Table( "deliverables", METADATA, deepcopy(OBJECT_ID_COLUMN_REFERENCE), Column( "project_id", PROJECT_ID_COLUMN_TYPE, ForeignKey("projects.project_id", ondelete="RESTRICT"), index=True, nullable=False, ), Column("name", String(128), nullable=False), Column("status", String(get_enum_length(DeliverableStatus)), nullable=False), Column("description", String(), nullable=False), Column("date_opened", DateTime(), nullable=False), Column("date_closed", DateTime(), nullable=True), Column("deadline", DateTime(), nullable=True), Column("created_on", DateTime(), nullable=False), ) IDS_COUNTER_TABLE = Table( "ids_counter", METADATA, Column("project_id", ID_COLUMN_TYPE, primary_key=True), Column("next_id", Integer(), nullable=False), ) OBJECTS_TRACKER_TABLE = Table( "objects_tracker", METADATA, Column("id", ID_COLUMN_TYPE, primary_key=True), Column( "project_id", PROJECT_ID_COLUMN_TYPE, index=True, nullable=False, ), Column("type", String(get_enum_length(EntityType)), nullable=False), ) PROJECTS_TABLE = Table( "projects", METADATA, Column("project_id", PROJECT_ID_COLUMN_TYPE, primary_key=True), Column("name", String(128), nullable=False), Column("status", String(get_enum_length(ProjectStatus)), nullable=False), Column("date_opened", DateTime(), nullable=False), Column("date_closed", DateTime(), nullable=True), Column("deadline", DateTime(), nullable=True), Column("hour_rate_amount", DECIMAL(), nullable=True), Column("hour_rate_currency", String(get_enum_length(Currency)), nullable=True), Column("description", String(), 
nullable=False), Column("limitations_and_restrictions", String(), nullable=False), Column("goals_and_metrics", String(), nullable=False), Column("primary_color", String(7), nullable=False), Column("secondary_color", String(7), nullable=False), Column("created_on", DateTime(), nullable=False), ) ISSUES_TABLE = Table( "issues", METADATA, deepcopy(OBJECT_ID_COLUMN_REFERENCE), Column( "project_id", PROJECT_ID_COLUMN_TYPE, ForeignKey("projects.project_id", ondelete="RESTRICT"), index=True, nullable=False, ), Column("name", String(128), nullable=False), Column("description", String(), nullable=False), Column("external_type", String(256), nullable=False), Column("status", String(get_enum_length(IssueType)), nullable=False), Column("priority", String(get_enum_length(IssueType)), nullable=False), Column("type", String(get_enum_length(IssueType)), nullable=False), Column("date_opened", DateTime(), nullable=False), Column("date_closed", DateTime(), nullable=True), Column("deadline", DateTime(), nullable=True), Column("hour_rate_amount", DECIMAL(), nullable=True), Column("hour_rate_currency", String(get_enum_length(Currency)), nullable=True), Column("estimated_duration", DECIMAL(), nullable=True), Column("created_on", DateTime(), nullable=False), ) USER_TABLE = Table( "users", METADATA, Column("id", Integer(), primary_key=True, autoincrement=True), Column("username", String(64), unique=True, nullable=False), Column("password", LargeBinary(256), nullable=False), ) ENTITY_LINKS_TABLE = Table( "entity_links", METADATA, Column( "object_id", ID_COLUMN_TYPE, ForeignKey("objects_tracker.id", ondelete="RESTRICT"), nullable=False ), Column( "other_object_id", ID_COLUMN_TYPE, ForeignKey("objects_tracker.id", ondelete="RESTRICT"), nullable=False ), PrimaryKeyConstraint("object_id", "other_object_id"), ) TIMESHEETS_TABLE = Table( "timesheets", METADATA, Column("id", Integer(), primary_key=True, autoincrement=True), deepcopy(PARENT_ID_COLUMN_REFERENCE), Column("description", String(256), nullable=False), Column("duration", DECIMAL(), nullable=False), Column("date_opened", DateTime(), nullable=False), Column("created_on", DateTime(), nullable=False), ) EXPENDITURES_TABLE = Table( "expenditures", METADATA, Column("id", Integer(), primary_key=True, autoincrement=True), deepcopy(PARENT_ID_COLUMN_REFERENCE), Column("description", String(), nullable=False), Column("name", String(256), nullable=False), Column("date_opened", DateTime(), nullable=False), Column("date_closed", DateTime(), nullable=True), Column("deadline", DateTime(), nullable=True), Column("status", String(get_enum_length(ExpenditureStatus)), nullable=True), Column("type", String(get_enum_length(ExpenditureType)), nullable=True), Column("cost_amount", DECIMAL(), nullable=True), Column("cost_currency", String(get_enum_length(Currency)), nullable=True), Column("created_on", DateTime(), nullable=False), )
python
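A minimal sketch of materialising the schema above, assuming the shared METADATA object is a standard SQLAlchemy MetaData and that an in-memory SQLite engine is acceptable for a smoke test; this is illustrative only and not part of the original module.

from sqlalchemy import create_engine

engine = create_engine("sqlite://")  # throwaway in-memory database
METADATA.create_all(engine)          # emits CREATE TABLE for every table bound to METADATA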
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 ckitagawa <[email protected]>
#
# Distributed under terms of the MIT license.

import logging
import threading

import serial
import serial.tools.list_ports

import fiber_reading

from collections import deque


def select_device():
    """User-provided serial device selector.

    Args:
        None

    Returns:
        The selected serial device as ListPortInfo.
    """
    while True:
        print('Pick the serial device:')
        ports = serial.tools.list_ports.comports()
        for i, port in enumerate(ports):
            print('{}: {}'.format(i, port))
        try:
            chosen_port = ports[int(input())]
            print('Selected {}'.format(chosen_port))
            return chosen_port
        except IndexError:
            print('Invalid device!')
            continue


class SerialDataSource(object):
    """A datasource that reads from a bound serial port interface."""

    def __init__(self, device):
        self.q = deque()
        self.ser = serial.Serial(device, 115200)
        self.running = False
        self.t = None

    def start(self):
        """Starts the packet_service."""
        if self.running:
            return
        self.running = True
        self.t = threading.Thread(target=self.packet_service)
        self.t.start()

    def stop(self):
        self.running = False
        self.t.join()
        self.t = None

    def get_packet(self):
        if self.q:
            return self.q.popleft()

    def packet_service(self):
        # Discard the first (possibly partial) packet.
        self.ser.readline().decode('ascii')
        # Check self.running so stop() can actually join the thread; the
        # original looped on `while True` and could never terminate.
        while self.running:
            line = ''
            try:
                line = self.ser.readline().decode('ascii')
            except Exception:
                continue
            if not line:
                continue
            ints = line.split(',')
            l = len(ints)
            if l < 3:
                print(line)
                continue
            axis_char = int(ints[0])
            axis = fiber_reading.Axis.UNKNOWN
            if axis_char == 0:
                axis = fiber_reading.Axis.X_AXIS
            elif axis_char == 1:
                axis = fiber_reading.Axis.Y_AXIS
            index = int(ints[1])
            callib = int(ints[2])
            reading = fiber_reading.FiberReading(axis, index, callib)
            for i in range(3, l):
                reading.AddData(int(ints[i]))
            self.q.append(reading)
python
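A short usage sketch, assuming the module above is importable and a compatible board is attached: pick a port interactively, start the reader thread, and drain readings until interrupted.

import time

port = select_device()
source = SerialDataSource(port.device)
source.start()
try:
    while True:
        reading = source.get_packet()
        if reading is not None:
            print(reading)
        time.sleep(0.01)
finally:
    source.stop()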
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Maintainer: # Based on code written by Jed Smith <[email protected]> who based it on # code written by Alex Polvi <[email protected]> # import sys import unittest import json from libcloud.utils.py3 import httplib from libcloud.compute.drivers.equinixmetal import EquinixMetalNodeDriver from libcloud.compute.base import Node, KeyPair from libcloud.compute.types import NodeState from libcloud.test import MockHttp from libcloud.test.compute import TestCaseMixin from libcloud.test.file_fixtures import ComputeFileFixtures # This is causing test failures inder Python 3.5 import libcloud.compute.drivers.equinixmetal libcloud.compute.drivers.equinixmetal.USE_ASYNC_IO_IF_AVAILABLE = False __all__ = [ 'EquinixMetalTest' ] class EquinixMetalTest(unittest.TestCase, TestCaseMixin): def setUp(self): EquinixMetalNodeDriver.connectionCls.conn_class = EquinixMetalMockHttp self.driver = EquinixMetalNodeDriver('foo') def test_list_nodes(self): nodes = self.driver.list_nodes('project-id') self.assertEqual(len(nodes), 1) node = nodes[0] self.assertEqual(node.id, '1e52437e-bbbb-cccc-dddd-74a9dfd3d3bb') self.assertEqual(node.name, 'test-node') self.assertEqual(node.state, NodeState.RUNNING) self.assertTrue('147.75.255.255' in node.public_ips) self.assertTrue('2604:EEEE::EE' in node.public_ips) self.assertTrue('10.0.0.255' in node.private_ips) self.assertEqual(node.extra['created_at'], '2015-05-03T15:50:49Z') self.assertEqual(node.extra['updated_at'], '2015-05-03T16:00:08Z') self.assertEqual(node.extra['billing_cycle'], 'hourly') self.assertEqual(node.extra['locked'], False) self.assertEqual(node.size.id, 'baremetal_1') self.assertEqual(node.size.name, 'Type 1 - 16384 RAM') self.assertEqual(node.size.ram, 16384) self.assertEqual(node.size.disk, 240) self.assertEqual(node.size.price, 0.4) self.assertEqual(node.size.extra['line'], 'baremetal') self.assertEqual(node.image.id, 'ubuntu_14_04') self.assertEqual(node.image.name, 'Ubuntu 14.04 LTS') self.assertEqual(node.image.extra['distro'], 'ubuntu') self.assertEqual(node.image.extra['version'], '14.04') def test_list_nodes_response(self): nodes = self.driver.list_nodes('project-id') self.assertTrue(isinstance(nodes, list)) for node in nodes: self.assertTrue(isinstance(node, Node)) def test_list_locations(self): locations = self.driver.list_locations() self.assertEqual(len(locations), 1) def test_list_images(self): images = self.driver.list_images() self.assertEqual(len(images), 4) def test_list_sizes(self): sizes = self.driver.list_sizes() self.assertEqual(len(sizes), 1) def test_create_node(self): node = self.driver.create_node(ex_project_id="project-id", name="node-name", size=self.driver.list_sizes()[0], image=self.driver.list_images()[0], location=self.driver.list_locations()[ 0]) 
self.assertTrue(isinstance(node, Node)) def test_create_node_response(self): size = self.driver.list_sizes()[0] image = self.driver.list_images()[0] location = self.driver.list_locations()[0] node = self.driver.create_node(ex_project_id="project-id", name='node-name', image=image, size=size, location=location) self.assertTrue(isinstance(node, Node)) def test_reboot_node(self): node = self.driver.list_nodes('project-id')[0] self.driver.reboot_node(node) def test_reboot_node_response(self): node = self.driver.list_nodes('project-id')[0] self.driver.reboot_node(node) def test_destroy_node(self): node = self.driver.list_nodes('project-id')[0] self.driver.destroy_node(node) def test_destroy_node_response(self): node = self.driver.list_nodes('project-id')[0] self.driver.destroy_node(node) def test_reinstall_node(self): node = self.driver.list_nodes('project-id')[0] self.driver.ex_reinstall_node(node) def test_rescue_node(self): node = self.driver.list_nodes('project-id')[0] self.driver.ex_rescue_node(node) def test_list_key_pairs(self): keys = self.driver.list_key_pairs() self.assertEqual(len(keys), 3) def test_create_key_pair(self): key = self.driver.create_key_pair(name="sshkey-name", public_key="ssh-rsa AAAAB3NzaC1yc2EA\ AAADAQABAAABAQDI4pIqzpb5g3992h+yr527VRcaB68KE4vPjWPPoiQws49KIs2NMcOzS9QE4641uW\ 1u5ML2HgQdfYKMF/YFGnI1Y6xV637DjhDyZYV9LasUH49npSSJjsBcsk9JGfUpNAOdcgpFzK8V90ei\ OrOC5YncxdwwG8pwjFI9nNVPCl4hYEu1iXdyysHvkFfS2fklsNjLWrzfafPlaen+qcBxygCA0sFdW/\ 7er50aJeghdBHnE2WhIKLUkJxnKadznfAge7oEe+3LLAPfP+3yHyvp2+H0IzmVfYvAjnzliYetqQ8p\ g5ZW2BiJzvqz5PebGS70y/ySCNW1qQmJURK/Wc1bt9en root@libcloud") self.assertTrue(isinstance(key, KeyPair)) def test_delete_key_pair(self): key = self.driver.list_key_pairs()[0] self.driver.delete_key_pair(key) def test_ex_list_projects(self): projects = self.driver.ex_list_projects() self.assertEqual(len(projects), 3) def test_ex_get_bgp_config_for_project(self): config = self.driver.ex_get_bgp_config_for_project(ex_project_id='4b653fce-6405-4300-9f7d-c587b7888fe5') self.assertEqual(config.get('status'), 'enabled') def test_ex_get_bgp_config(self): config = self.driver.ex_get_bgp_config() self.assertEqual(len(config), 2) def test_ex_list_nodes_for_project(self): nodes = self.driver.ex_list_nodes_for_project(ex_project_id='4b653fce-6405-4300-9f7d-c587b7888fe5') self.assertEqual(nodes[0].public_ips, ['147.75.102.193', '2604:1380:2000:c100::3']) def test_ex_create_bgp_session(self): node = self.driver.list_nodes('project-id')[0] session = self.driver.ex_create_bgp_session(node, 'ipv4') self.assertEqual(session['status'], 'unknown') def test_ex_get_bgp_session(self): session = self.driver.ex_get_bgp_session(self.driver.ex_list_bgp_sessions()[0]['id']) self.assertEqual(session['status'], 'down') def test_ex_list_bgp_sessions_for_project(self): sessions = self.driver.ex_list_bgp_sessions_for_project(ex_project_id='4b653fce-6405-4300-9f7d-c587b7888fe5') self.assertEqual(sessions['bgp_sessions'][0]['status'], 'down') def test_ex_list_bgp_sessions_for_node(self): sessions = self.driver.ex_list_bgp_sessions_for_node(self.driver.list_nodes()[0]) self.assertEqual(sessions['bgp_sessions'][0]['status'], 'down') def test_ex_list_bgp_sessions(self): sessions = self.driver.ex_list_bgp_sessions() self.assertEqual(sessions[0]['status'], 'down') def test_ex_delete_bgp_session(self): self.driver.ex_delete_bgp_session(session_uuid='08f6b756-758b-4f1f-bfaf-b9b5479822d7') def test_ex_list_events_for_node(self): events = self.driver.ex_list_events_for_node(self.driver.list_nodes()[0]) 
self.assertEqual(events['events'][0]['ip'], '157.52.105.28') def test_ex_list_events_for_project(self): events = self.driver.ex_list_events_for_project(self.driver.ex_list_projects()[0]) self.assertEqual(events['meta']['total'], len(events['events'])) def test_ex_get_node_bandwidth(self): node = self.driver.list_nodes('project-id')[0] bw = self.driver.ex_get_node_bandwidth(node, 1553194476, 1553198076) self.assertTrue(len(bw['bandwidth'][0]['datapoints'][0]) > 0) def test_ex_update_node(self): node = self.driver.list_nodes('project-id')[0] self.driver.ex_update_node(node, description='new_description') def test_ex_describe_all_addresses_for_project(self): addresses = self.driver.ex_describe_all_addresses_for_project( '4b653fce-6405-4300-9f7d-c587b7888fe5') self.assertEqual(len(addresses), 5) def test_ex_describe_address(self): address = self.driver.ex_describe_address( ex_address_id='01c184f5-1413-4b0b-9f6d-ac993f6c9241') self.assertEqual(address['network'], '147.75.33.32') def test_ex_request_address_reservation(self): response = self.driver.ex_request_address_reservation( ex_project_id='3d27fd13-0466-4878-be22-9a4b5595a3df') assert response['global_ip'] def test_ex_associate_address_with_node(self): node = self.driver.list_nodes('project-id')[0] response = self.driver.ex_associate_address_with_node(node, '147.75.40.2/32') assert response['enabled'] def test_ex_disassociate_address_with_node(self): node = self.driver.list_nodes('project-id')[0] assignments = self.driver.ex_list_ip_assignments_for_node(node) for ip_assignment in assignments['ip_addresses']: if ip_assignment['gateway'] == '147.75.40.2': self.driver.ex_disassociate_address( ip_assignment['id']) break def test_list_volumes(self): volumes = self.driver.list_volumes() assert len(volumes) == 2 assert len(volumes[0].extra['attachments']) == 0 def test_create_volume(self): location = self.driver.list_locations()[0] volume = self.driver.create_volume( 10, location, description="test volume", plan="storage_1", ex_project_id='3d27fd13-0466-4878-be22-9a4b5595a3df') assert len(volume.extra['attachments']) == 0 assert not volume.extra['locked'] def test_attach_volume(self): attached = False volumes = self.driver.ex_list_volumes_for_project(ex_project_id='3d27fd13-0466-4878-be22-9a4b5595a3df') node = self.driver.ex_list_nodes_for_project(ex_project_id='3d27fd13-0466-4878-be22-9a4b5595a3df')[0] for vol in volumes: if len(vol.extra['attachments']) == 0: attached = self.driver.attach_volume(node, vol) break assert attached def test_detach_volume(self): detached = False volumes = self.driver.ex_list_volumes_for_project(ex_project_id='3d27fd13-0466-4878-be22-9a4b5595a3df') for vol in volumes: if len(vol.extra['attachments']) > 0: detached = self.driver.detach_volume(vol) break assert detached def test_destroy_volume(self): destroyed = False volumes = self.driver.ex_list_volumes_for_project(ex_project_id='3d27fd13-0466-4878-be22-9a4b5595a3df') for vol in volumes: if len(vol.extra['attachments']) == 0: destroyed = self.driver.destroy_volume(vol) break assert destroyed class EquinixMetalMockHttp(MockHttp): fixtures = ComputeFileFixtures('equinixmetal') def _metal_v1_facilities(self, method, url, body, headers): body = self.fixtures.load('facilities.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_plans(self, method, url, body, headers): body = self.fixtures.load('plans.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_projects_3d27fd13_0466_4878_be22_9a4b5595a3df_plans(self, 
method, url, body, headers): body = self.fixtures.load('plans.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_projects(self, method, url, body, headers): body = self.fixtures.load('projects.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_projects_4b653fce_6405_4300_9f7d_c587b7888fe5_devices(self, method, url, body, headers): body = self.fixtures.load('devices_for_project.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_projects_4a4bce6b_d2ef_41f8_95cf_0e2f32996440_devices(self, method, url, body, headers): body = self.fixtures.load('devices_for_project.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_projects_3d27fd13_0466_4878_be22_9a4b5595a3df_devices(self, method, url, body, headers): body = self.fixtures.load('devices_for_project.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_projects_4b653fce_6405_4300_9f7d_c587b7888fe5_ips(self, method, url, body, headers): body = self.fixtures.load('project_ips.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_projects_3d27fd13_0466_4878_be22_9a4b5595a3df_ips(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load('reserve_ip.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_projects_4b653fce_6405_4300_9f7d_c587b7888fe5_bgp_config(self, method, url, body, headers): body = self.fixtures.load('bgp_config_project_1.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_projects_3d27fd13_0466_4878_be22_9a4b5595a3df_bgp_config(self, method, url, body, headers): body = self.fixtures.load('bgp_config_project_1.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_projects_4a4bce6b_d2ef_41f8_95cf_0e2f32996440_bgp_config(self, method, url, body, headers): body = self.fixtures.load('bgp_config_project_3.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_operating_systems(self, method, url, body, headers): body = self.fixtures.load('operatingsystems.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_ssh_keys(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('sshkeys.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'POST': body = self.fixtures.load('sshkey_create.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_ssh_keys_2c1a7f23_1dc6_4a37_948e_d9857d9f607c(self, method, url, body, headers): if method == 'DELETE': return (httplib.OK, '', {}, httplib.responses[httplib.OK]) def _metal_v1_projects_project_id_devices(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load('device_create.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) elif method == 'GET': body = self.fixtures.load('devices.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_devices_1e52437e_bbbb_cccc_dddd_74a9dfd3d3bb(self, method, url, body, headers): if method in ['DELETE', 'PUT']: return (httplib.OK, '', {}, httplib.responses[httplib.OK]) def _metal_v1_devices_1e52437e_bbbb_cccc_dddd_74a9dfd3d3bb_actions( self, method, url, body, headers): return (httplib.OK, '', {}, httplib.responses[httplib.OK]) def _metal_v1_devices_1e52437e_bbbb_cccc_dddd_74a9dfd3d3bb_bgp_sessions(self, method, url, body, headers): if method == 'POST': body = 
self.fixtures.load('bgp_session_create.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_bgp_sessions_08f6b756_758b_4f1f_bfaf_b9b5479822d7(self, method, url, body, headers): body = self.fixtures.load('bgp_session_get.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_projects_4b653fce_6405_4300_9f7d_c587b7888fe5_bgp_sessions(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('bgp_sessions.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_devices_905037a4_967c_4e81_b364_3a0603aa071b_bgp_sessions(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('bgp_sessions.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_projects_4a4bce6b_d2ef_41f8_95cf_0e2f32996440_bgp_sessions(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('bgp_sessions.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_projects_3d27fd13_0466_4878_be22_9a4b5595a3df_bgp_sessions(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('bgp_sessions.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_projects_3d27fd13_0466_4878_be22_9a4b5595a3df_events(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('project_events.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_devices_905037a4_967c_4e81_b364_3a0603aa071b_events(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('device_events.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_devices_1e52437e_bbbb_cccc_dddd_74a9dfd3d3bb_bandwidth(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('node_bandwidth.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_ips_01c184f5_1413_4b0b_9f6d_ac993f6c9241(self, method, url, body, headers): body = self.fixtures.load('ip_address.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_devices_1e52437e_bbbb_cccc_dddd_74a9dfd3d3bb_ips(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('ip_assignments.json') elif method == 'POST': body = self.fixtures.load('associate_ip.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_ips_aea4ee0c_675f_4b77_8337_8e13b868dd9c(self, method, url, body, headers): if method == 'DELETE': return (httplib.OK, '', {}, httplib.responses[httplib.OK]) def _metal_v1_projects_3d27fd13_0466_4878_be22_9a4b5595a3df_storage(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('volumes.json') elif method == 'POST': body = self.fixtures.load('create_volume.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_projects_4a4bce6b_d2ef_41f8_95cf_0e2f32996440_storage(self, method, url, body, headers): if method == 'GET': body = json.dumps({"volumes": []}) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_projects_4b653fce_6405_4300_9f7d_c587b7888fe5_storage(self, method, url, body, headers): if method == 'GET': body = json.dumps({"volumes": []}) return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_storage_74f11291_fde8_4abf_8150_e51cda7308c3(self, method, url, body, headers): if method == 'DELETE': return (httplib.NO_CONTENT, '', {}, httplib.responses[httplib.NO_CONTENT]) def 
_metal_v1_storage_a08aaf76_e0ce_43aa_b9cd_cce0d4ae4f4c_attachments(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load('attach_volume.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _metal_v1_storage_a08aaf76_e0ce_43aa_b9cd_cce0d4ae4f4c(self, method, url, body, headers): if method == 'DELETE': return (httplib.NO_CONTENT, '', {}, httplib.responses[httplib.NO_CONTENT]) def _metal_v1_storage_attachments_2c16a96f_bb4f_471b_8e2e_b5820b9e1603(self, method, url, body, headers): if method == 'DELETE': return (httplib.NO_CONTENT, '', {}, httplib.responses[httplib.NO_CONTENT]) if __name__ == '__main__': sys.exit(unittest.main())
python
# -*- coding: utf-8 -*-
from typing import Dict, List, Optional, Tuple

from django.conf import settings
from rest_framework import serializers

from backend.components import bk_repo
from backend.helm.helm.models.chart import Chart, ChartVersion, ChartVersionSnapshot


def get_chart_version(
    project_name: str, repo_name: str, chart_name: str, version: str, username: str, password: str
) -> Dict:
    """Fetch the details of a specific chart version from the repository.

    :param project_name: project name
    :param repo_name: repository name
    :param chart_name: chart name, used to locate the chart
    :param version: chart version
    :param username: username used to access the repository
    :param password: password used to access the repository
    """
    client = bk_repo.BkRepoClient(username=username, password=password)
    return client.get_chart_version_detail(project_name, repo_name, chart_name, version)


def update_or_create_chart_version(chart: Chart, version_detail: Dict) -> ChartVersion:
    """Update or create the chart version record."""
    return ChartVersion.update_or_create_version(chart, version_detail)


def release_snapshot_to_version(chart_version_snapshot: ChartVersionSnapshot, chart: Chart) -> ChartVersion:
    """Assemble ChartVersion data from a snapshot."""
    return ChartVersion(id=0, chart=chart, keywords="chart version", **chart_version_snapshot.version_detail)


class VersionListSLZ(serializers.Serializer):
    name = serializers.CharField()
    version = serializers.CharField()
    created = serializers.CharField()
    urls = serializers.ListField(child=serializers.CharField())


class ReleaseVersionListSLZ(serializers.Serializer):
    name = serializers.CharField()
    version = serializers.CharField()
    created = serializers.CharField()


def sort_version_list(versions: List) -> List:
    versions.sort(key=lambda item: item["created"], reverse=True)
    return versions


def get_helm_project_and_repo_name(
    project_code: str, repo_name: Optional[str] = None, is_public_repo: Optional[bool] = None
) -> Tuple[str, str]:
    """Get the project and repository names.

    :param project_code: BCS project code
    :param repo_name: repository name
    :param is_public_repo: whether the repository is the public (shared) one
    :returns: a tuple of (project name, repository name)
    """
    if is_public_repo or repo_name == settings.BCS_SHARED_CHART_REPO_NAME:
        return (settings.BK_REPO_SHARED_PROJECT_NAME, settings.BK_REPO_SHARED_CHART_DEPOT_NAME)
    # For a project-level chart repository, the project name and repository name are identical
    return (project_code, project_code)
python
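A small illustrative sketch of the helper above; the project code is an assumption, and Django settings must already be configured so the shared-repo constants resolve. For project-level chart depots the project code is reused for both names.

project_name, repo_name = get_helm_project_and_repo_name("demo-project")
assert project_name == "demo-project" and repo_name == "demo-project"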
import os
from collections import OrderedDict

from coverage_checker.utils import get_all_path_combinations


def test_get_all_path_combinations():
    facets = OrderedDict([('a', ['1', '2']), ('b', ['3', '4']), ('c', ['5', '6'])])
    all_paths = get_all_path_combinations(facets)
    expected_result = ['1/3/5', '1/3/6', '1/4/5', '1/4/6', '2/3/5', '2/3/6', '2/4/5', '2/4/6']
    assert all_paths == expected_result
python
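The test above pins down the expected ordering. A sketch of that behaviour (not the library's actual implementation) is a Cartesian product over the ordered facet values:

import itertools
from collections import OrderedDict


def all_path_combinations_sketch(facets):
    # Cartesian product in facet order, joined into slash-separated paths.
    return ['/'.join(parts) for parts in itertools.product(*facets.values())]


facets = OrderedDict([('a', ['1', '2']), ('b', ['3', '4']), ('c', ['5', '6'])])
assert all_path_combinations_sketch(facets)[:2] == ['1/3/5', '1/3/6']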
import re
from math import sqrt, atan2

if __name__ == "__main__":
    """
    This script file demonstrates how to transform raw CSI out from the ESP32
    into CSI-amplitude and CSI-phase.
    """

    FILE_NAME = "./example_csi.csv"

    f = open(FILE_NAME)
    for j, l in enumerate(f.readlines()):
        imaginary = []
        real = []
        amplitudes = []
        phases = []

        # Parse string to create integer list
        csi_string = re.findall(r"\[(.*)\]", l)[0]
        csi_raw = [int(x) for x in csi_string.split(" ") if x != '']

        # Create list of imaginary and real numbers from CSI
        for i in range(len(csi_raw)):
            if i % 2 == 0:
                imaginary.append(csi_raw[i])
            else:
                real.append(csi_raw[i])

        # Transform imaginary and real into amplitude and phase
        for i in range(int(len(csi_raw) / 2)):
            amplitudes.append(sqrt(imaginary[i] ** 2 + real[i] ** 2))
            phases.append(atan2(imaginary[i], real[i]))

        print("-------------------")
        print("csi_amplitude#{}:".format(j), amplitudes)
        print("csi_phase#{}: ".format(j), phases)
        print("-------------------")
python
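An equivalent vectorised sketch, assuming numpy is available (it is not used by the original script): interleaved [imaginary, real, imaginary, real, ...] samples in, amplitude and phase arrays out.

import numpy as np


def csi_to_amplitude_phase(csi_raw):
    # Even indices are imaginary parts, odd indices are real parts,
    # matching the loop in the script above.
    data = np.asarray(csi_raw, dtype=float)
    imag, real = data[0::2], data[1::2]
    return np.hypot(real, imag), np.arctan2(imag, real)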
# Recording video to a file
# https://picamera.readthedocs.io/en/release-1.13/recipes1.html#recording-video-to-a-file

import picamera

camera = picamera.PiCamera()
camera.resolution = (640, 480)
camera.start_recording('output/07_video.h264')
camera.wait_recording(5)
camera.stop_recording()
python
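The same recipe written with a context manager so the camera is always closed; this is a sketch of an alternative, not a change to the recipe above.

import picamera

with picamera.PiCamera(resolution=(640, 480)) as camera:
    camera.start_recording('output/07_video.h264')
    camera.wait_recording(5)
    camera.stop_recording()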
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility to enable oauth2 settings for NPTEL.""" __author__ = [ 'Abhinav Khandelwal ([email protected])', 'Rishav Thakker ([email protected])' ] import logging import re import httplib2 import appengine_config from oauth2client.client import SignedJwtAssertionCredentials from apiclient.discovery import build from google.appengine.api import memcache from modules.google_service_account.service_account_models import ( GoogleServiceAccountTypes, GoogleServiceAccountSettings, GoogleServiceAccountSettingsDTO) # In real life we'd check in a blank file and set up the code to error with a # message pointing people to https://code.google.com/apis/console. EMAIL_REGEX = re.compile(r"^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]+$") DEFAULT_HTTP_TIMEOUT = 10 class GoogleServiceManager(object): """Manage all the credentials/services""" # Services are added to this object as and when required by the respective # Modules _SERVICES = {} _MEMCACHE_KEY = 'service_account_credentials' _DEFAULT_CACHE_TTL_SECS = 3600 @classmethod def _default_id_from_credential_type(cls, credential_type): """ Returns the ID for the default settings object from credential type """ return credential_type @classmethod def get_by_id(cls, id, namespace=appengine_config.DEFAULT_NAMESPACE_NAME): """Fetches an entry from the database using its ID""" entity = GoogleServiceAccountSettings.get_by_id(id, namespace=namespace) if entity: return GoogleServiceAccountSettingsDTO(entity) @classmethod def update_service_account_settings( cls, id, namespace=appengine_config.DEFAULT_NAMESPACE_NAME, credential_type=None, client_email=None, sub_user_email=None, scope=None, client_id=None, api_key=None, project_id=None, project_key_id=None, private_key=None, auth_uri=None, token_uri=None, auth_provider_x509_cert_url=None, client_x509_cert_url=None): """Updates a GoogleServiceAccountSettings object""" obj = GoogleServiceAccountSettings.get_or_create(id, namespace) if credential_type is not None: obj.credential_type = credential_type if client_email is not None: obj.client_email = client_email if sub_user_email is not None: obj.sub_user_email = sub_user_email if scope is not None: obj.scope = scope if client_id is not None: obj.client_id = client_id if api_key is not None: obj.api_key = api_key if project_id is not None: obj.project_id = project_id if project_key_id is not None: obj.project_key_id = project_key_id if private_key is not None: obj.private_key = private_key if auth_uri is not None: obj.auth_uri = auth_uri if token_uri is not None: obj.token_uri = token_uri if auth_provider_x509_cert_url is not None: obj.auth_provider_x509_cert_url = auth_provider_x509_cert_url if client_x509_cert_url is not None: obj.client_x509_cert_url = client_x509_cert_url # call initialize_credentials again if required if credential_type == GoogleServiceAccountTypes.SERVICE_ACCOUNT: if not cls.initialize_credentials( service_account_settings=obj, namespace=namespace): return None # 
Save and return obj.put() return GoogleServiceAccountSettingsDTO(obj) @classmethod def get_default_settings_by_type(cls, credential_type, namespace=appengine_config.DEFAULT_NAMESPACE_NAME): """Returns the default settings object for a credential type""" id = cls._default_id_from_credential_type(credential_type) entry = cls.get_by_id(id, namespace=namespace) return entry @classmethod def get_or_create_default_settings_by_type(cls, credential_type, namespace=appengine_config.DEFAULT_NAMESPACE_NAME): """ Returns the default settings object for a credential type. Creates a new object and returns it if none exist. """ entry = cls.get_default_settings_by_type(credential_type, namespace) if not entry: id = cls._default_id_from_credential_type(credential_type) entry = cls.update_service_account_settings( id=id, namespace=namespace, credential_type=credential_type) return entry @classmethod def get_all_default_settings( cls, namespace=appengine_config.DEFAULT_NAMESPACE_NAME): """Returns a list of the default settings objects for each type""" all_settings = [] for credential_type in GoogleServiceAccountTypes.to_dict().values(): entity = cls.get_default_settings_by_type( credential_type, namespace) if entity: all_settings.append(entity) return all_settings @classmethod def update_default_settings_by_type( cls, namespace=appengine_config.DEFAULT_NAMESPACE_NAME, credential_type=None, **kwargs): """ Updates the default settings object identified by type. Each type will have exactly one default object. """ id = cls._default_id_from_credential_type(credential_type) kwargs['id'] = id kwargs['credential_type'] = credential_type return cls.update_service_account_settings( namespace=namespace, **kwargs) @classmethod def _store_credentials_in_memcache( cls, credentials, namespace=appengine_config.DEFAULT_NAMESPACE_NAME): """Stores the credential object in memcache""" memcache.set( cls._MEMCACHE_KEY, credentials, time=cls._DEFAULT_CACHE_TTL_SECS, namespace=namespace) @classmethod def _get_credentials_from_memcache( cls, namespace=appengine_config.DEFAULT_NAMESPACE_NAME): """Gets the credentials from the memcache""" return memcache.get(cls._MEMCACHE_KEY, namespace=namespace) @classmethod def initialize_credentials(cls, service_account_settings=None, namespace=appengine_config.DEFAULT_NAMESPACE_NAME): """Builds a decorator for using oauth2 with webapp2.RequestHandlers.""" # In real life we'd want to make one decorator per service because # we wouldn't want users to have to give so many permissions. # Initialize more credentials here if required try: if not service_account_settings: service_account_settings = cls.get_default_settings_by_type( GoogleServiceAccountTypes.SERVICE_ACCOUNT, namespace=namespace) if not service_account_settings: raise ValueError( 'Default service_account Settings not found') key = service_account_settings.private_key scope = service_account_settings.scope client_email = service_account_settings.client_email sub_user_email = service_account_settings.sub_user_email if key and scope and client_email: if sub_user_email: credentials = SignedJwtAssertionCredentials( client_email, key, scope=scope, sub=sub_user_email) else: credentials = SignedJwtAssertionCredentials( client_email, key, scope=scope) if credentials: cls._store_credentials_in_memcache( credentials, namespace=namespace) # Reset all services cls._SERVICES = {} return credentials else: raise ValueError('Could not create credentials') else: raise ValueError('Invalid default service_account settings') # Deliberately catch everything. 
pylint: disable-msg=broad-except except Exception as e: logging.error('Could not initialize Google service account ' 'credentials.\nError: %s', e) return None @classmethod def _get_authorized_http_object(cls, http_obj=None, timeout=DEFAULT_HTTP_TIMEOUT, namespace=appengine_config.DEFAULT_NAMESPACE_NAME, *args, **kwargs): """Calls the authorize function of credentials""" if not http_obj: http_obj = httplib2.Http(timeout=timeout) credentials = cls._get_credentials_from_memcache(namespace) if not credentials: # Try initializing again credentials = cls.initialize_credentials(namespace=namespace) if not credentials: # Initialization failed. return None return credentials.authorize( http_obj, *args, **kwargs) @classmethod def _add_service(cls, name, version, service, namespace=appengine_config.DEFAULT_NAMESPACE_NAME): """Adds a service to _SERVICES""" if namespace not in cls._SERVICES: cls._SERVICES[namespace] = {} if name not in cls._SERVICES[namespace]: cls._SERVICES[namespace][name] = {} cls._SERVICES[namespace][name][version] = { 'name': name, 'version': version, 'service': service } return service @classmethod def _create_service(cls, name, version, http_obj=None, timeout=DEFAULT_HTTP_TIMEOUT, namespace=appengine_config.DEFAULT_NAMESPACE_NAME): """Creates and adds a service""" if None in (name, version): return None if http_obj is None: http_obj = cls._get_authorized_http_object( timeout=timeout, namespace=namespace) if not http_obj: return None try: service = build(name, version, http=http_obj) cls._add_service(name, version, service, namespace) return service except Exception as e: logging.error('Unable to initialize %s service: %s', name, e) return None @classmethod def get_service(cls, name=None, version=None, http_obj=None, timeout=DEFAULT_HTTP_TIMEOUT, namespace=appengine_config.DEFAULT_NAMESPACE_NAME): """ Returns the service from _SERVICES Note: run this function every time you need to use a service to avoid using stale services. """ if namespace in cls._SERVICES: if name in cls._SERVICES[namespace]: if version in cls._SERVICES[namespace][name]: service = cls._SERVICES[namespace][name][version].get( 'service') if service: return service # If we reach here it means service doesn't exist. Create a new service return cls._create_service( name, version, http_obj, timeout, namespace)
python
# -----------------------------------------------------------------------------
# Copyright (c) 2013-2022, NeXpy Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING, distributed with this software.
# -----------------------------------------------------------------------------
import os
from configparser import ConfigParser

from nexusformat.nexus import NeXusError


class NXSettings(ConfigParser):
    """A ConfigParser subclass that preserves the case of option names"""

    def __init__(self, directory=None):
        super().__init__(allow_no_value=True)
        self.directory = self.get_directory(server_directory=directory)
        self.file = os.path.join(self.directory, 'settings.ini')
        super().read(self.file)
        sections = self.sections()
        if 'setup' not in sections:
            self.add_section('setup')
        if 'nxrefine' not in sections:
            self.add_section('nxrefine')
        if 'nxreduce' not in sections:
            self.add_section('nxreduce')
        self.add_defaults()

    def get_directory(self, server_directory=None):
        self.home_settings = ConfigParser()
        home_directory = os.path.join(os.path.abspath(os.path.expanduser('~')),
                                      '.nxserver')
        if not os.path.exists(home_directory):
            os.mkdir(home_directory)
        self.home_file = os.path.join(home_directory, 'settings.ini')
        self.home_settings.read(self.home_file)
        if 'setup' not in self.home_settings.sections():
            self.home_settings.add_section('setup')
        if server_directory:
            self.home_settings.set('setup', 'directory', server_directory)
            with open(self.home_file, 'w') as f:
                self.home_settings.write(f)
        elif self.home_settings.has_option('setup', 'directory'):
            server_directory = self.home_settings.get('setup', 'directory')
        else:
            raise NeXusError(
                "Please define settings directory - type 'nxsettings -h'")
        if os.path.basename(server_directory) != 'nxserver':
            server_directory = os.path.join(server_directory, 'nxserver')
        if not os.path.exists(server_directory):
            os.mkdir(server_directory)
        return server_directory

    def add_defaults(self):
        if not self.has_option('setup', 'type'):
            self.set('setup', 'type', 'multicore')
        default = {'wavelength': 0.141, 'distance': 650,
                   'phi': -5.0, 'phi_end': 360.0, 'phi_step': 0.1,
                   'chi': -90.0, 'omega': 0.0, 'x': 0.0, 'y': 0.0,
                   'nsteps': 3, 'frame_rate': 10}
        for p in default:
            if not self.has_option('nxrefine', p):
                self.set('nxrefine', p, default[p])
        default = {'threshold': 50000, 'min_pixels': 10,
                   'first': 10, 'last': 3640,
                   'monitor': 'monitor2', 'norm': 30000,
                   'radius': 0.2, 'qmax': 16.0}
        for p in default:
            if not self.has_option('nxreduce', p):
                self.set('nxreduce', p, default[p])
        self.save()

    def input_defaults(self):
        for s in ['NXRefine', 'NXReduce']:
            print(f'\n{s} Parameters\n-------------------')
            s = s.lower()
            for p in self.options(s):
                value = input(f"{p} [{self.get(s, p)}]: ")
                if value:
                    self.set(s, p, value)
        self.save()

    @property
    def settings(self):
        _settings = {}
        _settings['nxrefine'] = {k: v for (k, v) in self.items('nxrefine')}
        _settings['nxreduce'] = {k: v for (k, v) in self.items('nxreduce')}
        return _settings

    def set(self, section, option, value=None):
        if isinstance(value, int) or isinstance(value, float):
            super().set(section, option, f"{value:g}")
        elif value is not None:
            super().set(section, option, str(value))
        else:
            super().set(section, option)

    def save(self):
        with open(self.file, 'w') as f:
            self.write(f)
python
import os import sys from PIL import Image import glob import numpy as np import h5py import csv import time import zipfile import matplotlib.pyplot as plt from sklearn.preprocessing import LabelEncoder try: from urllib.request import urlretrieve except ImportError: from urllib import urlretrieve def reporthook(count, block_size, total_size): """Taken from https://blog.shichao.io/2012/10/04/progress_speed_indicator_for_urlretrieve_in_python.html A simple reporthook() function for urllib.urlretrieve()‘s reporthook argument that shows a progressbar while downloading the data """ global start_time if count == 0: start_time = time.time() return duration = time.time() - start_time progress_size = int(count * block_size) speed = int(progress_size / (1024 * duration)) percent = int(count * block_size * 100 / total_size) sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" % (percent, progress_size / (1024 * 1024), speed, duration)) sys.stdout.flush() def download_data(): """Downloads and Extracts tiny-imagenet Dataset """ if not os.path.exists(os.path.join(os.getcwd(), "tiny-imagenet-200")): if not os.path.exists(os.path.join(os.getcwd(), "tiny-imagenet-200.zip")): print ('Downloading Flowers data from http://cs231n.stanford.edu/tiny-imagenet-200.zip ...') urlretrieve ('http://cs231n.stanford.edu/tiny-imagenet-200.zip', 'tiny-imagenet-200.zip', reporthook) print ('\nExtracting tiny-imagenet-200.zip ...', end='', flush=True) zfile = zipfile.ZipFile (os.path.join(os.getcwd(), 'tiny-imagenet-200.zip'), 'r') zfile.extractall ('.') zfile.close() print ('Done') def get_word_labels(): """Get the wnids and label names from the words.txt file. # Returns A dictionary where keys are the wnids and values are the label names """ file = open ('tiny-imagenet-200/words.txt', 'r') word_labels = {} for f in file: f = f.split(' ') words = f[1] words = words.replace('\n', '') word_labels[f[0]] = words file.close() return word_labels def get_train_wnid(): """Extracts the wnids from the subdirectories for every image in the train folder # Returns A dictionary where keys are the image names and values are the wnids """ wnid_labels = {} for subdir, dirs, files in os.walk('tiny-imagenet-200/train'): for filename in files: if filename.endswith(('.txt')): file = open(subdir + '/' +filename, 'r') for line in file: line = line.split(' ') wnid_labels[line[0]] = subdir.split('/')[-1] file.close() return wnid_labels def get_val_wnid(): """Extracts the wnids from the val_annotations.txt file for every image in the val folder # Returns A dictionary where keys are the image names and values are the wnids """ file = open('tiny-imagenet-200/val/val_annotations.txt', 'r') wnid_labels = {} for f in file: f = f.split(' ') wnid_labels[f[0]] = f[1] file.close() return wnid_labels def load_labels(): """Gets wnids for every image and convert them to categorical # Returns train_wnid: A dictionary where keys are the training image names and values are the wnids val_wnid: A dictionary where keys are the validation image names and values are the wnids uniq_wnids: A list of all the wnids """ train_wnid = get_train_wnid() val_wnid = get_val_wnid() uniq_wnids = list(set(list(train_wnid.values()) + list(val_wnid.values()))) return train_wnid, val_wnid, uniq_wnids def load_images (folder, wnid_labels, uniq_wnids, train_val): """loads the images from a given folder # Arguments folder: directory where the images are stored wnid_labels: A dictionary where keys are the validation image names and values are the wnids uniq_wnids: A list of all 
the wnids # Returns images: A numpy array of the images image_names: A numpy array of the image names labels: A numpy array of the labels wnids: A numpy array of the wnids label_names: A numpy array of the label names """ print ('Loading {} images ... '.format(train_val), end='', flush=True) word_labels = get_word_labels() images = [] labels = [] wnids = [] label_names = [] image_names = [] for subdir, dirs, files in os.walk(folder): for filename in files: if filename.endswith(('.JPEG', '.jpeg', '.JPG', '.jpg', '.PNG', '.png')): img = Image.open(subdir + '/' + filename) np_img = np.array(img) if np_img.ndim == 2: np_img = np.dstack([np_img]*3) images.append(np_img) filename = filename.split("/")[-1] labels.append(uniq_wnids.index(wnid_labels[filename])) image_names.append(np.string_(filename)) wnids.append(np.string_(wnid_labels [filename])) label_names.append(np.string_(word_labels [wnid_labels[filename]])) img.close() # if (len(images)%5000) is 0: print ('{} imges processed'.format(len(images))) images = np.array(images) labels = np.array(labels) wnids = np.array(wnids) image_names = np.array(image_names) label_names = np.array(label_names) # print ('Image processing finished') print ('Done') return images, image_names, labels, wnids, label_names def h5_creator (filename, x, y, image_names=np.array([]), wnids=np.array([]), label_names=np.array([]) ): """Creates a H5 file and datasets with all the arguments. # Arguments filename: name of the h5 file images: A numpy array of the images image_names: A numpy array of the image names labels: A numpy array of the labels wnids: A numpy array of the wnids label_names: A numpy array of the label names """ print ('Creating {} ... '.format(filename), end='', flush=True) with h5py.File(filename, 'w') as hf: hf.create_dataset('x', compression="gzip", data=x) hf.create_dataset('y', compression="gzip", data=y) hf.create_dataset('image_names', compression="gzip", data=image_names) hf.create_dataset('label_names', compression="gzip", data=label_names) hf.create_dataset('wnids', compression="gzip", data=wnids) hf.close() print ('Done') def load_data(expanded=False): """Downloads the data loads all the images and the labels # Returns Tuple of Numpy arrays if expanded is True: (x_train, y_train, train_image_names, train_wnids, train_label_names), (x_val, y_val, val_image_names, val_wnids, val_label_names) if expanded is False: (x_train, y_train), (x_val, y_val) # Arguments expanded: Boolean, where to load expanded entities """ download_data() train_wnid_labels, val_wnid_labels, uniq_wnids = load_labels() x_val, val_image_names, y_val, val_wnids, val_label_names = load_images ('tiny-imagenet-200/val', val_wnid_labels, uniq_wnids, 'Validation') x_train, train_image_names, y_train, train_wnids, train_label_names = load_images ('tiny-imagenet-200/train', train_wnid_labels, uniq_wnids, 'Training') if expanded == False: return (x_train, y_train), (x_val, y_val) else: return (x_train, y_train, train_image_names, train_wnids, train_label_names), \ (x_val, y_val, val_image_names, val_wnids, val_label_names) def create_h5(expanded=True): if expanded == False: (x_train, y_train), (x_val, y_val) = load_data(expanded=False) h5_creator ('val.h5', x_val, y_val) h5_creator ('train.h5', x_train, y_train) else: (x_train, y_train, train_image_names, train_wnids, train_label_names), \ (x_val, y_val, val_image_names, val_wnids, val_label_names) = load_data(expanded=True) h5_creator ('val.h5', x_val, y_val, val_image_names, val_wnids, val_label_names) h5_creator ('train.h5', 
x_train, y_train, train_image_names, train_wnids, train_label_names) if __name__ == '__main__': create_h5()
python
import sys

# Expose the public API.
from ehrpreper.api import *

# Check major python version
if sys.version_info[0] < 3:
    raise Exception("Ehrpreper does not support Python 2. Please upgrade to Python 3.")
# Check minor python version
elif sys.version_info[1] < 6:
    raise Exception(
        "Ehrpreper only supports Python 3.6 and beyond. "
        "Use a later version of Python"
    )

# Set the version attribute of the library
import pkg_resources
import configparser

# Get the current version
config = configparser.ConfigParser()
config.read([pkg_resources.resource_filename("ehrpreper", "config.ini")])
__version__ = config.get("ehrpreper", "version")
python
__author__ = 'Jacques Saraydaryan'


class ColorRange():
    # Hue range boundaries (min_H..max_H) and a human-readable label for one colour band
    min_H = 0
    max_H = 0
    label = ''

    def getColor(self, minH, maxH, label):
        # Note: despite its name, this method stores the range rather than returning a colour
        self.min_H = minH
        self.max_H = maxH
        self.label = label
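# Hedged usage sketch (not part of the original module): the hue boundaries and
# label below are illustrative values, chosen only to show how the class is filled.
if __name__ == '__main__':
    red_range = ColorRange()
    red_range.getColor(0, 10, 'red')   # despite the name, this stores the range
    print(red_range.label, red_range.min_H, red_range.max_H)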
python
#! /usr/bin/env python
import rospy
import std_msgs.msg
from sensor_msgs.msg import Temperature

pub = rospy.Publisher('henri/temp_average', Temperature, queue_size=10)
average = 0
variance = 0


def callback(data):
    global average, variance, pub
    rospy.loginfo('Temperature Received: %f', data.temperature)
    # Running estimate: each new reading is blended 50/50 with the previous
    # estimate (an exponential smoothing, not a true arithmetic mean).
    average = (average + data.temperature) / 2
    variance = (variance + data.variance) / 2
    t = Temperature()
    h = std_msgs.msg.Header()
    h.stamp = rospy.Time.now()
    t.header = h
    t.temperature = average
    t.variance = variance
    pub.publish(t)


def listen_temp():
    rospy.init_node('temperature_monitor_py', anonymous=True)
    rospy.Subscriber('henri/temperature', Temperature, callback)
    rospy.spin()


if __name__ == '__main__':
    listen_temp()
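# Hedged companion sketch (would normally live in its own script): publishes
# synthetic readings on 'henri/temperature' so the monitor above has input.
# The 1 Hz rate and the starting value are illustrative assumptions.
def publish_fake_readings():
    fake_pub = rospy.Publisher('henri/temperature', Temperature, queue_size=10)
    rospy.init_node('temperature_faker_py', anonymous=True)
    rate = rospy.Rate(1)  # one reading per second
    reading = 20.0
    while not rospy.is_shutdown():
        msg = Temperature()
        msg.header.stamp = rospy.Time.now()
        msg.temperature = reading
        msg.variance = 0.5
        fake_pub.publish(msg)
        reading += 0.1
        rate.sleep()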
python
from flask import Flask
from config import config_options
from flask_sqlalchemy import SQLAlchemy
from flask_uploads import UploadSet, configure_uploads, IMAGES
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from flask_bootstrap import Bootstrap
from flask_simplemde import SimpleMDE
from flask_mail import Mail
from dotenv import load_dotenv
import os

load_dotenv()
MAIL_USERNAME = os.getenv("MAIL_USERNAME")
MAIL_PASSWORD = os.getenv("MAIL_PASSWORD")

db = SQLAlchemy()
photos = UploadSet('photos', IMAGES)
bcrypt = Bcrypt()
bootstrap = Bootstrap()
simple = SimpleMDE()
mail = Mail()

login_manager = LoginManager()
login_manager.login_view = 'auth.login'
login_manager.login_message_category = 'info'
login_manager.session_protection = 'strong'


def create_app(config_name):
    app = Flask(__name__)
    app.config.from_object(config_options[config_name])

    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)

    from .emails import email as email_blueprint
    app.register_blueprint(email_blueprint)

    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix='/authenticate')

    db.init_app(app)
    configure_uploads(app, photos)
    bcrypt.init_app(app)
    login_manager.init_app(app)
    bootstrap.init_app(app)
    simple.init_app(app)
    mail.init_app(app)

    return app
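# Hedged launcher sketch: how a factory like the one above is typically driven.
# The module name 'app' and the 'development' config key are assumptions about
# the project layout, not taken from the original code.
#
#   from app import create_app
#
#   app = create_app('development')
#
#   if __name__ == '__main__':
#       app.run(debug=True)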
python
# import numpy as np # import matplotlib.pyplot as plt # import cv2 # img = cv2.imread('8.jpeg',0) # dft = cv2.dft(np.float32(img),flags = cv2.DFT_COMPLEX_OUTPUT) # dft_shift = np.fft.fftshift(dft) # magnitude_spectrum = 20*np.log(cv2.magnitude(dft_shift[:,:,0],dft_shift[:,:,1])) # plt.subplot(121),plt.imshow(img, cmap = 'gray') # plt.title('Input Image'), plt.xticks([]), plt.yticks([]) # plt.subplot(122),plt.imshow(magnitude_spectrum, cmap = 'gray') # plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([]) # plt.show() import cv2 import numpy as np import matplotlib.pyplot as plt def fftImage(gray_img, row, col): rPadded = cv2.getOptimalDFTSize(row) cPadded = cv2.getOptimalDFTSize(col) imgPadded = np.zeros((rPadded, cPadded), np.float32) imgPadded[:row, :col] = gray_img fft_img = cv2.dft(imgPadded, flags=cv2.DFT_COMPLEX_OUTPUT) #输出为复数,双通道 return fft_img def amplitudeSpectrum(fft_img): real = np.power(fft_img[:, :, 0], 2.0) imaginary = np.power(fft_img[:, :, 1], 2.0) amplitude = np.sqrt(real+imaginary) return amplitude def graySpectrum(amplitude): amplitude = np.log(amplitude+1) spectrum = cv2.normalize(amplitude, 0, 1, cv2.NORM_MINMAX, dtype=cv2.CV_32F) spectrum *= 255 return spectrum def phaseSpectrum(fft_img): phase = np.arctan2(fft_img[:,:,1], fft_img[:, :, 0]) spectrum = phase*180/np.pi return spectrum # 图像矩阵乘(-1)^(r+c), 中心化 def stdFftImage(img_gray, row, col): fimg = np.copy(img_gray) fimg = fimg.astype(np.float32) for r in range(row): for c in range(col): if(r+c)%2: fimg[r][c] = -1*img_gray[r][c] fft_img = fftImage(fimg, row, col) amplitude = amplitudeSpectrum(fft_img) ampSpectrum = graySpectrum(amplitude) return ampSpectrum def GaussianHighFilter(image,d): f = np.fft.fft2(image) fshift = np.fft.fftshift(f) def make_transform_matrix(d): transmatrix = np.zeros(image.shape) center_point = tuple(map(lambda x:(x-1)/2,s1.shape)) for i in range(transmatrix.shape[0]): for j in range(transmatrix.shape[1]): def cal_distance(pa,pb): from math import sqrt dis = sqrt((pa[0]-pb[0])**2+(pa[1]-pb[1])**2) return dis dis = cal_distance(center_point,(i,j)) transmatrix[i,j] = 1-np.exp(-(dis**2)/(2*(d**2))) return transmatrix d_matrix = make_transform_matrix(d) out_img = np.abs(np.fft.ifft2(np.fft.ifftshift(fshift*d_matrix))) return out_img if __name__ == "__main__": img_gray = cv2.imread("8.jpeg", 0) row, col = img_gray.shape[:2] fft_img = fftImage(img_gray, row, col) amplitude = amplitudeSpectrum(fft_img) ampSpectrum = graySpectrum(amplitude) phaSpectrum = phaseSpectrum(fft_img) ampSpectrum_center = stdFftImage(img_gray, row, col) cv2.imshow("img_gray", img_gray) cv2.imshow("ampSpectrum", ampSpectrum) cv2.imshow("ampSpectrum_center", ampSpectrum_center) cv2.imshow("phaSpectrum", phaSpectrum) cv2.waitKey(0) cv2.destroyAllWindows() s1 = np.log(np.abs(fft_img)) img_d1 = GaussianHighFilter(img_gray,10) img_d2 = GaussianHighFilter(img_gray,30) img_d3 = GaussianHighFilter(img_gray,50) plt.subplot(131) plt.axis("off") plt.imshow(img_d1,cmap="gray") plt.title('D_10') plt.subplot(132) plt.axis("off") plt.title('D_30') plt.imshow(img_d2,cmap="gray") plt.subplot(133) plt.axis("off") plt.title("D_50") plt.imshow(img_d3,cmap="gray") plt.show()
python
from flask import json, render_template, g, abort from flask_login import current_user, login_required import urllib, json from thanados import app from thanados.models.entity import Data @app.route('/vocabulary/') def vocabulary(): hierarchytypes = app.config["HIERARCHY_TYPES"] systemtypes = app.config["SYSTEM_TYPES"] customtypes = app.config["CUSTOM_TYPES"] valuetypes = app.config["VALUE_TYPES"] alltypesused = list(set().union(hierarchytypes, systemtypes, customtypes, valuetypes)) parenttree = [] sql_list = """ SELECT name, id, name_path FROM ( SELECT name, id::INTEGER, path, name_path, left(path, strpos(path, ' >') -1)::INTEGER AS topparent FROM thanados.types_all WHERE path LIKE '%%>%%' UNION ALL SELECT name, id::INTEGER, path, name_path, PATH::INTEGER AS topparent FROM thanados.types_all WHERE path NOT LIKE '%%>%%' ORDER BY name_path) tp WHERE topparent IN %(list)s """ g.cursor.execute(sql_list, {'list': tuple(alltypesused)}) results = g.cursor.fetchall() Typelist = [] for row in results: Typelist.append({'label': row.name, 'path': row.name_path, 'id': row.id}) def makeparents(typelist, typeClass): for id in typelist: sql_tree = "SELECT name, id FROM thanados.types_all WHERE id = %(id)s ORDER BY name" g.cursor.execute(sql_tree, {'id': id}) results = g.cursor.fetchone() if results: node = { 'text': results.name, 'id': results.id, 'type': typeClass, 'class': 'treenode' } maketree(id, node, typeClass) parenttree.append(node) def maketree(id, node, typeClass): sql_tree = """ SELECT name, id FROM thanados.types_all WHERE parent_id = %(id)s ORDER BY name """ g.cursor.execute(sql_tree, {'id': id}) results = g.cursor.fetchall() if results: node['nodes'] = [] for row in results: currentnode = { 'text': row.name, # + getEntCount(row.id), 'id': row.id, 'type': typeClass, 'class': 'treenode' } node['nodes'].append(currentnode) maketree(row.id, currentnode, typeClass) tabsToCreate = ['Main classes', 'Types', 'Value types'] makeparents(hierarchytypes, 'Main classes') #makeparents(systemtypes, 'Standard') #uncomment to display system types makeparents(customtypes, 'Types') makeparents(valuetypes, 'Value types') # return json.dumps(parenttree) return render_template('vocabulary/vocabulary.html', tree=parenttree, tabsToCreate=tabsToCreate, typelist=Typelist) @app.route('/vocabulary/<int:object_id>') @app.route('/vocabulary/<int:object_id>/<format_>') def vocabulary_view(object_id: int, format_=None): object_id = object_id loc_image = app.config["API_FILE_DISPLAY"] use_api = app.config["USE_API"] use_jpgs = app.config["USE_JPGS"] if not use_api: if use_jpgs: loc_image = app.config["JPG_FOLDER_PATH"] + '/' else: loc_image = app.config["WEB_FOLDER_PATH"] + '/' if not object_id: return render_template('vocabulary/vocabulary.html') # get dataset for type entity sql_base = 'SELECT * FROM model.entity WHERE id = %(object_id)s;' g.cursor.execute(sql_base, {'object_id': object_id}) output_base = g.cursor.fetchone() sql_date = """ SELECT date_part('year', begin_from) AS begin_from, date_part('year', begin_to) AS begin_to, date_part('year', end_from) AS end_from, date_part('year', end_to) AS end_to FROM model.entity WHERE id = %(object_id)s; """ g.cursor.execute(sql_date, {'object_id': object_id}) output_date = g.cursor.fetchone() # check if exists if not output_base: abort(403) # check if type class CRMclass = output_base.cidoc_class_code if CRMclass not in ['E55']: abort(403) extrefs = """ SELECT jsonb_agg(jsonb_strip_nulls(jsonb_build_object( 'identifier', t.identifier, 'domain', t.name, 'website', t.website, 
'about', t.description, 'SKOS', t.skos, 'url', t.url, 'icon', r.icon_url ))) AS ext_types FROM thanados.ext_types t JOIN thanados.refsys r ON t.id = r.entity_id WHERE t.type_id = %(object_id)s; """ g.cursor.execute(extrefs, {'object_id': object_id}) extresult = g.cursor.fetchone() # get top parent sql_topparent = """ SELECT topparent FROM ( SELECT id::INTEGER, path, name_path, left(path, strpos(path, ' >') -1)::INTEGER AS topparent FROM thanados.types_all WHERE path LIKE '%%>%%' UNION ALL SELECT id::INTEGER, path, name_path, PATH::INTEGER AS topparent FROM thanados.types_all WHERE path NOT LIKE '%%>%%' ORDER BY name_path) tp WHERE id = %(object_id)s""" g.cursor.execute(sql_topparent, {'object_id': object_id}) topparent = g.cursor.fetchone().topparent g.cursor.execute('select name, description, id from model.entity WHERE id = %(object_id)s', {'object_id': topparent}) topparent = g.cursor.fetchone() sql_topparent_info = """ select e.name, e.description, e.id, h.multiple, h.category from model.entity e JOIN web.hierarchy h ON e.id = h.id WHERE e.id = %(topparent)s """ g.cursor.execute(sql_topparent_info, {'topparent': topparent.id}) result = g.cursor.fetchone() topparent = {} topparent['id'] = result.id topparent['name'] = result.name topparent['description'] = result.description if result.multiple: multi = 'multiple selection' else: multi = 'single selection' type = '' if result.category == 'standard': type = 'Classification' if result.category == 'value': type = 'Value type' elif result.category == 'custom': type = 'Type' topparent['selection'] = multi topparent['type'] = type topparent['forms'] = [] sql_forms = """ select openatlas_class_name as name FROM web.hierarchy_openatlas_class WHERE hierarchy_id = %(topparent)s """ g.cursor.execute(sql_forms, {'topparent': topparent['id']}) forms_used = g.cursor.fetchall() for row in forms_used: topparent['forms'].append(row.name) # get parent and path sql_path_parent = 'SELECT name_path, parent_id FROM thanados.types_all WHERE id = %(object_id)s;' g.cursor.execute(sql_path_parent, {'object_id': object_id}) output_path_parent = g.cursor.fetchone() # get name of parent sql_parentname = 'SELECT name FROM thanados.types_all WHERE id = %(object_id)s;' g.cursor.execute(sql_parentname, {'object_id': output_path_parent.parent_id}) output_parentname = g.cursor.fetchone() #define time time = {} if output_base.begin_from: time['earliest_begin'] = output_date.begin_from if output_base.begin_to: time['latest_begin'] = output_date.begin_to if output_base.end_from: time['earliest_end'] = output_date.end_from if output_base.end_to: time['latest_end'] = output_date.end_to # define json data = {} data['id'] = output_base.id data['name'] = output_base.name data['path'] = output_path_parent.name_path if output_base.description: data['description'] = output_base.description if output_path_parent.parent_id: data['parent'] = output_path_parent.parent_id data['parent_name'] = output_parentname.name if len(time) > 0: data['time'] = time credits = None license = None if extresult.ext_types: data['gazetteers'] = [] gazetteers = extresult.ext_types for row in gazetteers: if 'about' in row: about = row['about'] else: about = row['domain'] if row['website']: about = row['domain'] + ': ' + row['website'] if 'SKOS' in row: SKOS = row['SKOS'] else: SKOS = None extid = {'SKOS': SKOS, 'url': row['url'], 'about': about, 'domain': row['domain'], 'identifier': row['identifier']} if row['domain'] == 'Wikidata' and format_ != 'json': extid['description'] = 
Data.getWikidata(row['identifier'])['description'] extid['label'] = Data.getWikidata(row['identifier'])['label'] extid['image'] = Data.getWikidataimage(row['identifier']) if extid['image']: try: credits = extid['image']['metadata']['Artist']['value'] try: credits = credits + '<br>Credit: ' + extid['image']['metadata']['Credit']['value'] except KeyError: credits = extid['image']['metadata']['Artist']['value'] except KeyError: try: credits = extid['image']['metadata']['Credit']['value'] except KeyError: credits = 'Author unknown' try: license = '<a href="' + extid['image']['metadata']['LicenseUrl']['value'] + '" target="blank_">' try: license = license + extid['image']['metadata']['LicenseShortName']['value'] + '</a>' except KeyError: license = '' except KeyError: try: license = extid['image']['metadata']['LicenseShortName']['value'] except KeyError: license = '<a href="'+ extid['image']['origin'] +'">' + extid['image']['origin'] + '</a>' if row['icon']: extid['favicon'] = row['icon'] data['gazetteers'].append(extid) if row['domain'] == 'Getty AAT' and format_ != 'json': gettydata = Data.getGettyData(row['identifier']) extid['description'] = gettydata['description'] extid['label'] = gettydata['label'] extid['qualifier'] = gettydata['qualifier'] # get subtypes sql_children = 'SELECT id, name FROM thanados.types_all WHERE parent_id = %(object_id)s;' g.cursor.execute(sql_children, {'object_id': object_id}) output_children = g.cursor.fetchall() if output_children: data['children'] = [] for row in output_children: data['children'].append({'id': row.id, 'name': row.name}) # get files sql_files = """SELECT m.id FROM model.entity m JOIN model.link l ON m.id = l.domain_id WHERE l.range_id = %(object_id)s AND l.property_code = 'P67' AND m.openatlas_class_name = 'file' """ g.cursor.execute(sql_files, {'object_id': object_id}) output_files = g.cursor.fetchall() # get file license sql_filelicense = """ SELECT name AS license, name_path::TEXT, t.id::INTEGER AS licId, domain_id::INTEGER FROM thanados.types_all t JOIN model.link l ON t.id = l.range_id WHERE l.domain_id = %(file_id)s AND l.property_code = 'P2' AND t.name_path LIKE 'License >%%' """ # define files if output_files: data['files'] = [] # get file references sql_file_refs = """ SELECT r.description AS title, l.description AS reference FROM model.entity r JOIN model.link l ON r.id = l.domain_id WHERE l.range_id = %(file_id)s AND l.property_code = 'P67' """ for row in output_files: file_name = (Data.get_file_path(row.id)) print(file_name) file_id = (row.id) file = {'id': file_id, 'file_name': (loc_image + file_name)} g.cursor.execute(sql_file_refs, {'file_id': file_id}) output_file_refs = g.cursor.fetchone() g.cursor.execute(sql_filelicense, {'file_id': file_id}) output_filelicense = g.cursor.fetchone() if output_file_refs: if output_file_refs.title: file['source'] = output_file_refs.title if output_file_refs.reference: file['reference'] = output_file_refs.reference # add licence information if output_filelicense: file['license'] = output_filelicense.license file['licenseId'] = output_filelicense.licid data['files'].append(file) # get all subtypes recursively sql_subtypesrec = """ SELECT id from thanados.types_all WHERE path LIKE %(type_name)s OR path LIKE %(type_name2)s OR id = %(type_id)s """ entlist = [] g.cursor.execute(sql_subtypesrec, {'type_id': object_id, 'type_name': '%> ' + str(output_base.id) + ' >%', 'type_name2': str(output_base.id) + ' >%'}) output_subtypesrec = g.cursor.fetchall() if output_subtypesrec: data['types_recursive'] = [] 
for row in output_subtypesrec: data['types_recursive'].append(row.id) entlist.append(row.id) entlist = tuple(entlist) # get all entitites with this type sql_entities = """ SELECT child_id, child_name, maintype, type, type_id, min, lon, lat, context, filename, openatlas_class_name FROM thanados.searchdata s WHERE type_id IN %(type_id)s AND s.site_id IN %(site_ids)s """ g.cursor.execute(sql_entities, {'type_id': tuple([object_id]), 'site_ids': tuple(g.site_list)}) output_direct_ents = g.cursor.fetchall() if output_direct_ents: data['entities'] = [] for row in output_direct_ents: data['entities'].append({'id': row.child_id, 'name': row.child_name, 'main_type': row.maintype, 'type': row.type, 'type_id': row.type_id, 'value': row.min, 'lon': row.lon, 'lat': row.lat, 'context': row.context, 'file': row.filename, 'openatlas_class_name': row.openatlas_class_name}) g.cursor.execute(sql_entities, {'type_id': entlist, 'site_ids': tuple(g.site_list)}) output_direct_ents = g.cursor.fetchall() if output_direct_ents: data['entities_recursive'] = [] for row in output_direct_ents: data['entities_recursive'].append({'id': row.child_id, 'name': row.child_name, 'main_type': row.maintype, 'type': row.type, 'type_id': row.type_id, 'value': row.min, 'lon': row.lon, 'lat': row.lat, 'context': row.context, 'file': row.filename, 'openatlas_class_name': row.openatlas_class_name}) # get type tree def getchildren(id, node): sql_getChildren = """ SELECT name, id FROM thanados.types_all WHERE parent_id = %(id)s ORDER BY name """ g.cursor.execute(sql_getChildren, {'id': id}) results = g.cursor.fetchall() if results: node['nodes'] = [] for row in results: currentnode = {'text': row.name, 'class': 'treenode', 'href': '/vocabulary/%r' % row.id, 'openNodeLinkOnNewTab': False} node['nodes'].append(currentnode) getchildren(row.id, currentnode) tree = [{ 'text': data['name'], 'class': 'toptreenode' }] getchildren(object_id, tree[0]) hierarchy = {} currentcolor = '#97C2FC' if object_id == topparent['id']: currentcolor = '#ff8c8c' alltreeNodes = [{'id': topparent['id'], 'label': topparent['name'], 'color' : currentcolor}] alltreeEdges = [] def getTree(id): sql_getChildren = """ SELECT DISTINCT name, id FROM thanados.types_all WHERE parent_id = %(id)s ORDER BY name """ g.cursor.execute(sql_getChildren, {'id': id}) results = g.cursor.fetchall() if results: for row in results: currentcolor = '#97C2FC'; if row.id == object_id: currentcolor= '#ff8c8c' currentnode = {'id': row.id, 'label': row.name, 'color' : currentcolor} currentedge = {'from': id, 'to': row.id, 'color': '#757575'} alltreeNodes.append(currentnode) alltreeEdges.append(currentedge) getTree(row.id) getTree(topparent['id']) hierarchy['nodes'] = alltreeNodes hierarchy['edges'] = alltreeEdges data['topparent'] = topparent data['tree'] = tree data['hierarchy'] = hierarchy if format_ == 'json': return json.dumps(data) if object_id: return render_template('vocabulary/view.html', object_id=object_id, data=data, children=len(output_children), credit=credits, license=license, children_recursive=len(entlist), webfolder=app.config["WEB_FOLDER_PATH"])
python
# TensorFlow and tf.keras import tensorflow as tf # Helper libraries import numpy as np import matplotlib.pyplot as plt # Display the image, labeled with the predicted label (blue if accurate to true label, red if not) def plot_image(i, predictions_array, true_label, img): true_label, img = true_label[i], img[i] plt.grid(False) plt.xticks([]) plt.yticks([]) plt.imshow(img, cmap=plt.cm.binary) predicted_label = np.argmax(predictions_array) if predicted_label == true_label: color = 'blue' else: color = 'red' plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label], 100*np.max(predictions_array), class_names[true_label]), color=color) # Bar graph of the full set of 10 class predictions for Model Predictions. # Color the predicted label red and the true label blue (override predicted label red if accurate). def plot_value_array(i, predictions_array, true_label): true_label = true_label[i] plt.grid(False) plt.xticks(range(10)) plt.yticks([]) thisplot = plt.bar(range(10), predictions_array, color="#777777") plt.ylim([0, 1]) predicted_label = np.argmax(predictions_array) thisplot[predicted_label].set_color('red') thisplot[true_label].set_color('blue') print(tf.__version__) # --------------- Import and load the Fashion MNIST data directly from TensorFlow ---------------- fashion_mnist = tf.keras.datasets.fashion_mnist (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data() # label numbers correspond to their respective classes of clothing class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] # ------------------------ Explore the format of the dataset ---------------------- # (60000, 28, 28) --> 60,000 images in the training set, with each image represented as 28 x 28 pixels print(train_images.shape) # 60000 --> there are 60,000 corresponding labels in the training set print(len(train_labels)) # array([9, 0, 0, ..., 3, 0, 5], dtype=uint8) --> each label is a number between 0 and 9 print(train_labels) # (10000, 28, 28) --> 10,000 images in the test set, each image is represented as 28 x 28 pixels print(test_images.shape) # 10000 --> test set contains 10,000 images labels print(len(test_labels)) # ------------------------------ Preprocess the data ---------------------------- # pixel values fall in the range of 0 to 255 # Scale these values to a range of 0 to 1 before feeding them to the neural network model train_images = train_images / 255.0 test_images = test_images / 255.0 # verify data is in correct format and that you're ready to build and train the network # display the first 25 images from the training set and display the class name below each image plt.figure(figsize=(10,10)) for i in range(25): plt.subplot(5,5,i+1) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(train_images[i], cmap=plt.cm.binary) plt.xlabel(class_names[train_labels[i]]) plt.show() # ------------------------------- Build and train the model --------------------------------- # set up the layers, which extract representations from the data fed into them model = tf.keras.Sequential([ # the Flatten layer transforms the format of the images from a two-dimensional array # (of 28 by 28 pixels) to a one-dimensional array (of 28 * 28 = 784 pixels) tf.keras.layers.Flatten(input_shape=(28, 28)), # two densely connected, or fully connected, neural layers tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(10) ]) # compile the model and add a few more settings model.compile(optimizer='adam', 
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) # train the model by feeding it the training data # as the model trains, the loss and accuracy metrics are displayed model.fit(train_images, train_labels, epochs=10) # evaluate accuracy against the test data test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2) print('\nTest accuracy:', test_acc) # ------------------------------- Make Predictions from Model --------------------------------- # attach a softmax layer to convert the model's linear outputs—logits—to probabilities probability_model = tf.keras.Sequential([model, tf.keras.layers.Softmax()]) # predict the label for each image in the testing set into a prediction array of 10 numbers predictions = probability_model.predict(test_images) # Plot the first X test images, their predicted labels, and the true labels. num_rows = 5 num_cols = 3 num_images = num_rows*num_cols plt.figure(figsize=(2*2*num_cols, 2*num_rows)) # MODIFICATION: for i in range(num_images): for i in range(9000,9015): # MODIFICATION: plt.subplot(num_rows, 2*num_cols, 2*i+1) plt.subplot(num_rows, 2*num_cols, 2*(i - 9000)+1) plot_image(i, predictions[i], test_labels, test_images) # MODIFICATION: plt.subplot(num_rows, 2*num_cols, 2*i+2) plt.subplot(num_rows, 2*num_cols, 2*(i-9000)+2) plot_value_array(i, predictions[i], test_labels) plt.tight_layout() plt.show() # ----------------------------------- Use the Trained Model ------------------------------------- # Finally, use the trained model to make a prediction about a single image. # Grab an image from the test dataset. img = test_images[1] # (28, 28) print(img.shape) # Add the image to a batch where it's the only member. # tf.keras models are optimized to make predictions on a batch, or collection, of examples at once img = (np.expand_dims(img,0)) # (1, 28, 28) print(img.shape) # now predict the correct label for this image predictions_single = probability_model.predict(img) # [[8.26038831e-06 1.10213664e-13 9.98591125e-01 1.16777841e-08 1.29609776e-03 2.54965649e-11 # 1.04560357e-04 7.70050608e-19 4.55051066e-11 3.53864888e-17]] print(predictions_single) plot_value_array(1, predictions_single[0], test_labels) _ = plt.xticks(range(10), class_names, rotation=45) plt.show()
python
import os import os.path as osp import torch from torch.utils.data import Dataset from torch.utils.data.dataloader import default_collate from torchvision.transforms import functional as F import numpy as np import numpy.linalg as LA import cv2 import json import csv import matplotlib.pyplot as plt from pylsd import lsd import datasets.transforms as T def center_crop(img): sz = img.shape[0:2] side_length = np.min(sz) if sz[0] > sz[1]: ul_x = 0 ul_y = int(np.floor((sz[0]/2) - (side_length/2))) x_inds = [ul_x, sz[1]-1] y_inds = [ul_y, ul_y + side_length - 1] else: ul_x = int(np.floor((sz[1]/2) - (side_length/2))) ul_y = 0 x_inds = [ul_x, ul_x + side_length - 1] y_inds = [ul_y, sz[0]-1] c_img = img[y_inds[0]:y_inds[1]+1, x_inds[0]:x_inds[1]+1, :] return c_img def create_masks(image): masks = torch.zeros((1, height, width), dtype=torch.uint8) return masks def filter_length(segs, min_line_length=10): lengths = LA.norm(segs[:,2:4] - segs[:,:2], axis=1) segs = segs[lengths > min_line_length] return segs[:,:4] def normalize_segs(segs, pp, rho): pp = np.array([pp[0], pp[1], pp[0], pp[1]], dtype=np.float32) return rho*(segs - pp) def normalize_safe_np(v, axis=-1, eps=1e-6): de = LA.norm(v, axis=axis, keepdims=True) de = np.maximum(de, eps) return v/de def segs2lines_np(segs): ones = np.ones(len(segs)) ones = np.expand_dims(ones, axis=-1) p1 = np.concatenate([segs[:,:2], ones], axis=-1) p2 = np.concatenate([segs[:,2:], ones], axis=-1) lines = np.cross(p1, p2) return normalize_safe_np(lines) def sample_segs_np(segs, num_sample, use_prob=True): num_segs = len(segs) sampled_segs = np.zeros([num_sample, 4], dtype=np.float32) mask = np.zeros([num_sample, 1], dtype=np.float32) if num_sample > num_segs: sampled_segs[:num_segs] = segs mask[:num_segs] = np.ones([num_segs, 1], dtype=np.float32) else: lengths = LA.norm(segs[:,2:] - segs[:,:2], axis=-1) prob = lengths/np.sum(lengths) idxs = np.random.choice(segs.shape[0], num_sample, replace=True, p=prob) sampled_segs = segs[idxs] mask = np.ones([num_sample, 1], dtype=np.float32) return sampled_segs, mask def sample_vert_segs_np(segs, thresh_theta=22.5): lines = segs2lines_np(segs) (a,b) = lines[:,0],lines[:,1] theta = np.arctan2(np.abs(b),np.abs(a)) thresh_theta = np.radians(thresh_theta) return segs[theta < thresh_theta] class ImageDataset(Dataset): def __init__(self, cfg, image_path, return_masks=False, transform=None): self.input_width = cfg.DATASETS.INPUT_WIDTH self.input_height = cfg.DATASETS.INPUT_HEIGHT self.min_line_length = cfg.DATASETS.MIN_LINE_LENGTH self.num_input_lines = cfg.DATASETS.NUM_INPUT_LINES self.num_input_vert_lines = cfg.DATASETS.NUM_INPUT_VERT_LINE self.vert_line_angle = cfg.DATASETS.VERT_LINE_ANGLE self.return_vert_lines = cfg.DATASETS.RETURN_VERT_LINES self.return_masks = return_masks self.transform = transform self.list_filename = [image_path,] def __getitem__(self, idx): target = {} extra = {} filename = self.list_filename[idx] image = cv2.imread(filename) assert image is not None, print(filename) image = image[:,:,::-1] # convert to rgb org_image = image org_h, org_w = image.shape[0], image.shape[1] org_sz = np.array([org_h, org_w]) crop_image = center_crop(org_image) crop_h, crop_w = crop_image.shape[0], crop_image.shape[1] crop_sz = np.array([crop_h, crop_w]) image = cv2.resize(image, dsize=(self.input_width, self.input_height)) input_sz = np.array([self.input_height, self.input_width]) # preprocess ratio_x = float(self.input_width)/float(org_w) ratio_y = float(self.input_height)/float(org_h) pp = (org_w/2, org_h/2) rho = 
2.0/np.minimum(org_w,org_h) # detect line and preprocess gray = cv2.cvtColor(org_image, cv2.COLOR_BGR2GRAY) org_segs = lsd(gray, scale=0.5) org_segs = filter_length(org_segs, self.min_line_length) num_segs = len(org_segs) assert len(org_segs) > 10, print(len(org_segs)) segs = normalize_segs(org_segs, pp=pp, rho=rho) # whole segs sampled_segs, line_mask = sample_segs_np( segs, self.num_input_lines) sampled_lines = segs2lines_np(sampled_segs) # vertical directional segs vert_segs = sample_vert_segs_np(segs, thresh_theta=self.vert_line_angle) if len(vert_segs) < 2: vert_segs = segs sampled_vert_segs, vert_line_mask = sample_segs_np( vert_segs, self.num_input_vert_lines) sampled_vert_lines = segs2lines_np(sampled_vert_segs) if self.return_masks: masks = create_masks(image) image = np.ascontiguousarray(image) if self.return_vert_lines: target['segs'] = torch.from_numpy(np.ascontiguousarray(sampled_vert_segs)).contiguous().float() target['lines'] = torch.from_numpy(np.ascontiguousarray(sampled_vert_lines)).contiguous().float() target['line_mask'] = torch.from_numpy(np.ascontiguousarray(vert_line_mask)).contiguous().float() else: target['segs'] = torch.from_numpy(np.ascontiguousarray(sampled_segs)).contiguous().float() target['lines'] = torch.from_numpy(np.ascontiguousarray(sampled_lines)).contiguous().float() target['line_mask'] = torch.from_numpy(np.ascontiguousarray(line_mask)).contiguous().float() if self.return_masks: target['masks'] = masks target['org_img'] = org_image target['org_sz'] = org_sz target['crop_sz'] = crop_sz target['input_sz'] = input_sz target['img_path'] = filename target['filename'] = filename extra['lines'] = target['lines'].clone() extra['line_mask'] = target['line_mask'].clone() return self.transform(image, extra, target) def __len__(self): return len(self.list_filename) def make_transform(): return T.Compose([ T.ToTensor(), T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]) def build_image(image_path, cfg): dataset = ImageDataset(cfg, image_path, return_masks=cfg.MODELS.MASKS, transform=make_transform()) return dataset
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: mcxiaoke
# @Date: 2015-07-10 14:13:05
import os
import sys
from os import path
import re
import tempfile
import shutil
import time

'''
Clean IDEA project files.
param: max_depth -> maximum directory depth to recurse into, default=3
param: permanently -> delete permanently instead of moving to a backup dir under the system tmp dir, default=False
'''


def clean(start_dir, max_depth=3, permanently=False):
    idea_pattern = re.compile(r'.*\.iml|build$|\.idea')
    deleted = []
    backup_dir_name = 'clean_idea_backup_%s' % str(time.time())
    backup_dir = path.join(tempfile.gettempdir(), backup_dir_name)
    if not permanently and not path.isdir(backup_dir):
        # the backup directory must exist before files can be moved into it
        os.makedirs(backup_dir)
    for root, dirs, files in os.walk(start_dir, topdown=True):
        # do not descend into .git directories
        if '.git' in dirs:
            dirs.remove('.git')
        level = root.replace(start_dir, '').count(os.sep)
        if level >= max_depth:
            continue
        for name in dirs + files:
            # print('--> %s' % path.join(root, name).replace(start_dir, ' '))
            if idea_pattern.match(name):
                # os.renames()
                file = path.join(root, name)
                deleted.append(file)
                try:
                    if permanently:
                        if path.isfile(file):
                            os.remove(file)
                        else:
                            shutil.rmtree(file)
                    else:
                        shutil.move(file, path.join(backup_dir, name))
                    print("delete %s" % file)
                except shutil.Error as e:
                    print('delete error: %s' % e)
    if deleted:
        print('cleaned in %s' % start_dir)
        print('backup to %s' % backup_dir)
    else:
        print('no idea files in %s' % start_dir)


if __name__ == '__main__':
    usage = '''Usage: %s dir
    Be careful, this script will remove all files and directories named .idea/*.iml/build
    ''' % path.basename(sys.argv[0])
    if len(sys.argv) < 2:
        print(usage)
        sys.exit(1)
    start = path.abspath(sys.argv[1])
    clean(start)
python
import os

from .handler import QuickOpenHandler
from ._version import get_versions
from notebook.utils import url_path_join

__version__ = get_versions()['version']
del get_versions


def _jupyter_server_extension_paths():
    """Defines the entrypoint for the Jupyter server extension."""
    return [{
        "module": "jupyterlab_quickopen"
    }]


def load_jupyter_server_extension(nb_app):
    """Registers the quick open API handler to receive HTTP requests from the
    frontend extension.

    Parameters
    ----------
    nb_app: notebook.notebookapp.NotebookApp
        Notebook application instance
    """
    if (not os.path.isdir(nb_app.notebook_dir)
            or nb_app.contents_manager.root_dir != nb_app.notebook_dir):
        nb_app.log.info(f'Refusing to register QuickOpenHandler extension: '
                        f'{nb_app.contents_manager} does not appear to load from the local filesystem')
        return
    web_app = nb_app.web_app
    host_pattern = '.*$'
    route_pattern = url_path_join(web_app.settings['base_url'], '/api/quickopen')
    web_app.add_handlers(host_pattern, [
        (route_pattern, QuickOpenHandler)
    ])
    nb_app.log.info(f'Registered QuickOpenHandler extension at URL path {route_pattern} '
                    f'to serve results of scanning local path {nb_app.notebook_dir}')
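# Hedged setup note: with the classic Notebook server, a server extension packaged
# like this is usually enabled with a command of the form
#   jupyter serverextension enable --py jupyterlab_quickopen
# (the exact installation steps for this project may differ; consult its README).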
python
from abc import ABC
from typing import Type

from bokeh.models.glyph import Glyph
from bokeh.models.renderers import GlyphRenderer

from xbokeh.common.assertions import assert_type


class Renderer(ABC):
    def __init__(self, type_: Type, renderer: GlyphRenderer) -> None:
        """
        :param type_: expected Glyph subclass of the wrapped renderer
        :param renderer: instance of GlyphRenderer; its data_source holds the data dict,
            e.g. data = {'x': [1, 2, 3, 4], 'y': np.array([10.0, 20.0, 30.0, 40.0])}
        """
        super().__init__()
        assert_type(renderer, "renderer", GlyphRenderer)
        assert_type(renderer.glyph, "renderer.glyph", type_)
        assert_type(renderer.data_source.data, "self._renderer.data_source.data", dict)

        self._renderer = renderer
        self._glyph: Glyph = renderer.glyph

    @property
    def data(self) -> dict:
        return self._renderer.data_source.data

    def set_data(self, data: dict):
        assert_type(data, "data", dict)
        self._renderer.data_source.data = data

    def set_property(self, **kwargs):
        """Updates the glyph's properties."""
        self._glyph.update(**kwargs)

    def clear(self):
        self.set_data({"x": [], "y": []})
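# Hedged usage sketch: wrap a Bokeh line renderer in the class above. The
# LineRenderer subclass and the sample data are illustrative, not part of the
# original module.
from bokeh.models.glyphs import Line
from bokeh.plotting import figure


class LineRenderer(Renderer):
    def __init__(self, renderer: GlyphRenderer) -> None:
        super().__init__(Line, renderer)


fig = figure()
glyph_renderer = fig.line(x=[1, 2, 3, 4], y=[10.0, 20.0, 30.0, 40.0])
line = LineRenderer(glyph_renderer)
line.set_property(line_width=2, line_color="navy")   # forwarded to Glyph.update
line.set_data({"x": [1, 2, 3], "y": [3.0, 2.0, 1.0]})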
python
from math import log

from utils import iter_primes

__author__ = 'rafa'


def algorithm(limit):
    # lcm(1..limit): for each prime p <= limit multiply in the largest power of p
    # that does not exceed limit, i.e. p**floor(log_p(limit)).
    n = 1
    for p in iter_primes():
        if p > limit:
            return n
        exponent = int(log(limit, p))
        n *= p**exponent


def solver():
    """
    2520 is the smallest number that can be divided by each of the numbers
    from 1 to 10 without any remainder.
    What is the smallest positive number that is evenly divisible by all of
    the numbers from 1 to 20?
    """
    assert algorithm(10) == 2520
    return algorithm(20)
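# Worked check of the prime-power idea for limit = 10 (values follow from the code above):
#   2: floor(log2 10) = 3 -> 2**3 = 8
#   3: floor(log3 10) = 2 -> 3**2 = 9
#   5: floor(log5 10) = 1 -> 5
#   7: floor(log7 10) = 1 -> 7
#   lcm(1..10) = 8 * 9 * 5 * 7 = 2520, matching the assertion in solver().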
python
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np


def plot_time_series(x: np.ndarray, title=None) -> None:
    sns.set(font_scale=1.5)
    sns.set_style("white")

    t = np.arange(start=0, stop=x.shape[0])
    plt.plot(t, x, linestyle='-', marker='o')
    plt.title(title)
    plt.xlabel(r'$t$')
    plt.ylabel(r'$x_t$')
    plt.show()
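# Hedged usage sketch: plot a short synthetic random walk. The series below is
# illustrative; any 1-D numpy array works.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    series = np.cumsum(rng.normal(size=50))
    plot_time_series(series, title='Random walk')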
python
from __future__ import print_function import argparse import os import sys import time import random import string from typing import getch import torch import torch.nn as nn from torch.autograd import Variable from char_rnn import CharRNN class ProgressBar(object): def __init__(self, total=100, stream=sys.stderr): self.total = total self.stream = stream self.last_len = 0 self.curr = 0 def count(self): self.curr += 1 self.print_progress(self.curr) def print_progress(self, value): self.stream.write('\b' * self.last_len) self.curr = value pct = 100 * self.curr / self.total out = '{:.2f}% [{}/{}] \r'.format(pct, self.curr, self.total) self.last_len = len(out) self.stream.write(out) self.stream.flush() def random_training_set(chunk_len, batch_size, file, args): ''' TODO: Convert to stateful LSTM with more features ''' inp = torch.LongTensor(batch_size, chunk_len) target = torch.LongTensor(batch_size, chunk_len) file_len = len(file) for bi in range(batch_size): start_index = random.randint(0, file_len - chunk_len) end_index = start_index + chunk_len + 1 chunk = file[start_index:end_index] if args.debug: print ('chunk', chunk) inp[bi] = char_tensor(chunk[:-1]) target[bi] = char_tensor(chunk[1:]) inp = Variable(inp) target = Variable(target) if args.cuda: inp = inp.cuda() target = target.cuda() if args.debug: print (inp, target) return inp, target def train_on_batch(inp, target, args): hidden = decoder.init_hidden(args.batch_size) if args.cuda: hidden = hidden.cuda() decoder.zero_grad() loss = 0 for c in range(args.chunk_len): output, hidden = decoder(inp[:,c], hidden) loss += criterion(output.view(args.batch_size, -1), target[:,c]) loss.backward() decoder_optimizer.step() return loss.data[0] / args.chunk_len def save(args): save_filename = os.path.splitext(os.path.basename(args.filename))[0] + '.pt' torch.save(decoder, save_filename) print('Saved as %s' % save_filename) class Generator(object): ''' Class to encapsulate generator functionality ''' def __init__(self, decoder): self.decoder = decoder def generate(self, *args, **kwargs): raise NotImplementedError class SimpleGenerator(Generator): def generate(self, prime_str='int ', predict_len=100, temperature=0.1, cuda=False, args=None, hidden=None): prime_input = Variable(char_tensor(prime_str).unsqueeze(0)) if not hidden: hidden = decoder.init_hidden(1) prime_input = Variable(char_tensor(prime_str).unsqueeze(0)) if cuda: hidden = hidden.cuda() prime_input = prime_input.cuda() # Use priming string to "build up" hidden state for p in range(len(prime_str) - 1): _, hidden = decoder(prime_input[:,p], hidden) predicted = '' inp = prime_input[:,-1] p_list = [] for p in range(predict_len): output, hidden = decoder(inp, hidden) # Sample from the network as a multinomial distribution output_dist = output.data.view(-1).div(temperature).exp() top_i = torch.multinomial(output_dist, 1)[0] p_list.append(top_i) # Add predicted character to string and use as next input predicted_char = all_characters[top_i] predicted += predicted_char inp = Variable(char_tensor(predicted_char).unsqueeze(0)) if cuda: inp = inp.cuda() # print (p_list) return predicted, hidden def generate(decoder, prime_str='int ', predict_len=100, temperature=0.35, cuda=False, args=None, hidden=None): prime_input = Variable(char_tensor(prime_str).unsqueeze(0)) if not hidden: hidden = decoder.init_hidden(1) prime_input = Variable(char_tensor(prime_str).unsqueeze(0)) if cuda: hidden = hidden.cuda() prime_input = prime_input.cuda() # Use priming string to "build up" hidden state for p in 
range(len(prime_str) - 1): _, hidden = decoder(prime_input[:,p], hidden) predicted = '' inp = prime_input[:,-1] p_list = [] for p in range(predict_len): output, hidden = decoder(inp, hidden) # Sample from the network as a multinomial distribution output_dist = output.data.view(-1).div(temperature).exp() top_i = torch.multinomial(output_dist, 1)[0] p_list.append(top_i) # Add predicted character to string and use as next input predicted_char = all_characters[top_i] predicted += predicted_char inp = Variable(char_tensor(predicted_char).unsqueeze(0)) if cuda: inp = inp.cuda() # print (p_list) return predicted, hidden def generate_token(decoder, prime_str='int ', temperature=0.35, cuda=False, args=None, init_hidden=None): prime_input = Variable(char_tensor(prime_str).unsqueeze(0)) if not init_hidden: hidden = decoder.init_hidden(1) prime_input = Variable(char_tensor(prime_str).unsqueeze(0)) if cuda: hidden = hidden.cuda() prime_input = prime_input.cuda() # Use priming string to "build up" hidden state for p in range(len(prime_str) - 1): _, hidden = decoder(prime_input[:,p], hidden) init_hidden = hidden init_inp = prime_input[:,-1] is_good = False while (not is_good): is_good = True predicted = '' p_list = [] hidden = init_hidden inp = init_inp stopped = False while (not stopped): print ('generate_token', inp [:10], hidden [:10]) output, hidden = decoder(inp, hidden) print ('output', output[:10]) raise Exception # Sample from the network as a multinomial distribution output_dist = output.data.view(-1).div(temperature).exp() top_i = torch.multinomial(output_dist, 1)[0] try: if top_i == p_list[-1] and top_i == p_list[-2]: is_good = False except: pass p_list.append(top_i) # Add predicted character to string and use as next input predicted_char = all_characters[top_i] if predicted_char in string.whitespace: stopped = True predicted += predicted_char print ('predicted', predicted) inp = Variable(char_tensor(predicted_char).unsqueeze(0)) if cuda: inp = inp.cuda() if len(predicted) > 15: is_good = False # print (p_list) return predicted, hidden # Initialize models and start training def build_parser(): argparser = argparse.ArgumentParser() argparser.add_argument('--filename', type=str) argparser.add_argument('--n_epochs', type=int, default=2000) argparser.add_argument('--print_every', type=int, default=1) argparser.add_argument('--hidden_size', type=int, default=256) argparser.add_argument('--n_layers', type=int, default=3) argparser.add_argument('--learning_rate', type=float, default=0.01) argparser.add_argument('--chunk_len', type=int, default=100) argparser.add_argument('--batch_size', type=int, default=64) argparser.add_argument('--cuda', action='store_true') argparser.add_argument('--debug', default=False) argparser.add_argument('--type', default=False, action='store_true') args = argparser.parse_args() if args.cuda: print("Using CUDA") return args def read_file(filename): file = open(file) return file, len(file) def char_tensor(string): tensor = torch.zeros(len(string)).long() for c in range(len(string)): try: tensor[c] = all_characters.index(string[c]) except: continue return tensor if __name__ == '__main__': args = build_parser() SYMBOL_TABLE = os.path.join('../saved_model', 'vocab.sym') if args.type and os.path.exists(SYMBOL_TABLE): all_characters = list(set(open(SYMBOL_TABLE).read())) else: file = open(args.filename).read() print('Loaded file', args.filename) print('File length', len(file)/80, 'lines') all_characters = list(set(file)) with open(SYMBOL_TABLE, 'w') as vocab: 
print("".join(all_characters), file=vocab) n_characters = len(all_characters) decoder = CharRNN(n_characters, args.hidden_size, n_characters, n_layers=args.n_layers) decoder_optimizer = torch.optim.Adam(decoder.parameters(), lr=args.learning_rate) criterion = nn.CrossEntropyLoss() if args.type: # Enter typing mode print ('Typing Mode...') decoder = torch.load('../saved_model/linux.pt') from typing import build_getch with build_getch() as getch: try: getchar = getch() hidden = None generator = SimpleGenerator(decoder) prime_text = 'struct' sys.stdout.write(prime_text) while(getchar!='~'): #output_text, hidden = generate(decoder, prime_text, 20, # cuda=args.cuda, args=args, # hidden=hidden) output_text, hidden = generator.generate(prime_text, 20, cuda=args.cuda, args=args, hidden=hidden) sys.stdout.write(output_text) prime_text += output_text getchar = getch() if len(prime_text) > 100: prime_text = prime_text[-100:] getch.reset() except (KeyboardInterrupt, Exception) as e: getch.reset() print (e.message) raise e raise Exception('Exit!') else: # Train model if args.cuda: decoder.cuda() start = time.time() all_losses = [] loss_avg = 0 try: SAMPLES_PER_EPOCH = 10000 total_samples = 0 print("Training for %d epochs..." % args.n_epochs) for epoch in range(1, args.n_epochs + 1): samples_processed = 0 progress_bar = ProgressBar(SAMPLES_PER_EPOCH) while(samples_processed) < SAMPLES_PER_EPOCH: inp, target = random_training_set(args.chunk_len, args.batch_size, file, args) loss = train_on_batch(inp, target, args) samples_processed += args.batch_size progress_bar.print_progress(samples_processed) total_samples += samples_processed if epoch % args.print_every == 0: def time_since(start): return time.time() - start print('[elapsed : %s epoch (%d %d%%) loss%.4f]' % \ (time_since(start), epoch, epoch / args.n_epochs * 100, loss_avg/float(samples_processed))) text, hidden = generate(decoder, 'int', 1000, cuda=args.cuda, args=args) print(text) print("Epoch {} : Saving...".format(epoch)) save(args) except KeyboardInterrupt: print("Saving before quit...") save(args)
python
from zipline.errors import UnsupportedPipelineOutput from zipline.utils.input_validation import ( expect_element, expect_types, optional, ) from .domain import Domain, GENERIC, infer_domain from .graph import ExecutionPlan, TermGraph, SCREEN_NAME from .filters import Filter from .term import AssetExists, ComputableTerm, Term class Pipeline: """ A Pipeline object represents a collection of named expressions to be compiled and executed by a PipelineEngine. A Pipeline has two important attributes: 'columns', a dictionary of named :class:`~zipline.pipeline.Term` instances, and 'screen', a :class:`~zipline.pipeline.Filter` representing criteria for including an asset in the results of a Pipeline. To compute a pipeline in the context of a TradingAlgorithm, users must call ``attach_pipeline`` in their ``initialize`` function to register that the pipeline should be computed each trading day. The most recent outputs of an attached pipeline can be retrieved by calling ``pipeline_output`` from ``handle_data``, ``before_trading_start``, or a scheduled function. Parameters ---------- columns : dict, optional Initial columns. screen : zipline.pipeline.Filter, optional Initial screen. """ __slots__ = ('_columns', '_screen', '_domain', '__weakref__') @expect_types( columns=optional(dict), screen=optional(Filter), domain=Domain ) def __init__(self, columns=None, screen=None, domain=GENERIC): if columns is None: columns = {} validate_column = self.validate_column for column_name, term in columns.items(): validate_column(column_name, term) if not isinstance(term, ComputableTerm): raise TypeError( "Column {column_name!r} contains an invalid pipeline term " "({term}). Did you mean to append '.latest'?".format( column_name=column_name, term=term, ) ) self._columns = columns self._screen = screen self._domain = domain @property def columns(self): """The output columns of this pipeline. Returns ------- columns : dict[str, zipline.pipeline.ComputableTerm] Map from column name to expression computing that column's output. """ return self._columns @property def screen(self): """ The screen of this pipeline. Returns ------- screen : zipline.pipeline.Filter or None Term defining the screen for this pipeline. If ``screen`` is a filter, rows that do not pass the filter (i.e., rows for which the filter computed ``False``) will be dropped from the output of this pipeline before returning results. Notes ----- Setting a screen on a Pipeline does not change the values produced for any rows: it only affects whether a given row is returned. Computing a pipeline with a screen is logically equivalent to computing the pipeline without the screen and then, as a post-processing-step, filtering out any rows for which the screen computed ``False``. """ return self._screen @expect_types(term=Term, name=str) def add(self, term, name, overwrite=False): """Add a column. The results of computing ``term`` will show up as a column in the DataFrame produced by running this pipeline. Parameters ---------- column : zipline.pipeline.Term A Filter, Factor, or Classifier to add to the pipeline. name : str Name of the column to add. overwrite : bool Whether to overwrite the existing entry if we already have a column named `name`. """ self.validate_column(name, term) columns = self.columns if name in columns: if overwrite: self.remove(name) else: raise KeyError(f"Column '{name}' already exists.") if not isinstance(term, ComputableTerm): raise TypeError( "{term} is not a valid pipeline column. 
Did you mean to " "append '.latest'?".format(term=term) ) self._columns[name] = term @expect_types(name=str) def remove(self, name): """Remove a column. Parameters ---------- name : str The name of the column to remove. Raises ------ KeyError If `name` is not in self.columns. Returns ------- removed : zipline.pipeline.Term The removed term. """ return self.columns.pop(name) @expect_types(screen=Filter, overwrite=(bool, int)) def set_screen(self, screen, overwrite=False): """Set a screen on this Pipeline. Parameters ---------- filter : zipline.pipeline.Filter The filter to apply as a screen. overwrite : bool Whether to overwrite any existing screen. If overwrite is False and self.screen is not None, we raise an error. """ if self._screen is not None and not overwrite: raise ValueError( "set_screen() called with overwrite=False and screen already " "set.\n" "If you want to apply multiple filters as a screen use " "set_screen(filter1 & filter2 & ...).\n" "If you want to replace the previous screen with a new one, " "use set_screen(new_filter, overwrite=True)." ) self._screen = screen def to_execution_plan(self, domain, default_screen, start_date, end_date): """ Compile into an ExecutionPlan. Parameters ---------- domain : zipline.pipeline.domain.Domain Domain on which the pipeline will be executed. default_screen : zipline.pipeline.Term Term to use as a screen if self.screen is None. all_dates : pd.DatetimeIndex A calendar of dates to use to calculate starts and ends for each term. start_date : pd.Timestamp The first date of requested output. end_date : pd.Timestamp The last date of requested output. Returns ------- graph : zipline.pipeline.graph.ExecutionPlan Graph encoding term dependencies, including metadata about extra row requirements. """ if self._domain is not GENERIC and self._domain is not domain: raise AssertionError( "Attempted to compile Pipeline with domain {} to execution " "plan with different domain {}.".format(self._domain, domain) ) return ExecutionPlan( domain=domain, terms=self._prepare_graph_terms(default_screen), start_date=start_date, end_date=end_date, ) def to_simple_graph(self, default_screen): """ Compile into a simple TermGraph with no extra row metadata. Parameters ---------- default_screen : zipline.pipeline.Term Term to use as a screen if self.screen is None. Returns ------- graph : zipline.pipeline.graph.TermGraph Graph encoding term dependencies. """ return TermGraph(self._prepare_graph_terms(default_screen)) def _prepare_graph_terms(self, default_screen): """Helper for to_graph and to_execution_plan.""" columns = self.columns.copy() screen = self.screen if screen is None: screen = default_screen columns[SCREEN_NAME] = screen return columns @expect_element(format=('svg', 'png', 'jpeg')) def show_graph(self, format='svg'): """ Render this Pipeline as a DAG. Parameters ---------- format : {'svg', 'png', 'jpeg'} Image format to render with. Default is 'svg'. """ g = self.to_simple_graph(AssetExists()) if format == 'svg': return g.svg elif format == 'png': return g.png elif format == 'jpeg': return g.jpeg else: # We should never get here because of the expect_element decorator # above. raise AssertionError("Unknown graph format %r." % format) @staticmethod @expect_types(term=Term, column_name=str) def validate_column(column_name, term): if term.ndim == 1: raise UnsupportedPipelineOutput(column_name=column_name, term=term) @property def _output_terms(self): """ A list of terms that are outputs of this pipeline. 
Includes all terms registered as data outputs of the pipeline, plus the screen, if present. """ terms = list(self._columns.values()) screen = self.screen if screen is not None: terms.append(screen) return terms @expect_types(default=Domain) def domain(self, default): """ Get the domain for this pipeline. - If an explicit domain was provided at construction time, use it. - Otherwise, infer a domain from the registered columns. - If no domain can be inferred, return ``default``. Parameters ---------- default : zipline.pipeline.domain.Domain Domain to use if no domain can be inferred from this pipeline by itself. Returns ------- domain : zipline.pipeline.domain.Domain The domain for the pipeline. Raises ------ AmbiguousDomain ValueError If the terms in ``self`` conflict with self._domain. """ # Always compute our inferred domain to ensure that it's compatible # with our explicit domain. inferred = infer_domain(self._output_terms) if inferred is GENERIC and self._domain is GENERIC: # Both generic. Fall back to default. return default elif inferred is GENERIC and self._domain is not GENERIC: # Use the non-generic domain. return self._domain elif inferred is not GENERIC and self._domain is GENERIC: # Use the non-generic domain. return inferred else: # Both non-generic. They have to match. if inferred is not self._domain: raise ValueError( "Conflicting domains in Pipeline. Inferred {}, but {} was " "passed at construction.".format(inferred, self._domain) ) return inferred
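# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): building a Pipeline
# with one column and a screen. The factor/data names below are the standard
# zipline namespaces and are shown only for illustration.
#
#     from zipline.pipeline import Pipeline
#     from zipline.pipeline.data import USEquityPricing
#     from zipline.pipeline.factors import SimpleMovingAverage
#
#     sma = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=10)
#     pipe = Pipeline(columns={'sma_10': sma}, screen=sma > 5)
#     pipe.add(sma.top(20), 'in_top_20')
# ---------------------------------------------------------------------------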
python
#!/usr/bin/env python # ToMaTo (Topology management software) # Copyright (C) 2010 Dennis Schwerdel, University of Kaiserslautern # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/> """ Grants ------ For security reasons, the fileserver uses so called *grants* to verify that an upload or download request is authorized by the hostmanager. The grants are pseudo-random strings that are very unlikely to be guessed. Note that grants have an internal timeout and loose their validity after that. Uploading files --------------- The filemanager accepts file uploads for valid grants under the URL ``http://SERVER:PORT/GRANT/upload``. Uploads have to be sent via POST with *multipart/form-data* encoding. After sucessfully uploading a file, a successs message is shown. A redirect to a different URL can be requested by appending ``?redirect=URL_BASE64`` to the upload URL where *URL_BASE64* is the base64-encoded destination URL. A simple upload form can be accessed under the URL ``http://SERVER:PORT/GRANT/upload_form``. Downloading files ----------------- The filemanager accepts file download requests for valid grants under the URL ``http://SERVER:PORT/GRANT/download``. Downloads have to be requested via GET requests. The filemanager accepts the following parameters for downloads: ``name`` The name of the file that is being sent to the client ``mimetype`` The content-type of the file that is being sent to the client The fileserver will also honor the ``If-modified-since`` header. """ import SocketServer, BaseHTTPServer, hashlib, cgi, urlparse, urllib, shutil, base64, time, os.path, datetime, sys try: #python >=2.6 from urlparse import parse_qsl #@UnusedImport except: #python <2.6 from cgi import parse_qsl #@Reimport from .. import util #@UnresolvedImport from ... 
import config ACTION_UPLOAD = "upload" ACTION_DOWNLOAD = "download" _httpd = None _seed = os.urandom(8) _grants = {} def deleteGrantFile(grant): if os.path.exists(grant.path): os.remove(grant.path) def _code(path): return hashlib.md5(_seed+path).hexdigest() def addGrant(path, *args, **kwargs): code = _code(path) _grants[code] = Grant(path, *args, **kwargs) return code def delGrant(code): if code in _grants: del _grants[code] def getGrant(code): return _grants.get(code) def timeout(): for grant in _grants.values(): if grant.until < time.time(): grant.remove() class Grant: def __init__(self, path, action, until=None, triggerFn=None, repeated=False, timeout=None, removeFn=None): self.path = path self.action = action if until: self.until = until else: if not timeout: timeout = {"upload": 3600, "download": 12*3600}[action] self.until = time.time() + timeout self.triggerFn = triggerFn self.removeFn = removeFn self.repeated = repeated def trigger(self): if callable(self.triggerFn): self.triggerFn(self) if not self.repeated: self.remove() def check(self, action): if not self.until >= time.time(): self.remove() return False return action == self.action def remove(self): if callable(self.removeFn): self.removeFn(self) delGrant(_code(self.path)) class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): def process_request(self): _, _, path, _, query, _ = urlparse.urlparse(self.path) params = dict(parse_qsl(query)) return (path, params) def error(self, code, message): self.send_error(code, message) self.end_headers() self.finish() def html(self, html, code=200, redirect=None): self.send_response(code) self.end_headers() self.wfile.write("<html>") if redirect: self.wfile.write("<head><meta http-equiv=\"refresh\" content=\"0;url=%s\"/></head>" % redirect) self.wfile.write("<body>") self.wfile.write(html) self.wfile.write("</body></html>") self.finish() def do_POST(self): return self._handle() def do_HEAD(self): return self._handle() def do_GET(self): return self._handle() def _handle(self): path, params = self.process_request() try: parts = path.split("/") if len(parts) != 3 or parts[0]: return self.error(404, "Not Found") (dummy, grant, action) = parts if hasattr(self, "_handle_%s" % action): return getattr(self, "_handle_%s" % action)(grant, **params) else: return self.error(404, "Not Found") except Exception, exc: import traceback traceback.print_exc() self.error(500, "%s failed: %s" % (path, exc)) def _handle_download(self, grant, name="download", mimetype="application/octet-stream", **params): grant = getGrant(grant) if not (grant and grant.check(ACTION_DOWNLOAD)): self.error(403, "Invalid grant") return filename = grant.path if not os.path.exists(filename): grant.trigger() return self.error(404, "File not found") if "If-Modified-Since" in self.headers: date = datetime.datetime.strptime(self.headers.get("If-Modified-Since"), "%a, %d %b %Y %H:%M:%S %Z") fdate = datetime.datetime.fromtimestamp(os.path.getmtime(filename)) if fdate <= date: grant.trigger() return self.error(304, "Not modified") with open(filename, "rb") as file_: self.send_response(200) if name: self.send_header('Content-Disposition', 'attachment; filename="%s"' % name) self.send_header('Content-Type', mimetype) self.send_header('Content-Length', os.path.getsize(filename)) self.end_headers() if self.command != "HEAD": shutil.copyfileobj(file_, self.wfile) grant.trigger() self.finish() def _handle_upload_form(self, grant, **params): params = urllib.urlencode(params) return self.html('<form method="POST" enctype="multipart/form-data" 
action="/%s/upload?%s"><input type="file" name="upload"><input type="submit"></form>' % (grant, params)) def _handle_upload(self, grant, redirect=None, **params): grant = getGrant(grant) if not (grant and grant.check(ACTION_UPLOAD)): self.error(403, "Invalid grant") return filename = grant.path with open(filename, "wb") as file_: form = cgi.FieldStorage(fp=self.rfile, headers=self.headers, environ={'REQUEST_METHOD':self.command, 'CONTENT_TYPE':self.headers['Content-Type']}) upload = form["upload"].file shutil.copyfileobj(upload, file_) grant.trigger() if redirect: self.html("success, redirecting...", redirect=base64.b64decode(redirect)) else: self.html("upload successful") def log_message(self, format, *args): #@ReservedAssignment return class ThreadedHTTPServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer): """Handle requests in a separate thread.""" def start(): print >>sys.stderr, "Starting fileserver on port %d" % config.FILESERVER["PORT"] global _httpd _httpd = ThreadedHTTPServer(('', config.FILESERVER["PORT"]), RequestHandler) util.start_thread(_httpd.serve_forever) def stop(): _httpd.server_close()
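# ---------------------------------------------------------------------------
# Hedged client-side sketch (not part of the server): exercising the upload
# and download URLs documented in the module docstring with the `requests`
# library. Host, port, grant string and file names are placeholders.
#
#     import requests
#
#     base = "http://%s:%d/%s" % ("localhost", 8888, "GRANT_STRING")
#
#     # upload: multipart/form-data POST to .../GRANT/upload
#     with open("image.qcow2", "rb") as fp:
#         requests.post(base + "/upload", files={"upload": fp})
#
#     # download: GET from .../GRANT/download (optional name/mimetype params)
#     resp = requests.get(base + "/download", params={"name": "image.qcow2"})
#     with open("copy.qcow2", "wb") as fp:
#         fp.write(resp.content)
# ---------------------------------------------------------------------------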
python
# This file is part of Radicale Server - Calendar Server
# Copyright © 2014 Jean-Marc Martins
# Copyright © 2012-2017 Guillaume Ayoub
# Copyright © 2017-2018 Unrud <[email protected]>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#

import os
from tempfile import TemporaryDirectory

from radicale import pathutils, storage


class CollectionDeleteMixin:
    def delete(self, href=None):
        # NOTE: `j` is the Jumpscale global object; it is assumed to be
        # provided by the surrounding package (e.g. `from Jumpscale import j`)
        # and is not defined in this file.
        if href is None:
            # Delete the collection
            j.sal.bcdbfs.dir_remove(self._filesystem_path)
        else:
            # Delete an item
            if not pathutils.is_safe_filesystem_path_component(href):
                raise pathutils.UnsafePathError(href)
            path = pathutils.path_to_filesystem(self._filesystem_path, href)
            if not j.sal.bcdbfs.is_file(path):
                raise storage.ComponentNotFoundError(href)
            j.sal.bcdbfs.file_remove(path)

            # Track the change
            self._update_history_etag(href, None)
            self._clean_history()
python
import cv2 import numpy as np from moviepy.editor import VideoFileClip from .logger import Log from .calibration import GetCalibratedCamera, WarpMachine from .filtering import EdgeDetector from .lane_fitting import LaneFit from .save import chmod_rw_all, delete_file from .profiler import Profiler def draw_overlay(warper, lane_fitting, undistorted, warped): # get curvature and vehicle position left_cr, right_cr = lane_fitting.get_curvature() pos = lane_fitting.get_vehicle_position() # get fitpoints pts_y, left_fitx, right_fitx = lane_fitting.get_fitpoints() # Create an image to draw the lines on warp_zero = np.zeros_like(warped).astype(np.uint8) color_warp = np.dstack((warp_zero, warp_zero, warp_zero)) # Recast the x and y points into usable format for cv2.fillPoly() pts_left = np.array([np.transpose(np.vstack([left_fitx, pts_y]))]) pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, pts_y])))]) pts = np.hstack((pts_left, pts_right)) # Draw the lane onto the warped blank image cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0)) # Warp the blank back to original image space using inverse perspective matrix (Minv) overlay = warper.unwarp(color_warp) # Combine the result with the original image vis_overlay = cv2.addWeighted(undistorted, 1, overlay, 0.3, 0) pos_str = "Left" if pos < 0 else "Right" crl_text = "Radius of curvature (left) = %.1f km" % (left_cr / 1000) crr_text = "Radius of curvature (right) = %.1f km" % (right_cr / 1000) cr_text = "Radius of curvature (avg) = %.1f km" % ((left_cr + right_cr) / 2000) pos_text = "Vehicle is %.1f m %s from the lane center" % (np.abs(pos), pos_str) def put_text(image, text, color=(255, 255, 255), ypos=100): font = cv2.FONT_HERSHEY_SIMPLEX cv2.putText(image, text, (350, ypos), font, 1, color, 2, cv2.LINE_AA) put_text(vis_overlay, crl_text, ypos=50) put_text(vis_overlay, crr_text, ypos=100) put_text(vis_overlay, cr_text, ypos=150) put_text(vis_overlay, pos_text, ypos=200) return vis_overlay class LaneLinesTracker(object): def __init__(self): self.camera = GetCalibratedCamera() self.warper = WarpMachine() # profiling self.p_video = Profiler("Total Time") self.p_undistort = Profiler("Distortion Correction") self.p_edges = Profiler("Edge Detection") self.p_warp = Profiler("Perspective Transform") self.p_fitting = Profiler("Lane Fitting") self.p_overlay = Profiler("Overlay Drawing") def process_video(self, input_file, output_file, subclip_seconds=None): # delete output file to avoid permission problems between docker/user on write delete_file(output_file) self.p_video.start() # read Log.subsection("Reading video file: %s" % input_file) clip = VideoFileClip(input_file) # subclip if subclip_seconds: Log.info("Clipping video to: %.1f s" % subclip_seconds) clip = clip.subclip(0, subclip_seconds) # set image handler Log.info("Setting Image Handler ...") clip = clip.fl_image(self.process_image) # process / save Log.subsection("Processing Video ...") clip.write_videofile(output_file, audio=False, verbose=False) chmod_rw_all(output_file) self.p_video.update() # display profiling results Log.subsection("Profiling Results ...") total_secs = self.p_video.get_elapsed() self.p_video.display_elapsed(total_secs) self.p_undistort.display_elapsed(total_secs) self.p_edges.display_elapsed(total_secs) self.p_warp.display_elapsed(total_secs) self.p_fitting.display_elapsed(total_secs) self.p_overlay.display_elapsed(total_secs) self.p_video.display_processing_factor(clip.duration) def process_image(self, image): # Distortion correction self.p_undistort.start() 
undistorted = self.camera.undistort(image) self.p_undistort.update() # Edge Detection self.p_edges.start() edge_detector = EdgeDetector() edges = edge_detector.detect(undistorted) self.p_edges.update() # Perspective Transform self.p_warp.start() warped = self.warper.warp(edges) self.p_warp.update() # Lane Fitting self.p_fitting.start() lane_fitting = LaneFit(image.shape[1], image.shape[0]) vis_lanes = lane_fitting.fit_polynomial(warped) self.p_fitting.update() # Draw Overlay self.p_overlay.start() vis_overlay = draw_overlay(self.warper, lane_fitting, undistorted, warped) self.p_overlay.update() return vis_overlay
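# Hedged usage sketch: driving the tracker over a short clip. The file names
# and the 5-second subclip are illustrative only; the block only runs when the
# module is executed directly.
if __name__ == "__main__":
    tracker = LaneLinesTracker()
    tracker.process_video(
        "project_video.mp4",
        "project_video_output.mp4",
        subclip_seconds=5,
    )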
python
from rest_framework import serializers

from chigre.models import KegType


class KegTypeSerializer(serializers.ModelSerializer):
    class Meta:
        model = KegType
        fields = ('id', 'name', 'size', 'pints', 'canyas')
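# Hedged usage sketch (requires a configured Django project and a saved
# KegType row, so it is shown as a comment rather than executable code):
#
#     keg_type = KegType.objects.first()
#     KegTypeSerializer(keg_type).data
#     # -> {'id': ..., 'name': ..., 'size': ..., 'pints': ..., 'canyas': ...}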
python
import numpy as np
import pandas as pd

from calParser import obtainSchedule
from audit_parser import audit_info
from lsa_recommender import export_to_master, filter_available_classes
from decision_tree import preference_score, top_preferred_courses
from collaborative_filtering import loadAudits, inputData, buildRecommender, makePrediction, compileDepartScores
from time import time
import json

from CONSTANTS import *
python
'''
File: property.py
Project: 08-class
File Created: Saturday, 25th July 2020 9:16:43 pm
Author: lanling (https://github.com/muyuuuu)
-----------
Last Modified: Saturday, 25th July 2020 9:16:46 pm
Modified By: lanling (https://github.com/muyuuuu)
Copyright 2020 - 2020 NCST, NCST
-----------
@ May Buddha bless this code and keep it forever bug-free
'''
# Python's built-in @property decorator turns a method into an attribute access,
# which avoids redundant getter/setter boilerplate.
import math


class Student(object):
    def __init__(self, score):
        self._score = score

    # Getter function: the method becomes an attribute
    @property
    def score(self):
        return self._score

    @score.setter
    def score(self, value):
        if not isinstance(value, int):
            raise ValueError('score must be an integer!')
        if value < 0 or value > 100:
            raise ValueError('score must between 0 ~ 100!')
        self._score = value

    # Triggered by `del s.score`
    @score.deleter
    def score(self):
        raise AttributeError("Can't delete attribute")


s = Student(89)
# Method accessed as an attribute
print(s.score)
# Changing the underlying attribute directly -- not recommended
s._score = 90
# Assigning through the property setter gives a validated attribute operation
s.score = 98
print(s.score)


# Don't write properties like the following that add no extra behaviour.
# First, they bloat the code;
# second, they make the program run noticeably slower.
class People(object):
    @property
    def birth(self):
        return self._birth

    # With no __init__, the setter name must stay consistent with the getter
    @birth.setter
    def birth(self, value):
        self._birth = value

    # Defined as a read-only attribute
    @property
    def age(self):
        return 2020 - self._birth


s = People()
s.birth = 1998
# Assigning to it would raise an error
# s.age = 23
print(s.age)


# Dynamically computed attributes: these are not actually stored,
# they are calculated on demand.
class Circle:
    def __init__(self, radius):
        self.radius = radius

    @property
    def area(self):
        return math.pi * self.radius ** 2

    @property
    def diameter(self):
        return self.radius * 2

    @property
    def perimeter(self):
        return 2 * math.pi * self.radius


c = Circle(4.0)
print(c.perimeter)


# Don't write property definitions with lots of repeated code like the class
# below (how to fix this is covered later in the notes).
class Person:
    def __init__(self, first_name, last_name):
        self.first_name = first_name
        self.last_name = last_name

    @property
    def first_name(self):
        return self._first_name

    @first_name.setter
    def first_name(self, value):
        if not isinstance(value, str):
            raise TypeError('Expected a string')
        self._first_name = value

    # Repeated property code, but for a different name (bad!)
    @property
    def last_name(self):
        return self._last_name

    @last_name.setter
    def last_name(self, value):
        if not isinstance(value, str):
            raise TypeError('Expected a string')
        self._last_name = value


# Extending a property in a subclass
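# A minimal sketch of the pattern announced above (assumption: the classic
# recipe of delegating to the parent's property object via super()).
class SubPerson(Person):
    @property
    def first_name(self):
        print('Getting name')
        return super().first_name

    @first_name.setter
    def first_name(self, value):
        print('Setting name to', value)
        # The class-bound form of super() is needed to reach the parent
        # property's __set__ descriptor from inside the subclass setter.
        super(SubPerson, SubPerson).first_name.__set__(self, value)


sp = SubPerson('Guido', 'van Rossum')
print(sp.first_name)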
python
# -*- coding: utf-8 -*- import json import os import os.path import logging log = logging.getLogger(__name__) def filelist(folderpath, ext=None): ''' Returns a list of all the files contained in the folder specified by `folderpath`. To filter the files by extension simply add a list containing all the extension with `.` as the second argument. If `flat` is False, then the Path objects are returned. ''' if not ext: ext = [] if os.path.exists(folderpath) and os.path.isdir(folderpath): return [ os.path.join(folderpath, f) for f in os.listdir(folderpath) if os.path.isfile(os.path.join(folderpath, f)) and os.path.splitext(f)[1] in ext ] else: log.warn('"{}" does not exist or is not a directory'.format(folderpath)) def particles(category=None): ''' Returns a dict containing old greek particles grouped by category. ''' filepath = os.path.join(os.path.dirname(__file__), './particles.json') with open(filepath) as f: try: particles = json.load(f) except ValueError as e: log.error('Bad json format in "{}"'.format(filepath)) else: if category: if category in particles: return particles[category] else: log.warn('Category "{}" not contained in particle dictionary!'.format(category)) return particles def bookname(bookindex): ''' Returns the name of the book given the index. ''' nt = { 0: 'Matthew', 1: 'Mark', 2: 'Luke', 3: 'John', 4: 'Acts', 5: 'Romans', 6: 'Corinthians 1', 7: 'Corinthians 2', 8: 'Galatians', 9: 'Ephesians', 10: 'Philippians', 11: 'Colossians', 12: 'Thessalonians 1', 13: 'Thessalonians 2', 14: 'Timothy 1', 15: 'Timothy 2', 16: 'Titus', 17: 'Philemon', 18: 'Hebrews', 19: 'James', 20: 'Peter 1', 21: 'Peter 2', 22: 'John 1', 23: 'John 2', 24: 'John 3', 25: 'Jude', 26: 'Revelation' } # book indices are beginning from 1 return nt[bookindex - 1] def parts(): ''' Returns the dictionary with the part as key and the contained book as indices. ''' parts = { 'Canon': [ _ for _ in range(1, 5) ], 'Apostle': [ 5 ], 'Paul': [ _ for _ in range(6, 19) ], 'General': [ _ for _ in range(19, 26) ], 'Apocalypse': [ 27 ] } return parts
python
''' Copyright (C) 2016-2021 Mo Zhou <[email protected]> License: MIT/Expat ''' import os import math import time import random from .cuda_selector import CudaSelector RESOURCE_DEFAULT = 'void' RESOURCE_TYPES = (RESOURCE_DEFAULT, 'virtual', 'cpu', 'memory', 'gpu', 'vmem') if str(os.getenv('TASQUE_RESOURCE', '')): RESOURCE_DEFAULT = str(os.getenv('TASQUE_RESOURCE')) class AbstractResource: def __init__(self): ''' Attributes: self.book: tracking resource assignment ''' self.book = dict() self.acquire = dict() self.release = dict() def idle(self): ''' Wait for some time. ''' time.sleep(2) def avail(self) -> float: ''' Total amount of available specific <kind> of resource. ''' raise NotImplementedError('how to determine available resource?') def canalloc(self, rsc: float) -> bool: ''' check whether <rsc> of resource can be allocated. does not block. ''' raise NotImplementedError(f'can I allocate <{rsc}>?') def waitfor(self, rsc: float) -> None: ''' wait until <rsc> of resource can be allocated. does indeed block. ''' raise NotImplementedError(f'is there <{rsc}>?') def request(self, pid: int, rsc: float) -> (callable, callable): ''' generate callback functions for allocating the requested resource ''' def acquire(): raise NotImplementedError('how to allocate resource?') def release(): raise NotImplementedError('how to release resource?') return (acquire, release) class VoidResource(AbstractResource): ''' Void resource / sequential execution. (default) ''' def avail(self) -> float: return math.nan def canalloc(self, rsc: float) -> bool: return (0 == len(self.book)) def waitfor(self, rsc: float) -> None: return None def request(self, pid: int, rsc: float) -> None: self.acquire[pid] = lambda: self.book.__setitem__(pid, rsc) self.release[pid] = lambda: self.book.pop(pid) class VirtualResource(AbstractResource): ''' Virtual resource. And imagined resource with upper bound as <1.0>. Can be used to arrange some taks to run in parallel. ''' def avail(self) -> float: return 1.0 def canalloc(self, rsc: float) -> bool: return (rsc <= self.avail() - sum(self.book.values())) def waitfor(self, rsc: float) -> None: while not self.canalloc(rsc): self.idle() def request(self, pid: int, rsc: float) -> None: self.acquire[pid] = lambda: self.book.__setitem__(pid, rsc) self.release[pid] = lambda: self.book.pop(pid) class GpuResource(AbstractResource): ''' GPU (CUDA) Resource. Allocate cards (as a whole) for the requestors. We only consider a card "available" when >=97% video memory is free. ''' cusel = CudaSelector() def avail(self) -> float: # Number of available cards return float(len(cusel.availCards())) def canalloc(self, rsc: float) -> bool: # available cards cards = self.cusel.availCards() # excluding those registered in self.book cards = [card for card in cards if card.index not in self.book.values()] return len(cards) > 0 def request(self, pid: int, rsc: float) -> None: # currently only support allocating 1 card at a time. assert(int(rsc) == 1) exclude = set(self.book.values()) selcard = random.choice(self.cusel.availCards()) def acquire(): os.putenv('CUDA_VISIBLE_DEVICES', str(selcard.index)) self.book[pid] = selcard.index self.acquire[pid] = acquire self.release[pid] = lambda: self.book.pop(pid) class VmemResource(AbstractResource): ''' CUDA Video Memory Resource. Allocate video memories for the requestors. In this way we can allocate GPU resources in a fine-grained manner and smartly jam various tasks on the GPUs as appropriate. 
Unlike coarse-grained GPU allocation such as Slurm(CUDA) which allocate each card as a whole to the requestors. ''' cusel = CudaSelector() def avail(self) -> float: cards = self.cusel.getCards() return float(sum(card.memory_free for card in cards)) def canalloc(self, rsc: float) -> bool: # First round: cards that have enough free memory cards = self.cusel.getCards() cards = [card for card in cards if card.memory_free >= rsc] # Second round: remove cards that have been allocated in the book cards = [card for card in cards if card.index not in self.book.values()] return len(cards) > 0 def request(self, pid: int, rsc: float) -> None: exclude = self.book.values() device_index = self.cusel.selectCard(rsc, exclude=exclude) def acquire(): os.putenv('CUDA_VISIBLE_DEVICES', str(device_index)) self.book[pid] = rsc self.acquire[pid] = acquire self.release[pid] = lambda: self.book.pop(pid) class CpuResource(AbstractResource): def __init__(self): super(CpuResource, self).__init__() raise NotImplementedError() class MemoryResource(AbstractResource): def __init__(self): super(MemoryResource, self).__init__() raise NotImplementedError() def create(name: str): ''' factory function ''' mapping = { RESOURCE_DEFAULT: VoidResource, 'virtual': VirtualResource, 'cpu': CpuResource, 'memory': MemoryResource, 'gpu': GpuResource, 'vmem': VmemResource, } return mapping[name]()
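# Hedged usage sketch: how a scheduler might drive one of the resource classes
# above (VirtualResource here); the pid and the 0.5 share are illustrative.
if __name__ == '__main__':
    res = create('virtual')
    pid, share = 1234, 0.5      # a task asking for half of the virtual capacity
    res.waitfor(share)          # block until enough capacity is free
    res.request(pid, share)     # register the acquire/release callbacks
    res.acquire[pid]()          # book the share just before starting the task
    # ... run the task ...
    res.release[pid]()          # free the share once the task has finished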
python
# -*- coding: utf-8 -*-
from wcstring import wcstr
import re


class PipelineTable(object):
    '''
    Pipeline Table Object.

    Attributes
    ----------
    data : 2-dimension list
        1st dimension indicates the column
        2nd dimension indicates the index, with combined indexes grouped in a list
    colwidth : list of int
        set width of every column
    '''
    def __init__(self, data=[[]], colwidth=None):
        self.data = data
        self.align = None
        if colwidth and len(data) == len(colwidth):
            self.colwidth = colwidth
        else:
            self.colwidth = [max([len(''.join(i)) for i in data])]

    def autofmt(self, boader=2, maxwidth=76, align='c'):
        # pad the align string so every column has an alignment character
        if len(self.data) > len(align):
            align = align + align[-1] * (len(self.data) - len(align))
        self.align = align
        self.space_fill(align=align)

    def space_fill(self, align='c'):
        for col in range(len(self.data)):
            for ind in range(len(self.data[col])):
                if align[col] == 'l':
                    self.data[col][ind] = [wcstr(i).ljust(self.colwidth[col])
                                           for i in self.data[col][ind]]
                elif align[col] == 'r':
                    self.data[col][ind] = [wcstr(i).rjust(self.colwidth[col])
                                           for i in self.data[col][ind]]
                else:
                    self.data[col][ind] = [wcstr(i).center(self.colwidth[col])
                                           for i in self.data[col][ind]]


def read_pipeline(string, mode='strict'):
    '''
    Read a pipeline table.

    Parameters
    ----------
    string : str
        a string containing a pipeline table
    '''
    # differentiate wordlines and separating lines
    lines = [wcstr(i) for i in string.split('\n') if re.findall(r'^ *\|?.+\|? *', i)]
    seplines = [i for i in range(len(lines)) if re.findall(r' *\|?[-:|]+\|? *', lines[i])]
    wordlines = [i for i in range(len(lines)) if i not in seplines]
    if len(seplines) != 1:
        raise ValueError("Multiple seplines detected" if len(seplines) > 1
                         else "No sepline detected")
    sepline = seplines[0]
    coldata = [[i for i in re.split(r"(?<!\\)\|", lines[j]) if i.strip()]
               for j in wordlines]
    colcount = len(coldata[0])
    # Check column length
    for i in range(len(coldata)):
        if len(coldata[i]) < colcount:
            coldata[i].extend([""] * (colcount - len(coldata[i])))
        elif len(coldata[i]) > colcount:
            raise ValueError("Length of columns of data is larger than header")
    coldata = list(zip(*coldata))
    print(coldata)
    return PipelineTable(data=coldata)


def put_pipeline(pt, align='c'):
    '''
    Put down a pipeline table.

    Parameters
    ----------
    pt : PipelineTable
    align : str or iterable containing align characters
        'l' : left-aligned
        'r' : right-aligned
        'c' : centered
    '''
    pt.autofmt(align=align)
    # column name first
    print('|', '|'.join([''.join(i[0]) for i in pt.data]), '|', sep='')
    print('|', '|'.join([i * '-' for i in pt.colwidth]), '|', sep='')
    colcounter = [1] * len(pt.data)
    indcounter = [0] * len(pt.data)
    bdrindic = []
    nextline = []
    # the remaining parts
    while colcounter[0] < len(pt.data[0]):
        for col in range(len(pt.data)):
            if indcounter[col] >= len(pt.data[col][colcounter[col]]):
                nextline.append('-' * pt.colwidth[col])
                colcounter[col] += 1
                indcounter[col] = 0
                bdrindic.append(True)
            else:
                nextline.append(pt.data[col][colcounter[col]][indcounter[col]])
                indcounter[col] += 1
                bdrindic.append(False)
        bdrindic.append(False)
        print('|', end='')
        for col in range(len(pt.data)):
            print(nextline[col], end='')
            print('|', end='')
        print()
        nextline = []
        bdrindic = []
    return
python
from itertools import groupby
from pathlib import Path

inp = Path('input.txt').read_text()

vowels = set('aeiou')
nope = 'ab cd pq xy'.split()

print(sum(
    (
        sum(c in vowels for c in line) >= 3
        and len(list(groupby(line))) < len(line)
        and not any(s in line for s in nope)
    )
    for line in inp.splitlines()
))
python
import matplotlib.pyplot as plt

x_values = list(range(1, 5001))
y_values = [x**3 for x in x_values]

plt.scatter(x_values, y_values)
plt.show()
python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import catboost
import pandas as pd

DATA_PATH = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    '../../../../../../../examples/src/main/resources/datasets/boston_housing_dataset.txt')

MODEL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                          "../models/model_reg.cbm")

DATA_SAMPLE_PREDICT_PATH = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    '../../../../../../../examples/src/main/resources/datasets/boston_housing_dataset-catboost-expected-results'
    '.txt')


def main():
    # load dataset
    features = [f'f_{i}' for i in range(13)]
    target = 'target'

    df = pd.read_csv(DATA_PATH, names=features + ['target'])

    # fit model
    model = catboost.CatBoost({
        'loss_function': 'RMSE',
        'verbose': False,
        'random_seed': 0
    })
    model.fit(df[features], df[target])
    model.save_model(MODEL_PATH)

    # predict on sample
    predicts = model.predict(df[features])
    pd.DataFrame({'x': predicts}).to_csv(DATA_SAMPLE_PREDICT_PATH, index=False, header=False)

    # predict on one sample
    print('Parameters:')
    r = df[:1][features].to_dict('records')
    for k, v in r[0].items():
        print(f'input.put("{k}", {v}d);')

    print('Expected predict:')
    print(model.predict(df[:1])[0])


if __name__ == '__main__':
    main()
python
from enum import Enum import regex from ..config import Config from ..utils import Api class OsuConsts(Enum): """ all constants related to osu """ # "": 0, MODS = { "NF": 1 << 0, "EZ": 1 << 1, "TD": 1 << 2, "HD": 1 << 3, "HR": 1 << 4, "SD": 1 << 5, "DT": 1 << 6, "RX": 1 << 7, "HT": 1 << 8, "NC": 1 << 9, "FL": 1 << 10, "AT": 1 << 11, "SO": 1 << 12, "AP": 1 << 13, "PF": 1 << 14, "4K": 1 << 15, "5K": 1 << 16, "6K": 1 << 17, "7K": 1 << 18, "8K": 1 << 19, "FI": 1 << 20, "RD": 1 << 21, "LM": 1 << 22, "TR": 1 << 23, "9K": 1 << 24, "10K": 1 << 25, "1K": 1 << 26, "3K": 1 << 27, "2K": 1 << 28, "V2": 1 << 29 } MODS_INT = {v: k for k, v in MODS.items()} DIFF_MODS = ["HR", "EZ", "DT", "HT", "NC", "FL", "HD", "NF"] TIME_MODS = ["DT", "HT", "NC"] AR_MS_STEP1 = 120 AR_MS_STEP2 = 150 AR0_MS = 1800 AR5_MS = 1200 AR10_MS = 450 OD_MS_STEP = 6 OD0_MS = 79.5 OD10_MS = 19.5 DT_SPD = 1.5 HT_SPD = .75 HR_AR = 1.4 EZ_AR = 0.5 HR_CS = 1.3 EZ_CS = 0.5 HR_OD = 1.4 EZ_OD = 0.5 HR_HP = 1.4 EZ_HP = 0.5 STRAIN_STEP = 400.0 DECAY_BASE = [0.3, 0.15] STAR_SCALING_FACTOR = 0.0675 EXTREME_SCALING_FACTOR = 0.5 DECAY_WEIGHT = 0.9 MODS_RE = regex.compile(rf"^({'|'.join(OsuConsts.MODS.value.keys())})+$") OSU_API = Api("https://osu.ppy.sh/api", 60, {"k": Config.credentials.osu_api_key}) # todo make a list of apis for multi server comparability __all__ = ["OsuConsts", "MODS_RE", "OSU_API", "utils", "apiTools", "stating", "graphing", "embedding"]
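# Hedged usage sketch: expanding an osu! mod bitmask back into mod acronyms
# using the tables above (72 == HD | DT is just an example value).
if __name__ == "__main__":
    def decode_mods(bitmask):
        return [name for bit, name in OsuConsts.MODS_INT.value.items() if bitmask & bit]

    print(decode_mods(72))   # -> ['HD', 'DT']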
python
""" This file is part of L3Morpho. L3Morpho is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. L3Morpho is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with L3Morpho. If not, see <http://www.gnu.org/licenses/>. -------------------------------------------------------------------- Author: Michael Gasser <[email protected]> Create Language, Morphology, and POSMorphology objects for Amharic. All functions specific to Amharic morphology are here (or imported from geez.py). """ from . import language from .geez import * ### Various functions that will be values of attributes of Amharic Morphology ### and POSMorphology objects. def vb_get_citation(root, fs, simplified=False, guess=False, vc_as=False): '''Return the canonical (prf, 3sm) form for the root and featstructs in featstruct set fss. If vc_as is True, preserve the voice and aspect of the original word. ''' if root == 'al_e': return "'ale" # Return root if no citation is found result = root # Unfreeze the feature structure fs = fs.unfreeze() # Update the feature structure to incorporate default (with or without vc and as) fs.update(AM.morphology['v'].citationFS if vc_as else AM.morphology['v'].defaultFS) # Refreeze the feature structure fs.freeze() # Find the first citation form compatible with the updated feature structure citation = AM.morphology['v'].gen(root, fs, from_dict=False, simplified=simplified, guess=guess) if citation: result = citation[0][0] elif not vc_as: # Verb may not occur in simplex form; try passive fs = fs.unfreeze() fs.update({'vc': 'ps'}) fs.freeze() citation = AM.morphology['v'].gen(root, fs, from_dict=False, simplified=simplified, guess=guess) if citation: result = citation[0][0] return result def n_get_citation(root, fs, simplified=False, guess=False, vc_as=False): '''Return the canonical (prf, 3sm) form for the root and featstructs in featstruct set fss. If vc_as is True, preserve the voice and aspect of the original word. ''' if fs.get('v'): # It's a deverbal noun return vb_get_citation(root, fs, simplified=simplified, guess=guess, vc_as=vc_as) else: return root def simplify(word): """Simplify Amharic orthography.""" word = word.replace("`", "'").replace('H', 'h').replace('^', '').replace('_', '') return word def orthographize(word): '''Convert phonological romanization to orthographic.''' word = word.replace('_', '').replace('I', '') return word def cop_anal2string(anal): '''Convert a copula analysis to a string. anal is ("cop", "new", "new", gramFS) ''' s = 'POS: copula' if anal[1]: s += ', root: <' + anal[1] + '>' s += '\n' fs = anal[3] if fs: sb = fs['sb'] s += ' subj:' s += arg2string(sb) if fs.get('neg'): s += ' negative\n' cj = fs.get('cj2') if cj: s += ' conjunctive suffix: ' + cj + '\n' return s def n_anal2string(anal): '''Convert a noun analysis to a string. anal is ("(*)n", root, citation, gramFS) ''' root = anal[1] citation = anal[2] fs = anal[3] deverbal = fs and fs.get('v') POS = '?POS: ' if '?' 
in anal[0] else 'POS: ' s = POS if deverbal: if deverbal == 'agt': s += 'agentive noun' elif deverbal == 'man': s += 'manner noun' elif deverbal == 'inf': s += 'infinitive' else: s += 'instrumental noun' if root: s += ', root: <' + root + '>' if citation: s += ', citation: ' + citation else: s += 'noun' if citation: s += ', stem: ' + citation elif root: s += ', stem: ' + root s += '\n' if fs: poss = fs.get('poss') if poss and poss.get('expl'): s += ' possessor:' s += arg2string(poss, True) gram = '' # For agent, infinitive, instrumental, give aspect and voice unless both are simple asp = fs.get('as') vc = fs.get('vc') rl = fs.get('rl') any_gram = False if deverbal and asp == 'it': gram += ' iterative' any_gram = True elif deverbal and asp == 'rc': if any_gram: gram += ',' gram += ' reciprocal' any_gram = True if deverbal and vc == 'ps': if any_gram: gram += ',' gram += ' passive' any_gram = True elif vc == 'tr': if any_gram: gram += ',' gram += ' transitive' any_gram = True elif vc == 'cs': if any_gram: gram += ',' gram += ' causative' any_gram = True if fs.get('neg'): # Only possible for infinitive if any_gram: gram += ',' gram += ' negative' any_gram = True if fs.get('plr'): if any_gram: gram += ',' gram += ' plural' any_gram = True if fs.get('def'): if any_gram: gram += ',' any_gram = True gram += ' definite' if fs.get('dis'): if any_gram: gram += ',' any_gram = True gram += ' distrib(Iyye-)' if rl and rl.get('acc'): if any_gram: gram += ',' any_gram = True gram += ' accusative' if rl and rl.get('gen'): if any_gram: gram += ',' any_gram = True gram += ' genitive' # der = fs.get('der') # if der and der.get('ass'): # if any_gram: gram += ',' # any_gram = True # gram += ' assoc(-awi)' if any_gram: s += ' grammar:' + gram + '\n' pp = fs.get('pp') cnj = fs.get('cnj') if pp or cnj: if pp: s += ' preposition: ' + pp if cnj: if pp: s += ',' s += ' conjunctive suffix: ' + cnj s += '\n' return s def vb_anal2string(anal): '''Convert a verb analysis to a string. anal is ("(*)v", root, citation, gramFS) ''' pos = 'verb' root = anal[1] citation = anal[2] fs = anal[3] POS = '?POS: ' if '?' 
in anal[0] else 'POS: ' s = POS + pos if root: if '{' in root: # Segmented form; not root s += ', segmentation: ' + root else: s += ', root: <' + root + '>' if citation: s += ', citation: ' + citation s += '\n' if fs: sb = fs['sb'] s += ' subject:' s += arg2string(sb) ob = fs.get('ob') if ob and ob.get('expl'): s += ' object:' s += arg2string(ob, True) s += ' grammar:' rl = fs.get('rl') tm = fs.get('tm') if tm == 'prf': s += ' perfective' elif tm == 'imf': s += ' imperfective' elif tm == 'j_i': s += ' jussive/imperative' elif tm == 'ger': s += ' gerundive' else: s += ' present' if fs.get('ax'): s += ', aux:alle' asp = fs['as'] if asp == 'it': s += ', iterative' elif asp == 'rc': s += ', reciprocal' vc = fs['vc'] if vc == 'ps': s += ', passive' elif vc == 'tr': s += ', transitive' elif vc == 'cs': s += ', causative' if fs.get('rel') or fs.get('neg'): if fs.get('rel'): s += ', relative' if rl and rl.get('acc'): s += ', accusative' if fs.get('def'): s += ', definite' if fs.get('neg'): s += ', negative' s += '\n' cj1 = fs.get('cj1') cj2 = fs.get('cj2') prep = fs.get('pp') if cj1 or cj2 or prep: any_affix = False if prep: any_affix = True s += ' preposition: ' + prep if cj1: if any_affix: s += ',' s += ' conjunctive prefix: ' + cj1 if cj2: if any_affix: s += ',' s += ' conjunctive suffix: ' + cj2 s += '\n' return s def arg2string(fs, obj=False): '''Convert an argument Feature Structure to a string.''' s = '' if fs.get('p1'): s += ' 1' elif fs.get('p2'): s += ' 2' else: s += ' 3' if fs.get('plr'): s += ', plur' else: s += ', sing' if not fs.get('plr') and (fs.get('p2') or not fs.get('p1')): if fs.get('fem'): s += ', fem' elif not fs.get('frm'): s += ', masc' if obj: if fs.get('p2'): if fs.get('frm'): s += ', formal' if fs.get('prp'): if fs.get('l'): s += ', prep: -l-' else: s += ', prep: -b-' s += '\n' return s def vb_anal_to_dict(root, fs): '''Convert a verb analysis Feature Structure to a dict.''' args = [] # List of features that are true bools = [] strings = {} gram = {} gram['root'] = root sbj = fs['sb'] obj = fs.get('ob', None) vc = fs['vc'] asp = fs['as'] tm = fs['tm'] cj1 = fs.get('cj1', None) cj2 = fs.get('cj2', None) prp = fs.get('pp', None) rl = fs.get('rl', {}) # Subject and object prep = False formal = False labels = ['person', 'number', 'gender'] if obj.get('expl'): if obj.get('p2'): formal = True labels.append('formality') prep = True labels.append('prepositional') args.append(labels) args1 = [] args1.append(agr_to_list(sbj, 'subject', formal)) if obj.get('expl'): args1.append(agr_to_list(obj, 'object', formal)) args.append(args1) # TAM if tm == 'imf': strings['tense/mood'] = 'imperfective' elif tm == 'prf': strings['tense/mood'] = 'perfective' elif tm == 'ger': strings['tense/mood'] = 'gerundive' else: strings['tense/mood'] = 'jussive/imperative' # DERIVATIONAL STUFF if vc == 'ps': strings['voice'] = 'passive' elif vc == 'tr': strings['voice'] = 'transitive' elif vc == 'cs': strings['voice'] = 'causative' if asp == 'it': strings['aspect'] = 'iterative' elif asp == 'rc': strings['aspect'] = 'reciprocal' # NEGATION if fs.get('neg'): bools.append('negative') # RELATIVIZATION if fs.get('rel'): bools.append('relative') # CASE if rl and rl.get('acc'): bools.append('accusative') # CONJUNCTIONS AND PREPOSITIONS if cj1: strings['prefix conjunction'] = cj1 if cj2: strings['suffix conjunction'] = cj2 if prp: strings['preposition'] = prp gram['args'] = args gram['strings'] = strings gram['bools'] = bools return gram def vb_dict_to_anal(root, dct, freeze=True): '''Convert a verb analysis dict 
to a Feature Structure.''' fs = FeatStruct() root = root or dct['root'] # Arguments sbj = list_to_arg(dct, 'sbj') if dct.get('obj'): obj = list_to_arg(dct, 'obj') else: obj = FeatStruct() obj['expl'] = False fs['sb'] = sbj fs['ob'] = obj # TAM: labels are the same as FS values fs['tm'] = dct.get('tam', 'prf') # DERIVATIONAL STUFF fs['as'] = dct.get('asp', 'smp') fs['vc'] = dct.get('voice_am', 'smp') # OTHER GRAMMAR fs['neg'] = dct.get('neg', False) fs['rel'] = dct.get('rel', False) fs['acc'] = dct.get('acc', False) if dct.get('aux'): fs['aux'] = 'al' else: fs['aux'] = None # PREPOSITIONS and CONJUNCTIONS fs['pp'] = dct.get('prep_am') if fs['pp']: fs['sub'] = True fs['cj1'] = dct.get('preconj_am') if fs['cj1']: fs['sub'] = True fs['cj2'] = dct.get('sufconj_am') return [root, FSSet(fs)] def agr_to_list(agr, cat, formal=False): '''Convert an agreement Feature Structure to a list. Category, then person, number, gender, formality (2nd prs), prepositional. ''' gram = [cat] if agr.get('p1'): gram.append('1') elif agr.get('p2'): gram.append('2') else: gram.append('3') if agr.get('plr'): gram.append('plural') else: gram.append('singular') if not agr.get('p1') and not agr.get('plr'): # Gender only for 2nd and 3rd person singular if agr.get('fem'): gram.append('feminine') else: gram.append('masculine') else: gram.append('') if formal: if cat == 'object' and agr.get('p2'): if agr.get('frm'): gram.append('formal') else: gram.append('informal') if agr.get('prp'): if agr.get('b'): gram.append('b-') else: gram.append('l-') elif cat == 'object': gram.append('no') return gram def list_to_arg(dct, prefix): '''Convert a dict to an argument Feature Structure.''' arg = FeatStruct() person = dct.get(prefix + '_pers') number = dct.get(prefix + '_num') gender = dct.get(prefix + '_gen') arg['expl'] = True # Person if person == '1': arg['p1'] = True arg['p2'] = False elif person == '2': arg['p2'] = True arg['p1'] = False else: # 3rd person the default arg['p1'] = False arg['p2'] = False # Number if number == 'plur': arg['plr'] = True else: # Singular the default arg['plr'] = False # Gender if person != '1': if gender == 'fem': arg['fem'] = True else: arg['fem'] = False # 2nd person: formality if person == '2': formality = dct.get(prefix + '_form') if formality == 'form': arg['frm'] = True else: # Informal the default arg['frm'] = False # Prepositional (object only) if prefix == 'obj': prep = dct.get(prefix + '_prep_am') if prep == 'l': arg['prp'] = 'l' elif prep == 'b': arg['prp'] = 'b' else: arg['prp'] = None return arg def root_postproc(root, geez=False): '''Postprocess a root, with or without converting to Geez.''' if geez: return root2geez(GEEZ_SERA['am'][1], root, lang='am') else: # # Irregular # if root == "al_e": # return '<al_e>' return '<' + root + '>' def n_postproc(analysis): '''Postprocess a noun, replacing the root, if deverbal with postprocessed form.''' gram1 = list(analysis[1])[0] if analysis[0]: if not gram1.get('v'): # This is not deverbal; convert the "root" (really the stem) to Geez analysis[0] = sera2geez(GEEZ_SERA['am'][1], analysis[0], lang='am') ## Create Language object for Amharic, including preprocessing, postprocessing, ## and segmentation units (phones). 
AM = language.Language("Amharic", 'am', postproc=lambda form: sera2geez(GEEZ_SERA['am'][1], form, lang='am'), preproc=lambda form: geez2sera(GEEZ_SERA['am'][0], form, lang='am', simp=True), postpostproc=lambda form: ta_convert(form), stat_root_feats=['vc', 'as'], stat_feats=[['poss', 'expl'], ['cnj'], ['cj1'], ['cj2'], ['pp'], ['rel']], seg_units=[["a", "e", "E", "i", "I", "o", "u", "H", "w", "y", "'", "`", "_", "|", "*"], {"b": ["b", "bW"], "c": ["c", "cW"], "C": ["C", "CW"], "d": ["d", "dW"], "f": ["f", "fW"], "g": ["g", "gW"], "h": ["h", "hW"], "j": ["j", "jW"], "k": ["k", "kW"], "l": ["l", "lW"], "m": ["m", "mW"], "n": ["n", "nW"], "p": ["p", "pW"], "P": ["P", "PW"], "N": ["N", "NW"], "q": ["q", "qW"], "r": ["r", "rW"], "s": ["s", "sW"], "S": ["S", "SW"], "t": ["t", "tW"], "T": ["T", "TW"], "v": ["v", "vW"], "x": ["x", "xW"], "z": ["z", "zW"], "Z": ["Z", "ZW"], "^": ["^s", "^S", "^h", "^hW", "^sW", "^SW"]}]) ## Create Morphology object and noun, verb, and copula POSMorphology objects for Amharic, ## including punctuation and ASCII characters that are part of the romanization. AM.set_morphology(language.Morphology((), pos_morphs=[('cop',), ('n',), ('v',)], # Exclude ^ and - (because it can be used in compounds) punctuation=r'[“‘”’–—:;/,<>?.!%$()[\]{}|#@&*\_+=\"፡።፣፤፥፦፧፨]', # Include digits? characters=r'[a-zA-Zሀ-ፚ\'`^]')) ### Assign various attributes to Morphology and POSMorphology objects # Functions that simplifies Amharic orthography AM.morphology.simplify = lambda word: simplify(word) AM.morphology.orthographize = lambda word: orthographize(word) # Function that performs trivial analysis on forms that don't require romanization AM.morphology.triv_anal = lambda form: no_convert(form) ## Functions converting between feature structures and simple dicts AM.morphology['v'].anal_to_dict = lambda root, anal: vb_anal_to_dict(root, anal) AM.morphology['v'].dict_to_anal = lambda root, anal: vb_dict_to_anal(root, anal) ## Default feature structures for POSMorphology objects ## Used in generation and production of citation form AM.morphology['v'].defaultFS = \ language.FeatStruct("[pos=v,tm=prf,as=smp,vc=smp,sb=[-p1,-p2,-plr,-fem],ob=[-expl,-p1,-p2,-plr,-fem,-b,-l,-prp,-frm],cj1=None,cj2=None,pp=None,ax=None,-neg,-rel,-sub,-def,-acc,-ye,rl=[-p,-acc]]") AM.morphology['v'].FS_implic = {'rel': ['def', 'sub'], 'cj1': ['sub'], 'pp': ['rel', 'sub'], ('pp', ('be', 'le', 'ke', 'wede', 'Inde', 'sIle', 'Iske', 'Iyye')): [['rl', ['p']]], 'def': ['rel', 'sub'], 'l': ['prp'], 'b': ['prp'], 'ob': [['expl']]} # defaultFS with voice and aspect unspecified AM.morphology['v'].citationFS = language.FeatStruct("[pos=v,tm=prf,sb=[-p1,-p2,-plr,-fem],ob=[-expl],cj1=None,cj2=None,pp=None,ax=None,-neg,-rel,-sub,-def,-ye,-acc,rl=[-p,-acc]]") AM.morphology['n'].defaultFS = \ language.FeatStruct("[pos=n,-acc,-def,-neg,-fem,-itu,as=smp,cnj=None,-dis,-gen,-plr,poss=[-expl,-p1,-p2,-plr,-fem,-frm],pp=None,v=None,vc=smp,rl=[-p,-gen,-acc]]") AM.morphology['n'].FS_implic = {'poss': [['expl'], 'def'], ('pp', ('be', 'le', 'ke', 'wede', 'Inde', 'sIle', 'Iske')): [['rl', ['p']]], ('gen', True): [['rl', ['gen']]], ('acc', True): [['rl', ['acc']]]} # defaultFS with voice and aspect unspecified AM.morphology['n'].citationFS = language.FeatStruct("[-acc,-def,-neg,cnj=None,-dis,-gen,-plr,poss=[-expl],pp=None,v=inf]") AM.morphology['cop'].defaultFS = language.FeatStruct("[cj2=None,-neg,ob=[-expl],-rel,sb=[-fem,-p1,-p2,-plr,-frm],-sub,tm=prs]") ## Functions that return the citation forms for words AM.morphology['v'].citation = lambda root, 
fss, simplified, guess, vc_as: vb_get_citation(root, fss, simplified, guess, vc_as) AM.morphology['n'].citation = lambda root, fss, simplified, guess, vc_as: n_get_citation(root, fss, simplified, guess, vc_as) ## Functions that convert analyses to strings AM.morphology['v'].anal2string = lambda fss: vb_anal2string(fss) AM.morphology['n'].anal2string = lambda fss: n_anal2string(fss) AM.morphology['cop'].anal2string = lambda fss: cop_anal2string(fss) ## Postprocessing function for nouns (treats roots differently) # AM.morphology['v'].postproc = lambda analysis: vb_postproc(analysis) AM.morphology['n'].postproc = lambda analysis: n_postproc(analysis) # AM.morphology['cop'].postproc = lambda analysis: cop_postproc(analysis) def load_anal(pos='v', lex=True, guess=False): if lex: AM.morphology[pos].load_fst(True, verbose=True) if guess: AM.morphology[pos].load_fst(True, guess=True, verbose=True) def load_gen(pos='v', lex=True, guess=False): if lex: AM.morphology[pos].load_fst(True, generate=True, invert=True, verbose=True) if guess: AM.morphology[pos].load_fst(True, generate=True, invert=True, guess=True, verbose=True)
python
# This program allows you to mark a square on the map using a two-digit system.
# The first digit is the column number (counting across) and the second digit
# is the row number (counting down).
row1 = ["⬜️", "⬜️", "⬜️"]
row2 = ["⬜️", "⬜️", "⬜️"]
row3 = ["⬜️", "⬜️", "⬜️"]
map = [row1, row2, row3]

print(f"{row1}\n{row2}\n{row3}")
position = input("Where do you want to put the treasure? ")

# First digit selects the column, second digit selects the row (both 1-based).
column = int(position[0]) - 1
row = int(position[1]) - 1
map[row][column] = "X"

print(f"{row1}\n{row2}\n{row3}")
python
# -*- coding: utf-8 -*-
from django import template

import datetime
# import timedelta

register = template.Library()


def nice_repr(timedelta, display="long", sep=", "):
    """
    Turns a datetime.timedelta object into a nice string repr.

    display can be "minimal", "short" or "long" [default].

    >>> from datetime import timedelta as td
    >>> nice_repr(td(days=1, hours=2, minutes=3, seconds=4))
    '1 day, 2 hours, 3 minutes, 4 seconds'
    >>> nice_repr(td(days=1, seconds=1), "minimal")
    '1d, 1s'
    """
    assert isinstance(timedelta, datetime.timedelta), "First argument must be a timedelta."

    result = []

    # Use floor division so the components stay integers and zero-valued
    # components are skipped on both Python 2 and Python 3.
    weeks = timedelta.days // 7
    days = timedelta.days % 7
    hours = timedelta.seconds // 3600
    minutes = (timedelta.seconds % 3600) // 60
    seconds = timedelta.seconds % 60

    if display == "sql":
        days += weeks * 7
        return "%i %02i:%02i:%02i" % (days, hours, minutes, seconds)
    elif display == 'minimal':
        words = ["w", "d", "h", "m", "s"]
    elif display == 'short':
        words = [" wks", " days", " hrs", " min", " sec"]
    else:
        words = [" weeks", " days", " hours", " minutes", " seconds"]

    values = [weeks, days, hours, minutes, seconds]

    for i in range(len(values)):
        if values[i]:
            if values[i] == 1 and len(words[i]) > 1:
                result.append("%i%s" % (values[i], words[i].rstrip('s')))
            else:
                result.append("%i%s" % (values[i], words[i]))

    return sep.join(result)


def iso8601_repr(timedelta):
    """
    Represent a timedelta as an ISO8601 duration.
    http://en.wikipedia.org/wiki/ISO_8601#Durations

    >>> from datetime import timedelta as td
    >>> iso8601_repr(td(days=1, hours=2, minutes=3, seconds=4))
    'P1DT2H3M4S'
    """
    years = timedelta.days // 365
    weeks = (timedelta.days % 365) // 7
    days = timedelta.days % 7

    hours = timedelta.seconds // 3600
    minutes = (timedelta.seconds % 3600) // 60
    seconds = timedelta.seconds % 60

    formatting = (
        ('P', (
            ('Y', years),
            ('W', weeks),
            ('D', days),
        )),
        ('T', (
            ('H', hours),
            ('M', minutes),
            ('S', seconds),
        )),
    )

    result = []
    for category, subcats in formatting:
        result += category
        for format, value in subcats:
            if value:
                result.append('%d%c' % (value, format))

    return "".join(result)


@register.filter(name='timedelta')
def timedelta(value, display="long"):
    if value is None:
        return value
    return nice_repr(value, display)


@register.filter(name='iso8601')
def iso8601(value):
    if value is None:
        return value
    return iso8601_repr(value)
python
'''
Write a program that converts a temperature entered in °C to °F.
'''
c = float(input('Enter the temperature in °C: '))
f = (9 * c + 160) / 5
print(f'A temperature of {c}°C is {f}°F!')
python
import logging import numpy as np import pandas as pd import scipy.special import scipy.stats def encode_array(vals, sep=',', fmt='{:.6g}'): return sep.join(map(fmt.format, vals)) def decode_array(vals, sep=','): return np.asarray(list(map(float, vals.split(',')))) def encode_matrix(vals, sep1=',', sep2=';', fmt='{:.6g}'): return sep2.join(encode_array(vals1, sep=sep1, fmt=fmt) for vals1 in vals) def decode_matrix(vals, sep1=',', sep2=';'): return np.asarray([decode_array(vals1, sep=sep1) for vals1 in vals.split(';')]) def load(path): cands = [ MCAlphaPrediction, AlphaPrediction, WMCProbPrediction, MCProbPrediction, ProbPrediction, ] errors = [] for cls in cands: try: return cls.load(path) except KeyError as e: errors.append(e) for e in errors: logging.error(e) raise NotImplementedError class Prediction: @property def ids(self): return self._ids def get_probs(self): # (N, K) return self._probs @classmethod def load(cls, path): raise NotImplementedError def save(self, path, ids): raise NotImplementedError def get_posterior(self, hists): raise NotImplementedError def hist_likelihood(hists, probs): # (..., K), (..., K) -> (...,) return (probs ** hists).sum(axis=-1) def get_posterior_dirichlet0(hists, alpha0=1.): K = hists.shape[1] # (N, K) alpha = alpha0 * np.ones(K) / K post_alpha = hists + alpha[:, None] return AlphaPrediction(post_alpha, pred.ids) def get_posterior_dirichlet(pred, hists, alpha0=1.): probs = pred.get_probs() alpha = alpha0 * probs assert hists.shape == probs.shape # (N, K) post_alpha = hists + alpha return AlphaPrediction(post_alpha, pred.ids) class ProbPrediction(Prediction): def __init__(self, probs, ids): self._probs = np.asarray(probs) # (N, K) assert len(self._probs.shape) == 2 self._ids = ids def get_agreement_probs(self): # (N,) return (self._probs ** 2).sum(axis=1) @classmethod def load(cls, path): tab = pd.read_csv(path, sep='\t') probs = np.asarray(list(map(decode_array, tab['prob']))) return cls(probs, tab['id']) def save(self, path): columns = ['id', 'prob'] tab = pd.DataFrame({ 'id': self._ids, 'prob': list(map(encode_array, self._probs)), }, columns=columns) tab.to_csv(path, sep='\t', index=False) class MCProbPrediction(Prediction): def __init__(self, mc_probs, ids): self._mc_probs = np.asarray(mc_probs) # (N, S, K) assert len(self._mc_probs.shape) == 3 self._probs = self._mc_probs.mean(axis=1) # (N, K) self._ids = ids def get_agreement_probs(self): # (N,) mc_agree_probs = (self._mc_probs ** 2).sum(axis=2) # (N, S) return mc_agree_probs.mean(axis=1) @classmethod def load(cls, path): tab = pd.read_csv(path, sep='\t') mc_probs = np.asarray(list(map(decode_matrix, tab['mc_prob']))) return cls(mc_probs, tab['id']) def save(self, path): columns = ['id', 'mc_prob'] tab = pd.DataFrame({ 'id': self._ids, 'mc_prob': list(map(encode_matrix, self._mc_probs)), }, columns=columns) tab.to_csv(path, sep='\t', index=False) def get_posterior(self, hists): hl = hist_likelihood(hists[:, None, :], self._mc_probs) # (N, S, K) -> (N, S) weights = hl / hl.sum(axis=-1, keepdims=True) # normalized -> (N, S) logging.info(weights) wmc_pred = WMCProbPrediction(self._mc_probs, weights, ids=self.ids) # (N, S, K), (N, S) return wmc_pred class WMCProbPrediction(Prediction): def __init__(self, mc_probs, mc_weights, ids): self._mc_probs = np.asarray(mc_probs) # (N, S, K) self._mc_weights = np.asarray(mc_weights) # (N, S) or (1, S) assert len(self._mc_probs.shape) == 3 assert self._mc_weights.shape == self._mc_probs.shape[:2] self._probs = (self._mc_probs * self._mc_weights[:, :, 
None]).sum(axis=1) # (N, K) self._ids = ids @classmethod def load(cls, path): tab = pd.read_csv(path, sep='\t') mc_probs = np.asarray(list(map(decode_matrix, tab['mc_prob']))) mc_weights = np.asarray(list(map(decode_array, tab['mc_weight']))) return cls(mc_probs, mc_weights, tab['id']) def save(self, path): columns = ['id', 'mc_prob', 'mc_weight'] tab = pd.DataFrame({ 'id': self._ids, 'mc_prob': list(map(encode_matrix, self._mc_probs)), 'mc_weight': list(map(encode_array, self._mc_weights)), }, columns=columns) tab.to_csv(path, sep='\t', index=False) class AlphaPrediction(Prediction): eps = clip_min = np.finfo(float).eps clip_max = 1./np.finfo(float).eps def __init__(self, alphas, ids): self._alphas = np.asarray(alphas) # (N, K) self._alphas[np.isnan(self._alphas)] = self.clip_min # Repair underflowed values self._alphas = np.clip(self._alphas, self.clip_min, self.clip_max) assert len(self._alphas.shape) == 2 self._alpha0s = self._alphas.sum(axis=1) self._probs = self._alphas / self._alpha0s[:,None] self._ids = ids def get_alphas(self): return self._alphas def get_agreement_probs(self): # (N,) denom = self._alpha0s * (self._alpha0s + 1) square_moments = self._alphas * (self._alphas + 1) / denom[:, None] # (N, K) agree_probs = square_moments.sum(axis=1) # (N,) return agree_probs @classmethod def load(cls, path): tab = pd.read_csv(path, sep='\t') alphas = np.asarray(list(map(decode_array, tab['alpha']))) return cls(alphas, tab['id']) def save(self, path): columns = ['id', 'alpha'] tab = pd.DataFrame({ 'id': self._ids, 'alpha': list(map(encode_array, self._alphas)), }, columns=columns) tab.to_csv(path, sep='\t', index=False) def get_posterior(self, hists): alpha = self._alphas assert hists.shape == alpha.shape # (N, K) post_alpha = hists + alpha return AlphaPrediction(post_alpha, self.ids) class MCAlphaPrediction(Prediction): eps = clip_min = np.finfo(float).eps clip_max = 1./np.finfo(float).eps def __init__(self, mc_alphas, ids): self._mc_alphas = np.asarray(mc_alphas) # (N, S, K) self._mc_alphas[np.isnan(self._mc_alphas)] = self.clip_min # repair underflowed values self._mc_alphas = np.clip(self._mc_alphas, self.clip_min, self.clip_max) assert len(self._mc_alphas.shape) == 3 self._alphas = self._mc_alphas.mean(axis=1) # (N, K) self._mc_alpha0s = self._mc_alphas.sum(axis=2) # (N, S) self._mc_mean_probs = self._mc_alphas / self._mc_alpha0s[:, :, None] #(N, S, K) self._probs = self._mc_mean_probs.mean(axis=1) #(N, K) self._ids = ids def get_alphas(self): return self._alphas def get_agreement_probs(self): # (N,) mc_square_moments = self._mc_alphas * (self._mc_alphas + 1) / (self._mc_alpha0s * (self._mc_alpha0s + 1))[:, :, None] # (N, S, K) mc_agree_probs = mc_square_moments.sum(axis=2) # (N, S) return mc_agree_probs.mean(axis=1) @classmethod def load(cls, path): tab = pd.read_csv(path, sep='\t') mc_alphas = np.asarray(list(map(decode_matrix, tab['mc_alpha']))) return cls(mc_alphas, tab['id']) def save(self, path): columns = ['id', 'mc_alpha'] tab = pd.DataFrame({ 'id': self._ids, 'mc_alpha': list(map(encode_matrix, self._mc_alphas)), }, columns=columns) tab.to_csv(path, sep='\t', index=False)
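# Hedged usage sketch: round-tripping a small per-item class-probability table
# through ProbPrediction and the generic load() helper above. The file name is
# an arbitrary example path.
if __name__ == '__main__':
    pred = ProbPrediction(probs=[[0.1, 0.9], [0.5, 0.5]], ids=['item-1', 'item-2'])
    pred.save('predictions_example.tsv')

    reloaded = load('predictions_example.tsv')   # tries each Prediction subclass in turn
    print(reloaded.get_probs())                  # (N, K) class probabilities
    print(reloaded.get_agreement_probs())        # (N,) probability two draws agree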
python
import unittest import sys sys.path.insert(0, '../') from view_header import Route, PresentView, Flash, MSG_TYPE class TestRoute(unittest.TestCase): r1 = Route(True, 'test', {}) r2 = Route(True, 'test', {0:1, 1:'obj'}) def test_is_redirect(self): self.assertEqual(self.r1.is_redirect(), True) def test_get_name(self): self.assertEqual(self.r1.get_name(), 'test') def test_get_args1(self): self.assertEqual(len(self.r1.get_args()), 0) def test_get_args2(self): self.assertEqual(len(self.r2.get_args()), 2) self.assertEqual(self.r2.get_args()[0], 1) self.assertEqual(self.r2.get_args()[1], 'obj') def test_equals1(self): #basic self.assertFalse(self.r1.equals(self.r2)) def test_equals2(self): r2_copy = Route(True, 'test', {0:1, 1:'obj'}) self.assertTrue(self.r2.equals(r2_copy)) def test_equals3(self): r1_copy = Route(True, 'test', {}) self.assertTrue(self.r1.equals(r1_copy)) def test_equals4(self): temp = Route(True, 'test_', {}) self.assertFalse(self.r1.equals(temp)) def test_equals5(self): temp = Route(False, 'test', {}) self.assertFalse(self.r1.equals(temp)) def test_equals6(self): #testing the isinstance self.assertFalse(self.r1.equals(2)) self.assertFalse(self.r1.equals('asdf')) self.assertFalse(self.r1.equals({})) self.assertFalse(self.r1.equals([2])) class TestPresentView(unittest.TestCase): r1 = Route(True, 'test', {}) r2 = Route(True, 'test', {0:1, 1:'obj'}) f1 = Flash("test", MSG_TYPE.SUCCESS) f2 = Flash("test", MSG_TYPE.FAIL) v11 = PresentView(r1, f1) v12 = PresentView(r1, f2) v21 = PresentView(r2, f1) v22 = PresentView(r2, f2) def test_get_route1(self): #deep equality temp = Route(True, 'test', {}) self.assertTrue(self.v11.get_route().equals(temp)) def test_get_route2(self): # pointer equality self.assertEqual(self.v11.get_route(), self.r1) def test_get_route3(self): # pointer equality temp = Route(True, 'test', {}) self.assertNotEqual(self.v11.get_route(), temp) def test_get_flash1(self): temp = Flash("test", MSG_TYPE.FAIL) self.assertTrue(self.v22.get_flash().equals(temp)) def test_get_flash2(self): # pointer equality self.assertEqual(self.v11.get_flash(), self.f1) def test_get_flash3(self): # pointer equality temp = Flash("test", MSG_TYPE.SUCCESS) self.assertNotEqual(self.v11.get_flash(), temp) def test_get_flash4(self): # pointer equality temp = PresentView(self.r2) self.assertEqual(temp.get_flash(), None) # def test_equals1(self): self.assertFalse(self.v11.equals(self.v12)) def test_equals2(self): self.assertFalse(self.v11.equals(2)) def test_equals3(self): self.assertTrue(self.v11.equals(self.v11)) def test_equals4(self): temp = PresentView(self.r2, self.f1) self.assertTrue(self.v21.equals(temp)) def test_equals5(self): temp = Flash("FAILLL", MSG_TYPE.SUCCESS) temp = PresentView(self.r2, temp) self.assertFalse(self.v21.equals(temp)) def test_equals6(self): # None flash temp = PresentView(self.r2) self.assertFalse(temp.equals(self.v11)) def test_equals7(self): # None flash temp = PresentView(self.r2) self.assertFalse(self.v22.equals(temp)) def test_equals8(self): # None flash temp = PresentView(self.r2) temp2 = PresentView(self.r2) self.assertFalse(temp.equals(temp2)) def test_equals9(self): # None flash temp = PresentView(self.r2) self.assertFalse(temp.equals(temp)) class TestFlash(unittest.TestCase): f1 = Flash("test", MSG_TYPE.SUCCESS) f2 = Flash("test", MSG_TYPE.FAIL) f3 = Flash(1, MSG_TYPE.FAIL) def test_equals1(self): f1_copy = Flash("test", MSG_TYPE.SUCCESS) self.assertTrue(self.f1.equals(f1_copy)) def test_equals2(self): f1_copy = Flash("test 2", MSG_TYPE.SUCCESS) 
self.assertFalse(self.f1.equals(f1_copy)) def test_equals3(self): #testing the isinstance self.assertFalse(self.f1.equals(2)) self.assertFalse(self.f1.equals('asdf')) self.assertFalse(self.f1.equals({})) self.assertFalse(self.f1.equals([2])) def test_equals4(self): self.assertFalse(self.f1.equals(self.f2)) def test_gm1(self): self.assertEqual(self.f1.get_msg(), 'test') def test_gmt2(self): self.assertEqual(self.f1.get_msg_type(), MSG_TYPE.SUCCESS) def test_gmt3(self): self.assertEqual(self.f2.get_msg_type(), MSG_TYPE.FAIL) class Test_MSG_TYPE(unittest.TestCase): s = MSG_TYPE.SUCCESS f = MSG_TYPE.FAIL def test_success(self): self.assertEqual(self.s.value, 'success') self.assertEqual(self.s.name, 'SUCCESS') def test_fail(self): self.assertEqual(self.f.value, 'danger') self.assertEqual(self.f.name, 'FAIL') # FAIL = 'danger' if __name__ == '__main__': unittest.main()
python
from datetime import datetime


def from_iso8601(date):
    return datetime.fromisoformat(date)


def to_iso8601(year, month, day, hour, minute, second):
    return datetime(year, month, day, hour, minute, second, 0).isoformat()
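# Illustrative round trip through the two helpers above (the date itself is
# arbitrary); from_iso8601 relies on datetime.fromisoformat, so Python 3.7+
# is assumed.
if __name__ == '__main__':
    stamp = to_iso8601(2020, 1, 31, 12, 30, 0)
    print(stamp)                # 2020-01-31T12:30:00
    print(from_iso8601(stamp))  # 2020-01-31 12:30:00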
python
# -*- coding: utf-8 -*- # Generated by Django 1.11.5 on 2017-10-17 06:04 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('api', '0007_auto_20171005_1713'), ] operations = [ migrations.CreateModel( name='Column', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('table_name', models.CharField(max_length=100)), ('column_name', models.CharField(max_length=100)), ], ), migrations.CreateModel( name='Columns', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('name_id', models.CharField(max_length=100)), ], ), migrations.CreateModel( name='No_Relation_Columns', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Column')), ], ), migrations.CreateModel( name='No_Relation_Options', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('grep_strings', models.CharField(max_length=100)), ('no_relation_column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.No_Relation_Columns')), ], ), migrations.CreateModel( name='No_Relation_Table', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('priority', models.IntegerField()), ('column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Column')), ('columns', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Columns')), ], ), migrations.CreateModel( name='Relation_Columns', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Column')), ], ), migrations.CreateModel( name='Relation_Options', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('condition', models.CharField(max_length=100)), ('relation_column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Relation_Columns')), ], ), migrations.CreateModel( name='Relation_Table', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('priority', models.IntegerField()), ('column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Column')), ('columns', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Columns')), ], ), migrations.CreateModel( name='Tables', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('name_id', models.CharField(max_length=100)), ], ), migrations.RemoveField( model_name='skill', name='category', ), migrations.DeleteModel( name='Skill', ), migrations.DeleteModel( name='SkillCategory', ), migrations.AddField( model_name='relation_columns', name='relation_table', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Relation_Table'), ), migrations.AddField( model_name='no_relation_columns', name='no_relation_table', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.No_Relation_Table'), ), ]
python
# # Copyright Bernhard Firner, 2019-2020 # # Ship class and supporting classes from collections import OrderedDict from enum import Enum import torch from dice import ArmadaDice from game_constants import ( ArmadaDimensions, ArmadaTypes ) class UpgradeType(Enum): commander = 1 officer = 2 weapons_team = 3 support_team = 4 offensive_retrofit = 5 defensive_retrofit = 6 turbolasers = 7 ion_cannons = 8 ordnance = 9 fleet_support = 10 experimental_retrofit = 11 boarding_team = 12 title = 13 class Armament: def __init__(self, redCount, blueCount, blackCount): self.red = redCount self.blue = blueCount self.black = blackCount class ShipType: def __init__(self, name, attributes): self.name = name self.attributes = attributes class Ship: def __init__(self, name, player_number, template=None, upgrades=None, encoding=None, device=None): """Contsruct a specific instance of a ship. Args: name (str) : Name for this vessel. player_number (int) : The player who controls this ship. template (ShipType) : Ship template to copy. upgrades (table str->str) : Upgrades to equip. encoding (torch.Tensor) : An existing encoding to copy (if template and upgrades are None) device (str) : Default Tensor type ('cuda' or 'cpu'). Automatic if None. """ if (template is None or upgrades is None) and encoding is None: raise RuntimeError("Ship requires either template and updrades or encoding.") self.name = name if device is None: device = torch.device("cuda" if torch.cuda.is_available() else "cpu") self.encoding = torch.zeros(Ship.encodeSize()).to(device) if encoding is not None: self.encoding.copy_(encoding) else: self.encoding.fill_(0.) # Initialize attributes of this specific ship instance self.set('player', player_number) self.set('hull', int(template["Hull"])) self.set("ship", 0.) self.set("size", ArmadaDimensions.size_names.index(template['Size'].lower())) idx, length = Ship.get_index("defense_tokens") self.encoding[idx:idx + length] = 0. for ttype in ArmadaTypes.defense_tokens: tname = "Defense Token {}".format(ttype.capitalize()) token_idx = idx + ArmadaTypes.defense_tokens.index(ttype) if tname in template: if 0 == len(template[tname]): self.encoding[token_idx] = 0 else: self.encoding[token_idx] = int(template[tname]) # Max shields (current shields will be filled in the reset function) idx = Ship.get_index("max_shields")[0] for zone in ['left', 'right', 'front', 'rear']: name = "Shields {}".format(zone.capitalize()) self.encoding[idx + ArmadaTypes.hull_zones.index(zone)] = int(template[name]) if 'Huge' == template['Size']: for zone in ['left-auxiliary', 'right-auxiliary']: name = "Shields {} {}".format(zone.capitalize()) self.encoding[idx + ArmadaTypes.hull_zones.index(zone)] = int(template[name]) # Presence of hull zones/firing arcs idx, length = Ship.get_index("hull_zones") self.encoding[idx:idx + length] = 0. # Set the hull zones to indicate which are present idx = Ship.get_index("hull_zones")[0] for zone in ['left', 'right', 'front', 'rear']: self.encoding[idx + ArmadaTypes.hull_zones.index(zone)] = 1. if 'Huge' == template['Size']: for zone in ['left-auxiliary', 'right-auxiliary']: self.encoding[idx + ArmadaTypes.hull_zones.index(zone)] = 1. 
# Initialize the armaments idx = Ship.get_index("dice")[0] for i, zone in enumerate(['left', 'right', 'front', 'rear']): for j, color in enumerate(ArmadaDice.die_colors): name = "Armament {} {}".format(zone.capitalize(), color.capitalize()) hull_offset = ArmadaTypes.hull_zones.index(zone) if 0 < len(template[name]): number = int(template[name]) else: number = 0 self.encoding[idx + hull_offset * len(ArmadaDice.die_colors) + j] = number if 'Huge' == template['Size']: for i, zone in enumerate(['left-auxiliary', 'right-auxiliary']): for j, color in enumerate(ArmadaDice.die_colors): name = "Armament {} {}".format(zone.capitalize(), color.capitalize()) hull_offset = ArmadaTypes.hull_zones.index(zone) number = int(template[name]) self.encoding[idx + hull_offset * len(ArmadaDice.die_colors) + j] = number self.reset() # TODO Check for legality and actually handle self.width, self.height = ArmadaDimensions.ship_bases_feet[ ArmadaDimensions.size_names[int(self.get('size'))]] self.upgrades = upgrades @staticmethod def _initialize_encoding(): """Initialize the _enc_index and _enc_len variables.""" Ship._enc_index = OrderedDict() Ship._enc_len = OrderedDict() def addEntry(name, length, cur_idx): Ship._enc_index[name] = cur_idx Ship._enc_len[name] = length return Ship._enc_index[name] + Ship._enc_len[name] cur_idx = addEntry(name='player', length=1, cur_idx=0) cur_idx = addEntry(name='hull', length=1, cur_idx=cur_idx) cur_idx = addEntry(name='damage', length=1, cur_idx=cur_idx) # TODO Face up damage card effects cur_idx = addEntry(name='speed', length=1, cur_idx=cur_idx) cur_idx = addEntry(name='ship', length=1, cur_idx=cur_idx) cur_idx = addEntry(name='size', length=1, cur_idx=cur_idx) # Defense tokens and state belong here, whether the token has been spent during this # attack step is stored in the attack state cur_idx = addEntry(name='defense_tokens', length=len(ArmadaTypes.defense_tokens), cur_idx=cur_idx) cur_idx = addEntry(name='green_defense_tokens', length=len(ArmadaTypes.defense_tokens), cur_idx=cur_idx) cur_idx = addEntry(name='red_defense_tokens', length=len(ArmadaTypes.defense_tokens), cur_idx=cur_idx) cur_idx = addEntry(name='max_shields', length=len(ArmadaTypes.hull_zones), cur_idx=cur_idx) cur_idx = addEntry(name='shields', length=len(ArmadaTypes.hull_zones), cur_idx=cur_idx) # Presence of particular hull zones cur_idx = addEntry(name='hull_zones', length=len(ArmadaTypes.hull_zones), cur_idx=cur_idx) # Armament for each zone cur_idx = addEntry( name='dice', length=len(ArmadaTypes.hull_zones) * len(ArmadaDice.die_colors), cur_idx=cur_idx) # TODO Line of sight marker locations and firing arc locations # TODO Upgrades # TODO Ignition arc cur_idx = addEntry(name='commands', length=ArmadaTypes.max_command_dials, cur_idx=cur_idx) # Location is a pair of x and y coordinates in feet (since that is the range ruler size). cur_idx = addEntry(name='location', length=2, cur_idx=cur_idx) # The heading is the clockwise rotation of the ship in radians cur_idx = addEntry(name='heading', length=1, cur_idx=cur_idx) @staticmethod def encodeSize(): """Get the size of the ship encoding. Returns: int: Size of the ship encoding (number of Tensor elements) """ # Programmatically initialize the index lookup if it doesn't exist if not hasattr(Ship, '_enc_index'): Ship._initialize_encoding() last_key = list(Ship._enc_index.keys())[-1] size = Ship._enc_index[last_key] + Ship._enc_len[last_key] return size @staticmethod def get_index(data_name): """Get the index of a data element. 
Arguments: data_name(str): Name of the data element. Returns: (int, int): Tuple of the beginning of the data and the length. """ # Programmatically initialize the index lookup if it doesn't exist if not hasattr(Ship, '_enc_index'): Ship._initialize_encoding() if data_name not in Ship._enc_index: raise RuntimeError("Ship has no attribute named {}".format(data_name)) return (Ship._enc_index[data_name], Ship._enc_len[data_name]) def base_size(self): """Get the ship width and length. Returns: tuple(int, int): width and length """ index = self.encoding[Ship._enc_index['size']] return ArmadaDimensions.ship_bases[ArmadaDimensions.size_names[index]] def token_count(self, index): """Get the number of green and red tokens at the given index. The index corresponds to a particular type of token as defined in ArmadaTypes.defense_tokens. Returns: tuple(int, int): The number of green and red tokens. """ green_idx = Ship._enc_index["green_defense_tokens"] red_idx = Ship._enc_index["red_defense_tokens"] return self.encoding[green_idx + index], self.encoding[red_idx + index] def ready_defense_tokens(self): """Replace all red tokens with green versions.""" with torch.no_grad(): # Add the red tokens to the green tokens and set red tokens to 0 green_idx = Ship._enc_index["green_defense_tokens"] red_idx = Ship._enc_index["red_defense_tokens"] token_len = Ship._enc_len['green_defense_tokens'] self.encoding[green_idx:green_idx + token_len] += self.encoding[red_idx:red_idx + token_len] self.encoding[red_idx:red_idx + src_len] = 0. def spend_token(self, token_type, color_type): """Spend a token of the given type and color. Args: token_type (str): Token type to spend. color_type (int): 0 for green, 1 for red """ red_idx = Ship._enc_index["red_defense_tokens"] type_offset = ArmadaTypes.defense_tokens.index(token_type) if 0 == color_type: green_idx = Ship._enc_index["green_defense_tokens"] self.encoding[green_idx + type_offset] -= 1 self.encoding[red_idx + type_offset] += 1 else: self.encoding[red_idx + type_offset] -= 1 def ready_upgrade_cards(self): """Unexhaust upgrade cards.""" # Not implemented yet pass def adjacent_zones(self, zone): """Return hull zones adjacent to the given zone.""" index = int(self.encoding[Ship._enc_index['size']].item()) size = ArmadaDimensions.size_names[index] if size == 'huge': if zone not in ArmadaTypes.adjacent_huge_hull_zones: raise RuntimeError("Unrecognized hull zone {}".format(zone)) return ArmadaTypes.adjacent_huge_hull_zones[zone] else: if zone not in ArmadaTypes.adjacent_hull_zones: raise RuntimeError("Unrecognized hull zone {}".format(zone)) return ArmadaTypes.adjacent_hull_zones[zone] def get(self, name): """Get a value from the encoding. Arguments: name (str): Name of the encoding field. Returns: value (float): The value of the encoding with the given name. """ index, length = Ship.get_index(name) if 1 == length: return self.encoding[index].item() else: raise RuntimeError("Use Ship.get_range for multi-element data.") def get_range(self, name): """Get a view of the encoding of a field with multiple elements. Arguments: name (str): Name of the encoding field. Returns: value (torch.Tensor): The tensor is a view of the original data, clone or convert to a list to avoid modification. """ index, length = Ship.get_index(name) if 1 == length: raise RuntimeError("Use Ship.get for single element data.") else: return self.encoding[index:index + length] def set(self, name, value): """Set a value in encoding. Arguments: name (str): Name of the encoding field. 
value (numeric, List, or torch.Tensor): A value assignable to a tensor. """ vtype = type(value) if vtype is not int and vtype is not float and vtype is not list and vtype is not torch.Tensor: raise RuntimeError('Ship.set does not have data type "{}"'.format(vtype)) index, length = Ship.get_index(name) if 1 == length: self.encoding[index] = value else: if type(value) is int or type(value) is float: raise RuntimeError("Attempt to assign a scalar value to an encoding range.") # Convert a list to a tensor to assign a range if type(value) is list: self.encoding[index:index + length] = torch.tensor(value) else: self.encoding[index:index + length] = value def set_range(self, name, value): """Set a range in the encoding to a value. Arguments: name (str): Name of the encoding field. value (numeric): Value to set. """ vtype = type(value) if vtype is not int and vtype is not float: raise RuntimeError('Ship.set_range does not support data type "{}"'.format(vtype)) index, length = Ship.get_index(name) self.encoding[index:index + length] = value def reset(self): """Resets shields, hull, and defense tokens and initialize values in the encoding.""" self.set("damage", 0.) self.set("speed", 0.) self.set_range("commands", 0.) # Set defense tokens, and shields # Initialize all tokens as green self.set('green_defense_tokens', self.get_range('defense_tokens')) self.set_range('red_defense_tokens', 0.) self.set('shields', self.get_range('max_shields')) # Set a location off of the board. Lump each player's ships together. self.set("location", [-1., self.get('player') * -1.]) self.set("heading", 0.) def roll(self, zone, distance): """ return an attack roll for the given arc at the given range. Args: zone (str) : One of front, left, right, and rear distance (str) : short, medium, or long Returns an array of colors and faces """ colors = [] faces = [] # TODO Extreme range # Roll red dice at all valid ranges die_offset = Ship._enc_index['dice'] hull_offset = die_offset + ArmadaTypes.hull_zones.index(zone) * len(ArmadaDice.die_colors) if distance in ["short", "medium", "long"]: red_offset = ArmadaDice.die_colors.index("red") num_dice = int(self.encoding[hull_offset + red_offset].item()) colors = colors + ["red"] * num_dice # Roll blue dice at all short to medium if distance in ["short", "medium"]: blue_offset = ArmadaDice.die_colors.index("blue") num_dice = int(self.encoding[hull_offset + blue_offset].item()) colors = colors + ["blue"] * num_dice # Roll black dice at short range if distance in ["short"]: black_offset = ArmadaDice.die_colors.index("black") num_dice = int(self.encoding[hull_offset + black_offset].item()) colors = colors + ["black"] * num_dice # TODO FIXME Only gathering should happen in the ship, rolling should follow in a different # area of code for color in colors: faces.append(ArmadaDice.random_roll(color)) return colors, faces def shield_damage(self, zone, amount): """ Deal damage to a hull zone but only deplete the shields, don't assign hull damage. Return the amount of damage that is in excess of the shields. Args: zone (str): One of ArmadaTypes.hull_zones amount (int): Amount of damage Returns: (int): Amount of damage that will be assigned to the hull. 
""" damage = amount if "hull" != zone: shield_offset = Ship._enc_index['shields'] + ArmadaTypes.hull_zones.index(zone) shields = int(self.encoding[shield_offset].item()) if shields >= damage: shields -= damage damage = 0 else: damage -= shields shields = 0 self.encoding[shield_offset] = shields return damage def damage(self, zone, amount): """ Deal damage to a hull zone. Args: zone (str): One of ArmadaTypes.hull_zones or "hull" amount (int): Amount of damage """ damage = amount if "hull" != zone: shield_offset = Ship._enc_index['shields'] + ArmadaTypes.hull_zones.index(zone) shields = int(self.encoding[shield_offset].item()) if shields >= damage: shields -= damage damage = 0 else: damage -= shields shields = 0 self.encoding[shield_offset] = shields # TODO FIXME This would be the correct time to handle the standard critical (or XX-9) self.set('damage', self.get('damage') + damage) def hull(self): hull_offset = Ship._enc_index['hull'] hull = int(self.encoding[hull_offset].item()) return hull def damage_cards(self): return int(self.get('damage')) def stringify(self): """Return a string version of the ship.""" shield_offset = Ship._enc_index['shields'] shield_length = Ship._enc_len['shields'] shields = self.encoding[shield_offset:shield_offset + shield_length] green_def_idx = Ship._enc_index['green_defense_tokens'] green_def_len = Ship._enc_len['green_defense_tokens'] green_tokens = self.encoding[green_def_idx:green_def_idx + green_def_len] red_def_idx = Ship._enc_index['red_defense_tokens'] red_def_len = Ship._enc_len['red_defense_tokens'] red_tokens = self.encoding[red_def_idx:red_def_idx + red_def_len] return str( "{}: hull ({}/{}), shields {}, green defense tokens {}, red defense tokens {}".format( self.name, self.hull()-self.damage_cards(), self.hull(), shields, green_tokens, red_tokens)) def __str__(self): return self.stringify() def __repr__(self): return self.stringify() def parseShips(filename): """ Returns a list of ships.""" keys = {} ship_templates = {} with open(filename, newline='') as ships: shipreader = csv.reader(ships, delimiter=',', quotechar='|') rowcount = 0 for row in shipreader: # parse the header first to find the column keys if ( 0 == rowcount ): count = 0 for key in row: count = count + 1 keys[count] = key else: newship = {} count = 0 # Fill in all of the information on this vessel for key in row: count = count + 1 newship[keys[count]] = key # Create a new ship template ship_templates[newship['Ship Name']] = newship rowcount = rowcount + 1 ship_types = {} for name, attributes in ship_templates.items(): ship_types[name] = ShipType(name, attributes) #print("{}:".format(name)) #for a_name, a_value in attributes.items(): # print(" {} : {}".format(a_name, a_value)) return ship_types
python
import os
import sys
import time
import wave
import numpy as np
from datetime import datetime
from pyaudio import PyAudio, paInt16


class GenAudio(object):
    def __init__(self):
        self.num_samples = 2000    # pyaudio internal buffer size
        self.sampling_rate = 8000  # sampling rate
        self.level = 1500          # amplitude threshold for keeping audio
        self.count_num = 20        # record when more than count_num samples in a block exceed level
        self.save_length = 8       # minimum recording length: save_length * num_samples samples
        self.time_count = 1000     # recording time limit, in s
        self.voice_string = []

    # save to file
    def save_wav(self, filename):
        wf = wave.open(filename, 'wb')
        wf.setnchannels(1)
        wf.setsampwidth(2)
        wf.setframerate(self.sampling_rate)
        wf.writeframes(np.array(self.voice_string).tobytes())
        wf.close()

    def read_audio(self):
        pa = PyAudio()
        stream = pa.open(format=paInt16, channels=1, rate=self.sampling_rate, input=True,
                         frames_per_buffer=self.num_samples)
        save_count = 0
        save_buffer = []
        time_count = self.time_count

        while True:
            time_count -= 1
            # read num_samples samples
            string_audio_data = stream.read(self.num_samples)
            # convert the raw bytes into an array
            audio_data = np.frombuffer(string_audio_data, dtype=np.short)
            # count the samples that exceed level
            large_sample_count = np.sum(audio_data > self.level)
            print(np.max(audio_data), "large_sample_count=>", large_sample_count)
            # if the count exceeds COUNT_NUM, keep at least SAVE_LENGTH blocks
            if large_sample_count > self.count_num:
                save_count = self.save_length
            else:
                save_count -= 1
            if save_count < 0:
                save_count = 0

            if save_count > 0:
                save_buffer.append(string_audio_data)
            else:
                if len(save_buffer) > 0:
                    self.voice_string = save_buffer
                    save_buffer = []
                    print("Recorded a piece of voice successfully!")
                    return True

            if time_count == 0:
                if len(save_buffer) > 0:
                    self.voice_string = save_buffer
                    save_buffer = []
                    print("Recorded a piece of voice successfully!")
                    return True
                else:
                    return False


def saveVoice():
    r = GenAudio()
    r.read_audio()
    if os.path.exists("voice.wav"):  # if the file already exists
        # delete it first (os.remove or os.unlink both work)
        os.remove("voice.wav")
        time.sleep(1)
    r.save_wav("voice.wav")
python
# -*- coding: utf-8 -*- """ Created on Fri Nov 16 00:13:05 2018 @author: Gireesh Sundaram """ import pandas as pd import numpy as np import seaborn as sns from sklearn.preprocessing import OneHotEncoder, LabelEncoder from sklearn.cross_validation import train_test_split from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.svm import SVC from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier, AdaBoostClassifier from sklearn.metrics import f1_score, recall_score, precision_score, confusion_matrix from imblearn.over_sampling import SMOTE import xgboost as xgb #%% data = pd.read_csv("D:\\Hackathons\\Amex\\Datasets\\train.csv") test = pd.read_csv("D:\\Hackathons\\Amex\\Datasets\\test.csv") train = data.sample(frac = 0.9) historic = pd.read_csv("D:\\Hackathons\\Amex\\Datasets\\historic_restruct.csv") #%% train['hour'] = pd.to_numeric(train['DateTime'].str.slice(11,13)) train["time"] = np.where(train['hour'].between(0, 4), "Midnight", np.where(train['hour'].between(5, 8), "Early Morning", np.where(train['hour'].between(9, 12), "Morning", np.where(train['hour'].between(13, 16), "Afternoon", np.where(train['hour'].between(17, 20), "Evening", "Night"))))) #%% train = train.merge(historic, on = ['user_id', 'product'], how='left') interest_view = train[['view', 'interest']] interest_view = interest_view.fillna(value = 0) #%% selectedfeatures = ['product', 'campaign_id', 'webpage_id', 'product_category_1', 'gender', 'user_group_id', 'age_level', 'user_depth'] selectedcols = train[selectedfeatures] #%% #Tryig to see if some row has any of the missing values, but does not! navaluecols = ['user_group_id', 'age_level', 'user_depth', 'city_development_index'] handlingna = data[navaluecols] handlingna["user_id"] = train["user_id"] handlingna = handlingna.drop_duplicates() user_id = handlingna[handlingna["user_id"].duplicated(keep=False)] #%% selectedcols['gender'] = selectedcols['gender'].fillna(value = "Female") selectedcols['age_level'] = selectedcols['age_level'].fillna(value = 2) selectedcols['user_depth'] = selectedcols['user_depth'].fillna(value = 1) #selectedcols['city_development_index'] = selectedcols['city_development_index'].fillna(value = 3) selectedcols = selectedcols.fillna(value = -99) LE = LabelEncoder() selectedcols_1 = selectedcols.apply(LE.fit_transform) #%% OHE = OneHotEncoder() selectedcols_2 = OHE.fit_transform(selectedcols_1).toarray() selectedcols_2 = pd.DataFrame(selectedcols_2) selectedcols_2['is_click'] = train['is_click'].reset_index(drop=True) #selectedcols_2['interest'] = interest_view['interest'] #selectedcols_2['view'] = interest_view['view'] #%% x_train, x_test, y_train, y_test = train_test_split(selectedcols_2.drop(columns = ['is_click']), selectedcols_2['is_click']) sm = SMOTE() train_ip_new, train_op_new = sm.fit_sample(x_train, y_train) #%% model = DecisionTreeClassifier() model.fit(train_ip_new, train_op_new) prediction = model.predict(x_test) score = f1_score(y_test, prediction) recall = recall_score(y_test, prediction) precision = precision_score(y_test, prediction) cm = confusion_matrix(y_test, prediction) #%% def featureselection(dataframe): dataframe['hour'] = pd.to_numeric(dataframe['DateTime'].str.slice(11,13)) selectedcols = dataframe[selectedfeatures] selectedcols['gender'] = selectedcols['gender'].fillna(value = "Female") selectedcols['age_level'] = selectedcols['age_level'].fillna(value = 2) selectedcols['user_depth'] = 
selectedcols['user_depth'].fillna(value = 1) #selectedcols['city_development_index'] = selectedcols['city_development_index'].fillna(value = 3) selectedcols = selectedcols.fillna(value = -99) selectedcols_1 = selectedcols.apply(LE.fit_transform) selectedcols_2 = OHE.fit_transform(selectedcols_1).toarray() selectedcols_2 = pd.DataFrame(selectedcols_2) return selectedcols_2 #%% preprocessed = featureselection(test) output = model.predict(preprocessed) #%% final_submission = pd.DataFrame() final_submission["session_id"] = test['session_id'] final_submission["is_click"] = output final_submission.to_csv("D:\\Hackathons\\Amex\\Datasets\\submission_10_DT_improving_features.csv", index = False) #%% for items in selectedfeatures: print(items) print(data[items].unique()) print(test[items].unique()) #%% time_by_day = train[["hour", 'is_click']].groupby(["hour"]).sum() count_gender = data.groupby(['product', 'gender']).size().reset_index(name='count') count_age = data.groupby(['product', 'age_level']).size().reset_index(name='count') count_depth = data.groupby(['product', 'user_depth']).size().reset_index(name='count') count_city = data.groupby(['product', 'city_development_index']).size().reset_index(name='count') #%% interest = pd.read_csv("D:\\Hackathons\\Amex\\Datasets\\historical_user_logs.csv") #%% view = interest.groupby(['user_id', 'product', 'action']).size().reset_index(name='count') view_p = view.pivot_table(index = ['user_id', 'product'], columns = 'action', values = 'count').reset_index().fillna(value = 0) view_p.to_csv("D:\\Hackathons\\Amex\\Datasets\\historic_restruct.csv", index = False) preprocessed.to_csv("D:\\Hackathons\\Amex\\Datasets\\preprocessed_op.csv", index = False)
python
#
# Copyright (c) 2020 Xilinx, Inc. All rights reserved.
# SPDX-License-Identifier: MIT
#

platform = "microblaze"
procs = ["microblaze"]
serial_port = "serial"
arch = "microblaze"
linux_compiler = "microblazeel-xilinx-linux-gnu-"

dtb_loadaddr = 0x81E00000
dtb_arch = "microblaze"
dtb_dtg = "microblaze-generic"
dtb_defconfig = "microblaze-generic_defconfig"
dtb_compiler = "microblazeel-xilinx-linux-gnu-"

kernel_loadaddr = 0x80000000
kernel_defconfig = "mmu_defconfig"
kernel_artifacts = ["arch/microblaze/boot/simpleImage.system.ub"]
kernel_image = "simpleImage.system.ub"

uboot_defconfig = "microblaze-generic_defconfig"
uboot_artifacts = ["u-boot"]

boot_scr_loadaddr = 0xBF200000
rootfs_loadaddr = 0x82E00000

overrides = ["microblaze"]
system_dtb = "microblaze-generic.dtb"
uboot_devicetree = "microblaze-generic"
python
# -*- coding: utf-8 -*- from terminaltables import AsciiTable from colorclass import Color class CostAnalysis: def __init__(self, db): self.db = db def draw(self, market, symbol, args): if len(args) != 0: raise Exception('no argument required for {}'.format(CostAnalysis.__name__)) cb_cols = ( '耗用原料', '耗用物料', '直接人工', '製造費用', '製造成本', '銷貨成本', ) cb_data = self.db.cost_breakdown().query( market, symbol, cb_cols, ) me_cols = ( '薪資支出', '保險費', '修繕費', '水電費', '折舊', '燃料費', '包裝費', '其他費用', ) me_data = self.db.manufacturing_expense2().query( market, symbol, me_cols, ) data = dict() for d in cb_data: v = [] for c in cb_cols: v.append(d[c]) data[d['year']] = v for d in me_data: if d['year'] not in data: data[d['year']] = ('?', '?', '?', '?', '?', '?') v = [] for c in me_cols: v.append(d[c]) data[d['year']] += v # Arrange them for terminaltables. table_data = [('year',) + cb_cols + me_cols] for year in sorted(data.keys()): dd = data[year] if len(dd) == 6: dd += (None,)*8 row1 = (year,) for d in dd: row1 += (int(d/1000),) if d is not None else (' ',) table_data.append(row1) row2 = (' ',) for d in dd[:6]: t = "{:03.2f}%".format(d/dd[4]*100) if d is not None else ' ' row2 += (Color("{autogreen}" + t + "{/autogreen}"),) for d in dd[6:]: t = "{:03.2f}%".format(d/dd[3]*100) if d is not None else ' ' row2 += (Color("{autogreen}" + t + "{/autogreen}"),) table_data.append(row2) table = AsciiTable(table_data) print(table.table)
python
# Size of program memory (bytes) MAX_PGM_MEM = 4096 # Size of context memory (bytes) MAX_DATA_MEM = 2048 # Max stack size (bytes) MAX_STACK = 512 # Number of registers MAX_REGS = 11 # Default output indentation for some debug messages IND = " " * 8 # Maximum values for various unsigned integers MAX_UINT8 = 0xff MAX_UINT16 = 0xffff MAX_UINT32 = 0xffffffff MAX_UINT64 = 0xffffffffffffffff #  +----------------+--------+--------------------+ #  |   4 bits       |  1 bit |   3 bits           | #  | operation code | source | instruction class  | #  +----------------+--------+--------------------+ #  (MSB)                                      (LSB) # OpCode Classes OPC_LD = 0x00 # load from immediate OPC_LDX = 0x01 # load from register OPC_ST = 0x02 # store immediate OPC_STX = 0x03 # store value from register OPC_ALU = 0x04 # 32 bits arithmetic operation OPC_JMP = 0x05 # jump OPC_RES = 0x06 # unused, reserved for future use OPC_ALU64 = 0x07 # 64 bits arithmetic operation # Operation codes (OPC_ALU or OPC_ALU64). ALU_ADD = 0x00 # addition ALU_SUB = 0x01 # subtraction ALU_MUL = 0x02 # multiplication ALU_DIV = 0x03 # division ALU_OR = 0x04 # or ALU_AND = 0x05 # and ALU_LSH = 0x06 # left shift ALU_RSH = 0x07 # right shift ALU_NEG = 0x08 # negation ALU_MOD = 0x09 # modulus ALU_XOR = 0x0a # exclusive or ALU_MOV = 0x0b # move ALU_ARSH = 0x0c # sign extending right shift ALU_ENDC = 0x0d # endianess conversion #  +--------+--------+-------------------+ #  | 3 bits | 2 bits |   3 bits          | #  |  mode  |  size  | instruction class | #  +--------+--------+-------------------+ #  (MSB)                             (LSB) # Load/Store Modes LDST_IMM = 0x00 # immediate value LDST_ABS = 0x01 # absolute LDST_IND = 0x02 # indirect LDST_MEM = 0x03 # load from / store to memory # 0x04 # reserved # 0x05 # reserved LDST_XADD = 0x06 # exclusive add # Sizes LEN_W = 0x00 # word (4 bytes) LEN_H = 0x01 # half-word (2 bytes) LEN_B = 0x02 # byte (1 byte) LEN_DW = 0x03 # double word (8 bytes) # Operation codes (OPC_JMP) JMP_JA = 0x00 # jump JMP_JEQ = 0x01 # jump if equal JMP_JGT = 0x02 # jump if greater than JMP_JGE = 0x03 # jump if greater or equal JMP_JSET = 0x04 # jump if `src`& `reg` JMP_JNE = 0x05 # jump if not equal JMP_JSGT = 0x06 # jump if greater than (signed) JMP_JSGE = 0x07 # jump if greater or equal (signed) JMP_CALL = 0x08 # helper function call JMP_EXIT = 0x09 # return from program JMP_JLT = 0x0a # jump if lower than JMP_JLE = 0x0b # jump if lower ir equal JMP_JSLT = 0x0c # jump if lower than (signed) JMP_JSLE = 0x0d # jump if lower or equal (signed) # Sources JMP_K = 0x00 # 32-bit immediate value JMP_X = 0x01 # `src` register
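# A small worked example of the bit layouts sketched in the comment diagrams
# above (the helper itself is not part of the original constant table): the low
# three bits select the instruction class; ALU/JMP opcodes then carry a source
# bit and a 4-bit operation code, while load/store opcodes carry a 2-bit size
# and a 3-bit mode.
def decode_opcode(opcode):
    cls = opcode & 0x07
    if cls in (OPC_ALU, OPC_ALU64, OPC_JMP):
        return {'class': cls, 'source': (opcode >> 3) & 0x01, 'op': (opcode >> 4) & 0x0f}
    return {'class': cls, 'size': (opcode >> 3) & 0x03, 'mode': (opcode >> 5) & 0x07}


if __name__ == '__main__':
    # 0x07 is a 64-bit ADD with an immediate source; 0x61 is LDX MEM of a word.
    assert decode_opcode(0x07) == {'class': OPC_ALU64, 'source': 0x00, 'op': ALU_ADD}
    assert decode_opcode(0x61) == {'class': OPC_LDX, 'size': LEN_W, 'mode': LDST_MEM}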
python
#!/usr/bin/env python # -*- coding: utf-8 -*- from runner.koan import * class AboutStrings(Koan): # https://docs.python.org/3/library/stdtypes.html#textseq # https://docs.python.org/3/library/unittest.html#assert-methods # https://docs.python.org/3/library/functions.html#isinstance def test_double_quoted_strings_are_strings(self): string = "Hello, world." self.assertEqual(True, isinstance(string, str)) def test_single_quoted_strings_are_also_strings(self): string = 'Goodbye, world.' self.assertEqual(True, isinstance(string, str)) def test_triple_quote_strings_are_also_strings(self): string = """Howdy, world!""" self.assertEqual(True, isinstance(string, str)) def test_triple_single_quotes_work_too(self): string = '''Bonjour tout le monde!''' self.assertEqual(True, isinstance(string, str)) # Both string and bytes literals may optionally be prefixed with a letter 'r' # or 'R'; such strings are called raw strings and treat backslashes as literal # characters. As a result, in string literals, '\U' and '\u' escapes in raw # strings are not treated specially. Given that Python 2.x’s raw unicode # literals behave differently than Python 3.x’s the 'ur' syntax is not supported. def test_raw_strings_are_also_strings(self): string = r"Konnichi wa, world!" self.assertEqual(True, isinstance(string, str)) def test_use_single_quotes_to_create_string_with_double_quotes(self): string = 'He said, "Go Away."' self.assertEqual('He said, "Go Away."', string) def test_use_double_quotes_to_create_strings_with_single_quotes(self): string = "Don't" self.assertEqual(r"Don't", string) def test_use_backslash_for_escaping_quotes_in_strings(self): a = "He said, \"Don't\"" b = 'He said, "Don\'t"' self.assertEqual(True, (a == b)) # https://docs.python.org/3/library/functions.html#len def test_use_backslash_at_the_end_of_a_line_to_continue_onto_the_next_line(self): string = "It was the best of times,\n\ It was the worst of times." self.assertEqual(52, len(string)) def test_triple_quoted_strings_can_span_lines(self): string = """ Howdy, world! """ self.assertEqual(15, len(string)) def test_triple_quoted_strings_need_less_escaping(self): a = "Hello \"world\"." b = """Hello "world".""" self.assertEqual(True, (a == b)) def test_escaping_quotes_at_the_end_of_triple_quoted_string(self): string = """Hello "world\"""" self.assertEqual('Hello "world"', string) def test_plus_concatenates_strings(self): string = "Hello, " + "world" self.assertEqual("Hello, world", string) def test_adjacent_strings_are_concatenated_automatically(self): string = "Hello" ", " "world" self.assertEqual("Hello, world", string) def test_plus_will_not_modify_original_strings(self): hi = "Hello, " there = "world" string = hi + there self.assertEqual("Hello, ", hi) self.assertEqual("world", there) def test_plus_equals_will_append_to_end_of_string(self): hi = "Hello, " there = "world" hi += there self.assertEqual("Hello, world", hi) def test_plus_equals_also_leaves_original_string_unmodified(self): original = "Hello, " hi = original there = "world" hi += there self.assertEqual("Hello, ", original) def test_most_strings_interpret_escape_characters(self): string = "\n" self.assertEqual('\n', string) self.assertEqual("""\n""", string) self.assertEqual(1, len(string))
python
# Mount RPC client -- RFC 1094 (NFS), Appendix A # This module demonstrates how to write your own RPC client in Python. # When this example was written, there was no RPC compiler for # Python. Without such a compiler, you must first create classes # derived from Packer and Unpacker to handle the data types for the # server you want to interface to. You then write the client class. # If you want to support both the TCP and the UDP version of a # protocol, use multiple inheritance as shown below. import rpc from rpc import Packer, Unpacker, TCPClient, UDPClient # Program number and version for the mount protocol MOUNTPROG = 100005 MOUNTVERS = 1 # Size of the 'fhandle' opaque structure FHSIZE = 32 # Packer derived class for Mount protocol clients. # The only thing we need to pack beyond basic types is an 'fhandle' class MountPacker(Packer): def pack_fhandle(self, fhandle): self.pack_fopaque(FHSIZE, fhandle) # Unpacker derived class for Mount protocol clients. # The important types we need to unpack are fhandle, fhstatus, # mountlist and exportlist; mountstruct, exportstruct and groups are # used to unpack components of mountlist and exportlist and the # corresponding functions are passed as function argument to the # generic unpack_list function. class MountUnpacker(Unpacker): def unpack_fhandle(self): return self.unpack_fopaque(FHSIZE) def unpack_fhstatus(self): status = self.unpack_uint() if status == 0: fh = self.unpack_fhandle() else: fh = None return status, fh def unpack_mountlist(self): return self.unpack_list(self.unpack_mountstruct) def unpack_mountstruct(self): hostname = self.unpack_string() directory = self.unpack_string() return (hostname, directory) def unpack_exportlist(self): return self.unpack_list(self.unpack_exportstruct) def unpack_exportstruct(self): filesys = self.unpack_string() groups = self.unpack_groups() return (filesys, groups) def unpack_groups(self): return self.unpack_list(self.unpack_string) # These are the procedures specific to the Mount client class. # Think of this as a derived class of either TCPClient or UDPClient. class PartialMountClient: # This method is called by Client.__init__ to initialize # self.packer and self.unpacker def addpackers(self): self.packer = MountPacker() self.unpacker = MountUnpacker('') # This method is called by Client.__init__ to bind the socket # to a particular network interface and port. We use the # default network interface, but if we're running as root, # we want to bind to a reserved port def bindsocket(self): import os try: uid = os.getuid() except AttributeError: uid = 1 if uid == 0: port = rpc.bindresvport(self.sock, '') # 'port' is not used else: self.sock.bind(('', 0)) # This function is called to cough up a suitable # authentication object for a call to procedure 'proc'. def mkcred(self): if self.cred == None: self.cred = rpc.AUTH_UNIX, rpc.make_auth_unix_default() return self.cred # The methods Mnt, Dump etc. each implement one Remote # Procedure Call. This is done by calling self.make_call() # with as arguments: # # - the procedure number # - the arguments (or None) # - the "packer" function for the arguments (or None) # - the "unpacker" function for the return value (or None) # # The packer and unpacker function, if not None, *must* be # methods of self.packer and self.unpacker, respectively. # A value of None means that there are no arguments or is no # return value, respectively. 
# # The return value from make_call() is the return value from # the remote procedure call, as unpacked by the "unpacker" # function, or None if the unpacker function is None. # # (Even if you expect a result of None, you should still # return the return value from make_call(), since this may be # needed by a broadcasting version of the class.) # # If the call fails, make_call() raises an exception # (this includes time-outs and invalid results). # # Note that (at least with the UDP protocol) there is no # guarantee that a call is executed at most once. When you do # get a reply, you know it has been executed at least once; # when you don't get a reply, you know nothing. def Mnt(self, directory): return self.make_call(1, directory, \ self.packer.pack_string, \ self.unpacker.unpack_fhstatus) def Dump(self): return self.make_call(2, None, \ None, self.unpacker.unpack_mountlist) def Umnt(self, directory): return self.make_call(3, directory, \ self.packer.pack_string, None) def Umntall(self): return self.make_call(4, None, None, None) def Export(self): return self.make_call(5, None, \ None, self.unpacker.unpack_exportlist) # We turn the partial Mount client into a full one for either protocol # by use of multiple inheritance. (In general, when class C has base # classes B1...Bn, if x is an instance of class C, methods of x are # searched first in C, then in B1, then in B2, ..., finally in Bn.) class TCPMountClient(PartialMountClient, TCPClient): def __init__(self, host): TCPClient.__init__(self, host, MOUNTPROG, MOUNTVERS) class UDPMountClient(PartialMountClient, UDPClient): def __init__(self, host): UDPClient.__init__(self, host, MOUNTPROG, MOUNTVERS) # A little test program for the Mount client. This takes a host as # command line argument (default the local machine), prints its export # list, and attempts to mount and unmount each exported files system. # An optional first argument of -t or -u specifies the protocol to use # (TCP or UDP), default is UDP. def test(): import sys if sys.argv[1:] and sys.argv[1] == '-t': C = TCPMountClient del sys.argv[1] elif sys.argv[1:] and sys.argv[1] == '-u': C = UDPMountClient del sys.argv[1] else: C = UDPMountClient if sys.argv[1:]: host = sys.argv[1] else: host = '' mcl = C(host) list = mcl.Export() for item in list: print item try: mcl.Mnt(item[0]) except: print 'Sorry' continue mcl.Umnt(item[0])
python
import os import librosa import numpy as np import pandas as pd from pandas import DataFrame from sklearn.preprocessing import LabelEncoder # def get_feature_label(row, directory): def get_feature_label(row, directory): file_name = os.path.join(directory, str(row.ID) + '.wav') # file_name = os.path.join("data_pipeline", "urban_sound_files", str(row.ID) + '.wav') # handle exception to check if there isn't a file which is corrupted try: # here kaiser_fast is a technique used for faster extraction X, sample_rate = librosa.load(file_name, res_type='kaiser_fast') # extract mfcc feature from data mfccs = librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40) mfccs_scaled: np = np.mean(mfccs.T, axis=0) except Exception as e: print("Error encountered while parsing file: ", file_name) return None, None feature: np = mfccs_scaled label = row.Class return feature, label def get_data_labels(featues_df: DataFrame) -> DataFrame: """ Convert features and corresponding classification labels into numpy arrays so that they can be feeded into neuronal network. :param temp: :return: X and y parameter y is our target variable """ X: np = np.array(featues_df.feature.tolist()) y: np = np.array(featues_df.label.tolist()) # encode label classification le = LabelEncoder() # one hot encoded labels # yy = to_categorical(le.fit_transform(y)) return X, X# yy def get_features_and_labels(data_in, directory): """ """ # function to load files and extract features train_temp: DataFrame = pd.DataFrame(columns=['feature', 'label']) for idx, row in data_in.iterrows(): feature, label = get_feature_label(row, directory) train_temp = train_temp.append({'feature': feature, 'label': label}, ignore_index=True) train_temp.columns = ['feature', 'label'] x_train, y_train = get_data_labels(train_temp) return x_train, y_train
python
from urllib.parse import urlencode, parse_qs, unquote


def stringify(d, u=False):
    qs = urlencode(d)
    if u:
        qs = unquote(qs)
    return qs


def parse(url):
    d = dict(
        (k, v if len(v) > 1 else v[0])
        for k, v in parse_qs(url).items()
    )
    return d
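# Quick illustration of the two helpers above (the values are arbitrary):
if __name__ == '__main__':
    qs = stringify({'page': 2, 'tags': 'python code'})
    print(qs)                               # page=2&tags=python+code
    print(stringify({'q': 'a/b'}, u=True))  # q=a/b (percent-escapes undone)
    print(parse(qs))                        # {'page': '2', 'tags': 'python code'}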
python
#!/usr/bin/python #coding:utf-8 import json import copy import time import os endpoint = "bind9" name_stats_path = "/var/named/data/named_stats.txt" def main(): if os.path.isfile(name_stats_path): os.remove(name_stats_path) os.system("rndc stats") ts = int(time.time()) payload = [] data = {"endpoint":endpoint,"metric":"","timestamp":ts,"step":60,"value":"","counterType":"COUNTER","tags":""} f = open(name_stats_path) for line in f: if "++ Incoming Requests ++" in line: data["tags"] = "tag=Incoming_Requests" continue elif "++ Incoming Queries ++" in line: data["tags"] = "tag=Incoming_Queries" continue elif "++ Outgoing Queries ++" in line: data["tags"] = "tag=Outgoing_Queries" continue elif "++ Name Server Statistics ++" in line: data["tags"] = "tag=Name_Server_Statistics" continue elif "++ Zone Maintenance Statistics ++" in line: data["tags"] = "tag=Zone_Maintenance_Statistics" continue elif "++ Resolver Statistics ++" in line: data["tags"] = "tag=Resolver_Statistics" continue elif "++ Cache DB RRsets ++" in line: data["tags"] = "tag=Cache DB RRsets" continue elif "++ Socket I/O Statistics ++" in line: data["tags"] = "tag=Socket_I/O_Statistics" continue named_stats = line.strip().split(' ') if named_stats[0].isdigit() != True: continue data["value"] = named_stats[0] data["metric"] = string_join(named_stats) payload.append(copy.copy(data)) os.remove(name_stats_path) print json.dumps(payload,indent=4) def string_join(split_list): num = 0 join_str = split_list[1] for string in split_list: num = num + 1 if num <= 2: continue join_str = join_str + "_" + string return join_str if __name__ == "__main__": main()
python
from plugins.database import db


class BaseModel:
    def save(self):
        """Persist this instance; return True on success, False otherwise."""
        try:
            db.session.add(self)
            db.session.commit()
            return True
        except Exception:
            # Roll back the failed transaction so the session remains usable.
            db.session.rollback()
            return False
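# Hypothetical usage, assuming `plugins.database.db` is a Flask-SQLAlchemy
# instance (which the session.add/commit calls above suggest). The User model
# and its columns are illustrative only, not part of the original code.
class User(db.Model, BaseModel):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), nullable=False)

# user = User(name='alice')
# user.save()  # True if the commit succeeded, False otherwise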
python
"""Creates a custom kinematics body with two links and one joint """ from openravepy import * from numpy import eye, array, zeros env = Environment() # create openrave environment env.SetViewer('qtcoin') # attach viewer (optional) with env: robot=RaveCreateRobot(env,'') robot.SetName('camera') linkinfo=KinBody.LinkInfo() linkinfo._name='camerabase' ginfo=KinBody.GeometryInfo() ginfo._type=GeometryType.Box ginfo._vGeomData=[0.1,0.1,0.1] # box extents ginfo._vDiffuseColor=[0,0,1] ginfo._t = eye(4) linkinfo._vgeometryinfos = [ginfo] camera1info=Robot.AttachedSensorInfo() camera1info._linkname='camerabase' camera1info._name = 'ensenson10' camera1info._sensorname = 'base_pinhole_camera' camera1info._trelative = eye(4) camera1info._trelative[0:3,3] = [0,0,0.1] camera1info._sensorgeometry = CameraGeomData() camera1info._sensorgeometry.width = 640 camera1info._sensorgeometry.height = 480 camera1info._sensorgeometry.intrinsics.K = array([[640.0,0,320],[0,640,240],[0,0,1]]) camera1info._sensorgeometry.intrinsics.distortion_coeffs = zeros(5) camera1info._sensorgeometry.intrinsics.distortion_model = 'opencv' camera1info._sensorgeometry.intrinsics.focal_length = 0.05 robot.Init([linkinfo],[],[],[]) env.Add(robot) robot.AddAttachedSensor(camera1info,True)
python
import itertools
from surprise import accuracy
from collections import defaultdict


class RecommenderMetrics:

    @staticmethod
    def mae(predictions):
        return accuracy.mae(predictions, verbose=False)

    @staticmethod
    def rmse(predictions):
        return accuracy.rmse(predictions, verbose=False)
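# Minimal end-to-end sketch, assuming the scikit-surprise package is installed;
# Dataset.load_builtin() offers to download the MovieLens 100k data on first
# use, and the SVD model here is just one possible choice.
if __name__ == '__main__':
    from surprise import Dataset, SVD
    from surprise.model_selection import train_test_split

    data = Dataset.load_builtin('ml-100k')
    trainset, testset = train_test_split(data, test_size=0.25)
    algo = SVD()
    algo.fit(trainset)
    predictions = algo.test(testset)
    print("MAE :", RecommenderMetrics.mae(predictions))
    print("RMSE:", RecommenderMetrics.rmse(predictions))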
python
from setuptools import setup

install_requires = (
    'beautifulsoup4==4.6.3',
)

tests_require = (
    'pytest',
    'pytest-cov',
    'mock',
)

setup_requires = (
    'pytest-runner',
    'flake8',
)

setup(
    name='tracking-id-injector',
    version='1.0.1',
    url='https://github.com/msufa/tracking-id-injector',
    author='Maciek Sufa',
    description=('Console script for injecting Google Analytics tracking IDs '
                 'into HTML files.'),
    license='Apache 2.0',
    packages=['tridinjector'],
    install_requires=install_requires,
    tests_require=tests_require,
    setup_requires=setup_requires,
    entry_points={
        'console_scripts': [
            'tracking-id-injector = tridinjector.injector:main'
        ]
    },
)
python
import argparse import time import math import numpy as np import sklearn.metrics as sk import torch import torch.nn as nn from torch.autograd import Variable import torch.nn.functional as F import data import model from utils_lm import batchify, get_batch, repackage_hidden # go through rigamaroo to do ..utils.display_results import show_performance if __package__ is None: import sys from os import path sys.path.append(path.dirname(path.dirname(path.abspath(__file__)))) from utils.display_results import show_performance from utils.log_sum_exp import log_sum_exp parser = argparse.ArgumentParser(description='PyTorch PennTreeBank RNN/LSTM Language Model') parser.add_argument('--data', type=str, default='data/penn/', help='location of the data corpus') parser.add_argument('--model', type=str, default='LSTM', help='type of recurrent net (LSTM, QRNN, GRU)') parser.add_argument('--emsize', type=int, default=400, help='size of word embeddings') parser.add_argument('--nhid', type=int, default=1150, help='number of hidden units per layer') parser.add_argument('--nlayers', type=int, default=3, help='number of layers') parser.add_argument('--lr', type=float, default=30, help='initial learning rate') parser.add_argument('--clip', type=float, default=0.25, help='gradient clipping') parser.add_argument('--epochs', type=int, default=8000, help='upper epoch limit') parser.add_argument('--batch_size', type=int, default=80, metavar='N', help='batch size') parser.add_argument('--bptt', type=int, default=70, help='sequence length') parser.add_argument('--dropout', type=float, default=0.4, help='dropout applied to layers (0 = no dropout)') parser.add_argument('--dropouth', type=float, default=0.3, help='dropout for rnn layers (0 = no dropout)') parser.add_argument('--dropouti', type=float, default=0.65, help='dropout for input embedding layers (0 = no dropout)') parser.add_argument('--dropoute', type=float, default=0.1, help='dropout to remove words from embedding layer (0 = no dropout)') parser.add_argument('--wdrop', type=float, default=0.5, help='amount of weight dropout to apply to the RNN hidden to hidden matrix') parser.add_argument('--seed', type=int, default=1111, help='random seed') parser.add_argument('--nonmono', type=int, default=5, help='random seed') parser.add_argument('--cuda', action='store_false', help='use CUDA') parser.add_argument('--log-interval', type=int, default=200, metavar='N', help='report interval') randomhash = ''.join(str(time.time()).split('.')) parser.add_argument('--save', type=str, default=randomhash+'.pt', help='path to save the final model') parser.add_argument('--alpha', type=float, default=2, help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)') parser.add_argument('--beta', type=float, default=1, help='beta slowness regularization applied on RNN activiation (beta = 0 means no regularization)') parser.add_argument('--wdecay', type=float, default=1.2e-6, help='weight decay applied to all weights') parser.add_argument('--resume', type=str, default='', help='path of model to resume') parser.add_argument('--optimizer', type=str, default='sgd', help='optimizer to use (sgd, adam)') parser.add_argument('--when', nargs="+", type=int, default=[-1], help='When (which epochs) to divide the learning rate by 10 - accepts multiple') parser.add_argument('--character_level', action='store_true', help="Use this flag to evaluate character-level models.") args = parser.parse_args() args.tied = True # Set the random seed manually for reproducibility. 
np.random.seed(args.seed) torch.manual_seed(args.seed) if torch.cuda.is_available(): if not args.cuda: print("WARNING: You have a CUDA device, so you should probably run with --cuda") else: torch.cuda.manual_seed(args.seed) ############################################################################### # Load data ############################################################################### def model_save(fn): with open(fn, 'wb') as f: torch.save([model, criterion, optimizer], f) def model_load(fn): global model, criterion, optimizer with open(fn, 'rb') as f: model, criterion, optimizer = torch.load(f) import os import hashlib fn = 'corpus.{}.data'.format(hashlib.md5(args.data.encode()).hexdigest()) if os.path.exists(fn): print('Loading cached dataset...') corpus = torch.load(fn) else: print('Producing dataset...') corpus = data.Corpus(args.data) torch.save(corpus, fn) eval_batch_size = 10 test_batch_size = 1 # DON'T CHANGE THIS train_data = batchify(corpus.train, args.batch_size, args) val_data = batchify(corpus.valid, eval_batch_size, args) test_data = batchify(corpus.test, test_batch_size, args) print('Producing ood datasets...') answers_corpus = data.OODCorpus('eng_web_tbk/answers/conll/answers_penntrees.dev.conll', corpus.dictionary, char=args.character_level) answers_data = batchify(answers_corpus.data, test_batch_size, args) email_corpus = data.OODCorpus('eng_web_tbk/email/conll/email_penntrees.dev.conll', corpus.dictionary, char=args.character_level) email_data = batchify(email_corpus.data, test_batch_size, args) newsgroup_corpus = data.OODCorpus('eng_web_tbk/newsgroup/conll/newsgroup_penntrees.dev.conll', corpus.dictionary, char=args.character_level) newsgroup_data = batchify(newsgroup_corpus.data, test_batch_size, args) reviews_corpus = data.OODCorpus('eng_web_tbk/reviews/conll/reviews_penntrees.dev.conll', corpus.dictionary, char=args.character_level) reviews_data = batchify(reviews_corpus.data, test_batch_size, args) weblog_corpus = data.OODCorpus('eng_web_tbk/weblog/conll/weblog_penntrees.dev.conll', corpus.dictionary, char=args.character_level) weblog_data = batchify(weblog_corpus.data, test_batch_size, args) ############################################################################### # Build the model ############################################################################### from splitcross import SplitCrossEntropyLoss criterion = None ntokens = len(corpus.dictionary) model = model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.dropouth, args.dropouti, args.dropoute, args.wdrop, args.tied) ### assert args.resume, 'must provide a --resume argument' print('Resuming model ...') model_load(args.resume) optimizer.param_groups[0]['lr'] = args.lr model.dropouti, model.dropouth, model.dropout, args.dropoute = args.dropouti, args.dropouth, args.dropout, args.dropoute if args.wdrop: from weight_drop import WeightDrop for rnn in model.rnns: if type(rnn) == WeightDrop: rnn.dropout = args.wdrop elif rnn.zoneout > 0: rnn.zoneout = args.wdrop ### if not criterion: splits = [] if ntokens > 500000: # One Billion # This produces fairly even matrix mults for the buckets: # 0: 11723136, 1: 10854630, 2: 11270961, 3: 11219422 splits = [4200, 35000, 180000] elif ntokens > 75000: # WikiText-103 splits = [2800, 20000, 76000] print('Using', splits) criterion = SplitCrossEntropyLoss(args.emsize, splits=splits, verbose=False) ### if args.cuda: model = model.cuda() criterion = criterion.cuda() ### params = list(model.parameters()) + 
list(criterion.parameters()) total_params = sum(x.size()[0] * x.size()[1] if len(x.size()) > 1 else x.size()[0] for x in params if x.size()) print('Args:', args) print('Model total parameters:', total_params) ############################################################################### # Eval code ############################################################################### ood_num_examples = test_data.size(0) // 5 expected_ap = ood_num_examples / (ood_num_examples + test_data.size(0)) recall_level = 0.9 def get_base_rates(): batch, i = 0, 0 seq_len = args.bptt ntokens = len(corpus.dictionary) token_counts = np.zeros(ntokens) total_count = 0 for i in range(0, train_data.size(0), args.bptt): # Assume OE dataset is larger. It is, because we're using wikitext-2. data, targets = get_batch(train_data, i, args, seq_len=seq_len) for j in range(targets.numel()): token_counts[targets[j].data.cpu().numpy()[0]] += 1 total_count += 1 batch += 1 return token_counts / total_count print('Getting base rates...') # base_rates = get_base_rates() # np.save('./base_rates.npy', base_rates) base_rates = Variable(torch.from_numpy(np.load('./base_rates.npy').astype(np.float32))).cuda().float().squeeze() # shit happens uniform_base_rates = Variable(torch.from_numpy(np.ones(len(corpus.dictionary)).astype(np.float32))).cuda().float().squeeze() uniform_base_rates /= uniform_base_rates.numel() print('Done.') def evaluate(data_source, corpus, batch_size=10, ood=False): # Turn on evaluation mode which disables dropout. model.eval() if args.model == 'QRNN': model.reset() loss_accum = 0 losses = [] ntokens = len(corpus.dictionary) for i in range(0, data_source.size(0) - 1, args.bptt): if (i >= ood_num_examples // test_batch_size) and (ood is True): break hidden = model.init_hidden(batch_size) hidden = repackage_hidden(hidden) data, targets = get_batch(data_source, i, args, evaluation=True) output, hidden = model(data, hidden) logits = model.decoder(output) smaxes = F.softmax(logits - torch.max(logits, dim=1, keepdim=True)[0], dim=1) tmp = smaxes[range(targets.size(0)), targets] log_prob = torch.log(tmp).mean(0) # divided by seq len, so this is the negative nats per char loss = -log_prob.data.cpu().numpy()[0] loss_accum += loss # losses.append(loss) # Experimental! # anomaly_score = -torch.max(smaxes, dim=1)[0].mean() # negative MSP anomaly_score = ((smaxes).add(1e-18).log() * uniform_base_rates.unsqueeze(0)).sum(1).mean(0) # negative KL to uniform losses.append(anomaly_score.data.cpu().numpy()[0]) # return loss_accum / (len(data_source) // args.bptt), losses # Run on test data. 
print('\nPTB') test_loss, test_losses = evaluate(test_data, corpus, test_batch_size) print('=' * 89) print('| End of training | test loss {:5.2f} | test ppl {:8.2f} | test bpc {:8.3f}'.format( test_loss, math.exp(test_loss), test_loss / math.log(2))) print('=' * 89) print('\nAnswers (OOD)') ood_loss, ood_losses = evaluate(answers_data, answers_corpus, test_batch_size, ood=True) print('=' * 89) print('| End of training | test loss {:5.2f} | test ppl {:8.2f} | test bpc {:8.3f}'.format( ood_loss, math.exp(ood_loss), ood_loss / math.log(2))) print('=' * 89) show_performance(ood_losses, test_losses, expected_ap, recall_level=recall_level) print('\nEmail (OOD)') ood_loss, ood_losses = evaluate(email_data, email_corpus, test_batch_size, ood=True) print('=' * 89) print('| End of training | test loss {:5.2f} | test ppl {:8.2f} | test bpc {:8.3f}'.format( ood_loss, math.exp(ood_loss), ood_loss / math.log(2))) print('=' * 89) show_performance(ood_losses, test_losses, expected_ap, recall_level=recall_level) print('\nNewsgroup (OOD)') ood_loss, ood_losses = evaluate(newsgroup_data, newsgroup_corpus, test_batch_size, ood=True) print('=' * 89) print('| End of training | test loss {:5.2f} | test ppl {:8.2f} | test bpc {:8.3f}'.format( ood_loss, math.exp(ood_loss), ood_loss / math.log(2))) print('=' * 89) show_performance(ood_losses, test_losses, expected_ap, recall_level=recall_level) print('\nReviews (OOD)') ood_loss, ood_losses = evaluate(reviews_data, reviews_corpus, test_batch_size, ood=True) print('=' * 89) print('| End of training | test loss {:5.2f} | test ppl {:8.2f} | test bpc {:8.3f}'.format( ood_loss, math.exp(ood_loss), ood_loss / math.log(2))) print('=' * 89) show_performance(ood_losses, test_losses, expected_ap, recall_level=recall_level) print('\nWeblog (OOD)') ood_loss, ood_losses = evaluate(weblog_data, weblog_corpus, test_batch_size, ood=True) print('=' * 89) print('| End of training | test loss {:5.2f} | test ppl {:8.2f} | test bpc {:8.3f}'.format( ood_loss, math.exp(ood_loss), ood_loss / math.log(2))) print('=' * 89) show_performance(ood_losses, test_losses, expected_ap, recall_level=recall_level)
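For reference, the active anomaly score in evaluate() above is a uniform-weighted sum of log-probabilities (labelled "negative KL to uniform"), with a negative maximum-softmax-probability variant left commented out. Below is a self-contained NumPy sketch of both scores on toy softmax outputs, independent of the model and data pipeline above.

import numpy as np

def softmax(logits):
    # Row-wise softmax with the usual max-subtraction for numerical stability.
    z = logits - logits.max(axis=1, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)

def negative_msp(probs):
    # Negative maximum softmax probability: higher score = more anomalous.
    return -probs.max(axis=1)

def uniform_weighted_logprob(probs):
    # sum_i (1/V) * log p_i -- the quantity the script labels "negative KL to
    # uniform" (it equals -KL(uniform || p) minus the constant log V).
    V = probs.shape[1]
    return (np.log(probs + 1e-18) / V).sum(axis=1)

rng = np.random.default_rng(0)
peaked = softmax(rng.normal(size=(4, 10)) * 5.0)   # confident predictions
flat = softmax(rng.normal(size=(4, 10)) * 0.1)     # near-uniform predictions

# Both scores come out higher (less negative) for the near-uniform rows,
# i.e. flatter predictive distributions look more anomalous.
print('negative MSP:', negative_msp(peaked).mean(), negative_msp(flat).mean())
print('uniform-weighted log-prob:', uniform_weighted_logprob(peaked).mean(),
      uniform_weighted_logprob(flat).mean())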
python
from typing import Optional from openslides_backend.action.actions.user.user_scope_permission_check_mixin import ( UserScope, ) from openslides_backend.permissions.management_levels import ( CommitteeManagementLevel, OrganizationManagementLevel, ) from openslides_backend.permissions.permissions import Permissions from tests.system.action.base import BaseActionTestCase class ScopePermissionsTestMixin(BaseActionTestCase): def setup_admin_scope_permissions(self, scope: Optional[UserScope]) -> None: """ Helper function to setup permissions for different scopes for user 1. If no scope is given, the user has no permissions. """ if scope is None: self.set_organization_management_level(None) elif scope == UserScope.Organization: self.set_organization_management_level( OrganizationManagementLevel.CAN_MANAGE_USERS ) elif scope == UserScope.Committee: self.update_model( "user/1", { "organization_management_level": None, "committee_$1_management_level": CommitteeManagementLevel.CAN_MANAGE, }, ) elif scope == UserScope.Meeting: self.create_meeting() self.set_organization_management_level(None) self.set_user_groups(1, [3]) self.set_group_permissions(3, [Permissions.User.CAN_MANAGE]) def setup_scoped_user(self, scope: UserScope) -> None: """ Helper function to setup user 111 in different scopes. """ if scope == UserScope.Organization: self.set_models( { "committee/1": {"meeting_ids": [1]}, "committee/2": {"meeting_ids": [2]}, "meeting/1": { "user_ids": [111], "committee_id": 1, "group_ids": [11], "is_active_in_organization_id": 1, }, "meeting/2": { "user_ids": [111], "committee_id": 2, "group_ids": [22], "is_active_in_organization_id": 1, }, "user/111": { "meeting_ids": [1, 2], "committee_ids": [1, 2], "group_$_ids": ["1", "2"], "group_$1_ids": [11], "group_$2_ids": [22], }, "group/11": {"meeting_id": 1, "user_ids": [111]}, "group/22": {"meeting_id": 2, "user_ids": [111]}, } ) elif scope == UserScope.Committee: self.set_models( { "committee/1": {"meeting_ids": [1, 2]}, "meeting/1": { "user_ids": [111], "committee_id": 1, "group_ids": [11], "is_active_in_organization_id": 1, }, "meeting/2": { "user_ids": [111], "committee_id": 1, "group_ids": [11], "is_active_in_organization_id": 1, }, "user/111": { "meeting_ids": [1, 2], "committee_ids": [1], "group_$_ids": ["1", "2"], "group_$1_ids": [11], "group_$2_ids": [22], }, "group/11": {"meeting_id": 1, "user_ids": [111]}, "group/22": {"meeting_id": 2, "user_ids": [111]}, } ) elif scope == UserScope.Meeting: self.set_models( { "meeting/1": {"committee_id": 1, "is_active_in_organization_id": 1}, "user/111": {"meeting_ids": [1], "committee_ids": [1]}, } )
python
""" GUI layout that allows free positioning of children. @author Ben Giacalone """ from tools.envedit.gui.gui_layout import GUILayout class GUIFreeLayout(GUILayout): def __init__(self): GUILayout.__init__(self) self.children = [] # Adds a child to the layout def add_child(self, child): if self.rendering: child.add_render() self.children.append(child) self.update() # Removes a child from the layout def remove_child(self, child): child.stop_render() self.children.remove(child) self.update() # Removes all children from the layout def clear(self): for _ in range(len(self.children)): self.remove_child(self.children[0]) # Checks if this component contains a point in screen space, then propagates to children # Note: this layout cannot respond to events def get_selected_component(self, x, y): if self.bbox.point_inside(x, y): for child in self.children: child_component = child.get_selected_component(x, y) if child_component is not None: return child_component return None def update(self): for child in self.children: # If child is outside bounds, reposition it back in if child.bbox.x + child.bbox.width > self.bbox.x + self.bbox.width: child.bbox.x -= (child.bbox.x + child.bbox.width) - (self.bbox.x + self.bbox.width) if child.bbox.y + child.bbox.height > self.bbox.y + self.bbox.height: child.bbox.y -= (child.bbox.y + child.bbox.height) - (self.bbox.y + self.bbox.height) child.set_clip_region(self.clip_region.get_intersection(self.bbox)) child.update() def add_render(self): self.rendering = True if self.rendering: for child in self.children: child.add_render() def stop_render(self): self.rendering = False for child in self.children: child.stop_render()
python
#!/bin/python # # File: test-all.py # Authors: Leonid Shamis ([email protected]) # Keith Schwarz ([email protected]) # # A test harness that automatically runs your compiler on all of the tests # in the 'samples' directory. This should help you diagnose errors in your # compiler and will help you gauge your progress as you're going. It also # will help catch any regressions you accidentally introduce later on in # the project. # # That said, this test script is not designed to catch all errors and you # will need to do your own testing. Be sure to look over these tests # carefully and to think over what cases are covered and, more importantly, # what cases are not. import os from subprocess import * TEST_DIRECTORY = 'samples' for _, _, files in os.walk(TEST_DIRECTORY): for file in files: if not (file.endswith('.decaf') or file.endswith('.frag')): continue refName = os.path.join(TEST_DIRECTORY, '%s.out' % file.split('.')[0]) testName = os.path.join(TEST_DIRECTORY, file) result = Popen('./dcc < ' + testName, shell = True, stderr = STDOUT, stdout = PIPE) result = Popen('diff -w - ' + refName, shell = True, stdin = result.stdout, stdout = PIPE) print 'Executing test "%s"' % testName print ''.join(result.stdout.readlines())
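The harness above targets Python 2 (print statements, Popen pipelines). Below is a hedged Python 3 sketch of the same diff-against-reference loop, assuming the same ./dcc binary and samples/ directory layout.

#!/usr/bin/env python3
# Python 3 sketch of the same test loop: feed each .decaf/.frag file to ./dcc
# and diff its combined stdout/stderr against the matching .out reference.
import os
import subprocess

TEST_DIRECTORY = 'samples'

for _, _, files in os.walk(TEST_DIRECTORY):
    for name in files:
        if not name.endswith(('.decaf', '.frag')):
            continue
        test_path = os.path.join(TEST_DIRECTORY, name)
        ref_path = os.path.join(TEST_DIRECTORY, name.split('.')[0] + '.out')
        with open(test_path, 'rb') as test_file:
            compiled = subprocess.run(['./dcc'], stdin=test_file,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.STDOUT)
        diff = subprocess.run(['diff', '-w', '-', ref_path],
                              input=compiled.stdout, stdout=subprocess.PIPE)
        print('Executing test "%s"' % test_path)
        print(diff.stdout.decode(errors='replace'), end='')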
python
number_1 = int(input('Enter your first number: ')) number_2 = int(input('Enter your second number: ')) operator = input('Enter your operator: ') if operator=='+': print(number_1 + number_2) elif operator=='-': print(number_1 - number_2) elif operator=='*': print(number_1 * number_2) elif operator=='/': print(number_1 / number_2 if number_2 != 0 else 'Cannot divide by zero') else: print('Invalid operator')
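The if/elif chain can also be expressed as a dispatch table; a small illustrative sketch (standard library only, the calculate() helper is made up) that likewise guards against division by zero.

import operator

# Map each operator symbol to the corresponding standard-library function.
OPS = {'+': operator.add, '-': operator.sub, '*': operator.mul, '/': operator.truediv}

def calculate(a, b, op):
    if op not in OPS:
        return 'Invalid operator'
    if op == '/' and b == 0:
        return 'Cannot divide by zero'
    return OPS[op](a, b)

print(calculate(6, 3, '/'))   # 2.0
print(calculate(6, 0, '/'))   # Cannot divide by zero
print(calculate(6, 3, '%'))   # Invalid operator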
python
# Winston Peng # SoftDev1 pd9 # K10 -- Jinja Tuning # 2019-9-23 from flask import Flask, render_template import static.script as script app = Flask(__name__) @app.route('/occupyflaskst') def occupations(): return render_template( 'occ.html', team = 'Connor Oh, Nahi Khan, Winston Peng -- Team Beaker', # <h1> header = 'Jinja Tuning -- Occupations', # <title> title = 'Job Occupations', # This gets the random job randOcc = script.randJob(), # Dictionary of the list occ = script.csvDict ) if __name__ == '__main__': app.debug = True app.run()
python
#!/usr/bin/env python # macro_avg.py v1.0 9-19-2012 Jeff Doak [email protected] from chargedensity import * import numpy as np import sys if len(sys.argv) > 1: if str(sys.argv[1]) == "CHG": a = ChargeDensity(str(sys.argv[1]),format_="chgcar") else: a = ChargeDensity(str(sys.argv[1])) else: a = ChargeDensity("LOCPOT") avg1 = a.avg_density_vol() avg2 = np.average(a.density) A = np.linalg.norm(a.unitcell.cell_vec[0]) B = np.linalg.norm(a.unitcell.cell_vec[1]) C = np.linalg.norm(a.unitcell.cell_vec[2]) area = A*B print "avg1",avg1 print "avg2",avg2 print area print A,B,C sys.exit() a.unitcell.scale = 1.0 den_z = a.integrate_z_density() z_pos = np.linspace(0,a.unitcell.cell_vec[2,2],len(den_z)) macro_z = a.macro_avg_z(p1) for i in range(len(den_z)): print z_pos[i],den_z[i],macro_z[i] # Calculate bulk and vacuum average, assuming that the bulk is located in the # 1st half of the cell (along z) and the vacuum is in the second half of the # cell. bulk_start = 0.2 bulk_stop = 0.3 vac_start = 0.7 vac_stop = 0.8 bi = int(np.floor(bulk_start*len(den_z))) bf = int(np.floor(bulk_stop*len(den_z))) vi = int(np.floor(vac_start*len(den_z))) vf = int(np.floor(vac_stop*len(den_z))) bulk_avg = np.average(macro_z[bi:bf]) bulk_std = np.std(macro_z[bi:bf]) #bulk_center = macro_z[int(np.floor(0.25*len(den_z)))] vac_avg = np.average(macro_z[vi:vf]) vac_std = np.std(macro_z[vi:vf]) #vac_center = macro_z[int(np.floor(0.75*len(den_z)))] print print "Bulk_avg_(eV) Bulk_std_(eV) Vac_avg_(eV) Vac_std_(eV)" print bulk_avg,bulk_std,vac_avg,vac_std #print "Bulk_avg_(eV) Bulk_center_(eV) Vac_avg_(eV) Vac_center_(eV)" #print bulk_avg,bulk_center,vac_avg,vac_center
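For reference, a self-contained NumPy sketch of the two ingredients the script reports: a sliding-window ("macroscopic") average along z and windowed bulk/vacuum means. The profile and window length here are synthetic and independent of the ChargeDensity class.

import numpy as np

z = np.linspace(0.0, 20.0, 400)
# Oscillating "bulk" region followed by a flat "vacuum" region.
profile = np.where(z < 10.0, np.sin(2 * np.pi * z), 0.0)

def macro_avg(values, window_pts):
    # Periodic running mean over a window of window_pts grid points.
    kernel = np.ones(window_pts) / window_pts
    padded = np.concatenate([values, values[:window_pts - 1]])
    return np.convolve(padded, kernel, mode='valid')[:len(values)]

macro = macro_avg(profile, window_pts=20)  # ~one oscillation period of this profile

def window_stats(values, frac_start, frac_stop):
    # Mean and standard deviation over a fractional slice of the z grid.
    i, f = int(frac_start * len(values)), int(frac_stop * len(values))
    return values[i:f].mean(), values[i:f].std()

print('bulk   avg/std:', window_stats(macro, 0.2, 0.3))
print('vacuum avg/std:', window_stats(macro, 0.7, 0.8))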
python
""" A python module to communicate with Elecrolux Connectivity Platform """ __all__ = [ 'Error', 'LoginError', 'RequestError', 'ResponseError', 'Session' ] from .Session import ( Error, LoginError, RequestError, ResponseError, Session )
python
from enum import Enum class Transition(Enum): """ Enumeration of the transitions a job can go through. """ ACQUIRE = 0 RELEASE = 1 START = 2 PROGRESS = 3 FINISH = 4 ERROR = 5 RESET = 6 ABORT = 7 CANCEL = 8 @property def json_property_name(self) -> str: """ Gets the name of the JSON property for this transition. """ return f"on_{self.name.lower()}"
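A usage sketch that could be appended to the module above, listing the JSON property name each transition maps to.

if __name__ == "__main__":
    # Each member maps to "on_<lowercased name>", e.g. ACQUIRE -> on_acquire.
    for transition in Transition:
        print(f"{transition.name} -> {transition.json_property_name}")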
python
from ..models.box_daily_square import BoxDailySquare class BoxDailySquareManager(object): def create_box(self, data): box, created = BoxDailySquare.objects.get_or_create( user=data['user'], office=data['office'] ) return box
python
import os import shutil import subprocess CONNECT_REPORTS_REPO_URL = 'https://github.com/cloudblue/connect-reports.git' BASE_DIR = os.path.abspath( os.path.normpath( os.path.join( os.path.dirname(__file__), '..', ), ), ) REPO_EMBED_DIR = os.path.join( BASE_DIR, 'connect/.data/connect_reports', ) def get_latest_reports(): if os.path.exists(REPO_EMBED_DIR): shutil.rmtree(REPO_EMBED_DIR) print(f'Cloning {CONNECT_REPORTS_REPO_URL}...') subprocess.check_call( [ 'git', 'clone', CONNECT_REPORTS_REPO_URL, REPO_EMBED_DIR, ], ) result = subprocess.run( [ 'git', '-C', REPO_EMBED_DIR, 'rev-list', '--tags', '--max-count=1', ], capture_output=True, stdin=subprocess.DEVNULL, start_new_session=True, ) result.check_returncode() commit_id = result.stdout.decode().replace('\n', '') print(f'Checkout latest tag ({commit_id})...') subprocess.check_call( [ 'git', '-C', REPO_EMBED_DIR, 'checkout', commit_id, ], ) print(f'Latest reports saved in {REPO_EMBED_DIR}') if __name__ == '__main__': get_latest_reports()
python
from scipy import integrate def integrand(x0, x1, x2): return x2 * x1**2 + x0 x2_lim = (0.0, 0.5) x1_lim = lambda x2:(0.0, 1.0-2.0*x2) x0_lim = lambda x1,x2:(-1.0, 1.0+2.0*x2-x1) # int_{x2=0}^{0.5} int_{x1=0}^{1-2x2} int_{x0=-1}^{1+2x2-x1} (x2 x1**2 + x0) dx0 dx1 dx2 integral,error = integrate.nquad(integrand, [x0_lim, x1_lim, x2_lim]) print(integral, error)
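As a rough cross-check of the nquad result, the same integral can be estimated by Monte Carlo over a bounding box (a sketch using NumPy only; the sample count is arbitrary).

import numpy as np

rng = np.random.default_rng(0)
n = 2_000_000
# Bounding box containing the whole region: x2 in [0, 0.5], x1 in [0, 1],
# x0 in [-1, 2]; its volume is 0.5 * 1 * 3 = 1.5.
x2 = rng.uniform(0.0, 0.5, n)
x1 = rng.uniform(0.0, 1.0, n)
x0 = rng.uniform(-1.0, 2.0, n)
inside = (x1 < 1.0 - 2.0 * x2) & (x0 < 1.0 + 2.0 * x2 - x1)
estimate = 1.5 * np.mean(np.where(inside, x2 * x1**2 + x0, 0.0))
print('Monte Carlo estimate:', estimate)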
python
#!/usr/bin/env python # # Copyright (c), 2016-2020, SISSA (International School for Advanced Studies). # All rights reserved. # This file is distributed under the terms of the MIT License. # See the file 'LICENSE' in the root directory of the present # distribution, or http://opensource.org/licenses/MIT. # # @author Davide Brunato <[email protected]> # import unittest import platform import warnings import os from xmlschema import XMLSchemaParseError, XMLSchemaIncludeWarning, XMLSchemaImportWarning from xmlschema.etree import etree_element from xmlschema.namespaces import SCHEMAS_DIR from xmlschema.qnames import XSD_ELEMENT, XSI_TYPE from xmlschema.validators import XMLSchema11 from xmlschema.testing import SKIP_REMOTE_TESTS, XsdValidatorTestCase, print_test_header class TestXMLSchema10(XsdValidatorTestCase): TEST_CASES_DIR = os.path.join(os.path.dirname(__file__), '../test_cases') def test_schema_validation(self): schema = self.schema_class(self.vh_xsd_file) self.assertEqual(schema.validation, 'strict') schema = self.schema_class(self.vh_xsd_file, validation='lax') self.assertEqual(schema.validation, 'lax') schema = self.schema_class(self.vh_xsd_file, validation='skip') self.assertEqual(schema.validation, 'skip') with self.assertRaises(ValueError): self.schema_class(self.vh_xsd_file, validation='none') def test_schema_string_repr(self): schema = self.schema_class(self.vh_xsd_file) tmpl = "%s(basename='vehicles.xsd', namespace='http://example.com/vehicles')" self.assertEqual(str(schema), tmpl % self.schema_class.__name__) def test_schema_copy(self): schema = self.vh_schema.copy() self.assertNotEqual(id(self.vh_schema), id(schema)) self.assertNotEqual(id(self.vh_schema.namespaces), id(schema.namespaces)) self.assertNotEqual(id(self.vh_schema.maps), id(schema.maps)) def test_resolve_qname(self): schema = self.schema_class("""<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> <xs:element name="root" /> </xs:schema>""") self.assertEqual(schema.resolve_qname('xs:element'), XSD_ELEMENT) self.assertEqual(schema.resolve_qname('xsi:type'), XSI_TYPE) self.assertEqual(schema.resolve_qname(XSI_TYPE), XSI_TYPE) self.assertEqual(schema.resolve_qname('element'), 'element') self.assertRaises(ValueError, schema.resolve_qname, '') self.assertRaises(ValueError, schema.resolve_qname, 'xsi:a type ') self.assertRaises(ValueError, schema.resolve_qname, 'xml::lang') def test_global_group_definitions(self): schema = self.check_schema(""" <xs:group name="wrong_child"> <xs:element name="foo"/> </xs:group>""", validation='lax') self.assertEqual(len(schema.errors), 1) self.check_schema('<xs:group name="empty" />', XMLSchemaParseError) self.check_schema('<xs:group name="empty"><xs:annotation/></xs:group>', XMLSchemaParseError) def test_wrong_includes_and_imports(self): with warnings.catch_warnings(record=True) as context: warnings.simplefilter("always") self.check_schema(""" <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" targetNamespace="ns"> <xs:include schemaLocation="example.xsd" /> <xs:import schemaLocation="example.xsd" /> <xs:redefine schemaLocation="example.xsd"/> <xs:import namespace="http://missing.example.test/" /> <xs:import/> </xs:schema> """) self.assertEqual(len(context), 3, "Wrong number of include/import warnings") self.assertEqual(context[0].category, XMLSchemaIncludeWarning) self.assertEqual(context[1].category, XMLSchemaIncludeWarning) self.assertEqual(context[2].category, XMLSchemaImportWarning) 
self.assertTrue(str(context[0].message).startswith("Include")) self.assertTrue(str(context[1].message).startswith("Redefine")) self.assertTrue(str(context[2].message).startswith("Import of namespace")) def test_wrong_references(self): # Wrong namespace for element type's reference self.check_schema(""" <xs:element name="dimension" type="xs:dimensionType"/> <xs:simpleType name="dimensionType"> <xs:restriction base="xs:short"/> </xs:simpleType> """, XMLSchemaParseError) def test_annotations(self): schema = self.check_schema(""" <xs:element name='foo'> <xs:annotation /> </xs:element>""") self.assertIsNotNone(schema.elements['foo'].annotation) schema = self.check_schema(""" <xs:simpleType name='Magic'> <xs:annotation> <xs:documentation> stuff </xs:documentation> </xs:annotation> <xs:restriction base='xs:string'> <xs:enumeration value='A'/> </xs:restriction> </xs:simpleType>""") self.assertIsNotNone(schema.types["Magic"].annotation) self.check_schema(""" <xs:simpleType name='Magic'> <xs:annotation /> <xs:annotation /> <xs:restriction base='xs:string'> <xs:enumeration value='A'/> </xs:restriction> </xs:simpleType>""", XMLSchemaParseError) def test_base_schemas(self): self.schema_class(os.path.join(SCHEMAS_DIR, 'xml_minimal.xsd')) def test_root_elements(self): # Test issue #107 fix schema = self.schema_class("""<?xml version="1.0" encoding="utf-8"?> <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"> <xs:element name="root1" type="root"/> <xs:element name="root2" type="root"/> <xs:complexType name="root"> <xs:sequence> <xs:element name="elementWithNoType"/> </xs:sequence> </xs:complexType> </xs:schema>""") self.assertEqual(set(schema.root_elements), {schema.elements['root1'], schema.elements['root2']}) def test_is_restriction_method(self): # Test issue #111 fix schema = self.schema_class(source=self.casepath('issues/issue_111/issue_111.xsd')) extended_header_def = schema.types['extendedHeaderDef'] self.assertTrue(extended_header_def.is_derived(schema.types['blockDef'])) @unittest.skipIf(SKIP_REMOTE_TESTS or platform.system() == 'Windows', "Remote networks are not accessible or avoid SSL verification error on Windows.") def test_remote_schemas_loading(self): col_schema = self.schema_class("https://raw.githubusercontent.com/brunato/xmlschema/master/" "tests/test_cases/examples/collection/collection.xsd", timeout=300) self.assertTrue(isinstance(col_schema, self.schema_class)) vh_schema = self.schema_class("https://raw.githubusercontent.com/brunato/xmlschema/master/" "tests/test_cases/examples/vehicles/vehicles.xsd", timeout=300) self.assertTrue(isinstance(vh_schema, self.schema_class)) def test_schema_defuse(self): vh_schema = self.schema_class(self.vh_xsd_file, defuse='always') self.assertIsInstance(vh_schema.root, etree_element) for schema in vh_schema.maps.iter_schemas(): self.assertIsInstance(schema.root, etree_element) class TestXMLSchema11(TestXMLSchema10): schema_class = XMLSchema11 if __name__ == '__main__': print_test_header() unittest.main()
python
"""Dyson new v2 pure Hot+Cool device.""" import logging from .const import HeatMode from .dyson_pure_cool import DysonPureCool from .utils import printable_fields _LOGGER = logging.getLogger(__name__) class DysonPureHotCool(DysonPureCool): """Dyson new Pure Hot+Cool device.""" def _parse_command_args(self, **kwargs): """Parse command arguments. :param kwargs Arguments :return payload dictionary """ data = super()._parse_command_args(**kwargs) heat_target = kwargs.get('heat_target') heat_mode = kwargs.get('heat_mode') f_heat_target = heat_target if heat_target \ else self._current_state.heat_target f_heat_mode = heat_mode.value if heat_mode \ else self._current_state.heat_mode data["hmax"] = f_heat_target data["hmod"] = f_heat_mode return data def enable_heat_mode(self): """Turn on head mode.""" data = { "hmod": HeatMode.HEAT_ON.value } self.set_fan_configuration(data) def disable_heat_mode(self): """Turn off head mode.""" data = { "hmod": HeatMode.HEAT_OFF.value } self.set_fan_configuration(data) def set_heat_target(self, heat_target): """Set temperature target. Use either const.HeatTarget.celsius or const.HeatTarget.fahrenheit to get a string representation of the target temperature in kelvins. ex. set_heat_target(const.HeatTarget.celsius(24)) :param heat_target: target temperature in Kalvin """ data = { "hmax": heat_target } self.set_fan_configuration(data) def __repr__(self): """Return a String representation.""" fields = self._fields() return 'DysonPureHotCool(' + ",".join( printable_fields(fields)) + ')'
python
from .data import COVID19India from .mongo_db import get_data, upload_data from .data_processing import get_daily_data, get_state_daily, get_interval_data from .inshorts_news import InshortsNews
python
from .csr import skeleton_to_csgraph, branch_statistics, summarize, Skeleton __version__ = '0.10.0-dev' __all__ = ['skeleton_to_csgraph', 'branch_statistics', 'summarize', 'Skeleton']
python
from typing import List class NumMatrix: def __init__(self, matrix: List[List[int]]): if len(matrix) == 0: self.dp = [] return width, height = len(matrix[0]), len(matrix) self.dp = [[0] * (width + 1) for _ in range(height + 1)] for i in range(1, height+1): for j in range(1, width+1): # 2D prefix sum: dp[i][j] = dp[i-1][j] + dp[i][j-1] + matrix[i-1][j-1] - dp[i-1][j-1] self.dp[i][j] = self.dp[i-1][j] + self.dp[i][j-1] + matrix[i-1][j-1] - self.dp[i-1][j-1] def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int: if len(self.dp) == 0: return 0 s = self.dp[row2+1][col2+1] - self.dp[row2+1][col1] - self.dp[row1][col2+1] + self.dp[row1][col1] return s # Your NumMatrix object will be instantiated and called as such: # obj = NumMatrix(matrix) # param_1 = obj.sumRegion(row1,col1,row2,col2)
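A usage sketch that could sit below the class, using a small sample matrix; the expected sums in the comments were worked out by hand.

if __name__ == "__main__":
    grid = [
        [3, 0, 1, 4, 2],
        [5, 6, 3, 2, 1],
        [1, 2, 0, 1, 5],
        [4, 1, 0, 1, 7],
        [1, 0, 3, 0, 5],
    ]
    nm = NumMatrix(grid)
    print(nm.sumRegion(2, 1, 4, 3))  # 8  (rows 2-4, cols 1-3)
    print(nm.sumRegion(1, 1, 2, 2))  # 11
    print(nm.sumRegion(1, 2, 2, 4))  # 12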
python
# ############################################################################ # Copyright (c) 2017 Huawei Technologies Co.,Ltd and others. # # All rights reserved. This program and the accompanying materials # are made available under the terms of the Apache License, Version 2.0 # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 # ############################################################################ from __future__ import print_function from __future__ import absolute_import import logging import time from datetime import datetime from yardstick.benchmark.scenarios import base from yardstick.common import openstack_utils LOG = logging.getLogger(__name__) class Resize(base.Scenario): """Resize a server to a new flavor Parameters server_id - ID of the server type: string unit: N/A default: null server - dict of the server type: dict unit: N/A default: null Either server_id or server is required. flavor_id - ID of the flavor type: string unit: N/A default: null flavor - dict of the flavor type: dict unit: N/A default: null Either flavor_id or flavor is required. Outputs rc - response code of resize operation 0 for success 1 for failure type: int unit: N/A resize_time - the time the resize operation took type: float unit: N/A default: null error_message - the error message (only if the resize fails) type: string unit: N/A default: null """ __scenario_type__ = "RESIZE" def __init__(self, scenario_cfg, context_cfg): self.scenario_cfg = scenario_cfg self.options = self.scenario_cfg.get('options', {}) self.nova_client = openstack_utils.get_nova_client() def run(self, result): default_server_id = self.options.get('server', {}).get('id', '') server_id = self.options.get('server_id', default_server_id) default_flavor_id = self.options.get('flavor', {}).get('id', '') flavor_id = self.options.get('flavor_id', default_flavor_id) LOG.debug('Server id is %s, Flavor id is %s', server_id, flavor_id) keys = self.scenario_cfg.get('output', '').split() LOG.info('Start to resize') try: self.nova_client.servers.resize(server_id, flavor_id) except Exception as e: values = [1, str(e).split('.')[0]] else: start_time = datetime.now() self._wait_check_status(server_id, 'verify_resize') LOG.info('Server status changed to VERIFY_RESIZE') LOG.info('Start to confirm resize') self.nova_client.servers.confirm_resize(server_id) self._wait_check_status(server_id, 'active') LOG.info('Server status changed to ACTIVE') end_time = datetime.now() LOG.info('Resize successful') duration = end_time - start_time resize_time = duration.seconds + duration.microseconds * 1.0 / 1e6 values = [0, resize_time] return self._push_to_outputs(keys, values) def _wait_check_status(self, server_id, wait_status): while True: status = self.nova_client.servers.get(server_id).status.lower() if status == wait_status: break time.sleep(1)
python
import os, sys, re, time import urllib, urllib2 from BeautifulSoup import BeautifulSoup #import beautifulsoup4 import gzip from StringIO import StringIO import MySQLdb import simplejson as json import datetime import pandas as pd import pymongo #from cassandra.cluster import Cluster import conf.config as config from cryptocurry.crypto_settings import * from requests import Request, Session from requests.exceptions import ConnectionError, Timeout, TooManyRedirects sleeptime = config.SLEEPTIME class NoRedirectHandler(urllib2.HTTPRedirectHandler): def http_error_302(self, req, fp, code, msg, headers): infourl = urllib.addinfourl(fp, headers, req.get_full_url()) infourl.status = code infourl.code = code return infourl http_error_300 = http_error_302 http_error_301 = http_error_302 http_error_303 = http_error_302 http_error_307 = http_error_302 def decodeGzippedContent(encoded_content): response_stream = StringIO(encoded_content) decoded_content = "" try: gzipper = gzip.GzipFile(fileobj=response_stream) decoded_content = gzipper.read() except: # Maybe this isn't gzipped content after all.... decoded_content = encoded_content return(decoded_content) def getmongoclient(): client = pymongo.MongoClient(port=config.MONGO_PORT) def scrapeFromInvest(): url = "https://www.investing.com/crypto/" opener = urllib2.build_opener(urllib2.HTTPHandler(), urllib2.HTTPSHandler(), NoRedirectHandler()) http_headers = { 'User-Agent' : r'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.110 Safari/537.36', 'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language' : 'en-US,en;q=0.8', 'Accept-Encoding' : 'gzip,deflate,sdch', 'Connection' : 'keep-alive', 'Host' : 'www.investing.com', 'Referer' : 'https://www.google.com' } investing_request = urllib2.Request(url, None, http_headers) investing_response = None try: investing_response = opener.open(investing_request) except: print "Could not get the raw cryptocurrency data - Error: %s\n"%sys.exc_info()[1].__str__() return False if not investing_response: print "Could not retrieve response from the request to https://www.investing.com/crypto/" return False investing_data_enc = investing_response.read() investing_data = decodeGzippedContent(investing_data_enc) #print investing_data soup = BeautifulSoup(investing_data) datatds = soup.findAll("td", {'class' : 'flag'}) mongoconn = pymongo.MongoClient("mongodb://%s:%s@localhost:%s/cryptocurrency"%(config.MONGO_USER, config.MONGO_PASSWD, config.MONGO_PORT)) db = mongoconn.cryptocurrency for td in datatds: currnametd = td.findNext('td') currname = currnametd['title'] currnametd = currnametd.findNext('td') currsymbol = currnametd['title'] currnametd = currnametd.findNext('td') currprice = currnametd.getText() currprice = currprice.replace("$", "") currprice = currprice.replace(",", "") currnametd = currnametd.findNext('td') market_cap = currnametd.getText() market_cap = market_cap.replace("&#x24;", "") currnametd = currnametd.findNext('td') vol24h = currnametd.getText() vol24h = vol24h.replace("&#x24;", "") currnametd = currnametd.findNext('td') totalvol = currnametd.getText() totalvol = totalvol.replace('%', '') currnametd = currnametd.findNext('td') chg24h = currnametd.getText() chg24h = chg24h.replace('+', "") chg24h = chg24h.replace('%', "") currnametd = currnametd.findNext('td') chg7d = currnametd.getText() chg7d = chg7d.replace('+', "") chg7d = chg7d.replace('%', "") mongodata = {'currency_name' : currname, 'currency_symbol' : currsymbol, 
'currency_price' : currprice, 'market_cap' : market_cap, 'volume_24hr' : vol24h, 'total_volume' : totalvol, 'change_24hr' : chg24h, 'change_7days' : chg7d, 'entrydatetime' : str(datetime.datetime.now())} try: result = db.investdata.insert_one(mongodata) except: print "Could not enter data in mongo db. Error: %s\n"%sys.exc_info()[1].__str__() print "Done collecting data from investing at %s...\n"%str(datetime.datetime.now()) return True def getDataFromCoinMarket(): coinmarketapikey = "edc74898-5367-43bf-b3cb-2af1ab8b42b7" opener = urllib2.build_opener(urllib2.HTTPHandler(), urllib2.HTTPSHandler(), NoRedirectHandler()) http_headers = { 'User-Agent' : r'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.110 Safari/537.36', 'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language' : 'en-US,en;q=0.8', 'Accept-Encoding' : 'gzip,deflate,sdch', 'Connection' : 'keep-alive', 'Host' : 'pro-api.coinmarketcap.com', 'X-CMC_PRO_API_KEY' : coinmarketapikey } listings_latest_url = "https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest?sort=market_cap&start=1&limit=50&convert=USD&cryptocurrency_type=coins" listings_request = urllib2.Request(listings_latest_url, None, http_headers) listings_response = None try: listings_response = opener.open(listings_request) except: print "Could not get the cryptocurrency listings data - Error: %s\n"%sys.exc_info()[1].__str__() return False if not listings_response: print "Could not retrieve response from the request to https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest" return False listings_data_enc = listings_response.read() listings_data = decodeGzippedContent(listings_data_enc) #print listings_data listings_dict = json.loads(listings_data) listings_data_list = listings_dict['data'] curr_data_map = {} mongoconn = pymongo.MongoClient("mongodb://%s:%s@localhost:%s/cryptocurrency"%(config.MONGO_USER, config.MONGO_PASSWD, config.MONGO_PORT)) db = mongoconn.cryptocurrency for elemdict in listings_data_list: idno = elemdict['id'] name = elemdict['name'] volume_24h = elemdict['quote']['USD']['volume_24h'] price = elemdict['quote']['USD']['price'] percent_change_1h = elemdict['quote']['USD']['percent_change_1h'] percent_change_24h = elemdict['quote']['USD']['percent_change_24h'] percent_change_7d = elemdict['quote']['USD']['percent_change_7d'] last_updated = elemdict['quote']['USD']['last_updated'] mongodata = {'idno' : str(idno), 'currency_name' : name, 'currency_price' : price, 'volume_24hr' : volume_24h, 'percent_change_1hr' : percent_change_1h, 'percent_change_24hr' : percent_change_24h, 'percent_change_7days' : percent_change_7d, 'last_updated' : last_updated, 'entrydatetime' : str(datetime.datetime.now())} try: result = db.coinmarketdata.insert_one(mongodata) except: print "Could not enter data in mongo db. Error: %s\n"%sys.exc_info()[1].__str__() print "Collected data from coinmarket at %s...\n"%str(datetime.datetime.now()) return curr_data_map """ This uses the coinmarketcap API - Basic Plan (Free). 
""" def coinmarketcap(): url = COIN_MARKET_CAP_DOMAIN + '/v1/cryptocurrency/listings/latest' parameters = { 'start':'1', 'limit':'100', 'convert':'USD' } headers = { 'Accepts': 'application/json', 'X-CMC_PRO_API_KEY': COIN_MARKET_CAP_API_KEY, } session = Session() session.headers.update(headers) try: response = session.get(url, params=parameters) data = json.loads(response.text) except (ConnectionError, Timeout, TooManyRedirects) as e: print(e) print "Could not collect data from CoinMarketCap. Returning." return 0 infolist = [] mongoconn = pymongo.MongoClient("mongodb://%s:%s@localhost:%s/cryptocurrency"%(config.MONGO_USER, config.MONGO_PASSWD, config.MONGO_PORT)) db = mongoconn.cryptocurrency cryptocurrencydatalist = data[u'data'] infolist = [] mongoconn = pymongo.MongoClient("mongodb://%s:%s@localhost:%s/cryptocurrency"%(config.MONGO_USER, config.MONGO_PASSWD, config.MONGO_PORT)) for cryptodict in cryptocurrencydatalist: last_updated, entrydatetime, cryptocurrname, cryptosymbol, marketcap,price, supply, volume, percent_change_1h, percent_change_24h, percent_change_7d = "", "", "", "", "", "", "", "", "", "", "" entrydatetime = str(datetime.datetime.now()) if cryptodict.has_key('last_updated'): last_updated = cryptodict['last_updated'] else: last_updated = entrydatetime if cryptodict.has_key(u'name'): cryptocurrname = cryptodict[u'name'] else: continue # If no name is found, then it is not of much use to us. if cryptodict.has_key(u'symbol'): cryptosymbol = cryptodict[u'symbol'] else: cryptosymbol = cryptocurrname if cryptodict.has_key(u'quote') and cryptodict[u'quote'].has_key('USD') and cryptodict[u'quote'][u'USD'].has_key(u'market_cap'): marketcap = cryptodict[u'quote'][u'USD'][u'market_cap'] else: marketcap = 0.00 if cryptodict.has_key(u'quote') and cryptodict[u'quote'].has_key('USD') and cryptodict[u'quote'][u'USD'].has_key(u'price'): price = cryptodict[u'quote'][u'USD'][u'price'] else: price = 0.00 if cryptodict.has_key(u'total_supply'): supply = cryptodict['total_supply'] else: supply = 0 if cryptodict.has_key(u'quote') and cryptodict[u'quote'].has_key('USD') and cryptodict[u'quote'][u'USD'].has_key(u'volume_24h'): volume = cryptodict[u'quote'][u'USD'][u'volume_24h'] else: volume = 0.00 if cryptodict.has_key(u'quote') and cryptodict[u'quote'].has_key('USD') and cryptodict[u'quote'][u'USD'].has_key(u'percent_change_1h'): percent_change_1h = cryptodict[u'quote'][u'USD'][u'percent_change_1h'] else: percent_change_1h = 0.00 if cryptodict.has_key(u'quote') and cryptodict[u'quote'].has_key('USD') and cryptodict[u'quote'][u'USD'].has_key(u'percent_change_24h'): percent_change_24h = cryptodict[u'quote'][u'USD'][u'percent_change_24h'] else: percent_change_24h = 0.00 if cryptodict.has_key(u'quote') and cryptodict[u'quote'].has_key('USD') and cryptodict[u'quote'][u'USD'].has_key(u'percent_change_7d'): percent_change_7d = cryptodict[u'quote'][u'USD'][u'percent_change_7d'] else: percent_change_7d = 0.00 valdict = {'currency_name' : cryptocurrname, 'currency_symbol' : cryptosymbol, 'marketcap' : marketcap, 'currency_price' : price, 'supply' : supply, 'volume' : volume, 'percent1hr' : percent_change_1h, 'percent24hr' : percent_change_24h, 'percent7d' : percent_change_7d, 'entrydatetime' : str(last_updated)} infolist.append(valdict) try: result = db.coinmarketcapdata.insert_one(valdict) #print valdict,"\n\n" except: print "Could not enter data in mongo db. 
Error: %s\n"%sys.exc_info()[1].__str__() print "Collected data from coinmarketcap website.\n" return infolist """ This is an index for 30 cryptocurrencies combined on some mathematical basis. This information is useful to those who want to invest in cryptocurrencies and hedge their risks by putting various sums in the 30 selected cryptocurrencies. In order to know more, please refer to the explanation at https://cci30.com/ """ def cci30index(): cci30url = "https://cci30.com/ajax/getIndexHistory.php" opener = urllib2.build_opener(urllib2.HTTPHandler(), urllib2.HTTPSHandler(), NoRedirectHandler()) http_headers = { 'User-Agent' : r'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.110 Safari/537.36', 'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language' : 'en-US,en;q=0.8', 'Accept-Encoding' : 'gzip,deflate,sdch', 'Connection' : 'keep-alive', 'Host' : 'cci30.com', 'Referer' : 'https://www.google.com' } cci30_request = urllib2.Request(cci30url, None, http_headers) cci30_response = None try: cci30_response = opener.open(cci30_request) except: print "Could not get the raw cryptocurrency data - Error: %s\n"%sys.exc_info()[1].__str__() return False content = decodeGzippedContent(cci30_response.read()) # content is a csv formatted data set mongoconn = pymongo.MongoClient("mongodb://%s:%s@localhost:%s/cryptocurrency"%(config.MONGO_USER, config.MONGO_PASSWD, config.MONGO_PORT)) db = mongoconn.cryptocurrency headers = [] records = [] alldata = [] datarecs = content.split("\n") headers = datarecs[0].split(",") for i in range(headers.__len__()): headers[i] = headers[i].strip() # Remove whitespace characters for datastr in datarecs: datalist = datastr.split(",") for i in range(1, datalist.__len__()): datalist[i] = datalist[i].strip() records.append(datalist) for recdata in records[1:]: ictr = 0 datadict = {} for rdata in recdata: datadict[headers[ictr]] = rdata ictr += 1 if ictr == headers.__len__(): break try: result = db.cci30data.insert_one(datadict) alldata.append(datadict) except: print "Error: ", sys.exc_info()[1].__str__(), "\n" print "collected data from cci30 index at %s"%datetime.datetime.now() return alldata """ No public data feed for this index could be found, either as an API or as scrape-able page data, so this function is currently a stub until a usable source is identified. """ def bloombergcryptoindex(): url = "https://www.bloomberg.com/professional/product/indices/bloomberg-galaxy-crypto-index/" def collectionEventLoop(scraper_functions_list): lasttime = 0 while True: currtime = time.time() if currtime - lasttime < sleeptime: # if we scraped within the last 'sleeptime', we go to sleep time.sleep(sleeptime) continue for i in range(0, scraper_functions_list.__len__()): scraper_functions_list[i]() lasttime = currtime if __name__ == "__main__": scraperslist = [scrapeFromInvest, getDataFromCoinMarket, coinmarketcap, cci30index,] # Add scraper functions here. # scraperslist = [scrapeFromInvest, getDataFromCoinMarket, cci30index,] # Add scraper functions here.
collectionEventLoop(scraperslist)
python
n = input('Type something: ') print('The primitive type of what was typed is: {}'.format(type(n))) print('Is it numeric? {}'.format(n.isnumeric())) # Checks whether it is a number; if so, prints True print('Is it alphabetic? {}'.format(n.isalpha())) # Checks whether it is only letters; if so, prints True print('Is it alphanumeric? {}'.format(n.isalnum())) # Checks whether it is letters or numbers; if so, prints True print('Is it all uppercase? {}'.format(n.isupper())) # Checks whether everything is in uppercase print('Is it within the ASCII table? {}'.format(n.isascii())) # Checks whether n is within the ASCII table print('Is it only spaces? {}'.format(n.isspace()))
python
import sys from rpython.tool.pairtype import pairtype from rpython.flowspace.model import Constant from rpython.rtyper.rdict import AbstractDictRepr, AbstractDictIteratorRepr from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rlib import objectmodel, jit, rgc, types from rpython.rlib.signature import signature from rpython.rlib.objectmodel import specialize, likely from rpython.rtyper.debug import ll_assert from rpython.rlib.rarithmetic import r_uint, intmask from rpython.rtyper import rmodel from rpython.rtyper.error import TyperError from rpython.rtyper.annlowlevel import llhelper # ____________________________________________________________ # # generic implementation of RPython dictionary, with parametric DICTKEY and # DICTVALUE types. The basic implementation is a sparse array of indexes # plus a dense array of structs that contain keys and values. struct looks # like that: # # # struct dictentry { # DICTKEY key; # DICTVALUE value; # long f_hash; # (optional) key hash, if hard to recompute # bool f_valid; # (optional) the entry is filled # } # # struct dicttable { # int num_live_items; # int num_ever_used_items; # int resize_counter; # {byte, short, int, long} *indexes; # dictentry *entries; # lookup_function_no; # one of the four possible functions for different # # size dicts; the rest of the word is a counter for how # # many 'entries' at the start are known to be deleted # (Function DICTKEY, DICTKEY -> bool) *fnkeyeq; # (Function DICTKEY -> int) *fnkeyhash; # } # # @jit.look_inside_iff(lambda d, key, hash, flag: jit.isvirtual(d)) @jit.oopspec('ordereddict.lookup(d, key, hash, flag)') def ll_call_lookup_function(d, key, hash, flag): fun = d.lookup_function_no & FUNC_MASK # This likely() here forces gcc to compile the check for fun == FUNC_BYTE # first. Otherwise, this is a regular switch and gcc (at least 4.7) # compiles this as a series of checks, with the FUNC_BYTE case last. # It sounds minor, but it is worth 6-7% on a PyPy microbenchmark. if likely(fun == FUNC_BYTE): return ll_dict_lookup(d, key, hash, flag, TYPE_BYTE) elif fun == FUNC_SHORT: return ll_dict_lookup(d, key, hash, flag, TYPE_SHORT) elif IS_64BIT and fun == FUNC_INT: return ll_dict_lookup(d, key, hash, flag, TYPE_INT) elif fun == FUNC_LONG: return ll_dict_lookup(d, key, hash, flag, TYPE_LONG) assert False def get_ll_dict(DICTKEY, DICTVALUE, get_custom_eq_hash=None, DICT=None, ll_fasthash_function=None, ll_hash_function=None, ll_eq_function=None, method_cache={}, dummykeyobj=None, dummyvalueobj=None, rtyper=None): # get the actual DICT type. 
if DICT is None, it's created, otherwise # forward reference is becoming DICT if DICT is None: DICT = lltype.GcForwardReference() # compute the shape of the DICTENTRY structure entryfields = [] entrymeths = { 'allocate': lltype.typeMethod(_ll_malloc_entries), 'delete': _ll_free_entries, 'must_clear_key': (isinstance(DICTKEY, lltype.Ptr) and DICTKEY._needsgc()), 'must_clear_value': (isinstance(DICTVALUE, lltype.Ptr) and DICTVALUE._needsgc()), } if getattr(ll_eq_function, 'no_direct_compare', False): entrymeths['no_direct_compare'] = True # * the key entryfields.append(("key", DICTKEY)) # * the state of the entry - trying to encode it as dummy objects if dummykeyobj: # all the state can be encoded in the key entrymeths['dummy_obj'] = dummykeyobj entrymeths['valid'] = ll_valid_from_key entrymeths['mark_deleted'] = ll_mark_deleted_in_key # the key is overwritten by 'dummy' when the entry is deleted entrymeths['must_clear_key'] = False elif dummyvalueobj: # all the state can be encoded in the value entrymeths['dummy_obj'] = dummyvalueobj entrymeths['valid'] = ll_valid_from_value entrymeths['mark_deleted'] = ll_mark_deleted_in_value # value is overwritten by 'dummy' when entry is deleted entrymeths['must_clear_value'] = False else: # we need a flag to know if the entry was ever used entryfields.append(("f_valid", lltype.Bool)) entrymeths['valid'] = ll_valid_from_flag entrymeths['mark_deleted'] = ll_mark_deleted_in_flag # * the value entryfields.append(("value", DICTVALUE)) if ll_fasthash_function is None: entryfields.append(("f_hash", lltype.Signed)) entrymeths['hash'] = ll_hash_from_cache else: entrymeths['hash'] = ll_hash_recomputed entrymeths['fasthashfn'] = ll_fasthash_function # Build the lltype data structures DICTENTRY = lltype.Struct("odictentry", *entryfields) DICTENTRYARRAY = lltype.GcArray(DICTENTRY, adtmeths=entrymeths) fields = [ ("num_live_items", lltype.Signed), ("num_ever_used_items", lltype.Signed), ("resize_counter", lltype.Signed), ("indexes", llmemory.GCREF), ("lookup_function_no", lltype.Signed), ("entries", lltype.Ptr(DICTENTRYARRAY)) ] if get_custom_eq_hash is not None: r_rdict_eqfn, r_rdict_hashfn = get_custom_eq_hash() fields.extend([ ("fnkeyeq", r_rdict_eqfn.lowleveltype), ("fnkeyhash", r_rdict_hashfn.lowleveltype) ]) adtmeths = { 'keyhash': ll_keyhash_custom, 'keyeq': ll_keyeq_custom, 'r_rdict_eqfn': r_rdict_eqfn, 'r_rdict_hashfn': r_rdict_hashfn, 'paranoia': True, } else: # figure out which functions must be used to hash and compare ll_keyhash = ll_hash_function ll_keyeq = ll_eq_function ll_keyhash = lltype.staticAdtMethod(ll_keyhash) if ll_keyeq is not None: ll_keyeq = lltype.staticAdtMethod(ll_keyeq) adtmeths = { 'keyhash': ll_keyhash, 'keyeq': ll_keyeq, 'paranoia': False, } adtmeths['KEY'] = DICTKEY adtmeths['VALUE'] = DICTVALUE adtmeths['lookup_function'] = lltype.staticAdtMethod(ll_call_lookup_function) adtmeths['allocate'] = lltype.typeMethod(_ll_malloc_dict) DICT.become(lltype.GcStruct("dicttable", adtmeths=adtmeths, *fields)) return DICT class OrderedDictRepr(AbstractDictRepr): def __init__(self, rtyper, key_repr, value_repr, dictkey, dictvalue, custom_eq_hash=None, force_non_null=False): #assert not force_non_null self.rtyper = rtyper self.finalized = False self.DICT = lltype.GcForwardReference() self.lowleveltype = lltype.Ptr(self.DICT) self.custom_eq_hash = custom_eq_hash is not None if not isinstance(key_repr, rmodel.Repr): # not computed yet, done by setup() assert callable(key_repr) self._key_repr_computer = key_repr else: self.external_key_repr, 
self.key_repr = self.pickkeyrepr(key_repr) if not isinstance(value_repr, rmodel.Repr): # not computed yet, done by setup() assert callable(value_repr) self._value_repr_computer = value_repr else: self.external_value_repr, self.value_repr = self.pickrepr(value_repr) self.dictkey = dictkey self.dictvalue = dictvalue self.dict_cache = {} self._custom_eq_hash_repr = custom_eq_hash # setup() needs to be called to finish this initialization def _externalvsinternal(self, rtyper, item_repr): return rmodel.externalvsinternal(self.rtyper, item_repr) def _setup_repr(self): if 'key_repr' not in self.__dict__: key_repr = self._key_repr_computer() self.external_key_repr, self.key_repr = self.pickkeyrepr(key_repr) if 'value_repr' not in self.__dict__: self.external_value_repr, self.value_repr = self.pickrepr(self._value_repr_computer()) if isinstance(self.DICT, lltype.GcForwardReference): DICTKEY = self.key_repr.lowleveltype DICTVALUE = self.value_repr.lowleveltype # * we need an explicit flag if the key and the value is not # able to store dummy values s_key = self.dictkey.s_value s_value = self.dictvalue.s_value kwd = {} if self.custom_eq_hash: self.r_rdict_eqfn, self.r_rdict_hashfn = ( self._custom_eq_hash_repr()) kwd['get_custom_eq_hash'] = self._custom_eq_hash_repr else: kwd['ll_hash_function'] = self.key_repr.get_ll_hash_function() kwd['ll_eq_function'] = self.key_repr.get_ll_eq_function() kwd['ll_fasthash_function'] = self.key_repr.get_ll_fasthash_function() kwd['dummykeyobj'] = self.key_repr.get_ll_dummyval_obj(self.rtyper, s_key) kwd['dummyvalueobj'] = self.value_repr.get_ll_dummyval_obj( self.rtyper, s_value) get_ll_dict(DICTKEY, DICTVALUE, DICT=self.DICT, rtyper=self.rtyper, **kwd) def convert_const(self, dictobj): from rpython.rtyper.lltypesystem import llmemory # get object from bound dict methods #dictobj = getattr(dictobj, '__self__', dictobj) if dictobj is None: return lltype.nullptr(self.DICT) if not isinstance(dictobj, (dict, objectmodel.r_dict)): raise TypeError("expected a dict: %r" % (dictobj,)) try: key = Constant(dictobj) return self.dict_cache[key] except KeyError: self.setup() self.setup_final() l_dict = ll_newdict_size(self.DICT, len(dictobj)) self.dict_cache[key] = l_dict r_key = self.key_repr if r_key.lowleveltype == llmemory.Address: raise TypeError("No prebuilt dicts of address keys") r_value = self.value_repr if isinstance(dictobj, objectmodel.r_dict): if self.r_rdict_eqfn.lowleveltype != lltype.Void: l_fn = self.r_rdict_eqfn.convert_const(dictobj.key_eq) l_dict.fnkeyeq = l_fn if self.r_rdict_hashfn.lowleveltype != lltype.Void: l_fn = self.r_rdict_hashfn.convert_const(dictobj.key_hash) l_dict.fnkeyhash = l_fn for dictkeycontainer, dictvalue in dictobj._dict.items(): llkey = r_key.convert_const(dictkeycontainer.key) llvalue = r_value.convert_const(dictvalue) _ll_dict_insertclean(l_dict, llkey, llvalue, dictkeycontainer.hash) return l_dict else: for dictkey, dictvalue in dictobj.items(): llkey = r_key.convert_const(dictkey) llvalue = r_value.convert_const(dictvalue) _ll_dict_insertclean(l_dict, llkey, llvalue, l_dict.keyhash(llkey)) return l_dict def rtype_len(self, hop): v_dict, = hop.inputargs(self) return hop.gendirectcall(ll_dict_len, v_dict) def rtype_bool(self, hop): v_dict, = hop.inputargs(self) return hop.gendirectcall(ll_dict_bool, v_dict) def make_iterator_repr(self, *variant): return DictIteratorRepr(self, *variant) def rtype_method_get(self, hop): v_dict, v_key, v_default = hop.inputargs(self, self.key_repr, self.value_repr) hop.exception_cannot_occur() v_res = 
hop.gendirectcall(ll_dict_get, v_dict, v_key, v_default) return self.recast_value(hop.llops, v_res) def rtype_method_setdefault(self, hop): v_dict, v_key, v_default = hop.inputargs(self, self.key_repr, self.value_repr) hop.exception_cannot_occur() v_res = hop.gendirectcall(ll_dict_setdefault, v_dict, v_key, v_default) return self.recast_value(hop.llops, v_res) def rtype_method_copy(self, hop): v_dict, = hop.inputargs(self) hop.exception_cannot_occur() return hop.gendirectcall(ll_dict_copy, v_dict) def rtype_method_update(self, hop): v_dic1, v_dic2 = hop.inputargs(self, self) hop.exception_cannot_occur() return hop.gendirectcall(ll_dict_update, v_dic1, v_dic2) def rtype_method__prepare_dict_update(self, hop): v_dict, v_num = hop.inputargs(self, lltype.Signed) hop.exception_cannot_occur() hop.gendirectcall(ll_prepare_dict_update, v_dict, v_num) def _rtype_method_kvi(self, hop, ll_func): v_dic, = hop.inputargs(self) r_list = hop.r_result cLIST = hop.inputconst(lltype.Void, r_list.lowleveltype.TO) hop.exception_cannot_occur() return hop.gendirectcall(ll_func, cLIST, v_dic) def rtype_method_keys(self, hop): return self._rtype_method_kvi(hop, ll_dict_keys) def rtype_method_values(self, hop): return self._rtype_method_kvi(hop, ll_dict_values) def rtype_method_items(self, hop): return self._rtype_method_kvi(hop, ll_dict_items) def rtype_bltn_list(self, hop): return self._rtype_method_kvi(hop, ll_dict_keys) def rtype_method_iterkeys(self, hop): hop.exception_cannot_occur() return DictIteratorRepr(self, "keys").newiter(hop) def rtype_method_itervalues(self, hop): hop.exception_cannot_occur() return DictIteratorRepr(self, "values").newiter(hop) def rtype_method_iteritems(self, hop): hop.exception_cannot_occur() return DictIteratorRepr(self, "items").newiter(hop) def rtype_method_iterkeys_with_hash(self, hop): hop.exception_cannot_occur() return DictIteratorRepr(self, "keys_with_hash").newiter(hop) def rtype_method_iteritems_with_hash(self, hop): hop.exception_cannot_occur() return DictIteratorRepr(self, "items_with_hash").newiter(hop) def rtype_method_clear(self, hop): v_dict, = hop.inputargs(self) hop.exception_cannot_occur() return hop.gendirectcall(ll_dict_clear, v_dict) def rtype_method_popitem(self, hop): v_dict, = hop.inputargs(self) r_tuple = hop.r_result cTUPLE = hop.inputconst(lltype.Void, r_tuple.lowleveltype) hop.exception_is_here() return hop.gendirectcall(ll_dict_popitem, cTUPLE, v_dict) def rtype_method_pop(self, hop): if hop.nb_args == 2: v_args = hop.inputargs(self, self.key_repr) target = ll_dict_pop elif hop.nb_args == 3: v_args = hop.inputargs(self, self.key_repr, self.value_repr) target = ll_dict_pop_default hop.exception_is_here() v_res = hop.gendirectcall(target, *v_args) return self.recast_value(hop.llops, v_res) def rtype_method_contains_with_hash(self, hop): v_dict, v_key, v_hash = hop.inputargs(self, self.key_repr, lltype.Signed) hop.exception_is_here() return hop.gendirectcall(ll_dict_contains_with_hash, v_dict, v_key, v_hash) def rtype_method_setitem_with_hash(self, hop): v_dict, v_key, v_hash, v_value = hop.inputargs( self, self.key_repr, lltype.Signed, self.value_repr) if self.custom_eq_hash: hop.exception_is_here() else: hop.exception_cannot_occur() hop.gendirectcall(ll_dict_setitem_with_hash, v_dict, v_key, v_hash, v_value) def rtype_method_getitem_with_hash(self, hop): v_dict, v_key, v_hash = hop.inputargs( self, self.key_repr, lltype.Signed) if not self.custom_eq_hash: hop.has_implicit_exception(KeyError) # record that we know about it hop.exception_is_here() v_res = 
hop.gendirectcall(ll_dict_getitem_with_hash, v_dict, v_key, v_hash) return self.recast_value(hop.llops, v_res) def rtype_method_delitem_with_hash(self, hop): v_dict, v_key, v_hash = hop.inputargs( self, self.key_repr, lltype.Signed) if not self.custom_eq_hash: hop.has_implicit_exception(KeyError) # record that we know about it hop.exception_is_here() hop.gendirectcall(ll_dict_delitem_with_hash, v_dict, v_key, v_hash) class __extend__(pairtype(OrderedDictRepr, rmodel.Repr)): def rtype_getitem((r_dict, r_key), hop): v_dict, v_key = hop.inputargs(r_dict, r_dict.key_repr) if not r_dict.custom_eq_hash: hop.has_implicit_exception(KeyError) # record that we know about it hop.exception_is_here() v_res = hop.gendirectcall(ll_dict_getitem, v_dict, v_key) return r_dict.recast_value(hop.llops, v_res) def rtype_delitem((r_dict, r_key), hop): v_dict, v_key = hop.inputargs(r_dict, r_dict.key_repr) if not r_dict.custom_eq_hash: hop.has_implicit_exception(KeyError) # record that we know about it hop.exception_is_here() hop.gendirectcall(ll_dict_delitem, v_dict, v_key) def rtype_setitem((r_dict, r_key), hop): v_dict, v_key, v_value = hop.inputargs(r_dict, r_dict.key_repr, r_dict.value_repr) if r_dict.custom_eq_hash: hop.exception_is_here() else: hop.exception_cannot_occur() hop.gendirectcall(ll_dict_setitem, v_dict, v_key, v_value) def rtype_contains((r_dict, r_key), hop): v_dict, v_key = hop.inputargs(r_dict, r_dict.key_repr) hop.exception_is_here() return hop.gendirectcall(ll_dict_contains, v_dict, v_key) class __extend__(pairtype(OrderedDictRepr, OrderedDictRepr)): def convert_from_to((r_dict1, r_dict2), v, llops): # check that we don't convert from Dicts with # different key/value types if r_dict1.dictkey is None or r_dict2.dictkey is None: return NotImplemented if r_dict1.dictkey is not r_dict2.dictkey: return NotImplemented if r_dict1.dictvalue is None or r_dict2.dictvalue is None: return NotImplemented if r_dict1.dictvalue is not r_dict2.dictvalue: return NotImplemented return v # ____________________________________________________________ # # Low-level methods. These can be run for testing, but are meant to # be direct_call'ed from rtyped flow graphs, which means that they will # get flowed and annotated, mostly with SomePtr. 
DICTINDEX_LONG = lltype.Ptr(lltype.GcArray(lltype.Unsigned)) DICTINDEX_INT = lltype.Ptr(lltype.GcArray(rffi.UINT)) DICTINDEX_SHORT = lltype.Ptr(lltype.GcArray(rffi.USHORT)) DICTINDEX_BYTE = lltype.Ptr(lltype.GcArray(rffi.UCHAR)) IS_64BIT = sys.maxint != 2 ** 31 - 1 FUNC_SHIFT = 2 FUNC_MASK = 0x03 # two bits if IS_64BIT: FUNC_BYTE, FUNC_SHORT, FUNC_INT, FUNC_LONG = range(4) else: FUNC_BYTE, FUNC_SHORT, FUNC_LONG = range(3) TYPE_BYTE = rffi.UCHAR TYPE_SHORT = rffi.USHORT TYPE_INT = rffi.UINT TYPE_LONG = lltype.Unsigned def ll_malloc_indexes_and_choose_lookup(d, n): # keep in sync with ll_clear_indexes() below if n <= 256: d.indexes = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(DICTINDEX_BYTE.TO, n, zero=True)) d.lookup_function_no = FUNC_BYTE elif n <= 65536: d.indexes = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(DICTINDEX_SHORT.TO, n, zero=True)) d.lookup_function_no = FUNC_SHORT elif IS_64BIT and n <= 2 ** 32: d.indexes = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(DICTINDEX_INT.TO, n, zero=True)) d.lookup_function_no = FUNC_INT else: d.indexes = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(DICTINDEX_LONG.TO, n, zero=True)) d.lookup_function_no = FUNC_LONG def ll_clear_indexes(d, n): fun = d.lookup_function_no & FUNC_MASK d.lookup_function_no = fun if fun == FUNC_BYTE: rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_BYTE, d.indexes)) elif fun == FUNC_SHORT: rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_SHORT, d.indexes)) elif IS_64BIT and fun == FUNC_INT: rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_INT, d.indexes)) elif fun == FUNC_LONG: rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_LONG, d.indexes)) else: assert False @jit.dont_look_inside def ll_call_insert_clean_function(d, hash, i): fun = d.lookup_function_no & FUNC_MASK if fun == FUNC_BYTE: ll_dict_store_clean(d, hash, i, TYPE_BYTE) elif fun == FUNC_SHORT: ll_dict_store_clean(d, hash, i, TYPE_SHORT) elif IS_64BIT and fun == FUNC_INT: ll_dict_store_clean(d, hash, i, TYPE_INT) elif fun == FUNC_LONG: ll_dict_store_clean(d, hash, i, TYPE_LONG) else: assert False def ll_call_delete_by_entry_index(d, hash, i): fun = d.lookup_function_no & FUNC_MASK if fun == FUNC_BYTE: ll_dict_delete_by_entry_index(d, hash, i, TYPE_BYTE) elif fun == FUNC_SHORT: ll_dict_delete_by_entry_index(d, hash, i, TYPE_SHORT) elif IS_64BIT and fun == FUNC_INT: ll_dict_delete_by_entry_index(d, hash, i, TYPE_INT) elif fun == FUNC_LONG: ll_dict_delete_by_entry_index(d, hash, i, TYPE_LONG) else: assert False def ll_valid_from_flag(entries, i): return entries[i].f_valid def ll_valid_from_key(entries, i): ENTRIES = lltype.typeOf(entries).TO dummy = ENTRIES.dummy_obj.ll_dummy_value return entries[i].key != dummy def ll_valid_from_value(entries, i): ENTRIES = lltype.typeOf(entries).TO dummy = ENTRIES.dummy_obj.ll_dummy_value return entries[i].value != dummy def ll_mark_deleted_in_flag(entries, i): entries[i].f_valid = False def ll_mark_deleted_in_key(entries, i): ENTRIES = lltype.typeOf(entries).TO dummy = ENTRIES.dummy_obj.ll_dummy_value entries[i].key = dummy def ll_mark_deleted_in_value(entries, i): ENTRIES = lltype.typeOf(entries).TO dummy = ENTRIES.dummy_obj.ll_dummy_value entries[i].value = dummy @signature(types.any(), types.int(), returns=types.any()) def ll_hash_from_cache(entries, i): return entries[i].f_hash @signature(types.any(), types.int(), returns=types.any()) def ll_hash_recomputed(entries, i): ENTRIES = lltype.typeOf(entries).TO return ENTRIES.fasthashfn(entries[i].key) def ll_keyhash_custom(d, key): 
DICT = lltype.typeOf(d).TO return objectmodel.hlinvoke(DICT.r_rdict_hashfn, d.fnkeyhash, key) def ll_keyeq_custom(d, key1, key2): DICT = lltype.typeOf(d).TO return objectmodel.hlinvoke(DICT.r_rdict_eqfn, d.fnkeyeq, key1, key2) def ll_dict_len(d): return d.num_live_items def ll_dict_bool(d): # check if a dict is True, allowing for None return bool(d) and d.num_live_items != 0 def ll_dict_getitem(d, key): return ll_dict_getitem_with_hash(d, key, d.keyhash(key)) def ll_dict_getitem_with_hash(d, key, hash): index = d.lookup_function(d, key, hash, FLAG_LOOKUP) if index >= 0: return d.entries[index].value else: raise KeyError def ll_dict_setitem(d, key, value): ll_dict_setitem_with_hash(d, key, d.keyhash(key), value) def ll_dict_setitem_with_hash(d, key, hash, value): index = d.lookup_function(d, key, hash, FLAG_STORE) _ll_dict_setitem_lookup_done(d, key, value, hash, index) # It may be safe to look inside always, it has a few branches though, and their # frequencies needs to be investigated. @jit.look_inside_iff(lambda d, key, value, hash, i: jit.isvirtual(d) and jit.isconstant(key)) def _ll_dict_setitem_lookup_done(d, key, value, hash, i): ENTRY = lltype.typeOf(d.entries).TO.OF if i >= 0: entry = d.entries[i] entry.value = value else: reindexed = False if len(d.entries) == d.num_ever_used_items: try: reindexed = ll_dict_grow(d) except: _ll_dict_rescue(d) raise rc = d.resize_counter - 3 if rc <= 0: try: ll_dict_resize(d) reindexed = True except: _ll_dict_rescue(d) raise rc = d.resize_counter - 3 ll_assert(rc > 0, "ll_dict_resize failed?") if reindexed: ll_call_insert_clean_function(d, hash, d.num_ever_used_items) # d.resize_counter = rc entry = d.entries[d.num_ever_used_items] entry.key = key entry.value = value if hasattr(ENTRY, 'f_hash'): entry.f_hash = hash if hasattr(ENTRY, 'f_valid'): entry.f_valid = True d.num_ever_used_items += 1 d.num_live_items += 1 @jit.dont_look_inside def _ll_dict_rescue(d): # MemoryError situation! The 'indexes' contains an invalid entry # at this point. But we can call ll_dict_reindex() with the # following arguments, ensuring no further malloc occurs. ll_dict_reindex(d, _ll_len_of_d_indexes(d)) _ll_dict_rescue._dont_inline_ = True def _ll_dict_insertclean(d, key, value, hash): # never translated ENTRY = lltype.typeOf(d.entries).TO.OF ll_call_insert_clean_function(d, hash, d.num_ever_used_items) entry = d.entries[d.num_ever_used_items] entry.key = key entry.value = value if hasattr(ENTRY, 'f_hash'): entry.f_hash = hash if hasattr(ENTRY, 'f_valid'): entry.f_valid = True d.num_ever_used_items += 1 d.num_live_items += 1 rc = d.resize_counter - 3 d.resize_counter = rc def _ll_len_of_d_indexes(d): # xxx Haaaack: returns len(d.indexes). Works independently of # the exact type pointed to by d, using a forced cast... # Must only be called by @jit.dont_look_inside functions. return lltype.length_of_simple_gcarray_from_opaque(d.indexes) def _overallocate_entries_len(baselen): # This over-allocates proportional to the list size, making room # for additional growth. This over-allocates slightly more eagerly # than with regular lists. The idea is that there are many more # lists than dicts around in PyPy, and dicts of 5 to 8 items are # not that rare (so a single jump from 0 to 8 is a good idea). # The growth pattern is: 0, 8, 17, 27, 38, 50, 64, 80, 98, ... newsize = baselen + (baselen >> 3) return newsize + 8 @jit.look_inside_iff(lambda d: jit.isvirtual(d)) def ll_dict_grow(d): # note: this @jit.look_inside_iff is here to inline the three lines # at the end of this function. 
It's important because dicts start # with a length-zero 'd.entries' which must be grown as soon as we # insert an element. if d.num_live_items < d.num_ever_used_items // 2: # At least 50% of the allocated entries are dead, so perform a # compaction. If ll_dict_remove_deleted_items detects that over # 75% of allocated entries are dead, then it will also shrink the # memory allocated at the same time as doing a compaction. ll_dict_remove_deleted_items(d) return True new_allocated = _overallocate_entries_len(len(d.entries)) # Detect a relatively rare case where the indexes numeric type is too # small to store all the entry indexes: there would be 'new_allocated' # entries, which may in corner cases be larger than 253 even though we # have single bytes in 'd.indexes' (and the same for the larger # boundaries). The 'd.indexes' hashtable is never more than 2/3rd # full, so we know that 'd.num_live_items' should be at most 2/3 * 256 # (or 65536 or etc.) so after the ll_dict_remove_deleted_items() below # at least 1/3rd items in 'd.entries' are free. fun = d.lookup_function_no & FUNC_MASK toobig = False if fun == FUNC_BYTE: assert d.num_live_items < ((1 << 8) - MIN_INDEXES_MINUS_ENTRIES) toobig = new_allocated > ((1 << 8) - MIN_INDEXES_MINUS_ENTRIES) elif fun == FUNC_SHORT: assert d.num_live_items < ((1 << 16) - MIN_INDEXES_MINUS_ENTRIES) toobig = new_allocated > ((1 << 16) - MIN_INDEXES_MINUS_ENTRIES) elif IS_64BIT and fun == FUNC_INT: assert d.num_live_items < ((1 << 32) - MIN_INDEXES_MINUS_ENTRIES) toobig = new_allocated > ((1 << 32) - MIN_INDEXES_MINUS_ENTRIES) # if toobig: ll_dict_remove_deleted_items(d) assert d.num_live_items == d.num_ever_used_items return True newitems = lltype.malloc(lltype.typeOf(d).TO.entries.TO, new_allocated) rgc.ll_arraycopy(d.entries, newitems, 0, 0, len(d.entries)) d.entries = newitems return False @jit.dont_look_inside def ll_dict_remove_deleted_items(d): if d.num_live_items < len(d.entries) // 4: # At least 75% of the allocated entries are dead, so shrink the memory # allocated as well as doing a compaction. new_allocated = _overallocate_entries_len(d.num_live_items) newitems = lltype.malloc(lltype.typeOf(d).TO.entries.TO, new_allocated) else: newitems = d.entries # The loop below does a lot of writes into 'newitems'. It's a better # idea to do a single gc_writebarrier rather than activating the # card-by-card logic (worth 11% in microbenchmarks). 
from rpython.rtyper.lltypesystem.lloperation import llop llop.gc_writebarrier(lltype.Void, newitems) # ENTRIES = lltype.typeOf(d).TO.entries.TO ENTRY = ENTRIES.OF isrc = 0 idst = 0 isrclimit = d.num_ever_used_items while isrc < isrclimit: if d.entries.valid(isrc): src = d.entries[isrc] dst = newitems[idst] dst.key = src.key dst.value = src.value if hasattr(ENTRY, 'f_hash'): dst.f_hash = src.f_hash if hasattr(ENTRY, 'f_valid'): assert src.f_valid dst.f_valid = True idst += 1 isrc += 1 assert d.num_live_items == idst d.num_ever_used_items = idst if ((ENTRIES.must_clear_key or ENTRIES.must_clear_value) and d.entries == newitems): # must clear the extra entries: they may contain valid pointers # which would create a temporary memory leak while idst < isrclimit: entry = newitems[idst] if ENTRIES.must_clear_key: entry.key = lltype.nullptr(ENTRY.key.TO) if ENTRIES.must_clear_value: entry.value = lltype.nullptr(ENTRY.value.TO) idst += 1 else: d.entries = newitems ll_dict_reindex(d, _ll_len_of_d_indexes(d)) def ll_dict_delitem(d, key): ll_dict_delitem_with_hash(d, key, d.keyhash(key)) def ll_dict_delitem_with_hash(d, key, hash): index = d.lookup_function(d, key, hash, FLAG_DELETE) if index < 0: raise KeyError _ll_dict_del(d, index) @jit.look_inside_iff(lambda d, i: jit.isvirtual(d) and jit.isconstant(i)) def _ll_dict_del(d, index): d.entries.mark_deleted(index) d.num_live_items -= 1 # clear the key and the value if they are GC pointers ENTRIES = lltype.typeOf(d.entries).TO ENTRY = ENTRIES.OF entry = d.entries[index] if ENTRIES.must_clear_key: entry.key = lltype.nullptr(ENTRY.key.TO) if ENTRIES.must_clear_value: entry.value = lltype.nullptr(ENTRY.value.TO) if d.num_live_items == 0: # Dict is now empty. Reset these fields. d.num_ever_used_items = 0 d.lookup_function_no &= FUNC_MASK elif index == d.num_ever_used_items - 1: # The last element of the ordereddict has been deleted. Instead of # simply marking the item as dead, we can safely reuse it. Since it's # also possible that there are more dead items immediately behind the # last one, we reclaim all the dead items at the end of the ordereditem # at the same point. i = d.num_ever_used_items - 2 while i >= 0 and not d.entries.valid(i): i -= 1 j = i + 1 assert j >= 0 d.num_ever_used_items = j # If the dictionary is at least 87.5% dead items, then consider shrinking # it. if d.num_live_items + DICT_INITSIZE <= len(d.entries) / 8: ll_dict_resize(d) def ll_dict_resize(d): # make a 'new_size' estimate and shrink it if there are many # deleted entry markers. See CPython for why it is a good idea to # quadruple the dictionary size as long as it's not too big. # (Quadrupling comes from '(d.num_live_items + d.num_live_items + 1) * 2' # as long as num_live_items is not too large.) 
num_extra = min(d.num_live_items + 1, 30000) _ll_dict_resize_to(d, num_extra) ll_dict_resize.oopspec = 'odict.resize(d)' def _ll_dict_resize_to(d, num_extra): new_estimate = (d.num_live_items + num_extra) * 2 new_size = DICT_INITSIZE while new_size <= new_estimate: new_size *= 2 if new_size < _ll_len_of_d_indexes(d): ll_dict_remove_deleted_items(d) else: ll_dict_reindex(d, new_size) def ll_dict_reindex(d, new_size): if bool(d.indexes) and _ll_len_of_d_indexes(d) == new_size: ll_clear_indexes(d, new_size) # hack: we can reuse the same array else: ll_malloc_indexes_and_choose_lookup(d, new_size) d.resize_counter = new_size * 2 - d.num_live_items * 3 ll_assert(d.resize_counter > 0, "reindex: resize_counter <= 0") ll_assert((d.lookup_function_no >> FUNC_SHIFT) == 0, "reindex: lookup_fun >> SHIFT") # entries = d.entries i = 0 ibound = d.num_ever_used_items while i < ibound: if entries.valid(i): hash = entries.hash(i) ll_call_insert_clean_function(d, hash, i) i += 1 #old_entries.delete() XXXX! # ------- a port of CPython's dictobject.c's lookdict implementation ------- PERTURB_SHIFT = 5 FREE = 0 DELETED = 1 VALID_OFFSET = 2 MIN_INDEXES_MINUS_ENTRIES = VALID_OFFSET + 1 FLAG_LOOKUP = 0 FLAG_STORE = 1 FLAG_DELETE = 2 @specialize.memo() def _ll_ptr_to_array_of(T): return lltype.Ptr(lltype.GcArray(T)) @jit.look_inside_iff(lambda d, key, hash, store_flag, T: jit.isvirtual(d) and jit.isconstant(key)) @jit.oopspec('ordereddict.lookup(d, key, hash, store_flag, T)') def ll_dict_lookup(d, key, hash, store_flag, T): INDEXES = _ll_ptr_to_array_of(T) entries = d.entries indexes = lltype.cast_opaque_ptr(INDEXES, d.indexes) mask = len(indexes) - 1 i = r_uint(hash & mask) # do the first try before any looping ENTRIES = lltype.typeOf(entries).TO direct_compare = not hasattr(ENTRIES, 'no_direct_compare') index = rffi.cast(lltype.Signed, indexes[intmask(i)]) if index >= VALID_OFFSET: checkingkey = entries[index - VALID_OFFSET].key if direct_compare and checkingkey == key: if store_flag == FLAG_DELETE: indexes[i] = rffi.cast(T, DELETED) return index - VALID_OFFSET # found the entry if d.keyeq is not None and entries.hash(index - VALID_OFFSET) == hash: # correct hash, maybe the key is e.g. a different pointer to # an equal object found = d.keyeq(checkingkey, key) #llop.debug_print(lltype.Void, "comparing keys", ll_debugrepr(checkingkey), ll_debugrepr(key), found) if d.paranoia: if (entries != d.entries or lltype.cast_opaque_ptr(llmemory.GCREF, indexes) != d.indexes or not entries.valid(index - VALID_OFFSET) or entries[index - VALID_OFFSET].key != checkingkey): # the compare did major nasty stuff to the dict: start over return ll_dict_lookup(d, key, hash, store_flag, T) if found: if store_flag == FLAG_DELETE: indexes[i] = rffi.cast(T, DELETED) return index - VALID_OFFSET deletedslot = -1 elif index == DELETED: deletedslot = intmask(i) else: # pristine entry -- lookup failed if store_flag == FLAG_STORE: indexes[i] = rffi.cast(T, d.num_ever_used_items + VALID_OFFSET) return -1 # In the loop, a deleted entry (everused and not valid) is by far # (factor of 100s) the least likely outcome, so test for that last. 
perturb = r_uint(hash) while 1: # compute the next index using unsigned arithmetic i = (i << 2) + i + perturb + 1 i = i & mask index = rffi.cast(lltype.Signed, indexes[intmask(i)]) if index == FREE: if store_flag == FLAG_STORE: if deletedslot == -1: deletedslot = intmask(i) indexes[deletedslot] = rffi.cast(T, d.num_ever_used_items + VALID_OFFSET) return -1 elif index >= VALID_OFFSET: checkingkey = entries[index - VALID_OFFSET].key if direct_compare and checkingkey == key: if store_flag == FLAG_DELETE: indexes[i] = rffi.cast(T, DELETED) return index - VALID_OFFSET # found the entry if d.keyeq is not None and entries.hash(index - VALID_OFFSET) == hash: # correct hash, maybe the key is e.g. a different pointer to # an equal object found = d.keyeq(checkingkey, key) if d.paranoia: if (entries != d.entries or lltype.cast_opaque_ptr(llmemory.GCREF, indexes) != d.indexes or not entries.valid(index - VALID_OFFSET) or entries[index - VALID_OFFSET].key != checkingkey): # the compare did major nasty stuff to the dict: start over return ll_dict_lookup(d, key, hash, store_flag, T) if found: if store_flag == FLAG_DELETE: indexes[i] = rffi.cast(T, DELETED) return index - VALID_OFFSET elif deletedslot == -1: deletedslot = intmask(i) perturb >>= PERTURB_SHIFT def ll_dict_store_clean(d, hash, index, T): # a simplified version of ll_dict_lookup() which assumes that the # key is new, and the dictionary doesn't contain deleted entries. # It only finds the next free slot for the given hash. INDEXES = _ll_ptr_to_array_of(T) indexes = lltype.cast_opaque_ptr(INDEXES, d.indexes) mask = len(indexes) - 1 i = r_uint(hash & mask) perturb = r_uint(hash) while rffi.cast(lltype.Signed, indexes[i]) != FREE: i = (i << 2) + i + perturb + 1 i = i & mask perturb >>= PERTURB_SHIFT indexes[i] = rffi.cast(T, index + VALID_OFFSET) def ll_dict_delete_by_entry_index(d, hash, locate_index, T): # Another simplified version of ll_dict_lookup() which locates a # hashtable entry with the given 'index' stored in it, and deletes it. # This *should* be safe against evil user-level __eq__/__hash__ # functions because the 'hash' argument here should be the one stored # into the directory, which is correct. INDEXES = _ll_ptr_to_array_of(T) indexes = lltype.cast_opaque_ptr(INDEXES, d.indexes) mask = len(indexes) - 1 i = r_uint(hash & mask) perturb = r_uint(hash) locate_value = locate_index + VALID_OFFSET while rffi.cast(lltype.Signed, indexes[i]) != locate_value: assert rffi.cast(lltype.Signed, indexes[i]) != FREE i = (i << 2) + i + perturb + 1 i = i & mask perturb >>= PERTURB_SHIFT indexes[i] = rffi.cast(T, DELETED) # ____________________________________________________________ # # Irregular operations. 
# Start the hashtable size at 16 rather than 8, as with rdict.py, because # it is only an array of bytes DICT_INITSIZE = 16 @specialize.memo() def _ll_empty_array(DICT): """Memo function: cache a single prebuilt allocated empty array.""" return DICT.entries.TO.allocate(0) def ll_newdict(DICT): d = DICT.allocate() d.entries = _ll_empty_array(DICT) ll_malloc_indexes_and_choose_lookup(d, DICT_INITSIZE) d.num_live_items = 0 d.num_ever_used_items = 0 d.resize_counter = DICT_INITSIZE * 2 return d OrderedDictRepr.ll_newdict = staticmethod(ll_newdict) def ll_newdict_size(DICT, orig_length_estimate): length_estimate = (orig_length_estimate // 2) * 3 n = DICT_INITSIZE while n < length_estimate: n *= 2 d = DICT.allocate() d.entries = DICT.entries.TO.allocate(orig_length_estimate) ll_malloc_indexes_and_choose_lookup(d, n) d.num_live_items = 0 d.num_ever_used_items = 0 d.resize_counter = n * 2 return d # rpython.memory.lldict uses a dict based on Struct and Array # instead of GcStruct and GcArray, which is done by using different # 'allocate' and 'delete' adtmethod implementations than the ones below def _ll_malloc_dict(DICT): return lltype.malloc(DICT) def _ll_malloc_entries(ENTRIES, n): return lltype.malloc(ENTRIES, n, zero=True) def _ll_free_entries(entries): pass # ____________________________________________________________ # # Iteration. def get_ll_dictiter(DICTPTR): return lltype.Ptr(lltype.GcStruct('dictiter', ('dict', DICTPTR), ('index', lltype.Signed))) class DictIteratorRepr(AbstractDictIteratorRepr): def __init__(self, r_dict, variant="keys"): self.r_dict = r_dict self.variant = variant self.lowleveltype = get_ll_dictiter(r_dict.lowleveltype) if variant == 'reversed': self.ll_dictiter = ll_dictiter_reversed self._ll_dictnext = _ll_dictnext_reversed else: self.ll_dictiter = ll_dictiter self._ll_dictnext = _ll_dictnext def ll_dictiter(ITERPTR, d): iter = lltype.malloc(ITERPTR.TO) iter.dict = d # initialize the index with usually 0, but occasionally a larger value iter.index = d.lookup_function_no >> FUNC_SHIFT return iter @jit.look_inside_iff(lambda iter: jit.isvirtual(iter) and (iter.dict is None or jit.isvirtual(iter.dict))) @jit.oopspec("odictiter.next(iter)") def _ll_dictnext(iter): dict = iter.dict if dict: entries = dict.entries index = iter.index assert index >= 0 entries_len = dict.num_ever_used_items while index < entries_len: nextindex = index + 1 if entries.valid(index): iter.index = nextindex return index else: # In case of repeated iteration over the start of # a dict where the items get removed, like # collections.OrderedDict.popitem(last=False), # the hack below will increase the value stored in # the high bits of lookup_function_no and so the # next iteration will start at a higher value. # We should carefully reset these high bits to zero # as soon as we do something like ll_dict_reindex(). 
if index == (dict.lookup_function_no >> FUNC_SHIFT): dict.lookup_function_no += (1 << FUNC_SHIFT) index = nextindex # clear the reference to the dict and prevent restarts iter.dict = lltype.nullptr(lltype.typeOf(iter).TO.dict.TO) raise StopIteration def ll_dictiter_reversed(ITERPTR, d): iter = lltype.malloc(ITERPTR.TO) iter.dict = d iter.index = d.num_ever_used_items return iter def _ll_dictnext_reversed(iter): dict = iter.dict if dict: entries = dict.entries index = iter.index - 1 while index >= 0: if entries.valid(index): iter.index = index return index index = index - 1 # clear the reference to the dict and prevent restarts iter.dict = lltype.nullptr(lltype.typeOf(iter).TO.dict.TO) raise StopIteration # _____________________________________________________________ # methods def ll_dict_get(dict, key, default): index = dict.lookup_function(dict, key, dict.keyhash(key), FLAG_LOOKUP) if index < 0: return default else: return dict.entries[index].value def ll_dict_setdefault(dict, key, default): hash = dict.keyhash(key) index = dict.lookup_function(dict, key, hash, FLAG_STORE) if index < 0: _ll_dict_setitem_lookup_done(dict, key, default, hash, -1) return default else: return dict.entries[index].value def ll_dict_copy(dict): DICT = lltype.typeOf(dict).TO newdict = DICT.allocate() newdict.entries = DICT.entries.TO.allocate(len(dict.entries)) newdict.num_live_items = dict.num_live_items newdict.num_ever_used_items = dict.num_ever_used_items if hasattr(DICT, 'fnkeyeq'): newdict.fnkeyeq = dict.fnkeyeq if hasattr(DICT, 'fnkeyhash'): newdict.fnkeyhash = dict.fnkeyhash i = 0 while i < newdict.num_ever_used_items: d_entry = newdict.entries[i] entry = dict.entries[i] ENTRY = lltype.typeOf(newdict.entries).TO.OF d_entry.key = entry.key if hasattr(ENTRY, 'f_valid'): d_entry.f_valid = entry.f_valid d_entry.value = entry.value if hasattr(ENTRY, 'f_hash'): d_entry.f_hash = entry.f_hash i += 1 ll_dict_reindex(newdict, _ll_len_of_d_indexes(dict)) return newdict ll_dict_copy.oopspec = 'odict.copy(dict)' def ll_dict_clear(d): if d.num_ever_used_items == 0: return DICT = lltype.typeOf(d).TO old_entries = d.entries d.entries = _ll_empty_array(DICT) ll_malloc_indexes_and_choose_lookup(d, DICT_INITSIZE) d.num_live_items = 0 d.num_ever_used_items = 0 d.resize_counter = DICT_INITSIZE * 2 # old_entries.delete() XXX ll_dict_clear.oopspec = 'odict.clear(d)' def ll_dict_update(dic1, dic2): if dic1 == dic2: return ll_prepare_dict_update(dic1, dic2.num_live_items) i = 0 while i < dic2.num_ever_used_items: entries = dic2.entries if entries.valid(i): entry = entries[i] hash = entries.hash(i) key = entry.key value = entry.value index = dic1.lookup_function(dic1, key, hash, FLAG_STORE) _ll_dict_setitem_lookup_done(dic1, key, value, hash, index) i += 1 ll_dict_update.oopspec = 'odict.update(dic1, dic2)' def ll_prepare_dict_update(d, num_extra): # Prescale 'd' for 'num_extra' items, assuming that most items don't # collide. If this assumption is false, 'd' becomes too large by at # most 'num_extra'. The logic is based on: # (d.resize_counter - 1) // 3 = room left in d # so, if num_extra == 1, we need d.resize_counter > 3 # if num_extra == 2, we need d.resize_counter > 6 etc. # Note however a further hack: if num_extra <= d.num_live_items, # we avoid calling _ll_dict_resize_to here. This is to handle # the case where dict.update() actually has a lot of collisions. # If num_extra is much greater than d.num_live_items the conditional_call # will trigger anyway, which is really the goal. 
x = num_extra - d.num_live_items jit.conditional_call(d.resize_counter <= x * 3, _ll_dict_resize_to, d, num_extra) # this is an implementation of keys(), values() and items() # in a single function. # note that by specialization on func, three different # and very efficient functions are created. def recast(P, v): if isinstance(P, lltype.Ptr): return lltype.cast_pointer(P, v) else: return v def _make_ll_keys_values_items(kind): def ll_kvi(LIST, dic): res = LIST.ll_newlist(dic.num_live_items) entries = dic.entries dlen = dic.num_ever_used_items items = res.ll_items() i = 0 p = 0 while i < dlen: if entries.valid(i): ELEM = lltype.typeOf(items).TO.OF if ELEM is not lltype.Void: entry = entries[i] if kind == 'items': r = lltype.malloc(ELEM.TO) r.item0 = recast(ELEM.TO.item0, entry.key) r.item1 = recast(ELEM.TO.item1, entry.value) items[p] = r elif kind == 'keys': items[p] = recast(ELEM, entry.key) elif kind == 'values': items[p] = recast(ELEM, entry.value) p += 1 i += 1 assert p == res.ll_length() return res ll_kvi.oopspec = 'odict.%s(dic)' % kind return ll_kvi ll_dict_keys = _make_ll_keys_values_items('keys') ll_dict_values = _make_ll_keys_values_items('values') ll_dict_items = _make_ll_keys_values_items('items') def ll_dict_contains(d, key): return ll_dict_contains_with_hash(d, key, d.keyhash(key)) def ll_dict_contains_with_hash(d, key, hash): i = d.lookup_function(d, key, hash, FLAG_LOOKUP) return i >= 0 def _ll_getnextitem(dic): if dic.num_live_items == 0: raise KeyError entries = dic.entries # find the last entry. It's unclear if the loop below is still # needed nowadays, because 'num_ever_used_items - 1' should always # point to the last active item (we decrease it as needed in # _ll_dict_del). Better safe than sorry. while True: i = dic.num_ever_used_items - 1 if entries.valid(i): break dic.num_ever_used_items -= 1 # we must remove the precise entry in the hashtable that points to 'i' ll_call_delete_by_entry_index(dic, entries.hash(i), i) return i def ll_dict_popitem(ELEM, dic): i = _ll_getnextitem(dic) entry = dic.entries[i] r = lltype.malloc(ELEM.TO) r.item0 = recast(ELEM.TO.item0, entry.key) r.item1 = recast(ELEM.TO.item1, entry.value) _ll_dict_del(dic, i) return r def ll_dict_pop(dic, key): index = dic.lookup_function(dic, key, dic.keyhash(key), FLAG_DELETE) if index < 0: raise KeyError value = dic.entries[index].value _ll_dict_del(dic, index) return value def ll_dict_pop_default(dic, key, dfl): index = dic.lookup_function(dic, key, dic.keyhash(key), FLAG_DELETE) if index < 0: return dfl value = dic.entries[index].value _ll_dict_del(dic, index) return value
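# Illustrative sketch (not part of the module above): the lookup functions walk
# the index array with the same open-addressing recurrence as CPython's lookdict,
# i = (5*i + perturb + 1) & mask with perturb >>= PERTURB_SHIFT, which eventually
# visits every slot of a power-of-two table.  A minimal pure-Python model of that
# probe order; the names here are hypothetical:

def _probe_order(hash_value, mask, max_steps=8, perturb_shift=5):
    """Yield the first few index-array slots visited for a given hash."""
    i = hash_value & mask
    perturb = hash_value
    for _ in range(max_steps):
        yield i
        i = ((i << 2) + i + perturb + 1) & mask   # same as (5*i + perturb + 1) & mask
        perturb >>= perturb_shift

# e.g. list(_probe_order(0x1234, mask=15)) shows the slot order for a
# 16-entry index array.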
python
import numpy as np

i8 = np.int64()
i4 = np.int32()
u8 = np.uint64()
b_ = np.bool_()
i = int()

f8 = np.float64()

b_ >> f8  # E: No overload variant
i8 << f8  # E: No overload variant
i | f8  # E: Unsupported operand types
i8 ^ f8  # E: No overload variant
u8 & f8  # E: No overload variant
~f8  # E: Unsupported operand type

# mypy's error message for `NoReturn` is unfortunately pretty bad
# TODO: Re-enable this once we add support for numerical precision for `number`s
# a = u8 | 0  # E: Need type annotation
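# Illustrative sketch (not part of the test file above): the expected mypy errors
# mirror NumPy's runtime behaviour, since bitwise and shift operators are not
# defined for floating-point scalars and raise TypeError when executed.
# A quick runtime check of the same expressions:

import numpy as np

for _expr in (lambda: np.int64(1) << np.float64(2.0),
              lambda: np.uint64(1) & np.float64(2.0),
              lambda: ~np.float64(1.0)):
    try:
        _expr()
    except TypeError as exc:
        print("TypeError:", exc)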
python
# -*- coding: utf-8 -*- import datetime, json, logging, os, subprocess from fast_reconcile_app import settings_app from django.conf import settings # from django.core.urlresolvers import reverse log = logging.getLogger(__name__) def get_commit(): """ Returns commit-string. Called by views.info() """ original_directory = os.getcwd() log.debug( 'BASE_DIR, ```%s```' % settings.BASE_DIR ) git_dir = settings.BASE_DIR log.debug( 'git_dir, ```%s```' % git_dir ) os.chdir( git_dir ) output_utf8 = subprocess.check_output( ['git', 'log'], stderr=subprocess.STDOUT ) output = output_utf8.decode( 'utf-8' ) os.chdir( original_directory ) lines = output.split( '\n' ) commit = lines[0] return commit def get_branch(): """ Returns branch. Called by views.info() """ original_directory = os.getcwd() git_dir = settings.BASE_DIR os.chdir( git_dir ) output_utf8 = subprocess.check_output( ['git', 'branch'], stderr=subprocess.STDOUT ) output = output_utf8.decode( 'utf-8' ) os.chdir( original_directory ) lines = output.split( '\n' ) branch = 'init' for line in lines: if line[0:1] == '*': branch = line[2:] break return branch def make_context( request, rq_now, info_txt, taken ): """ Builds and returns context. Called by views.info() """ cntxt = { 'request': { 'url': '%s://%s%s' % ( request.scheme, request.META.get( 'HTTP_HOST', '127.0.0.1' ), # HTTP_HOST doesn't exist for client-tests request.META.get('REQUEST_URI', request.META['PATH_INFO']) ), 'timestamp': str( rq_now ) }, 'response': { 'documentation': settings_app.README_URL, 'version': info_txt, 'elapsed_time': str( taken ) } } return cntxt
python
# Copyright 2019-2021 Foreseeti AB <https://foreseeti.com> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import copy from typing import Any CONFIG_SCHEMA: dict[str, Any] = { "definitions": { "nonEmptyString": {"type": "string", "minLength": 1}, "nonEmptyStringArray": { "type": "array", "items": {"$ref": "#/definitions/nonEmptyString"}, "minItems": 1, }, "account": { "type": "object", "oneOf": [ { "properties": { "access_key": {"$ref": "#/definitions/nonEmptyString"}, "secret_key": {"$ref": "#/definitions/nonEmptyString"}, "session_token": {"$ref": "#/definitions/nonEmptyString"}, "role": {"$ref": "#/definitions/nonEmptyString"}, "regions": {"$ref": "#/definitions/nonEmptyStringArray"}, "endpoint_url": {"$ref": "#/definitions/nonEmptyString"}, }, "required": ["access_key", "secret_key", "regions"], "additionalProperties": False, }, { "properties": { "role": {"$ref": "#/definitions/nonEmptyString"}, "regions": {"$ref": "#/definitions/nonEmptyStringArray"}, "profile": {"$ref": "#/definitions/nonEmptyString"}, "endpoint_url": {"$ref": "#/definitions/nonEmptyString"}, }, "additionalProperties": False, }, ], }, "nonEmptyAccountArray": { "type": "array", "items": {"$ref": "#/definitions/account"}, "minItems": 1, }, }, "type": "object", "properties": {"accounts": {"$ref": "#/definitions/nonEmptyAccountArray"}}, "additionalProperties": False, "required": ["accounts"], } DATA_SCHEMA: dict[str, Any] = { "definitions": { "nonEmptyString": {"type": "string", "minLength": 1}, "stringArray": { "type": "array", "items": {"$ref": "#/definitions/nonEmptyString"}, }, "globalServices": { "type": "object", "properties": {}, "additionalProperties": True, "required": [], }, "regionServices": { "type": "object", "properties": {"region_name": {"$ref": "#/definitions/nonEmptyString"}}, "additionalProperties": True, "required": ["region_name"], }, "nonEmptyRegionServicesArray": { "type": "array", "items": {"$ref": "#/definitions/regionServices"}, "minItems": 1, }, "account": { "type": "object", "properties": { "account_id": {"$ref": "#/definitions/nonEmptyString"}, "account_aliases": {"$ref": "#/definitions/stringArray"}, "global": {"$ref": "#/definitions/globalServices"}, "regions": {"$ref": "#/definitions/nonEmptyRegionServicesArray"}, }, "additionalProperties": False, "required": ["account_id", "account_aliases", "global", "regions"], }, "nonEmptyAccountArray": { "type": "array", "items": {"$ref": "#/definitions/account"}, "minItems": 1, }, }, "type": "object", "properties": {"accounts": {"$ref": "#/definitions/nonEmptyAccountArray"}}, "additionalProperties": False, "required": ["accounts"], } def get_config_schema() -> dict[str, Any]: config_schema = copy.deepcopy(CONFIG_SCHEMA) return config_schema def get_data_schema() -> dict[str, Any]: # pylint: disable=import-outside-toplevel, cyclic-import from securicad.aws_collector import PARSER_VERSION, PARSER_VERSION_FIELD data_schema = copy.deepcopy(DATA_SCHEMA) data_schema["properties"][PARSER_VERSION_FIELD] = {"const": PARSER_VERSION} 
data_schema["required"].append(PARSER_VERSION_FIELD) return data_schema
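# Illustrative sketch (not part of this module): get_config_schema() returns
# plain JSON Schema ("definitions" + "$ref"), so a collector config can be
# checked with the `jsonschema` package.  Both the package and the sample
# config below are assumptions made for the example only:

import jsonschema

_sample_config = {
    "accounts": [
        {
            "role": "arn:aws:iam::123456789012:role/example",  # hypothetical role ARN
            "regions": ["us-east-1"],
        }
    ]
}
jsonschema.validate(instance=_sample_config, schema=get_config_schema())  # raises ValidationError if malformed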
python
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from bigdl.dllib.utils.common import *


def init_fl_context(target="localhost:8980"):
    callBigDlFunc("float", "initFLContext", target)


class FLClientClosable(JavaValue):
    def __init__(self, jvalue=None, bigdl_type="float", *args):
        super().__init__(jvalue, bigdl_type, *args)

    def set_fl_client(self, fl_client):
        return callBigDlFunc(self.bigdl_type, "flClientClosableSetFLClient", self.value, fl_client)


import unittest
import socket
import logging  # needed for the logging.info call in get_available_port
from bigdl.dllib.utils.log4Error import invalidOperationError


class FLTest(unittest.TestCase):
    def __init__(self, methodName='FLTest') -> None:
        super().__init__(methodName)
        self.port = 8980
        self.port = self.get_available_port(self.port, self.port + 10)
        self.target = f"localhost:{self.port}"

    def get_available_port(self, port_start, port_end):
        def is_available(p):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            result = sock.connect_ex(('127.0.0.1', p))
            sock.close()
            return result != 0

        for p in range(port_start, port_end):
            if is_available(p):
                return p
            else:
                logging.info(f"port {p} is not available, trying another...")
        invalidOperationError(False,
                              f"cannot find an available port in range [{port_start}, {port_end}]")
python
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ This module contains collection of classes which implement collate functionalities for various tasks. Collaters should know what data to expect for each sample and they should pack / collate them into batches """ from __future__ import absolute_import, division, print_function, unicode_literals import numpy as np import torch from fairseq.data import data_utils as fairseq_data_utils class Seq2SeqCollater(object): """ Implements collate function mainly for seq2seq tasks This expects each sample to contain feature (src_tokens) and targets. This collator is also used for aligned training task. """ def __init__( self, feature_index=0, label_index=1, pad_index=1, eos_index=2, move_eos_to_beginning=True, ): self.feature_index = feature_index self.label_index = label_index self.pad_index = pad_index self.eos_index = eos_index self.move_eos_to_beginning = move_eos_to_beginning def _collate_frames(self, frames): """Convert a list of 2d frames into a padded 3d tensor Args: frames (list): list of 2d frames of size L[i]*f_dim. Where L[i] is length of i-th frame and f_dim is static dimension of features Returns: 3d tensor of size len(frames)*len_max*f_dim where len_max is max of L[i] """ len_max = max(frame.size(0) for frame in frames) f_dim = frames[0].size(1) res = frames[0].new(len(frames), len_max, f_dim).fill_(0.0) for i, v in enumerate(frames): res[i, : v.size(0)] = v return res def collate(self, samples): """ utility function to collate samples into batch for speech recognition. """ if len(samples) == 0: return {} # parse samples into torch tensors parsed_samples = [] for s in samples: # skip invalid samples if s["data"][self.feature_index] is None: continue source = s["data"][self.feature_index] if isinstance(source, (np.ndarray, np.generic)): source = torch.from_numpy(source) target = s["data"][self.label_index] if isinstance(target, (np.ndarray, np.generic)): target = torch.from_numpy(target).long() elif isinstance(target, list): target = torch.LongTensor(target) parsed_sample = {"id": s["id"], "source": source, "target": target} parsed_samples.append(parsed_sample) samples = parsed_samples id = torch.LongTensor([s["id"] for s in samples]) frames = self._collate_frames([s["source"] for s in samples]) # sort samples by descending number of frames frames_lengths = torch.LongTensor([s["source"].size(0) for s in samples]) frames_lengths, sort_order = frames_lengths.sort(descending=True) id = id.index_select(0, sort_order) frames = frames.index_select(0, sort_order) target = None target_lengths = None prev_output_tokens = None if samples[0].get("target", None) is not None: ntokens = sum(len(s["target"]) for s in samples) target = fairseq_data_utils.collate_tokens( [s["target"] for s in samples], self.pad_index, self.eos_index, left_pad=False, move_eos_to_beginning=False, ) target = target.index_select(0, sort_order) target_lengths = torch.LongTensor( [s["target"].size(0) for s in samples] ).index_select(0, sort_order) prev_output_tokens = fairseq_data_utils.collate_tokens( [s["target"] for s in samples], self.pad_index, self.eos_index, left_pad=False, move_eos_to_beginning=self.move_eos_to_beginning, ) prev_output_tokens = prev_output_tokens.index_select(0, sort_order) else: ntokens = sum(len(s["source"]) for s in samples) batch = { "id": id, "ntokens": ntokens, "net_input": {"src_tokens": frames, "src_lengths": 
frames_lengths}, "target": target, "target_lengths": target_lengths, "nsentences": len(samples), } if prev_output_tokens is not None: batch["net_input"]["prev_output_tokens"] = prev_output_tokens return batch
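# Illustrative sketch (not part of this module): _collate_frames() pads
# variable-length 2-D feature matrices into a single zero-padded 3-D batch.
# A toy example of the expected shapes, assuming torch is available:

import torch

_collater = Seq2SeqCollater()
_frames = [torch.ones(2, 3), torch.ones(4, 3)]           # lengths 2 and 4, feature dim 3
_batched = _collater._collate_frames(_frames)
assert _batched.shape == (2, 4, 3)                        # padded to the longest frame
assert torch.equal(_batched[0, 2:], torch.zeros(2, 3))    # the shorter frame is zero-padded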
python
# coding: utf-8 import clinica.engine as ce class PetSurfaceLongitudinalCLI(ce.CmdParser): def define_name(self): """Define the sub-command name to run this pipeline.""" self._name = "pet-surface-longitudinal" def define_description(self): """Define a description of this pipeline.""" self._description = ( "Longitudinal surface-based processing of PET images:\n" "https://aramislab.paris.inria.fr/clinica/docs/public/latest/Pipelines/PET_Surface_Longitudinal/" ) def define_options(self): """Define the sub-command arguments.""" from clinica.engine.cmdparser import PIPELINE_CATEGORIES # Clinica compulsory arguments (e.g. BIDS, CAPS, group_label) clinica_comp = self._args.add_argument_group( PIPELINE_CATEGORIES["CLINICA_COMPULSORY"] ) clinica_comp.add_argument("bids_directory", help="Path to the BIDS directory.") clinica_comp.add_argument( "caps_directory", help="Path to the CAPS directory. (Filled with results from t1-freesurfer-longitudinal pipeline", ) clinica_comp.add_argument( "acq_label", type=str, help="Name of the PET tracer label in the acquisition entity " "(acq-<acq_label>).", ) clinica_comp.add_argument( "suvr_reference_region", choices=["cerebellumPons", "pons"], help="Intensity normalization using the average PET uptake in reference regions " "resulting in a standardized uptake value ratio (SUVR) map. It can be " "cerebellumPons (used for amyloid tracers) or pons (used for 18F-FDG tracers).", ) clinica_comp.add_argument( "pvc_psf_tsv", help="TSV file containing for each PET image its point spread function (PSF) measured " "in mm at x, y & z coordinates. Columns must contain: " "participant_id, session_id, acq_label, psf_x, psf_y and psf_z.", ) # Clinica standard arguments (e.g. --n_procs) self.add_clinica_standard_arguments() def run_command(self, args): """Run the pipeline with defined args.""" from networkx import Graph from clinica.utils.ux import print_crash_files_and_exit, print_end_pipeline from .pet_surface_pipeline import PetSurface parameters = { "acq_label": args.acq_label, "suvr_reference_region": args.suvr_reference_region, "pvc_psf_tsv": self.absolute_path(args.pvc_psf_tsv), "longitudinal": True, } pipeline = PetSurface( bids_directory=self.absolute_path(args.bids_directory), caps_directory=self.absolute_path(args.caps_directory), tsv_file=self.absolute_path(args.subjects_sessions_tsv), base_dir=self.absolute_path(args.working_directory), parameters=parameters, name=self.name, ) if args.n_procs: exec_pipeline = pipeline.run( plugin="MultiProc", plugin_args={"n_procs": args.n_procs} ) else: exec_pipeline = pipeline.run() if isinstance(exec_pipeline, Graph): print_end_pipeline( self.name, pipeline.base_dir, pipeline.base_dir_was_specified ) else: print_crash_files_and_exit(args.logname, pipeline.base_dir)
python
import time from threading import Thread from cassandra import ConsistencyLevel from ccmlib.node import ToolError from dtest import Tester, debug from tools import insert_c1c2, query_c1c2, since class TestRebuild(Tester): def __init__(self, *args, **kwargs): kwargs['cluster_options'] = {'start_rpc': 'true'} # Ignore these log patterns: self.ignore_log_patterns = [ # This one occurs when trying to send the migration to a # node that hasn't started yet, and when it does, it gets # replayed and everything is fine. r'Can\'t send migration request: node.*is down', # ignore streaming error during bootstrap r'Exception encountered during startup', r'Streaming error occurred' ] Tester.__init__(self, *args, **kwargs) def simple_rebuild_test(self): """ @jira_ticket CASSANDRA-9119 Test rebuild from other dc works as expected. """ keys = 1000 cluster = self.cluster cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'}) node1 = cluster.create_node('node1', False, ('127.0.0.1', 9160), ('127.0.0.1', 7000), '7100', '2000', None, binary_interface=('127.0.0.1', 9042)) cluster.add(node1, True, data_center='dc1') # start node in dc1 node1.start(wait_for_binary_proto=True) # populate data in dc1 session = self.patient_exclusive_cql_connection(node1) self.create_ks(session, 'ks', {'dc1': 1}) self.create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'}) insert_c1c2(session, n=keys, consistency=ConsistencyLevel.LOCAL_ONE) # check data for i in xrange(0, keys): query_c1c2(session, i, ConsistencyLevel.LOCAL_ONE) session.shutdown() # Bootstrapping a new node in dc2 with auto_bootstrap: false node2 = cluster.create_node('node2', False, ('127.0.0.2', 9160), ('127.0.0.2', 7000), '7200', '2001', None, binary_interface=('127.0.0.2', 9042)) cluster.add(node2, False, data_center='dc2') node2.start(wait_other_notice=True, wait_for_binary_proto=True) # wait for snitch to reload time.sleep(60) # alter keyspace to replicate to dc2 session = self.patient_exclusive_cql_connection(node2) session.execute("ALTER KEYSPACE ks WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};") # alter system_auth -- rebuilding it no longer possible after # CASSANDRA-11848 prevented local node from being considered a source session.execute("ALTER KEYSPACE system_auth WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};") session.execute('USE ks') self.rebuild_errors = 0 # rebuild dc2 from dc1 def rebuild(): try: node2.nodetool('rebuild dc1') except ToolError as e: if 'Node is still rebuilding' in e.stdout: self.rebuild_errors += 1 else: raise e class Runner(Thread): def __init__(self, func): Thread.__init__(self) self.func = func self.thread_exc_info = None def run(self): """ Closes over self to catch any exceptions raised by func and register them at self.thread_exc_info Based on http://stackoverflow.com/a/1854263 """ try: self.func() except Exception: import sys self.thread_exc_info = sys.exc_info() cmd1 = Runner(rebuild) cmd1.start() # concurrent rebuild should not be allowed (CASSANDRA-9119) # (following sleep is needed to avoid conflict in 'nodetool()' method setting up env.) 
time.sleep(.1) # we don't need to manually raise exeptions here -- already handled rebuild() cmd1.join() # manually raise exception from cmd1 thread # see http://stackoverflow.com/a/1854263 if cmd1.thread_exc_info is not None: raise cmd1.thread_exc_info[1], None, cmd1.thread_exc_info[2] # exactly 1 of the two nodetool calls should fail # usually it will be the one in the main thread, # but occasionally it wins the race with the one in the secondary thread, # so we check that one succeeded and the other failed self.assertEqual(self.rebuild_errors, 1, msg='rebuild errors should be 1, but found {}. Concurrent rebuild should not be allowed, but one rebuild command should have succeeded.'.format(self.rebuild_errors)) # check data for i in xrange(0, keys): query_c1c2(session, i, ConsistencyLevel.LOCAL_ONE) @since('2.2') def resumable_rebuild_test(self): """ @jira_ticket CASSANDRA-10810 Test rebuild operation is resumable """ self.ignore_log_patterns = self.ignore_log_patterns[:] + [r'Error while rebuilding node', r'Streaming error occurred on session with peer 127.0.0.3', r'Remote peer 127.0.0.3 failed stream session'] cluster = self.cluster cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'}) # Create 2 nodes on dc1 node1 = cluster.create_node('node1', False, ('127.0.0.1', 9160), ('127.0.0.1', 7000), '7100', '2000', None, binary_interface=('127.0.0.1', 9042)) node2 = cluster.create_node('node2', False, ('127.0.0.2', 9160), ('127.0.0.2', 7000), '7200', '2001', None, binary_interface=('127.0.0.2', 9042)) cluster.add(node1, True, data_center='dc1') cluster.add(node2, True, data_center='dc1') node1.start(wait_for_binary_proto=True) node2.start(wait_for_binary_proto=True) # Insert data into node1 and node2 session = self.patient_exclusive_cql_connection(node1) self.create_ks(session, 'ks', {'dc1': 1}) self.create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'}) insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ALL) key = list(range(10000, 20000)) session = self.patient_exclusive_cql_connection(node2) session.execute('USE ks') insert_c1c2(session, keys=key, consistency=ConsistencyLevel.ALL) session.shutdown() # Create a new node3 on dc2 node3 = cluster.create_node('node3', False, ('127.0.0.3', 9160), ('127.0.0.3', 7000), '7300', '2002', None, binary_interface=('127.0.0.3', 9042), byteman_port='8300') cluster.add(node3, False, data_center='dc2') node3.start(wait_other_notice=False, wait_for_binary_proto=True) # Wait for snitch to be refreshed time.sleep(5) # Alter necessary keyspace for rebuild operation session = self.patient_exclusive_cql_connection(node3) session.execute("ALTER KEYSPACE ks WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};") session.execute("ALTER KEYSPACE system_auth WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};") # Path to byteman script which makes node2 throw an exception making rebuild fail script = ['./rebuild_failure_inject.btm'] node3.byteman_submit(script) # First rebuild must fail and data must be incomplete with self.assertRaises(ToolError, msg='Unexpected: SUCCEED'): debug('Executing first rebuild -> '), node3.nodetool('rebuild dc1') debug('Expected: FAILED') session.execute('USE ks') with self.assertRaises(AssertionError, msg='Unexpected: COMPLETE'): debug('Checking data is complete -> '), for i in xrange(0, 20000): query_c1c2(session, i, ConsistencyLevel.LOCAL_ONE) debug('Expected: INCOMPLETE') debug('Executing second rebuild -> '), 
node3.nodetool('rebuild dc1') debug('Expected: SUCCEED') # Check all streaming sessions completed, streamed ranges are skipped and verify streamed data node3.watch_log_for('All sessions completed') node3.watch_log_for('Skipping streaming those ranges.') debug('Checking data is complete -> '), for i in xrange(0, 20000): query_c1c2(session, i, ConsistencyLevel.LOCAL_ONE) debug('Expected: COMPLETE') @since('3.6') def rebuild_ranges_test(self): """ @jira_ticket CASSANDRA-10406 """ keys = 1000 cluster = self.cluster tokens = cluster.balanced_tokens_across_dcs(['dc1', 'dc2']) cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'}) cluster.set_configuration_options(values={'num_tokens': 1}) node1 = cluster.create_node('node1', False, ('127.0.0.1', 9160), ('127.0.0.1', 7000), '7100', '2000', tokens[0], binary_interface=('127.0.0.1', 9042)) node1.set_configuration_options(values={'initial_token': tokens[0]}) cluster.add(node1, True, data_center='dc1') node1 = cluster.nodelist()[0] # start node in dc1 node1.start(wait_for_binary_proto=True) # populate data in dc1 session = self.patient_exclusive_cql_connection(node1) # ks1 will be rebuilt in node2 self.create_ks(session, 'ks1', {'dc1': 1}) self.create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'}) insert_c1c2(session, n=keys, consistency=ConsistencyLevel.ALL) # ks2 will not be rebuilt in node2 self.create_ks(session, 'ks2', {'dc1': 1}) self.create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'}) insert_c1c2(session, n=keys, consistency=ConsistencyLevel.ALL) session.shutdown() # Bootstraping a new node in dc2 with auto_bootstrap: false node2 = cluster.create_node('node2', False, ('127.0.0.2', 9160), ('127.0.0.2', 7000), '7200', '2001', tokens[1], binary_interface=('127.0.0.2', 9042)) node2.set_configuration_options(values={'initial_token': tokens[1]}) cluster.add(node2, False, data_center='dc2') node2.start(wait_other_notice=True, wait_for_binary_proto=True) # wait for snitch to reload time.sleep(60) # alter keyspace to replicate to dc2 session = self.patient_exclusive_cql_connection(node2) session.execute("ALTER KEYSPACE ks1 WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};") session.execute("ALTER KEYSPACE ks2 WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};") session.execute('USE ks1') # rebuild only ks1 with range that is node1's replica node2.nodetool('rebuild -ks ks1 -ts (%s,%s] dc1' % (tokens[1], str(pow(2, 63) - 1))) # check data is sent by stopping node1 node1.stop() for i in xrange(0, keys): query_c1c2(session, i, ConsistencyLevel.ONE) # ks2 should not be streamed session.execute('USE ks2') for i in xrange(0, keys): query_c1c2(session, i, ConsistencyLevel.ONE, tolerate_missing=True, must_be_missing=True)
python
from django.core.management.base import BaseCommand
from schedule import models
from django.utils import timezone
from django.conf import settings
import requests
import requests.auth
import logging

logger = logging.getLogger(__name__)


class Command(BaseCommand):

    def handle(self, *args, **options):
        scheduled = models.ScheduledPublication.objects.filter(scheduled__lt=timezone.now(), published=False)
        for schedule in scheduled:
            self.publish_article(schedule)

    @staticmethod
    def publish_article(schedule):
        try:
            message = {
                "articles": [{
                    "id": schedule.article_identifier
                }]
            }
            service = settings.DASHBOARD_PUBLISHING_SERVICE
            auth = requests.auth.HTTPBasicAuth(settings.PUBLISHING_SERVICE_USER, settings.PUBLISHING_SERVICE_PASSWORD)
            response = requests.post(service, json=message, auth=auth)
            if response.status_code == 200:  # compare by value, not identity
                schedule.published = True
                schedule.save()
            else:
                logger.error("response returned %s", response.status_code)
        except Exception as e:
            logger.error("An error has occurred. Exception: %s", e)
python
######################################
#
#   Nikolai Rozanov (C) 2017-Present
#
#   [email protected]
#
#####################################
import numpy as np

#
# This file provides a way of learning the kernel and performing a hypothesis
# test by computing the test statistic.
#


class TEST(object):
    '''
    Main test class.
    The wrapped `test` object needs to provide:
        get_tstat()
        get_estimate()
        reset(params1, params2)
        get_treshold()
        get_power()
    '''
    def __init__(self, test):
        self.__test = test

    # #######################################
    # Optimise over the following parameters
    def learn_kernel(self, params_vec1, params_vec2, method='power'):
        '''
        Finds the optimal kernel w.r.t. the chosen criterion (test power, or
        the test statistic itself; others maybe later).
        params_vec1 and params_vec2 must have the same length.
        '''
        if method == 'power':
            vec = self.__learn_kernel_power(params_vec1, params_vec2)
        elif method == 'tstat':
            vec = self.__learn_kernel_tstat(params_vec1, params_vec2)
        else:
            vec = []

        amax = np.argmax(vec)
        max = np.max(vec)
        return max, amax, vec

    def __learn_kernel_power(self, params1, params2):
        '''
        Criterion: test power.
        '''
        num_ker = len(params1)
        powers = np.zeros(num_ker)
        for idx in range(num_ker):
            self.__test.reset(params1[idx], params2[idx])
            powers[idx] = self.__test.get_power()
        return powers

    def __learn_kernel_tstat(self, params1, params2):
        '''
        Criterion: test statistic.
        '''
        num_ker = len(params1)
        powers = np.zeros(num_ker)
        for idx in range(num_ker):
            self.__test.reset(params1[idx], params2[idx])
            powers[idx] = self.__test.get_tstat()
        return powers
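# Illustrative usage sketch (not part of this module): TEST only assumes the
# wrapped `test` object exposes reset(params1, params2), get_power() and
# get_tstat().  A dummy object with that interface makes the kernel-selection
# loop concrete; the class and numbers below are hypothetical:

class _DummyTest(object):
    def __init__(self):
        self._bandwidth = None

    def reset(self, params1, params2):
        self._bandwidth = params1

    def get_power(self):
        # pretend the test power peaks at bandwidth 1.0
        return 1.0 / (1.0 + (self._bandwidth - 1.0) ** 2)

    def get_tstat(self):
        return self.get_power()


_bandwidths = [0.1, 0.5, 1.0, 2.0, 4.0]
_best, _best_idx, _powers = TEST(_DummyTest()).learn_kernel(_bandwidths, _bandwidths, method='power')
# _best_idx == 2, i.e. the bandwidth-1.0 kernel maximises the (dummy) power.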
python
import os
from PIL import Image
import tensorflow as tf
from Fishnet import FishNets
import numpy as np
import json


def onehot(label):
    n_sample = len(label)
    # n_class=max(label)+1
    onehot_labels = np.zeros((n_sample, 6))
    onehot_labels[np.arange(n_sample), label] = 1
    return onehot_labels


def read(file_list):
    # build the filename queue
    x = tf.placeholder(tf.float32, [None, 224,224,3])
    file_queue=tf.train.string_input_producer(file_list)
    # read and decode
    reader=tf.WholeFileReader()
    _,value=reader.read(file_queue)
    image_a=tf.image.decode_jpeg(value,channels=3)
    image=tf.image.resize_images(image_a,[224,224])
    image=tf.cast(image,tf.float32)
    image=tf.reshape(image,shape=[224,224,3])
    # batch the images
    inputs=tf.train.batch([image],batch_size=22,num_threads=1,capacity=1)
    network_planes = [64, 128, 256, 512, 512, 512, 384, 256, 320, 832, 1600]
    num_res_blks = [2, 2, 6, 2, 1, 1, 1, 1, 2, 2]
    num_trans_blks = [1, 1, 1, 1, 1, 4]
    mode = FishNets(6, network_planes, num_res_blks, num_trans_blks)
    value = mode(x, training=True)
    va=tf.argmax(value,1)
    # saver = tf.train.import_meta_graph("./tmp/train_model.ckpt")
    saver=tf.train.Saver()
    with tf.Session() as sess:
        #model = tf.train.latest_checkpoint("./tmp")
        #print(model)
        # saver.recover_last_checkpoints("./tmp/train_model.ckpt")
        saver.restore(sess,save_path="./tmp/train_model.ckpt")
        cood=tf.train.Coordinator()
        thread=tf.train.start_queue_runners(sess=sess,coord=cood)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        inputs=sess.run(inputs)
        prediction,values=sess.run([va,value],feed_dict={x:inputs})
        for i in range(len(file_list)):
            print(prediction[i])
        # result=[]
        # for i in range(len(file_list)):  # file_list: the image file paths
        #     disease_dict={}
        #     pic_file=file_list[i]
        #     pic_file=pic_file[8:]
        #     disease_dict["image_id"] = pic_file
        #     disease_dict["disease_class"]=int(prediction[i])+1
        #     result.append(disease_dict)
        # with open ("./danyi.json",'w') as f:
        #     f.write(json.dumps(result))
        # print("done")
        cood.request_stop()
        cood.join(thread)

filename=os.listdir("./image")
file_list=[os.path.join("./image/",file) for file in filename]
print(file_list)
a=read(file_list)

# def per_calss(imagefile):
#     image=Image.open(imagefile)
#     image=image.resize([227,227])
#     image_array=np.array(image)
#     image=tf.cast(image_array,tf.float32)
#     image=tf.image.per_image_standardization(image)
#     image=tf.reshape(image,shape=[1,227,227,3])
#     saver=tf.train.Saver()
#     with tf.Session() as sess:
#         save_model=tf.train.latest_checkpoint("./tmp")
#         saver.restore(sess,save_model)
#         image=sess.run(image)
#         prediction=sess.run(fc3,feed_dict={x:image})
#         max_index=np.argmax(prediction)
#         print(max_index)

# filename=os.listdir("./IDADP-PRCV2019-training/1")
# print(filename)
# file_list=[os.path.join("./dog/",file) for file in filename]
# a=per_calss(file_list)

# inputs=tf.nn.batch_normalization(inputs)
# inputs_shape = inputs.get_shape().as_list()
# batchsize, height, width, C = inputs_shape[0], inputs_shape[1], inputs_shape[2], inputs_shape[3]
# filter = tf.Variable(tf.truncated_normal([1, 1, C, 1], dtype=tf.float32, stddev=0.1), name='weights')
# filter1 = tf.Variable(tf.truncated_normal([1, 1, C, C], dtype=tf.float32, stddev=0.1), name='weights1')
# query_conv = tf.nn.conv2d(inputs, filter, strides=[1, 1, 1, 1], padding='VALID')
# print(query_conv)
# key_conv = tf.nn.conv2d(inputs, filter, strides=[1, 1, 1, 1], padding='VALID')
# print(key_conv)
# value_conv = tf.nn.conv2d(inputs, filter1, strides=[1, 1, 1, 1], padding='VALID')
# print(value_conv)
# proj_query = tf.reshape(query_conv, [batchsize, width * height, -1])
# print(proj_query) # proj_key = tf.transpose((tf.reshape(key_conv, [batchsize, width * height, -1])), perm=[0, 2, 1]) # print(proj_key) # energy = tf.matmul(proj_query, proj_key) # print(energy) # attention = tf.nn.softmax(energy) # print(attention) # proj_value = tf.reshape(value_conv, [batchsize, width * height, -1]) # print(proj_value) # out = tf.matmul(attention, proj_value) # print(out) # out = tf.reshape(out, [batchsize, height, width, C]) # print(out) # # out = out + inputs
from numpy import dot, diag, ones, zeros, sqrt
from openopt.kernel.ooMisc import norm


def amsg2p(f, df, x0, epsilon, f_opt, gamma, callback=lambda x, f: False):
    # Returns the optimal point found and the number of iterations.
    f0 = f(x0)
    if f0 - f_opt <= epsilon:
        return x0, 0
    x, n = x0.copy(), x0.size
    df0 = df(x0)
    ndf = norm(df0)
    # Step length, normalized gradient, direction memory, and dilation matrix.
    h, dzeta, p, B = gamma * (f0 - f_opt) / ndf, df0 / ndf, zeros(n), diag(ones(n, 'float64'))
    # TODO: add possibility to create B of type float128
    k = 0
    while True:
        k += 1
        x -= h * dot(B, dzeta)
        F = f(x)
        r = callback(x, F)
        if r not in (0, False, None):
            break  # user-demanded stop
        if F - f_opt <= epsilon:
            break
        DF = df(x)
        DF_dilated = dot(B.T, DF)
        nDF_dilated = norm(DF_dilated)
        dzeta_new, h = DF_dilated / nDF_dilated, gamma * (F - f_opt) / nDF_dilated
        lambda1, lambda2 = -dot(p, dzeta_new), -dot(dzeta, dzeta_new)
        c1, c2 = lambda1 > 0, lambda2 > 0
        # Update the direction memory p from the previous directions.
        if c1 and c2:
            p = (lambda1 * p + lambda2 * dzeta) / sqrt(lambda1 ** 2 + lambda2 ** 2)
        elif c2 and not c1:
            p = dzeta
        elif not c1 and not c2:
            p = zeros(n)
        # else (c1 and not c2): p is left unchanged
        mu = dot(p, dzeta_new)
        if -1 < mu < 0:
            # Dilate the space along nu and rescale the step accordingly.
            S = sqrt(1 - mu ** 2)
            nu = (1 / S - 1) * dzeta_new - (mu / S) * p
            B += dot(dot(B, nu.reshape(n, 1)), dzeta_new.reshape(1, n))
            h /= S
            p = (p - mu * dzeta_new) / S
        else:
            p = zeros(n)
        dzeta = dzeta_new
    return x, k
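
# Editor's note: a minimal usage sketch (not from the original OpenOpt code),
# assuming `norm` above behaves like numpy.linalg.norm. amsg2p requires the
# optimal value f_opt in advance, so we minimize f(x) = <x, x>, whose minimum
# is exactly 0.
if __name__ == '__main__':
    from numpy import array
    f = lambda x: float(dot(x, x))
    df = lambda x: 2.0 * x
    x_opt, iters = amsg2p(f, df, array([3.0, -2.0, 1.0]), epsilon=1e-10, f_opt=0.0, gamma=1.0)
    print('reached', x_opt, 'in', iters, 'iterations')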
import torch as t
import torch.nn as nn
import torch.nn.functional as f
from config import config
from torch.optim import Adam, SGD, Adagrad
from torch.autograd import Variable
from data_utils import batch_by_num
from base_model import BaseModel, BaseModule
import logging
import os


class RotatEModule(BaseModule):
    def __init__(self, n_ent, n_rel, config):
        super(RotatEModule, self).__init__()
        sigma = 0.2
        self.gamma = nn.Parameter(
            t.Tensor([12.0]),
            requires_grad=False
        )
        self.rel_re_embed = nn.Embedding(n_rel, config.dim)
        self.rel_im_embed = nn.Embedding(n_rel, config.dim)
        self.ent_re_embed = nn.Embedding(n_ent, config.dim)
        self.ent_im_embed = nn.Embedding(n_ent, config.dim)
        for param in self.parameters():
            param.data.div_((config.dim / sigma ** 2) ** (1 / 6))

    def forward(self, src, rel, dst):
        head_ie = self.ent_im_embed(src)
        head_re = self.ent_re_embed(src)
        relation_ie = self.rel_im_embed(rel)
        relation_re = self.rel_re_embed(rel)
        tail_ie = self.ent_im_embed(dst)
        tail_re = self.ent_re_embed(dst)
        # * is the element-wise (Hadamard) product; these two lines are the
        # complex multiplication formula (a+bj)*(c+dj) = (ac-bd) + (bc+ad)j.
        re_score = head_re * relation_re - head_ie * relation_ie
        im_score = head_re * relation_ie + head_ie * relation_re
        re_score = re_score - tail_re
        im_score = im_score - tail_ie
        # Stack the real and imaginary residuals along a new leading dimension
        # and take the L2 norm over it (square root of the sum of squares), i.e.
        # the complex modulus of the per-dimension residual.
        score = t.stack([re_score, im_score], dim=0)
        score = score.norm(dim=0)
        # Margin (a scalar) minus the summed per-dimension distances.
        score = self.gamma.item() - score.sum(dim=2)
        return score

    def score(self, src, rel, dst):
        return -self.forward(src, rel, dst)

    def dist(self, src, rel, dst):
        return -self.forward(src, rel, dst)

    def prob_logit(self, src, rel, dst):
        return self.forward(src, rel, dst)


class RotatE(BaseModel):
    def __init__(self, n_ent, n_rel, config):
        super(RotatE, self).__init__()
        self.mdl = RotatEModule(n_ent, n_rel, config)
        # self.mdl.cuda()
        self.config = config
        self.weight_decay = config.lam / config.n_batch

    def pretrain(self, train_data, corrupter, tester):
        src, rel, dst = train_data
        n_train = len(src)
        n_epoch = self.config.n_epoch
        n_batch = self.config.n_batch
        optimizer = Adam(self.mdl.parameters(), weight_decay=self.weight_decay)
        best_perf = 0
        for epoch in range(n_epoch):
            epoch_loss = 0
            if epoch % self.config.sample_freq == 0:
                rand_idx = t.randperm(n_train)
                src = src[rand_idx]
                rel = rel[rand_idx]
                dst = dst[rand_idx]
                src_corrupted, rel_corrupted, dst_corrupted = corrupter.corrupt(src, rel, dst)
                src_corrupted = src_corrupted  # .cuda()
                rel_corrupted = rel_corrupted  # .cuda()
                dst_corrupted = dst_corrupted  # .cuda()
            for ss, rs, ts in batch_by_num(n_batch, src_corrupted, rel_corrupted, dst_corrupted,
                                           n_sample=n_train):
                self.mdl.zero_grad()
                label = t.zeros(len(ss)).type(t.LongTensor)  # .cuda()
                loss = t.sum(self.mdl.softmax_loss(Variable(ss), Variable(rs), Variable(ts), label))
                loss.backward()
                optimizer.step()
                epoch_loss += loss.item()  # `loss.data[0]` is the pre-0.4 PyTorch idiom
            logging.info('Epoch %d/%d, Loss=%f', epoch + 1, n_epoch, epoch_loss / n_train)
            if (epoch + 1) % self.config.epoch_per_test == 0:
                test_perf = tester()
                if test_perf > best_perf:
                    self.save(os.path.join(config().task.dir, self.config.model_file))
                    best_perf = test_perf
        return best_perf
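
# Editor's note: a hedged scoring sketch (not part of the original project). It
# assumes the `config` object only needs a `dim` attribute here and that
# BaseModule adds no required constructor arguments beyond what RotatEModule
# passes; adjust if the real base class differs.
if __name__ == '__main__':
    from types import SimpleNamespace
    toy_cfg = SimpleNamespace(dim=8)
    module = RotatEModule(n_ent=5, n_rel=3, config=toy_cfg)
    # The module expects 2-D index tensors (batch x candidates): here a batch of
    # two triples with one candidate each.
    heads = t.LongTensor([[0], [1]])
    rels = t.LongTensor([[2], [0]])
    tails = t.LongTensor([[3], [4]])
    print(module(heads, rels, tails))  # higher score = more plausible triple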
import math import numpy as np from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F import logging from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence from src.datasets.utility import find_sub_list from src.scripts.tools.utility import get_device class Embedder(torch.nn.Module): def __init__(self, vocab, config): super().__init__() self.scale_grad = config['scale_emb_grad_by_freq'] self.embedding_dim = vocab.vectors.shape[1] self.embeddings = torch.nn.Embedding(len(vocab), self.embedding_dim, scale_grad_by_freq=self.scale_grad) self.embeddings.weight.data.copy_(vocab.vectors) self.embeddings.weight.requires_grad = False self.vocab = vocab logging.info(f"Optimize embeddings = {config['optimize_embeddings']}") logging.info(f"Scale grad by freq: {self.scale_grad}") logging.info(f"Vocabulary size = {len(vocab.vectors)}") def forward(self, input): return self.embeddings(input) class CharEmbedder(nn.Module): def __init__(self, config, vocab): super().__init__() self.embeddings = nn.Embedding(len(vocab), config["char_embedding_size"], padding_idx=1) self.embeddings.weight.data.uniform_(-0.001, 0.001) self.dropout = nn.Dropout(p=config["dropout_rate"]) self.vocab = vocab self.char_conv = nn.Conv2d(1, # input channels config["char_channel_size"], # output channels (config["char_embedding_size"], config["char_channel_width"]) # kernel size ) def forward(self, input): """ :param x: (batch, seq_len, word_len) :return: (batch, seq_len, char_channel_size) """ batch_size = input.size(0) word_len = input.shape[-1] # (batch, seq_len, word_len, char_dim) x = self.dropout(self.embeddings(input)) char_dim = x.shape[-1] # (batch * seq_len, 1, char_dim, word_len) x = x.view(-1, char_dim, word_len).unsqueeze(1) # (batch * seq_len, char_channel_size, conv_len) x = self.char_conv(x).squeeze(-2) # (batch * seq_len, char_channel_size) x = F.max_pool1d(x, x.size(2)).squeeze(-1) # (batch, seq_len, char_channel_size) x = x.view(batch_size, -1, x.shape[-1]) return x class HighwayNetwork(nn.Module): def __init__(self, config): super().__init__() self.layers = config["highway_layers"] dim = config["highway_dim1"] + config["highway_dim2"] for i in range(self.layers): setattr(self, f'highway_linear{i}', nn.Sequential(nn.Linear(dim, dim), nn.ReLU())) gate = nn.Linear(dim, dim) # We should bias the highway layer to just carry its input forward when training starts. # We do that by setting the bias on gate affine transformation to be positive, because # that means `g` will be biased to be high, so we will carry the input forward. # The bias on `B(x)` is the second half of the bias vector in each Linear layer. 
gate.bias.data.fill_(1) setattr(self, f'highway_gate{i}', nn.Sequential(gate, nn.Sigmoid())) def forward(self, x1, x2): x = torch.cat([x1, x2], dim=-1) for i in range(self.layers): h = getattr(self, f'highway_linear{i}')(x) g = getattr(self, f'highway_gate{i}')(x) x = (1 - g) * h + g * x return x class Encoder(torch.nn.Module): def __init__(self, config): super(Encoder, self).__init__() self.config = config def get_output_dim(self): raise NotImplementedError("Objects need to implement this method!") class RNN(Encoder): def __init__(self, config): super(RNN, self).__init__(config) self.rnn = None def forward(self, inp, lengths=None, padding_value=0., batch_first=True): """ :param inp: Shape BATCH_SIZE x LEN x H_DIM """ if lengths is None: outp = self.rnn(inp)[0] else: sequence_len = inp.shape[1] inp_packed = pack_padded_sequence(inp, lengths, batch_first=batch_first, enforce_sorted=False) outp_packed = self.rnn(inp_packed)[0] outp, output_lengths = pad_packed_sequence(outp_packed, batch_first=batch_first, padding_value=padding_value, total_length=sequence_len) return outp def get_output_dim(self): return self.output_dim class BiLSTM(RNN): def __init__(self, config): super().__init__(config) self.hidden_size = config['RNN_nhidden'] self.layers = config['RNN_layers'] self.rnn = torch.nn.LSTM( config["RNN_input_dim"], self.hidden_size, self.layers, dropout=float(config['dropout_rate']), batch_first=True, bidirectional=True) self.output_dim = config['RNN_nhidden'] * 2 class LSTM(RNN): def __init__(self, config, init_hidden=None): super().__init__(config) self.hidden_size = config['RNN_nhidden'] self.layers = config['RNN_layers'] self.rnn = torch.nn.LSTM( config["RNN_input_dim"], self.hidden_size, self.layers, dropout=config['dropout_rate'], batch_first=True, bidirectional=False) self.output_dim = config['RNN_nhidden'] # @profile def combine_surface_forms(valid_span_probabilities, batch_size, hacks, p_to_rerank, passage_length, score, pad_token=0): if score == "logprobs": # !!!!!sentinel is automatically assumed in this case!!!! 
# presoftmax class score = log(P_class) + K # save K, turn scores into probabilities K = torch.FloatTensor( np.nanmax((valid_span_probabilities - torch.log_softmax(valid_span_probabilities, -1)).cpu().numpy(), -1)) \ .to( valid_span_probabilities.get_device() if valid_span_probabilities.get_device() >= 0 else torch.device( "cpu")) valid_span_probabilities = F.softmax(valid_span_probabilities, dim=-1) valid_span_probabilities = valid_span_probabilities.view(batch_size, passage_length, passage_length) valid_document_probabilities = valid_span_probabilities[:, 1:, 1:] valid_document_probabilities = valid_document_probabilities.reshape(batch_size, -1) passage_length -= 1 else: valid_document_probabilities = valid_span_probabilities # Re-ranking top-N based on surface form sorted_scores, indices = torch.sort(valid_document_probabilities, dim=-1, descending=True) span_start_indices = indices // (passage_length) span_end_indices = indices % (passage_length) N = p_to_rerank # top-N surface form reranking sorted_scores, span_start_indices, span_end_indices = sorted_scores[:, :N], \ span_start_indices[:, :N], \ span_end_indices[:, :N] if type(hacks["combine_surface_forms"][1]) == torch.Tensor: hacks["combine_surface_forms"] = hacks["combine_surface_forms"][0], \ hacks["combine_surface_forms"][1].tolist() ### Casting to python floats may produce slightly different results, due to FP instability, e.g.: # 28.7.2020, changed to pytorch vectorized addition # --------------------------------------------------------------------------------------- # Python floats # 3.158890103804879e-05 + 2.225152506696304e-09 # returns 3.1591126190555485e-05 # --------------------------------------------------------------------------------------- # Pytorch vectorized addition of floats # (torch.Tensor([3.158890103804879e-05]) + torch.Tensor([2.225152506696304e-09]) ).item() # returns 3.159112748107873e-05 # valid_document_probabilities_list = valid_document_probabilities.tolist() valid_document_probabilities_list = valid_document_probabilities for i in range(len(span_start_indices)): bool_arr_processed = [[False for _ in range(passage_length)] for _ in range(passage_length)] for a, e in zip(span_start_indices[i].tolist(), span_end_indices[i].tolist()): if bool_arr_processed[a][e]: continue # HERE assuming 0 in the pad token if hacks["combine_surface_forms"][1][i][a:e + 1] == [pad_token]: continue # OLD # processed.append((a, e)) # do not adjust value of other spans with this span bool_arr_processed[a][e] = True span_occurences = find_sub_list(hacks["combine_surface_forms"][1][i][a:e + 1], hacks["combine_surface_forms"][1][i]) if len(span_occurences) > 1: for span in span_occurences: if bool_arr_processed[span[0]][span[1]]: continue bool_arr_processed[span[0]][span[1]] = True valid_document_probabilities_list[i][a * passage_length + e] += \ valid_document_probabilities_list[i][span[0] * passage_length + span[1]] valid_document_probabilities_list[i][span[0] * passage_length + span[1]] = 0. 
# valid_document_probabilities = torch.FloatTensor(valid_document_probabilities_list) valid_document_probabilities = valid_document_probabilities_list if score == "logprobs": # turn back into pre-softmax scores valid_span_probabilities[:, 1:, 1:] = valid_document_probabilities.view(batch_size, passage_length, passage_length) valid_span_probabilities = valid_span_probabilities.view(batch_size, -1) valid_span_probabilities += K.unsqueeze(-1) return valid_span_probabilities class SpanPredictionModule(nn.Module): def predict(self, batch): start_pred_logits, end_pred_logits = self(batch) start_pred, end_pred = torch.nn.functional.softmax(start_pred_logits, dim=1), torch.nn.functional.softmax( end_pred_logits, dim=1) return self.decode(start_pred, end_pred) @staticmethod def decode(span_start_logits: torch.Tensor, span_end_logits: torch.Tensor, has_sentinel=False, score="logprobs") -> \ (torch.Tensor, torch.Tensor): """ This method has been borrowed from AllenNLP :param span_start_logits: :param span_end_logits: :return: """ # We call the inputs "logits" - they could either be unnormalized logits or normalized log # probabilities. A log_softmax operation is a constant shifting of the entire logit # vector, so taking an argmax over either one gives the same result. if span_start_logits.dim() != 2 or span_end_logits.dim() != 2: raise ValueError("Input shapes must be (batch_size, passage_length)") batch_size, passage_length = span_start_logits.size() device = span_start_logits.device # (batch_size, passage_length, passage_length) span_log_probs = span_start_logits.unsqueeze(2) + span_end_logits.unsqueeze(1) # if first token is sentinel, class, combinations (0,x) and (x,0); x!=0 are invalid # mask these if has_sentinel: span_log_probs[:, 1:, 0] = -math.inf span_log_probs[:, 0, 1:] = -math.inf # Only the upper triangle of the span matrix is valid; the lower triangle has entries where # the span ends before it starts. span_log_mask = torch.triu(torch.ones((passage_length, passage_length), device=device)).log().unsqueeze(0) valid_span_log_probs = span_log_probs + span_log_mask # Here we take the span matrix and flatten it, then find the best span using argmax. We # can recover the start and end indices from this flattened list using simple modular # arithmetic. # (batch_size, passage_length * passage_length) valid_span_log_probs = valid_span_log_probs.view(batch_size, -1) if score == "probs": valid_span_scores = F.softmax(valid_span_log_probs, dim=-1) elif score == "logprobs": valid_span_scores = valid_span_log_probs else: raise NotImplemented(f"Unknown score type \"{score}\"") best_span_scores, best_spans = valid_span_scores.max(-1) span_start_indices = best_spans // passage_length span_end_indices = best_spans % passage_length return best_span_scores, (span_start_indices, span_end_indices) @staticmethod def decode_wth_hacks(span_start_logits: torch.Tensor, span_end_logits: torch.Tensor, score="logprobs", p_to_rerank=100, has_sentinel=False, hacks={ "max_answer_length": 30, "combine_surface_forms": (False, None) }) -> \ (torch.Tensor, torch.Tensor): """ This method has been borrowed from AllenNLP :param span_start_logits: :param span_end_logits: :return: """ # We call the inputs "logits" - they could either be unnormalized logits or normalized log # probabilities. A log_softmax operation is a constant shifting of the entire logit # vector, so taking an argmax over either one gives the same result. 
if "combine_surface_forms" not in hacks: hacks["combine_surface_forms"] = (False, None) if hacks["combine_surface_forms"][0]: assert hacks["combine_surface_forms"][1] is not None if span_start_logits.dim() != 2 or span_end_logits.dim() != 2: raise ValueError("Input shapes must be (batch_size, passage_length)") batch_size, passage_length = span_start_logits.size() device = span_start_logits.device # (batch_size, passage_length, passage_length) span_log_probs = span_start_logits.unsqueeze(2) + span_end_logits.unsqueeze(1) # if first token is sentinel, class, combinations (0,x) and (x,0); x!=0 are invalid # mask these if has_sentinel: span_log_probs[:, 1:, 0] = -math.inf span_log_probs[:, 0, 1:] = -math.inf # Only the upper triangle of the span matrix is valid; the lower triangle has entries where # the span ends before it starts. span_log_mask = torch.triu(torch.ones((passage_length, passage_length), device=device)).log().unsqueeze(0) valid_span_log_probs = span_log_probs + span_log_mask spans_longer_than_maxlen_mask = torch.Tensor([[j - i + 1 > hacks["max_answer_length"] for j in range(passage_length)] for i in range(passage_length)]) \ .to(valid_span_log_probs.get_device() if valid_span_log_probs.get_device() >= 0 else torch.device("cpu")) valid_span_log_probs.masked_fill_(spans_longer_than_maxlen_mask.unsqueeze(0).bool(), -math.inf) # Here we take the span matrix and flatten it, then find the best span using argmax. We # can recover the start and end indices from this flattened list using simple modular # arithmetic. # (batch_size, passage_length * passage_length) valid_span_log_probs = valid_span_log_probs.view(batch_size, -1) if score == "probs": valid_span_scores = F.softmax(valid_span_log_probs, dim=-1) elif score == "logprobs": valid_span_scores = valid_span_log_probs else: raise NotImplemented(f"Unknown score type \"{score}\"") if hacks["combine_surface_forms"][0]: assert not (score == "probs" and has_sentinel), \ "Not a supported variant - probability decoding + has_sentinel" pad_token_id = 0 if len(hacks["combine_surface_forms"]) == 3: pad_token_id = hacks["combine_surface_forms"][-1] valid_span_scores = combine_surface_forms(valid_span_scores, batch_size, hacks, p_to_rerank, passage_length, score, pad_token=pad_token_id) best_span_scores, best_spans = valid_span_scores.max(-1) span_start_indices = best_spans // passage_length span_end_indices = best_spans % passage_length return best_span_scores, (span_start_indices, span_end_indices) @staticmethod def decode_topN_joint(valid_span_log_probs: torch.Tensor, N: int = 100) -> \ Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: batch_size = valid_span_log_probs.shape[0] passage_length = valid_span_log_probs.shape[1] # Addition in log-domain = multiplication in real domain # This will create a matrix containing addition of each span_start_logit with span_end_logit # (batch_size, passage_length, passage_length) span_log_probs = valid_span_log_probs # Here we take the span matrix and flatten it, then find the best span using argmax. We # can recover the start and end indices from this flattened list using simple modular # arithmetic. # (batch_size, passage_length * passage_length) # valid_span_log_probs is a vector [s_00,s_01,...,s_0n,s10,s11,...,s1n, ... , sn0,sn1,..., snn] of span scores # e.g. s_01 is a score of answer span from token 0 to token 1 valid_span_log_probs = valid_span_log_probs.view(batch_size, -1) # see image above, part 2. 
# Turn all the log-probabilities into probabilities valid_span_probs = F.softmax(valid_span_log_probs, dim=-1) sorted_probs, indices = torch.sort(valid_span_probs, dim=-1, descending=True) # best_span_probs of shape batch_size now contains all probabilities for each best span in the batch # best_spans of shape batch_size now contains argmaxes of each answer from unrolled sequence valid_span_log_probs span_start_indices = indices // passage_length span_end_indices = indices % passage_length # return just N best return sorted_probs[:, :N], (span_start_indices[:, :N], span_end_indices[:, :N]) @staticmethod def decode_topN_joint_wth_hacks(valid_span_log_probs: torch.Tensor, N: int = 100, score="probs", p_to_rerank=100, has_sentinel=False, hacks={ "max_answer_length": 30, "combine_surface_forms": (False, None) }) -> \ Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: """ This method has been borrowed from AllenNLP :param valid_span_log_probs: :return: """ if "combine_surface_forms" not in hacks: hacks["combine_surface_forms"] = (False, None) if hacks["combine_surface_forms"][0]: assert hacks["combine_surface_forms"][1] is not None batch_size = valid_span_log_probs.shape[0] passage_length = valid_span_log_probs.shape[1] if has_sentinel: valid_span_log_probs[:, 1:, 0] = -math.inf valid_span_log_probs[:, 0, 1:] = -math.inf # Here we take the span matrix and flatten it, then find the best span using argmax. We # can recover the start and end indices from this flattened list using simple modular # arithmetic. # (batch_size, passage_length * passage_length) spans_longer_than_maxlen_mask = torch.Tensor([[j - i + 1 > hacks["max_answer_length"] for j in range(passage_length)] for i in range(passage_length)]) \ .to(get_device(valid_span_log_probs)) valid_span_log_probs.masked_fill_(spans_longer_than_maxlen_mask.unsqueeze(0).bool(), -math.inf) valid_span_log_probs = valid_span_log_probs.view(batch_size, -1) if score == "probs": valid_span_scores = F.softmax(valid_span_log_probs, dim=-1) elif score == "logprobs": valid_span_scores = valid_span_log_probs else: raise NotImplemented(f"Unknown score type \"{score}\"") if hacks["combine_surface_forms"][0]: assert not (score == "probs" and has_sentinel), \ "Not a supported variant - proability decoding + has_sentinel" pad_token_id = 0 if len(hacks["combine_surface_forms"]) == 3: pad_token_id = hacks["combine_surface_forms"][-1] valid_span_scores = combine_surface_forms(valid_span_scores, batch_size, hacks, p_to_rerank, passage_length, score, pad_token=pad_token_id) sorted_probs, indices = torch.topk(valid_span_scores, k=N, dim=-1, largest=True) # best_span_probs of shape batch_size now contains topk probabilities for each best span in the batch # best_spans of shape batch_size now contains argmaxes of topk answers from unrolled sequence valid_span_log_probs span_start_indices = indices // passage_length span_end_indices = indices % passage_length return sorted_probs, (span_start_indices, span_end_indices) @staticmethod def decode_topN(span_start_logits: torch.Tensor, span_end_logits: torch.Tensor, N: int = 100) -> \ Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: """ This method has been borrowed from AllenNLP :param span_start_logits: unnormalized start log probabilities :param span_end_logits: unnormalized end log probabilities :return: """ # We call the inputs "logits" - they could either be unnormalized logits or normalized log # probabilities. 
A log_softmax operation is a constant shifting of the entire logit # vector, so taking an argmax over either one gives the same result. if span_start_logits.dim() != 2 or span_end_logits.dim() != 2: raise ValueError("Input shapes must be (batch_size, document_length)") batch_size, passage_length = span_start_logits.size() device = span_start_logits.device # span_start_logits.unsqueeze(2) has shape: # (batch_size, passage_length, 1) # span_end_logits.unsqueeze(1) has shape: # (batch_size, 1, passage_length) # Addition in log-domain = multiplication in real domain # This will create a matrix containing addition of each span_start_logit with span_end_logit # (batch_size, passage_length, passage_length) span_log_probs = span_start_logits.unsqueeze(2) + span_end_logits.unsqueeze(1) # Only the upper triangle of the span matrix is valid; the lower triangle has entries where # the span ends before it starts. We will mask these values out span_log_mask = torch.triu(torch.ones((passage_length, passage_length), device=device)).log().unsqueeze(0) # The mask will look like this # 0000000 # X000000 # XX00000 # XXX0000 # XXXX000 # XXXXX00 # XXXXXX0 # where X are -infinity valid_span_log_probs = span_log_probs + span_log_mask # see image above, part 1. # Here we take the span matrix and flatten it, then find the best span using argmax. We # can recover the start and end indices from this flattened list using simple modular # arithmetic. # (batch_size, passage_length * passage_length) # valid_span_log_probs is a vector [s_00,s_01,...,s_0n,s10,s11,...,s1n, ... , sn0,sn1,..., snn] of span scores # e.g. s_01 is a score of answer span from token 0 to token 1 valid_span_log_probs = valid_span_log_probs.view(batch_size, -1) # see image above, part 2. # Turn all the log-probabilities into probabilities valid_span_probs = F.softmax(valid_span_log_probs, dim=-1) sorted_probs, indices = torch.sort(valid_span_probs, dim=-1, descending=True) # best_span_probs of shape batch_size now contains all probabilities for each best span in the batch # best_spans of shape batch_size now contains argmaxes of each answer from unrolled sequence valid_span_log_probs span_start_indices = indices // passage_length span_end_indices = indices % passage_length # return just N best return sorted_probs[:, :N], (span_start_indices[:, :N], span_end_indices[:, :N]) @staticmethod def decode_topN_with_hacks(span_start_logits: torch.Tensor, span_end_logits: torch.Tensor, *args, **kwargs): if span_start_logits.dim() != 2 or span_end_logits.dim() != 2: raise ValueError("Input shapes must be (batch_size, document_length)") batch_size, passage_length = span_start_logits.size() device = span_start_logits.device # span_start_logits.unsqueeze(2) has shape: # (batch_size, passage_length, 1) # span_end_logits.unsqueeze(1) has shape: # (batch_size, 1, passage_length) # Addition in log-domain = multiplication in real domain # This will create a matrix containing addition of each span_start_logit with span_end_logit # (batch_size, passage_length, passage_length) span_log_probs = span_start_logits.unsqueeze(2) + span_end_logits.unsqueeze(1) # Only the upper triangle of the span matrix is valid; the lower triangle has entries where # the span ends before it starts. 
We will mask these values out span_log_mask = torch.triu(torch.ones((passage_length, passage_length), device=device)).log().unsqueeze(0) valid_span_log_probs = span_log_probs + span_log_mask return SpanPredictionModule.decode_topN_joint_wth_hacks(valid_span_log_probs, *args, **kwargs) @staticmethod def decode_joint(valid_span_log_probs: torch.Tensor, score="probs", has_sentinel=False) -> \ (torch.Tensor, torch.Tensor): batch_size = valid_span_log_probs.shape[0] passage_length = valid_span_log_probs.shape[1] # if first token is sentinel, class, combinations (0,x) and (x,0); x!=0 are invalid # mask these if has_sentinel: valid_span_log_probs[:, 1:, 0] = -math.inf valid_span_log_probs[:, 0, 1:] = -math.inf # Here we take the span matrix and flatten it, then find the best span using argmax. We # can recover the start and end indices from this flattened list using simple modular # arithmetic. # (batch_size, passage_length * passage_length) valid_span_log_probs = valid_span_log_probs.view(batch_size, -1) if score == "probs": valid_span_scores = F.softmax(valid_span_log_probs, dim=-1) elif score == "logprobs": valid_span_scores = valid_span_log_probs else: raise NotImplemented(f"Unknown score type \"{score}\"") best_span_scores, best_spans = valid_span_scores.max(-1) span_start_indices = best_spans // passage_length span_end_indices = best_spans % passage_length return best_span_scores, (span_start_indices, span_end_indices) @staticmethod def decode_joint_with_hacks(valid_span_log_probs: torch.Tensor, score="probs", p_to_rerank=100, has_sentinel=False, hacks={ "max_answer_length": 30, "combine_surface_forms": (False, None) }) -> (torch.Tensor, torch.Tensor): """ This method has been borrowed from AllenNLP :param valid_span_log_probs: :return: """ if "combine_surface_forms" not in hacks: hacks["combine_surface_forms"] = (False, None) if hacks["combine_surface_forms"][0]: assert hacks["combine_surface_forms"][1] is not None batch_size = valid_span_log_probs.shape[0] passage_length = valid_span_log_probs.shape[1] # if first token is sentinel, class, combinations (0,x) and (x,0); x!=0 are invalid # mask these if has_sentinel: valid_span_log_probs[:, 1:, 0] = -math.inf valid_span_log_probs[:, 0, 1:] = -math.inf # Here we take the span matrix and flatten it, then find the best span using argmax. We # can recover the start and end indices from this flattened list using simple modular # arithmetic. 
# (batch_size, passage_length * passage_length) spans_longer_than_maxlen_mask = torch.Tensor([[j - i + 1 > hacks["max_answer_length"] for j in range(passage_length)] for i in range(passage_length)]) \ .to(valid_span_log_probs.get_device() if valid_span_log_probs.get_device() >= 0 else torch.device("cpu")) valid_span_log_probs.masked_fill_(spans_longer_than_maxlen_mask.unsqueeze(0).bool(), -math.inf) valid_span_log_probs = valid_span_log_probs.view(batch_size, -1) if score == "probs": valid_span_scores = F.softmax(valid_span_log_probs, dim=-1) elif score == "logprobs": valid_span_scores = valid_span_log_probs else: raise NotImplemented(f"Unknown score type \"{score}\"") if hacks["combine_surface_forms"][0]: assert not (score == "probs" and has_sentinel), \ "Not a supported variant - proability decoding + has_sentinel" pad_token_id = 0 if len(hacks["combine_surface_forms"]) == 3: pad_token_id = hacks["combine_surface_forms"][-1] valid_span_scores = combine_surface_forms(valid_span_scores, batch_size, hacks, p_to_rerank, passage_length, score, pad_token=pad_token_id) best_span_scores, best_spans = valid_span_scores.max(-1) span_start_indices = best_spans // passage_length span_end_indices = best_spans % passage_length return best_span_scores, (span_start_indices, span_end_indices) @staticmethod def decode_conditional(span_start_logits: torch.Tensor, span_end_logits: torch.Tensor, top_k_start_positions, beam_search_bestn, max_answer_length) -> \ (torch.Tensor, torch.Tensor): best_starts = [] best_ends = [] span_scores = [] max_n = [] for i, batch in enumerate(span_end_logits): best_starts_for_b = torch.empty([beam_search_bestn, beam_search_bestn], dtype=torch.int) best_ends_for_b = torch.empty([beam_search_bestn, beam_search_bestn], dtype=torch.int) span_scores_for_b = torch.empty([beam_search_bestn, beam_search_bestn], dtype=torch.float) # iteration over top n start logits max_prob = float("-inf") max_n.append(0) for n, option in enumerate(span_end_logits[i]): end_logits_softmax = torch.nn.functional.softmax(span_end_logits[i][n], dim=-1) try: start_logits_softmax = torch.nn.functional.softmax(span_start_logits[i], dim=-1)[ top_k_start_positions[i][n]] except IndexError as e: print(e) break total_prob = end_logits_softmax.max(-1)[0] + start_logits_softmax if total_prob > max_prob: max_prob = total_prob max_n[i] = n best_starts_for_b[n] = top_k_start_positions[i][n].repeat(beam_search_bestn) best_ends_for_b[n] = torch.topk(span_end_logits[i][n], beam_search_bestn).indices for j, be in enumerate(best_ends_for_b[n]): span_scores_for_b[j] = torch.topk(end_logits_softmax, beam_search_bestn).values[ j] + start_logits_softmax span_scores.append([float(s) for s in torch.flatten(span_scores_for_b)]) best_starts.append([int(s) for s in torch.flatten(best_starts_for_b)]) best_ends.append([int(e) for e in torch.flatten(best_ends_for_b)]) start_indexes, end_indexes, best_span_scores, logprobs_S, logprobs_E0, logprobs_Emax = \ best_starts, best_ends, span_scores, span_start_logits, span_end_logits[:, 0, :], \ span_end_logits[torch.arange(span_end_logits.size(0)), max_n, :] best_scores_f = [] start_indexes_f = [] end_indexes_f = [] for sib, eib, ssb in zip(start_indexes, end_indexes, best_span_scores): scores_l = [] end_l = [] start_l = [] for si, ei, ss in zip(sib, eib, ssb): if ei - si <= max_answer_length and ei >= si: scores_l.append(ss) end_l.append(ei) start_l.append(si) best_scores_f.append(scores_l) start_indexes_f.append(start_l) end_indexes_f.append(end_l) padded_S = torch.zeros(logprobs_E0.shape) 
        padded_S[:logprobs_S.shape[0], :] = logprobs_S
        logprobs_S = padded_S

        return logprobs_S, logprobs_E0, logprobs_Emax, best_scores_f, start_indexes_f, end_indexes_f
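

# Editor's note: a hedged usage sketch (not from the original code base) showing
# how the static decoders above are typically driven with raw start/end logits;
# the random logits and sizes are illustrative only.
if __name__ == '__main__':
    torch.manual_seed(0)
    start_logits = torch.randn(2, 10)  # batch of 2, passage length 10
    end_logits = torch.randn(2, 10)
    scores, (starts, ends) = SpanPredictionModule.decode(start_logits, end_logits, score="probs")
    print(scores)        # probability of the best span for each example
    print(starts, ends)  # start/end token indices, with ends >= starts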
# run tests to check coverage
import os
import asyncio

import discord as dpy
import prettify_exceptions

prettify_exceptions.hook()

import viper
from viper.exts import discord

basic_test = os.path.join("tests", "test_script.vp")
discordpy_test = os.path.join("tests", "discordpy_script_test.vp")

loop = asyncio.get_event_loop()
loop.run_until_complete(viper.eval_file(basic_test))  # run the basic script


class MockDpyObject:
    def __init__(self, **kwargs):
        for name, item in kwargs.items():
            setattr(self, name, item)


class MockDpyContext:
    def __init__(self):
        async def error(*args):
            print("SENDS: ", *args)
            return self.message

        self.send = error
        self.author = usr = MockDpyObject(
            name="Danny",
            nick=None,
            discriminator="0007",
            id=123456,
            send=error,
            mention="<@!123456>"
        )
        self.me = MockDpyObject(
            name="OAuth2 Sucks",
            nick=None,
            discriminator="3136",
            id=168930860739985410,
            send=error,
            mention="<@!168930860739985410>"
        )
        self.guild = guild = MockDpyObject(
            name="Discord.py",
            member_count=123,
            description="Discord.py Guild",
            id=336642139381301249,
            owner=usr,
            get_member=lambda i: None,
            get_member_name=lambda n: None
        )
        self.author.guild = guild
        self.me.guild = guild
        self.channel = channel = MockDpyObject(
            id=336642776609456130,
            name="General",
            guild=guild,
            is_nsfw=lambda: False,
            is_news=lambda: False,
            mention="<#336642776609456130>",
            topic="Ahhh",
            send=error
        )
        self.guild.text_channels = [channel]
        self.guild.get_channel = lambda i: channel
        self.message = MockDpyObject(
            content="Hi there",
            guild=guild,
            channel=channel,
            clean_content="Hi there",
            flags=None,
            jump_url="discord.com/url",
            author=usr
        )


runner = viper.Runtime()
loop.run_until_complete(
    viper.eval_file(
        discordpy_test,
        injected={"ctx": discord.SafeAccessContext(runner, MockDpyContext())},
        runtime=runner
    )
)
from flask import Flask
from flask_sslify import SSLify

app = Flask(__name__)
app.config.from_object('config')
sslify = SSLify(app)

from jawfish import views
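
# Editor's note: `app.config.from_object('config')` assumes an importable
# top-level `config` module. The keys sketched below are common Flask settings,
# not values taken from this project:
#
#     # config.py
#     DEBUG = False
#     SECRET_KEY = "change-me"
#     PREFERRED_URL_SCHEME = "https"

if __name__ == "__main__":
    # Assumption: run the development server locally; SSLify redirects plain
    # HTTP requests to HTTPS when the app is not in debug or testing mode.
    app.run(host="127.0.0.1", port=5000)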