content
stringlengths 0
894k
| type
stringclasses 2
values |
---|---|
from clases.dia_mañana import *
from clases.yinyang import *
from clases.alternativa import *

if __name__ == "__main__":
    # Console menu: run exactly one of the three demo exercises.
    print("¿Qué ejercicio quieres ver?:", "\n","1)Dia del mañana", "\n","2)Inmortal", "\n","3)Alternativa herencia multiple")
    n = int(input("Número del ejercicio: "))
    # The choices are mutually exclusive, so use an elif chain instead of
    # three independent ifs (the original re-tested n after handling it).
    if n == 1:
        # Destructor demo: deleting the city instance triggers its __del__.
        destrucion = str(input("¿Qué ciudad quieres destruir, Los Ángeles o Nueva York?"))
        if destrucion == "Los Ángeles":
            la = LosAngeles()
            del la
        elif destrucion == "Nueva York":
            ny = NuevaYork()
            del ny
        else:
            print("La ciudad no es válida")
    elif n == 2:
        # Yin/Yang demo: only one half is deleted.
        yin = Yin()
        yang = Yang()
        del yang
    elif n == 3:
        # Multiple-inheritance demo: a house with four walls and four windows.
        pared_norte = Pared("NORTE")
        pared_oeste = Pared("OESTE")
        pared_sur = Pared("SUR")
        pared_este = Pared("ESTE")
        ventana_norte = InterfazCristal(pared_norte, 0.5)
        ventana_oeste = InterfazCristal(pared_oeste, 1)
        ventana_sur = InterfazCristal(pared_sur, 2)
        ventana_este = InterfazCristal(pared_este, 1)
        casa = Casa(4, [pared_norte, pared_oeste, pared_sur, pared_este], [ventana_norte, ventana_este, ventana_oeste, ventana_sur])
        print(casa.superficie_acristalada())
|
python
|
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'expstatus.ui'
##
## Created by: Qt User Interface Compiler version 5.15.0
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import (
QCoreApplication,
QDate,
QDateTime,
QMetaObject,
QObject,
QPoint,
QRect,
QSize,
Qt,
QTime,
QUrl,
)
from PySide2.QtGui import (
QBrush,
QColor,
QConicalGradient,
QCursor,
QFont,
QFontDatabase,
QIcon,
QKeySequence,
QLinearGradient,
QPainter,
QPalette,
QPixmap,
QRadialGradient,
)
from PySide2.QtWidgets import *
class Ui_ExposureStatus(object):
    """Auto-generated UI builder for the exposure-status window.

    Generated from 'expstatus.ui' by the Qt UI compiler; manual edits are
    lost when the UI file is recompiled.
    """

    def setupUi(self, ExposureStatus):
        # Build the widget tree: a status label on top and a splitter with
        # "Exposing"/"Reading" indicator labels underneath.
        if not ExposureStatus.objectName():
            ExposureStatus.setObjectName(u"ExposureStatus")
        ExposureStatus.resize(260, 100)
        ExposureStatus.setMinimumSize(QSize(260, 100))
        ExposureStatus.setMaximumSize(QSize(520, 200))
        font = QFont()
        font.setPointSize(8)
        ExposureStatus.setFont(font)
        ExposureStatus.setIconSize(QSize(15, 15))
        self.centralwidget = QWidget(ExposureStatus)
        self.centralwidget.setObjectName(u"centralwidget")
        self.gridLayout = QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName(u"gridLayout")
        # Large bold label that shows the overall exposure status text.
        self.label_status = QLabel(self.centralwidget)
        self.label_status.setObjectName(u"label_status")
        font1 = QFont()
        font1.setPointSize(12)
        font1.setBold(True)
        font1.setWeight(75)
        self.label_status.setFont(font1)
        self.label_status.setFrameShape(QFrame.StyledPanel)
        self.label_status.setFrameShadow(QFrame.Sunken)
        self.label_status.setAlignment(Qt.AlignCenter)
        self.gridLayout.addWidget(self.label_status, 0, 0, 1, 1)
        # Horizontal splitter holding the two phase-indicator labels.
        self.splitter = QSplitter(self.centralwidget)
        self.splitter.setObjectName(u"splitter")
        self.splitter.setOrientation(Qt.Horizontal)
        self.label_integrating = QLabel(self.splitter)
        self.label_integrating.setObjectName(u"label_integrating")
        font2 = QFont()
        font2.setPointSize(12)
        font2.setBold(False)
        font2.setWeight(50)
        self.label_integrating.setFont(font2)
        self.label_integrating.setFrameShape(QFrame.StyledPanel)
        self.label_integrating.setFrameShadow(QFrame.Sunken)
        self.label_integrating.setLineWidth(2)
        self.label_integrating.setAlignment(Qt.AlignCenter)
        self.splitter.addWidget(self.label_integrating)
        self.label_reading = QLabel(self.splitter)
        self.label_reading.setObjectName(u"label_reading")
        self.label_reading.setFont(font2)
        self.label_reading.setFrameShape(QFrame.StyledPanel)
        self.label_reading.setFrameShadow(QFrame.Sunken)
        self.label_reading.setLineWidth(2)
        self.label_reading.setAlignment(Qt.AlignCenter)
        self.splitter.addWidget(self.label_reading)
        self.gridLayout.addWidget(self.splitter, 1, 0, 1, 1)
        ExposureStatus.setCentralWidget(self.centralwidget)
        self.retranslateUi(ExposureStatus)
        QMetaObject.connectSlotsByName(ExposureStatus)
    # setupUi

    def retranslateUi(self, ExposureStatus):
        # Install (re-translatable) user-visible strings.
        ExposureStatus.setWindowTitle(
            QCoreApplication.translate("ExposureStatus", u"ExpStatus", None)
        )
        # if QT_CONFIG(tooltip)
        ExposureStatus.setToolTip(
            QCoreApplication.translate("ExposureStatus", u"azcam exposure status", None)
        )
        # endif // QT_CONFIG(tooltip)
        # if QT_CONFIG(whatsthis)
        ExposureStatus.setWhatsThis("")
        # endif // QT_CONFIG(whatsthis)
        self.label_status.setText("")
        self.label_integrating.setText(
            QCoreApplication.translate("ExposureStatus", u"Exposing", None)
        )
        self.label_reading.setText(
            QCoreApplication.translate("ExposureStatus", u"Reading", None)
        )
    # retranslateUi
|
python
|
# Read two strings, then a third; print YES when the third is an exact
# anagram (same multiset of characters) of the first two concatenated.
first = list(input())
sec = list(input())
te = first + sec
te.sort()
third = list(input())
third.sort()
if te==third:
    print("YES")
else:
    print("NO")
# Golfed one-liner variant of the same check: reads three MORE input lines
# and prints YES/NO by slicing 'YNEOS' with a boolean start index.
s,i=sorted,input;print('YNEOS'[s(i()+i())!=s(i())::2])
|
python
|
# -*- coding: utf-8 -*-
from ..components import *
from ..container import *
from ..elements import *
__all__ = ['regression_report']
def regression_report(truth, predict, label=None, per_target=True,
                      target_names=None, title=None):
    """Compose a standard regression report.

    The report bundles a score summary with an attachment carrying the raw
    regression results.

    Parameters
    ----------
    truth : np.ndarray
        Ground truth (correct) target values.
    predict : np.ndarray
        Predicted target values.
    label : np.ndarray | list
        If specified, will compute the regression scores for each label class.
    per_target : bool
        Whether or not to compute the regression score for each dimension?
        (default True)
    target_names : np.ndarray | list
        Name of each dimension in regression results.
        If not specified, will use the coordinate of each dimension, e.g.,
        "(0,0,0)".
    title : str
        Optional title of this regression summary table.
    """
    summary = regression_summary(
        truth=truth, predict=predict, label=label, per_target=per_target,
        target_names=target_names
    )
    attachment = regression_result_attachment(
        truth=truth, predict=predict, title='Regression Result'
    )
    children = [summary, attachment]
    # Wrap in a titled section when a title was given, otherwise a plain group.
    return Section(title, children) if title else Group(children)
|
python
|
# encoding: utf-8
# Copyright 2011 California Institute of Technology. ALL RIGHTS
# RESERVED. U.S. Government Sponsorship acknowledged.
def nullUpgradeStep(setupTool):
    '''No-op upgrade step for profile upgrades that need no custom activity.'''
    # Intentionally empty: the docstring is the whole body.
|
python
|
# range_test function definition goes here
def range_test(num):
    """Return True when num lies in the inclusive range 1..500."""
    return 1 <= num <= 500
# Prompt for a number and report whether it falls inside 1..500.
num = int(input("Enter a number: "))
if range_test(num):
    print("{:d} is in range.".format(num))
else:
    print("The number you entered is outside the range!")
|
python
|
# Python - 3.6.0
def is_sator_square(tablet):
    """Return True when the square grid reads the same along rows and
    columns and under 180-degree rotation (a "sator square")."""
    size = len(tablet)
    return all(
        tablet[row][col] == tablet[-(row + 1)][-(col + 1)]
        == tablet[col][row] == tablet[-(col + 1)][-(row + 1)]
        for row in range(size)
        for col in range(size)
    )
|
python
|
from platform import system, release
from sys import version_info
from configparser import ConfigParser
from pyrfc import Connection, get_nwrfclib_version

# Read SAP RFC connection parameters from the [test] section of pyrfc.cfg.
config = ConfigParser()
config.read('pyrfc.cfg')
params = config._sections['test']  # NOTE(review): _sections is a private ConfigParser API
conn = Connection(**params)

# Environment diagnostics (printed as tuples).
print(('Platform:', system(), release()))
print(('Python version:', version_info))
print(('SAP NW RFC:', get_nwrfclib_version()))

# Call the remote function once with dict rows and once with bare strings,
# printing the returned string and table lines each time.
result = conn.call('/COE/RBP_PAM_SERVICE_ORD_CHANG', IV_ORDERID='4711', IT_NOTICE_NOTIFICATION=[{'': 'ABCD'}, {'': 'XYZ'}])
for line in result['ET_STRING']:
    print(line)
for line in result['ET_TABLE']:
    print(line)
result = conn.call('/COE/RBP_PAM_SERVICE_ORD_CHANG', IV_ORDERID='4711', IT_NOTICE_NOTIFICATION=['ABCD', 'XYZ'])
for line in result['ET_STRING']:
    print(line)
for line in result['ET_TABLE']:
    print(line)
|
python
|
import platform, sys

# True when running on Windows.
WIN = platform.system() == 'Windows'  # pragma: no cover

# True if we are running on Python 2.
PY2 = sys.version_info[0] == 2

if not PY2:  # pragma: no cover
    from urllib.parse import quote, unquote

    string_type = str
    unicode_text = str
    byte_string = bytes
    wsgi_string = str

    def u_(s):
        """Coerce *s* to text."""
        return str(s)

    def bytes_(s):
        """Coerce *s* to strict-ASCII bytes."""
        return str(s).encode('ascii', 'strict')

    def percent_encode(string, safe, encoding):
        """Percent-encode *string*, keeping *safe* characters literal."""
        return quote(string, safe, encoding, errors='strict')

    def percent_decode(string):
        """Undo percent-encoding."""
        return unquote(string)
else:  # pragma: no cover
    from urllib import quote, unquote

    string_type = basestring
    unicode_text = unicode
    byte_string = str
    wsgi_string = str

    def u_(s):
        """Decode *s* from UTF-8 to unicode text."""
        return unicode(s, 'utf-8')

    def bytes_(s):
        """Coerce *s* to a byte string."""
        return str(s)

    def percent_encode(string, **kwargs):
        """Percent-encode *string* after encoding it with *encoding*."""
        encoding = kwargs.pop('encoding')
        return quote(string.encode(encoding), **kwargs)

    def percent_decode(string):
        """Undo percent-encoding."""
        return unquote(string)


def with_metaclass(meta, base=object):
    """Create a base class with a metaclass."""
    return meta("NewBase", (base,), {})
|
python
|
"""Tests for flake8.plugins.manager.PluginManager."""
import mock
from flake8.plugins import manager
def create_entry_point_mock(name):
    """Build a mocked EntryPoint exposing only a ``name`` attribute."""
    entry_point = mock.Mock(spec=['name'])
    entry_point.name = name
    return entry_point
@mock.patch('entrypoints.get_group_all')
def test_calls_entrypoints_on_instantiation(get_group_all):
    """Verify that we call get_group_all when we create a manager."""
    # Even with no installed plugins the namespace must be queried once.
    get_group_all.return_value = []
    manager.PluginManager(namespace='testing.entrypoints')
    get_group_all.assert_called_once_with('testing.entrypoints')
@mock.patch('entrypoints.get_group_all')
def test_calls_entrypoints_creates_plugins_automaticaly(get_group_all):
    """Verify that we create Plugins on instantiation."""
    get_group_all.return_value = [
        create_entry_point_mock('T100'),
        create_entry_point_mock('T200'),
    ]
    plugin_mgr = manager.PluginManager(namespace='testing.entrypoints')
    get_group_all.assert_called_once_with('testing.entrypoints')
    # Each discovered entry point becomes a Plugin keyed by its name.
    assert 'T100' in plugin_mgr.plugins
    assert 'T200' in plugin_mgr.plugins
    assert isinstance(plugin_mgr.plugins['T100'], manager.Plugin)
    assert isinstance(plugin_mgr.plugins['T200'], manager.Plugin)
@mock.patch('entrypoints.get_group_all')
def test_handles_mapping_functions_across_plugins(get_group_all):
    """Verify we can use the PluginManager call functions on all plugins."""
    entry_point_mocks = [
        create_entry_point_mock('T100'),
        create_entry_point_mock('T200'),
    ]
    get_group_all.return_value = entry_point_mocks
    plugin_mgr = manager.PluginManager(namespace='testing.entrypoints')
    plugins = [plugin_mgr.plugins[name] for name in plugin_mgr.names]
    # map() with the identity function must yield every plugin, in order.
    assert list(plugin_mgr.map(lambda x: x)) == plugins
@mock.patch('entrypoints.get_group_all')
def test_local_plugins(get_group_all):
    """Verify PluginManager can load given local plugins."""
    get_group_all.return_value = []
    # Local plugins are passed as "NAME = module.path:attr" specifier strings.
    plugin_mgr = manager.PluginManager(
        namespace='testing.entrypoints',
        local_plugins=['X = path.to:Plugin']
    )
    assert plugin_mgr.plugins['X'].entry_point.module_name == 'path.to'
|
python
|
# Symbolic names for the JavaScript runtimes/engines selectable as backends.
PyV8 = "PyV8"
Node = "Node"
JavaScriptCore = "JavaScriptCore"
SpiderMonkey = "SpiderMonkey"
JScript = "JScript"
PhantomJS = "PhantomJS"
SlimerJS = "SlimerJS"
Nashorn = "Nashorn"
Deno = "Deno"
|
python
|
from flask.sessions import SessionInterface, SessionMixin
from flask.json.tag import TaggedJSONSerializer
from werkzeug.datastructures import CallbackDict
from itsdangerous import BadSignature, want_bytes
from CTFd.cache import cache
from CTFd.utils import text_type
from CTFd.utils.security.signing import sign, unsign
from uuid import uuid4
import six
def total_seconds(td):
    """Return the whole seconds in timedelta *td* (microseconds ignored)."""
    # td.seconds holds only the sub-day remainder, so add the day component.
    return td.days * 24 * 60 * 60 + td.seconds
class CachedSession(CallbackDict, SessionMixin):
    """
    Server-side session dict whose contents live in the cache backend.

    This code is mostly based off of the ServerSideSession from Flask-Session.
    https://github.com/fengsp/flask-session/blob/master/flask_session/sessions.py#L37
    """

    def __init__(self, initial=None, sid=None, permanent=None):
        # Any mutation of the dict flips `modified` so the interface knows
        # the session must be persisted on response.
        def on_update(self):
            self.modified = True
        CallbackDict.__init__(self, initial, on_update)
        self.sid = sid
        if permanent:
            self.permanent = permanent
        self.modified = False

    def regenerate(self):
        # Drop the cached copy of this session.
        cache.delete(self.sid)
        # Empty current sid and mark modified so the interface will give it a new one.
        self.sid = None
        self.modified = True
class CachingSessionInterface(SessionInterface):
    """
    This code is partially based off of the RedisSessionInterface from Flask-Session with updates to properly
    interoperate with Flask-Caching and be more inline with modern Flask (i.e. doesn't use pickle).
    https://github.com/fengsp/flask-session/blob/master/flask_session/sessions.py#L90
    """
    # Sessions are serialized as Flask tagged JSON — never pickle.
    serializer = TaggedJSONSerializer()
    session_class = CachedSession

    def _generate_sid(self):
        # Session ids are random UUID4 strings.
        return str(uuid4())

    def __init__(self, key_prefix, use_signer=True, permanent=False):
        self.key_prefix = key_prefix
        self.use_signer = use_signer
        self.permanent = permanent

    def open_session(self, app, request):
        """Load the session referenced by the request cookie, or create a fresh one."""
        sid = request.cookies.get(app.session_cookie_name)
        if not sid:
            # No cookie at all: start a brand new empty session.
            sid = self._generate_sid()
            return self.session_class(sid=sid, permanent=self.permanent)
        if self.use_signer:
            try:
                sid_as_bytes = unsign(sid)
                sid = sid_as_bytes.decode()
            except BadSignature:
                # Tampered/invalid signature: discard and start over.
                sid = self._generate_sid()
                return self.session_class(sid=sid, permanent=self.permanent)
        if not six.PY2 and not isinstance(sid, text_type):
            sid = sid.decode("utf-8", "strict")
        val = cache.get(self.key_prefix + sid)
        if val is not None:
            try:
                data = self.serializer.loads(val)
                return self.session_class(data, sid=sid)
            except Exception:
                # Corrupt cached payload: fall back to an empty session with
                # the same sid rather than failing the request.
                return self.session_class(sid=sid, permanent=self.permanent)
        return self.session_class(sid=sid, permanent=self.permanent)

    def save_session(self, app, session, response):
        """Persist a modified session to the cache and (re)set the cookie."""
        domain = self.get_cookie_domain(app)
        path = self.get_cookie_path(app)
        if not session:
            # Session was emptied: remove server-side state and the cookie.
            if session.modified:
                cache.delete(self.key_prefix + session.sid)
                response.delete_cookie(
                    app.session_cookie_name, domain=domain, path=path
                )
            return
        if session.modified:
            httponly = self.get_cookie_httponly(app)
            secure = self.get_cookie_secure(app)
            expires = self.get_expiration_time(app, session)
            samesite = self.get_cookie_samesite(app)
            val = self.serializer.dumps(dict(session))
            if session.sid is None:
                # regenerate() cleared the sid; issue a fresh one now.
                session.sid = self._generate_sid()
            cache.set(
                key=self.key_prefix + session.sid,
                value=val,
                timeout=total_seconds(app.permanent_session_lifetime),
            )
            if self.use_signer:
                session_id = sign(want_bytes(session.sid))
            else:
                session_id = session.sid
            response.set_cookie(
                app.session_cookie_name,
                session_id,
                expires=expires,
                httponly=httponly,
                domain=domain,
                path=path,
                secure=secure,
                samesite=samesite,
            )
|
python
|
import math

# Print the series i**3 + 2*i for i = 1..n.
# Fix: corrected the typo "ius" -> "is" in the user-facing prompt.
n = int(input("Enter the number till where the series is to be printed = "))
for i in range(1, n + 1):
    k = math.pow(i, 3)  # float, matching the original output format
    j = k + 2 * i
    print(j)
|
python
|
# Write a function that prompts user to input his/her full name.
# After the user enters the full name, split it into first/middle/last parts.
# Fix: removed the unused `count` and `k` variables.
name = str(input("Enter your full name: "))
s = name.split(" ")
print("The first name is:", s[0])
if len(s) == 3:
    # Three words: first, middle, last.
    print("The middle name is:", s[1])
    print("The last name is:", s[2])
else:
    # Otherwise assume exactly two words (a one-word name would raise IndexError).
    print("The last name is:", s[1])
|
python
|
from typing import Tuple, Optional
from abc import ABC, abstractmethod
from mercury.msg.smart_grid import ElectricityOffer
from xdevs.models import Atomic, Port, PHASE_PASSIVE, INFINITY
from mercury.utils.history_buffer import EventHistoryBuffer
class EnergyProvider(Atomic, ABC):
    """Abstract DEVS atomic model that publishes electricity price offers."""

    def __init__(self, **kwargs):
        # 'provider_id' is mandatory; offers start undefined until scheduled.
        self.provider_id: str = kwargs['provider_id']
        self.actual_offer: Optional[float] = None
        self.eventual_offer: Optional[float] = None
        self.next_timeout: float = INFINITY
        self._clock: float = 0
        super().__init__('smart_grid_provider_{}'.format(self.provider_id))
        self.out_electricity_offer = Port(ElectricityOffer, 'out_electricity_offer')
        self.add_out_port(self.out_electricity_offer)

    def deltint(self):
        # Internal transition: promote the pending offer to current and
        # schedule the next offer change.
        self._clock += self.sigma
        self.actual_offer = self.eventual_offer
        self.eventual_offer, self.next_timeout = self.schedule_next_offer()
        self.hold_in(PHASE_PASSIVE, self.next_timeout)

    def deltext(self, e):
        # External transition: advance the clock and keep waiting out the
        # remainder of the previously scheduled timeout.
        self._clock += e
        self.next_timeout -= e
        self.hold_in(PHASE_PASSIVE, self.next_timeout)

    def lambdaf(self):
        # Output: emit the offer that becomes effective at this event.
        self.out_electricity_offer.add(ElectricityOffer(self.provider_id, self.eventual_offer))

    def initialize(self):
        # Fire immediately so the initial offer is published at t = 0.
        self.hold_in(PHASE_PASSIVE, 0)

    def exit(self):
        pass

    def get_next_timeout(self):
        # Time remaining until the next scheduled offer change.
        return self.next_timeout

    @abstractmethod
    def schedule_next_offer(self) -> Tuple[Optional[float], float]:
        """:return: tuple (new eventual offer, time to wait before publishing new offer)"""
        pass
class EnergyProviderStatic(EnergyProvider):
    """Energy provider whose price offer never changes after start-up."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Optional fixed offer; None when not configured.
        self.eventual_offer = kwargs.get('offer')

    def schedule_next_offer(self) -> Tuple[Optional[float], float]:
        # The price is constant, so no further update is ever scheduled.
        return self.actual_offer, INFINITY
class EnergyProviderHistory(EnergyProvider):
    """Energy provider that replays price offers from a historical buffer."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Name of the dataframe column holding the offer values.
        self.offer_column = kwargs.get('offer_column', 'offer')
        self.buffer = EventHistoryBuffer(**kwargs)
        if not self.buffer.column_exists(self.offer_column):
            raise ValueError('dataframe does not have the mandatory column {}'.format(self.offer_column))
        self.eventual_offer = self.buffer.initial_val[self.offer_column].item()

    def schedule_next_offer(self) -> Tuple[float, float]:
        # Advance through buffered events until the offer actually changes
        # or the history is exhausted (next_time reaches INFINITY).
        eventual = self.actual_offer
        next_time = self._clock
        while eventual == self.actual_offer and next_time < INFINITY:
            eventual = self.buffer.get_event()[self.offer_column].item()
            next_time = self.buffer.time_of_next_event()
            self.buffer.advance()
        # Return the new offer and the delay (relative to now) until it applies.
        return eventual, next_time - self._clock
|
python
|
from abc import abstractmethod
from dataclasses import dataclass
from typing import List, Any, Callable, Dict, Tuple, NamedTuple, Union
from data_splitting import split_splits, LearnCurveJob, EvalJob
from seq_tag_util import calc_seqtag_f1_scores, Sequences
from util.worker_pool import GenericTask
@dataclass
class Experiment:
    """A named learn-curve experiment: its folds, jobs, and scoring task."""
    name: str
    num_folds: int
    jobs: List[LearnCurveJob]
    score_task: GenericTask

    def __str__(self):
        # Omit the bulky job list from the printed representation.
        visible = {key: val for key, val in self.__dict__.items() if key != "jobs"}
        return str(visible)
# Mapping from split name (e.g. "train"/"test") to the row indices it contains.
Splits = Dict[str, List[int]]

class SeqTagTaskData(NamedTuple):
    # data: per-split raw sequences; task_data: task-specific payload passed
    # through to prediction.
    data: Dict[str, List]
    task_data: Any
class SeqTagScoreTask(GenericTask):
    """Worker-pool task that scores sequence-tagging predictions with F1."""

    def __init__(self, params, data_supplier: Callable) -> None:
        task_params = {"params": params, "data_supplier": data_supplier}
        super().__init__(**task_params)

    @staticmethod
    @abstractmethod
    def build_task_data(**task_params) -> SeqTagTaskData:
        # Subclasses construct the (data, task_data) bundle used by process().
        raise NotImplementedError

    @classmethod
    def process(cls, job: EvalJob, task_data: SeqTagTaskData):
        # Split the data as the job requests, predict, then compute the
        # seq-tagging F1 scores for every split.
        splits = split_splits(job, task_data.data)
        predictions = cls.predict_with_targets(splits, task_data.task_data)
        return {
            split_name: calc_seqtag_f1_scores(preds, targets)
            for split_name, (preds, targets) in predictions.items()
        }

    @classmethod
    @abstractmethod
    def predict_with_targets(
        cls, splits: Splits, params
    ) -> Dict[str, Tuple[Sequences, Sequences]]:
        # Subclasses return, per split, (predicted sequences, target sequences).
        raise NotImplementedError
|
python
|
from SeeThru_Feeds.Model.Attribution import Attribution
from SeeThru_Feeds.Model.Properties.Properties import *
from SeeThru_Feeds.Model.Properties.PropertyManager import PropertyManager
class ComponentBase(PropertyManager, Attribution):
    """Base class for feed components; subclasses implement component_execute."""

    def component_execute(self):
        """
        Hook that concrete components must override with their execution logic.

        Raises:
            NotImplementedError: There is no execution method defined, please define it with 'component_execute'
        """
        raise NotImplementedError("There is no execution method defined, please define it with 'component_execute'")

    def run(self):
        """
        Validate the component's fillable properties, then execute it.

        This is the only supported way to run a component, since it guarantees
        the property check happens first.

        Returns:
            ComponentBase: The component
        """
        self.check_fillables()
        # The fillable properties passed their parsing, therefore the component can be executed
        self.component_execute()
        return self
|
python
|
'''
Copyright 2021 Kyle Kowalczyk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from CiscoAutomationFramework.FirmwareBase import CiscoFirmware
from time import sleep
class IOS(CiscoFirmware):
    """IOS-specific implementation of the CiscoFirmware interface.

    Every accessor first enters privileged exec mode and disables terminal
    paging so command output arrives without --More-- breaks.
    """

    @property
    def uptime(self):
        # Find the '<hostname> uptime is ...' line in 'show version' and
        # return everything after the third word; None when not found.
        self.cli_to_privileged_exec_mode()
        self.terminal_length('0')
        device_output = self.transport.send_command_get_output('show version')
        for line in device_output.splitlines():
            if f'{self.transport.hostname.lower()} uptime' in line.lower():
                return ' '.join(line.split()[3:])
        return None

    @property
    def interfaces(self):
        # Interface names are the first token of each non-indented line of
        # 'show interfaces' output (header and trailing lines sliced off).
        self.cli_to_privileged_exec_mode()
        self.terminal_length('0')
        raw_data = self.transport.send_command_get_output('show interfaces', buffer_size=500)
        try:
            parsed_data = [x.split()[0] for x in raw_data[2:-2] if not x.startswith(' ')]
        except IndexError as _:
            raise IndexError('Unexpected data from device, Unable to extract interface names from "show interfaces" command!')
        return parsed_data

    @property
    def mac_address_table(self):
        # Raw table text with the 6 banner lines and 2 trailing lines removed.
        self.cli_to_privileged_exec_mode()
        self.terminal_length('0')
        raw_mac = self.transport.send_command_get_output('show mac address-table')
        return '\n'.join(raw_mac[6:-2])

    @property
    def arp_table(self):
        # Raw ARP output minus the echoed command/header and trailing prompt.
        self.cli_to_privileged_exec_mode()
        self.terminal_length('0')
        raw_arp = self.transport.send_command_get_output('show ip arp')
        return '\n'.join(raw_arp[2:-1])

    @property
    def running_config(self):
        self.cli_to_privileged_exec_mode()
        self.terminal_length('0')
        running_config = self.transport.send_command_get_output('show running-config', buffer_size=100)
        # if the running config grabbed is less than 4 lines and the prompt is not in the last 4 lines of the config
        while len(running_config) < 4 and not any([True if self.prompt in x else False for x in reversed(running_config[-4:])]):
            running_config += self.transport.get_output(buffer_size=100, no_command_sent_previous=True)
            sleep(.1)
        return '\n'.join(running_config[2:-2])

    @property
    def startup_config(self):
        self.cli_to_privileged_exec_mode()
        self.terminal_length('0')
        config = self.transport.send_command_get_output('show startup-config', buffer_size=100)
        # Keep polling until the prompt appears, signalling complete output.
        while len(config) < 4 and not any([True if self.prompt in x else False for x in reversed(config[-4:])]):
            config += self.transport.get_output(buffer_size=100, no_command_sent_previous=True)
            sleep(.1)
        return '\n'.join(config[2:-2])

    def _terminal_length(self, n='0'):
        # '0' disables paging entirely.
        self.cli_to_privileged_exec_mode()
        return self.transport.send_command_get_output(f'terminal length {n}')

    def _terminal_width(self, n='0'):
        self.cli_to_privileged_exec_mode()
        return self.transport.send_command_get_output(f'terminal width {n}')

    def save_config(self):
        # Issue the copy, then send an empty line to answer the confirm prompt
        # and collect the result.
        self.cli_to_privileged_exec_mode()
        self.transport.send_command('copy running-config startup-config')
        data = self.transport.send_command_get_output('', timeout=15)
        # if the prompt is in the last line of output and there is not a percent sign in any line of output we will
        # interpret that as a succesful save
        if self.transport.prompt in ''.join(data[-1:]) and not any('%' in line for line in data):
            return True
        return False

    def add_local_user(self, username, password, password_code=0, *args, **kwargs):
        # Builds: 'username <user> [args...] [key value ...] secret <code> <password>'
        kwarg_string = ' '.join([f'{key} {value}' for key, value in kwargs.items()])
        command_string = f'username {username} {" ".join(args)} {kwarg_string} secret {password_code} {password}'
        self.cli_to_config_mode()
        return self.transport.send_command_get_output(command_string)

    def delete_local_user(self, username):
        self.cli_to_config_mode()
        self.transport.send_command(f'no username {username}')
        # Empty follow-up send flushes and returns the device response.
        return self.transport.send_command_get_output('')
|
python
|
# Copyright (c) 2019, MD2K Center of Excellence
# - Nasir Ali <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
from flask import request
from flask_restx import Namespace, Resource
from .. import CC, apiserver_config
from ..core.data_models import user_login_model, user_register_model, error_model, auth_token_resp_model, \
user_settings_resp_model, user_registration_resp_model
from ..core.decorators import auth_required
auth_route = apiserver_config['routes']['user']
auth_api = Namespace(auth_route, description='Authentication service')
@auth_api.route('')
class Auth(Resource):
    def get(self):
        # Simple health-check endpoint for the user/auth route.
        return {"message": "user route is working"}, 200
@auth_api.route('/<study_name>/register')
class Auth(Resource):
    @auth_api.doc('')
    @auth_api.expect(user_register_model(auth_api), validate=True)
    @auth_api.response(400, 'All fields are required.', model=error_model(auth_api))
    @auth_api.response(401, 'Invalid credentials.', model=error_model(auth_api))
    @auth_api.response(200, 'User registration successful.', model=user_registration_resp_model(auth_api))
    def post(self, study_name):
        '''Post required fields (username, password, user_role, user_metadata, user_settings) to register a user'''
        try:
            username = request.get_json().get('username', None).strip()
            user_password = request.get_json().get('password', None).strip()
            # study_name now comes from the URL path rather than the payload.
            #study_name = request.get_json().get('study_name', None).strip()
            user_role = request.get_json().get('user_role', None).strip()
            user_metadata = request.get_json().get('user_metadata', None)
            user_settings = request.get_json().get('user_settings', None)
            status = CC.get_or_create_instance(study_name=study_name).create_user(username, user_password, user_role, user_metadata, user_settings, encrypt_password=True)
            if status:
                return {"message": str(username) + " is created successfully."}, 200
            else:
                return {"message": "Cannot create, something went wrong."}, 400
        except (ValueError, Exception) as err:
            # NOTE(review): .strip() on a missing field raises AttributeError,
            # which lands here and is reported as a generic 400.
            return {"message": str(err)}, 400
@auth_api.route('/<study_name>/login')
class Auth(Resource):
    @auth_api.doc('')
    @auth_api.expect(user_login_model(auth_api), validate=True)
    @auth_api.response(400, 'User name and password cannot be empty.', model=error_model(auth_api))
    @auth_api.response(401, 'Invalid credentials.', model=error_model(auth_api))
    @auth_api.response(200, 'Authentication is approved', model=auth_token_resp_model(auth_api))
    def post(self, study_name):
        """
        authenticate a user
        """
        username = request.get_json().get('username', None)
        password = request.get_json().get('password', None)
        if not username or not password:
            return {"message": "User name and password cannot be empty."}, 401
        login_status = CC.get_or_create_instance(study_name=study_name).connect(username, password, encrypt_password=True)
        if login_status.get("status", False) == False:
            # Authentication failed; forward the backend's message if present.
            return {"message": login_status.get("msg", "no-message-available")}, 401
        # Successful login: return the JWT plus the user's UUID.
        token = login_status.get("auth_token")
        user_uuid = CC.get_or_create_instance(study_name=study_name).get_user_id(username)
        access_token = {"auth_token": token, 'user_uuid': user_uuid}
        return access_token, 200
@auth_api.route('/<study_name>/config')
class Auth(Resource):
    @auth_api.doc('')
    @auth_required
    @auth_api.header("Authorization", 'Bearer <JWT>', required=True)
    @auth_api.response(400, 'Authorization code cannot be empty.', model=error_model(auth_api))
    @auth_api.response(401, 'Invalid credentials.', model=error_model(auth_api))
    @auth_api.response(200, 'Request successful', model=user_settings_resp_model(auth_api))
    def get(self, study_name):
        '''Return the stored user settings for the authenticated user of a study.'''
        # Strip the "Bearer " scheme prefix to obtain the raw JWT.
        token = request.headers['Authorization']
        token = token.replace("Bearer ", "")
        try:
            user_settings = CC.get_or_create_instance(study_name=study_name).get_user_settings(auth_token=token)
            return {"user_settings": json.dumps(user_settings)}
        except Exception as e:
            # FIX: was `{"message", str(e)}` — a SET literal, not a dict — so
            # the error body could not serialize like the other endpoints.
            return {"message": str(e)}, 400
|
python
|
import pytesseract
import jiwer
from PIL import Image
from os import listdir
from os.path import join, isfile

# Directory containing .png page images with matching .gt.txt ground truth.
TEST_PATH = '/train/tesstrain/data/storysquad-ground-truth'

extractions = []
ground_truths = []
count = 0
# OCR at most 100 images, pairing each extraction with its ground-truth text.
for file_name in listdir(TEST_PATH):
    file_path = join(TEST_PATH, file_name)
    if count < 100 and file_path.endswith(".png") and isfile(file_path):
        extraction = pytesseract.image_to_string(
            Image.open(file_path),
            lang='kaggle',
            config='--tessdata-dir "/train/tessdata"'  # set in top level Dockerfile on L72
        )
        ground_truth = None
        ground_truth_path = file_path.replace(".png", ".gt.txt")
        if isfile(ground_truth_path):
            with open(ground_truth_path, mode='r') as f:
                ground_truth = f.read()
        # NOTE(review): when no .gt.txt exists, ground_truth stays None and is
        # still appended — jiwer would receive a None reference; confirm the
        # dataset always has matching ground-truth files.
        extractions.append(extraction)
        ground_truths.append(ground_truth)
        count += 1
    else:
        continue

# Corpus-level word and character error rates over all processed pages.
word_error_rate = jiwer.wer(
    ground_truths,
    extractions,
)
print(f"Model had word error rate of {100 * word_error_rate}%")
char_error_rate = jiwer.cer(
    ground_truths,
    extractions,
)
print(f"Model had char error rate of {100 *char_error_rate}%")
|
python
|
from typing import (
IO,
Any,
Iterable,
Sequence,
Tuple,
)
from eth_utils import (
ValidationError,
to_tuple,
)
from eth_utils.toolz import (
sliding_window,
)
from ssz.exceptions import (
DeserializationError,
SerializationError,
)
from ssz.sedes.base import (
CompositeSedes,
TSedes,
)
from ssz.utils import (
merkleize,
read_exact,
s_decode_offset,
)
@to_tuple
def _deserialize_fixed_size_items_and_offsets(stream, field_sedes):
    """Yield one (decoded value or offset, sedes) pair per field.

    Fixed-size fields are decoded in place; variable-size fields contribute
    the offset read from the fixed-size head instead.
    """
    for sedes in field_sedes:
        if not sedes.is_fixed_sized:
            yield s_decode_offset(stream), sedes
            continue
        raw = read_exact(sedes.get_fixed_size(), stream)
        yield sedes.deserialize(raw), sedes
class Container(CompositeSedes[Sequence[Any], Tuple[Any, ...]]):
    """SSZ sedes for a container of heterogeneous, ordered fields."""

    def __init__(self, field_sedes: Sequence[TSedes]) -> None:
        if len(field_sedes) == 0:
            raise ValidationError("Cannot define container without any fields")
        self.field_sedes = tuple(field_sedes)

    #
    # Size
    #
    @property
    def is_fixed_sized(self) -> bool:
        # A container is fixed-size only when every field is fixed-size.
        return all(field.is_fixed_sized for field in self.field_sedes)

    def get_fixed_size(self) -> int:
        if not self.is_fixed_sized:
            raise ValueError("Container contains dynamically sized elements")
        return sum(field.get_fixed_size() for field in self.field_sedes)

    #
    # Serialization
    #
    def _get_item_sedes_pairs(self,
                              value: Sequence[Any],
                              ) -> Tuple[Tuple[Any, TSedes], ...]:
        # Pair each element with the sedes that serializes it, in field order.
        return tuple(zip(value, self.field_sedes))

    def _validate_serializable(self, value: Sequence[Any]) -> None:
        # Raises on a field-count mismatch; returns nothing on success.
        if len(value) != len(self.field_sedes):
            raise SerializationError(
                f"Incorrect element count: Expected: {len(self.field_sedes)} / Got: {len(value)}"
            )

    #
    # Deserialization
    #
    def deserialize_fixed_size_parts(self,
                                     stream: IO[bytes],
                                     ) -> Tuple[Tuple[Any, ...], Tuple[Tuple[int, TSedes], ...]]:
        """Split the fixed-size head into decoded values and (offset, sedes) pairs."""
        fixed_items_and_offets = _deserialize_fixed_size_items_and_offsets(
            stream,
            self.field_sedes,
        )
        fixed_size_values = tuple(
            item
            for item, sedes
            in fixed_items_and_offets
            if sedes.is_fixed_sized
        )
        offset_pairs = tuple(
            (item, sedes)
            for item, sedes
            in fixed_items_and_offets
            if not sedes.is_fixed_sized
        )
        return fixed_size_values, offset_pairs

    @to_tuple
    def deserialize_variable_size_parts(self,
                                        offset_pairs: Tuple[Tuple[int, TSedes], ...],
                                        stream: IO[bytes]) -> Iterable[Any]:
        """Decode the variable-size tail; each field's length is the delta
        between consecutive offsets (the last field runs to end of stream)."""
        offsets, fields = zip(*offset_pairs)
        *head_fields, last_field = fields
        for sedes, (left_offset, right_offset) in zip(head_fields, sliding_window(2, offsets)):
            field_length = right_offset - left_offset
            field_data = read_exact(field_length, stream)
            yield sedes.deserialize(field_data)
        # simply reading to the end of the current stream gives us all of the final element data
        final_field_data = stream.read()
        yield last_field.deserialize(final_field_data)

    def _deserialize_stream(self, stream: IO[bytes]) -> Tuple[Any, ...]:
        if not self.field_sedes:
            # TODO: likely remove once
            # https://github.com/ethereum/eth2.0-specs/issues/854 is resolved
            return tuple()
        fixed_size_values, offset_pairs = self.deserialize_fixed_size_parts(stream)
        if not offset_pairs:
            return fixed_size_values
        variable_size_values = self.deserialize_variable_size_parts(offset_pairs, stream)
        # Interleave fixed- and variable-size values back into field order.
        fixed_size_parts_iter = iter(fixed_size_values)
        variable_size_parts_iter = iter(variable_size_values)
        value = tuple(
            next(fixed_size_parts_iter) if sedes.is_fixed_sized else next(variable_size_parts_iter)
            for sedes
            in self.field_sedes
        )
        # Verify that both iterables have been fully consumed.
        try:
            next(fixed_size_parts_iter)
        except StopIteration:
            pass
        else:
            raise DeserializationError("Did not consume all fixed size values")
        try:
            next(variable_size_parts_iter)
        except StopIteration:
            pass
        else:
            raise DeserializationError("Did not consume all variable size values")
        return value

    #
    # Tree hashing
    #
    def hash_tree_root(self, value: Tuple[Any, ...]) -> bytes:
        # Merkleize the per-field tree roots to get the container root.
        merkle_leaves = tuple(
            sedes.hash_tree_root(element)
            for element, sedes in zip(value, self.field_sedes)
        )
        return merkleize(merkle_leaves)
|
python
|
import pytest
from lendingblock.const import Side, OrderType, Ccy
@pytest.fixture
async def wallets_org_id(lb, org_id):
    """Create a BTC, ETH and LND wallet for *org_id* and return the org id.

    NOTE(review): the wallet address is synthesized as ``<org_id><ccy>`` --
    presumably the test API accepts arbitrary address strings; confirm.
    """
    for ccy in Ccy.BTC.name, Ccy.ETH.name, Ccy.LND.name:
        await lb.execute(
            f'organizations/{org_id}/wallets',
            'POST',
            json={
                'address': f'{org_id}{ccy}',
                'currency': ccy,
            }
        )
    return org_id
@pytest.fixture
async def order_id(lb, wallets_org_id):
    """Create one limit lend order (10 BTC @ 2.0, 1-day tenor); return its id.

    Depends on wallets_org_id so the organization has wallets before ordering.
    """
    order_data = {
        'org_id': wallets_org_id,
        'type': OrderType.limit.name,
        'side': Side.lend.name,
        'tenor': '1d',
        'amount': 10.0,
        'currency': Ccy.BTC.name,
        'price': 2.0,
    }
    order = await lb.execute('orders', 'POST', json=order_data)
    return order['id']
async def test_create(lb, wallets_org_id):
    """Creating an order via the client API returns a payload with an ``id``."""
    payload = dict(
        org_id=wallets_org_id,
        type=OrderType.limit.name,
        side=Side.lend.name,
        tenor='1d',
        amount=10.0,
        currency=Ccy.BTC.name,
        price=2.0,
    )
    created = await lb.orders.create(payload)
    assert 'id' in created
async def test_get(lb, order_id):
    """An order created by the fixture can be fetched back by its id."""
    fetched = await lb.orders.get(order_id)
    assert fetched['id'] == order_id
async def test_get_list(lb, order_id):
    """The order listing contains the freshly created order."""
    orders = await lb.orders.get_list()
    listed_ids = [entry['id'] for entry in orders]
    assert order_id in listed_ids
async def test_delete(lb, order_id):
    """A deleted order disappears from the raw orders listing."""
    await lb.orders.delete(order_id)
    remaining = await lb.execute('orders')
    assert order_id not in remaining
|
python
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import tensorflow as tf
import scipy
import cPickle
import os
import glob
import random
import imageio
import scipy.misc as misc
# Session / hyper-parameter configuration.
log_device_placement = True
allow_soft_placement = True
gpu_options = 0.9  # NOTE(review): presumably the per-process GPU memory fraction for multi-gpu runs -- confirm
batch_size = 50
image_shape = [28*28]  # flattened 28x28 images
z_dim = 30  # latent space representation z proposed in the paper
gf_dim = 16  # generator base filter count
df_dim = 16  # discriminator base filter count
lr = 0.005  # learning rate
beta1 = 0.5  # Adam-style first-moment decay (by convention; optimizer not visible here)
def batch_norm(x, is_training, epsilon=1e-5, decay=0.9, scope="batch_norm"):
    """Batch normalization via tf.contrib.layers.batch_norm.

    NOTE(review): an identical ``batch_norm`` is re-defined near the bottom
    of this module; that later definition shadows this one at import time.
    """
    out = tf.contrib.layers.batch_norm(x, decay=decay, updates_collections=None, epsilon=epsilon,
                        scale=True, is_training=is_training, scope=scope)
    return out
def conv(x, filter_size, stride_width, stride_height, feature_in, feature_out, scope="conv2d",log_device_placement=True):
    """2-D convolution (square kernel, SAME padding) plus bias.

    Args:
        x: input tensor -- assumed NHWC given the [1, sw, sh, 1] strides; confirm.
        filter_size: spatial kernel size.
        stride_width, stride_height: spatial strides.
        feature_in: number of input channels.
        feature_out: number of output channels.
        scope: variable scope the weights are created under.
        log_device_placement: unused -- kept only for call compatibility.
    """
    with tf.variable_scope(scope):
        w = tf.get_variable("w", [filter_size, filter_size, feature_in, feature_out],
                            initializer=tf.truncated_normal_initializer(stddev=0.02))
        b = tf.get_variable("b", [feature_out], initializer=tf.constant_initializer(0.0))
        conv = tf.nn.conv2d(x, w, strides=[1, stride_width, stride_height, 1], padding='SAME') + b
        return conv
def deconv(x, filter_size, stride_width, stride_height, feature_out, scope="deconv2d",log_device_placement=True):
    """Transposed 2-D convolution (a.k.a. deconvolution) plus bias.

    Args:
        x: input tensor.
        filter_size: spatial kernel size.
        stride_width, stride_height: spatial strides.
        feature_out: full output shape list; feature_out[-1] is the channel count.
        scope: variable scope the weights are created under.
        log_device_placement: unused -- kept only for call compatibility.
    """
    with tf.variable_scope(scope):
        w = tf.get_variable("w", [filter_size, filter_size, feature_out[-1], x.get_shape()[-1]],
                            initializer=tf.truncated_normal_initializer(stddev=0.02))
        # Bug fix: ``tf.constant_intializer`` (typo) raised AttributeError at
        # graph-construction time -- corrected to tf.constant_initializer.
        b = tf.get_variable("b", [feature_out[-1]], initializer=tf.constant_initializer(0.0))
        deconv = tf.nn.conv2d_transpose(x, w, strides=[1, stride_width, stride_height, 1], output_shape=feature_out) + b
        return deconv
def leakyrelu(x, leak=0.2, name='lrelu'):
    """Leaky ReLU written as a linear combination of x and |x|.

    0.5*(1+leak)*x + 0.5*(1-leak)*|x| equals x for x >= 0 and leak*x otherwise.
    """
    with tf.variable_scope(name):
        pos_coef = 0.5 * (1 + leak)
        neg_coef = 0.5 * (1 - leak)
        return pos_coef * x + neg_coef * abs(x)
def fc_layer(x, feature_in, feature_out, scope=None, with_w = False):
    """Fully connected layer: y = x @ W + b.

    Returns the output tensor, or (output, weights, bias) when with_w is true.
    """
    with tf.variable_scope(scope or "Linear"):
        weights = tf.get_variable("weights", shape=[feature_in, feature_out], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.02))
        bias = tf.get_variable("bias", shape=[feature_out], dtype=tf.float32,
                               initializer=tf.constant_initializer(0.0))
        out = tf.matmul(x, weights) + bias
        if with_w:
            return out, weights, bias
        return out
def init_embedding(size, dimension, stddev=0.01, scope="Embedding"):
    """Create a trainable embedding variable of shape [size, 1, 1, dimension]."""
    with tf.variable_scope(scope):
        return tf.get_variable("E", shape=[size, 1, 1, dimension], dtype=tf.float32,
                               initializer=tf.truncated_normal_initializer(stddev=stddev))
def merge(image, size):
    """Tile a batch of images into one size[0] x size[1] grid image.

    Bug fixes versus the original: dimensions are read from ``image.shape``
    (not by indexing the batch), ``np.zeros`` receives a shape tuple, and the
    grid row/column are derived from the enumeration index with integer
    arithmetic (the original unpacked the image itself as an index and used
    float division on a non-existent ``size[2]``).

    Args:
        image: array-like of shape (batch, height, width, channels).
        size: (rows, cols) of the output grid; rows*cols should cover batch.

    Returns:
        np.ndarray of shape (height*rows, width*cols, channels).
    """
    batch = np.asarray(image)
    height, width, channel = batch.shape[1], batch.shape[2], batch.shape[3]
    img = np.zeros((height * size[0], width * size[1], channel))
    for idx, single in enumerate(batch):
        col = idx % size[1]
        row = idx // size[1]
        img[row * height:(row + 1) * height, col * width:(col + 1) * width, :] = single
    return img
def image_norm(image):
    """Scale pixel values from [0, 255] to [-1, 1].

    Bug fix: the original computed the normalized value but returned the
    untouched input, so callers never received normalized data.
    """
    return (image / 127.5) - 1
#def dense_batch_norm(x, number_out, phase_train, name='bn'): #BN necessary?
#beta = tf.get_variable(name + '/fc_beta', shape=[number_out], initializer=tf.constant_initializer(0.0))
#gamma = tf.get_variable(name + 'fc_gamma', shape=[number_out], initializer=tf.random_normal_initializer(mean=1.0, stddev=0.02))
#batch_mean, batch_var = tf.nn.moments(x, [0], name=name + '/fc_moments')
#ema = tf.train.ExponentialMovingAverage(decay=0.9)
#def mean_var_update():
# ema_apply_op = ema.apply([batch_mean, batch_var])
# with tf.control_dependencies(ema_apply_op):
# return tf.identity(batch_mean), tf.identity(batch_var)
#mean ,var = tf.cond(name=phase_train, mean_var_update, lambda: (ema.average(batch_mean), ema.average(batch_var)))
#normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-5)
#return normed
#def global_batch_norm(x, number_out, phase_train, name='bn'): #BN necessary?
#beta = tf.get_variable(name + '/beta', shape=[number_out], initializer=tf.constant_initializer(0.0))
#gamma = tf.get_variable(name + '/gamma', shape=[number_out], initializer=tf.random_normal_initializer(mean=1.0, stddev=0.02))
#batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name=name + '/moments')
#ema = tf.train.ExponentialMovingAverage(decay=0.9)
#def mean_var_update():
# ema_apply_op = ema.apply([batch_mean, batch_var])
# with tf.control_dependencies(ema_apply_op):
# return tf.identity(batch_mean), tf.identity(batch_var)
#mean, var = tf.cond(name=phase_train, mean_var_update, lambda: (ema.average(batch_mean), ema.average(batch_var)))
#normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-5)
#return normed
def mini_batch_dis(x, num_kernels=100, dim_kernel=5, init=False, name='MD'):
    """Minibatch discrimination: append cross-sample similarity features to x
    so the discriminator can penalize mode collapse.

    Args:
        x: discriminator features; reshaped to [batch_size, df_dim*4].
        num_kernels, dim_kernel: size of the learned projection tensor.
        init: unused -- kept for call compatibility.
        name: variable-name prefix.
    """
    num_inputs = df_dim*4
    theta = tf.get_variable(name+'/theta', [num_inputs, num_kernels, dim_kernel], initializer=tf.random_normal_initializer(stddev=0.05))
    log_weight_scale = tf.get_variable(name+'/lws', [num_kernels, dim_kernel], initializer=tf.constant_initializer(0.0))
    W = tf.matmul(theta, tf.expand_dims(tf.exp(log_weight_scale)/tf.sqrt(tf.reduce_sum(tf.square(theta),0)), 0))
    W = tf.reshape(W,[-1, num_kernels*dim_kernel])
    x = tf.reshape(x, [batch_size, num_inputs])
    ac = tf.reshape(tf.matmul(x, W), [-1, num_kernels, dim_kernel])
    # Bug fix: tf.sub was removed in TensorFlow 1.0 -- use tf.subtract.
    # (Also dropped an unused normalized-similarity local that the original
    # computed but never returned.)
    diff = tf.matmul(tf.reduce_sum(tf.abs(tf.subtract(tf.expand_dims(ac, 3), tf.expand_dims(tf.transpose(ac, [1, 2, 0]),0))), 2),
                     1-tf.expand_dims(tf.constant(np.eye(batch_size), dtype=np.float32), 1))
    return tf.concat([x, diff], 1)
def conv2d(x, output_filters, kh=5, kw=5, sh=2, sw=2, stddev=0.02, scope="conv2d"):
    """2-D convolution (kh x kw kernel, sh x sw stride, SAME padding) plus bias."""
    with tf.variable_scope(scope):
        shape = x.get_shape().as_list()
        W = tf.get_variable('W', [kh, kw, shape[-1], output_filters],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        #print(W.shape) (5, 5, 3, 64)
        b = tf.get_variable('b', [output_filters], initializer=tf.constant_initializer(0.0))
        W_conv = tf.nn.conv2d(x, W, strides=[1, sh, sw, 1], padding='SAME')
        # The reshape re-attaches the conv output's static shape after bias_add.
        return tf.reshape(tf.nn.bias_add(W_conv, b), W_conv.get_shape())#reshape depends
def deconv2d(x, output_shape, kh=5, kw=5, sh=2, sw=2, stddev=0.02, scope="deconv2d"):
    """Transposed 2-D convolution producing ``output_shape``, plus bias."""
    with tf.variable_scope(scope):
        input_shape = x.get_shape().as_list()
        w = tf.get_variable('w', [kh, kw, output_shape[-1], input_shape[-1]],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        b = tf.get_variable('b', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
        w_deconv = tf.nn.conv2d_transpose(x, w, output_shape=output_shape, strides=[1, sh, sw, 1])
        # Re-attach the static shape after bias_add, mirroring conv2d above.
        return tf.reshape(tf.nn.bias_add(w_deconv, b), w_deconv.get_shape())
def batch_norm(x, is_training, epsilon=1e-5, decay=0.9, scope="batch_norm"):
    """Batch normalization via tf.contrib.layers.batch_norm.

    NOTE(review): duplicate of the ``batch_norm`` defined earlier in this
    module; this later definition is the one that wins at import time.
    """
    return tf.contrib.layers.batch_norm(x, decay=decay, updates_collections=None, epsilon=epsilon,
                                        scale=True, is_training=is_training, scope=scope)
#----------------------unit-test for conv&deconv
# Ad-hoc smoke test: read one PNG from disk, resize it to 256x256, then run it
# through a stack of 8 strided conv downsamples followed by 8 transposed-conv
# upsamples (encoder/decoder shapes annotated inline).
reader = tf.WholeFileReader()
directory = tf.train.string_input_producer(['/home/linkwong/Zeroshot-GAN/model/image.png'])
key, value = reader.read(directory)
image_tensor = tf.image.decode_png(value)
# NOTE(review): this initializer is created BEFORE the conv/deconv variables
# below exist, so sess.run(initialize) cannot initialize them -- confirm
# whether this script is expected to actually evaluate the stacked ops.
initialize = tf.global_variables_initializer()
generator_dim = 64
discriminator_dim = 64
output_width = 256
with tf.Session() as sess:
    sess.run(initialize)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    for i in range(1):
        image = image_tensor.eval()
    image = tf.image.resize_images(image, [256, 256]) #resize the image into 256*256
    print(image.shape)
    image_ten = tf.convert_to_tensor(image, tf.float32) #convert the image into tensor
    print(image_ten.shape)
    coord.request_stop()
    coord.join(threads)
image_ten = tf.expand_dims(image_ten, 0) #(1, 256, 256, 3)
image_conv_1 = conv2d(image_ten, generator_dim, scope="conv_1") #(1, 128, 128, 64)
image_conv_2 = conv2d(image_conv_1, generator_dim*2, scope="conv_2")#(1, 64, 64, 128)
image_conv_3 = conv2d(image_conv_2, generator_dim*4, scope="conv_3")#(1, 32, 32, 256)
image_conv_4 = conv2d(image_conv_3, generator_dim*8, scope="conv_4")#(1, 16, 16, 512)
image_conv_5 = conv2d(image_conv_4, generator_dim*8, scope="conv_5")#(1, 8, 8, 512)
image_conv_6 = conv2d(image_conv_5, generator_dim*8, scope="conv_6")#(1, 4, 4, 512)
image_conv_7 = conv2d(image_conv_6, generator_dim*8, scope="conv_7")#(1, 2, 2, 512)
image_conv_8 = conv2d(image_conv_7, generator_dim*8, scope="conv_8")#(1, 1, 1, 512)
#print(image_conv_8.shape)
image_deconv_8 = deconv2d(image_conv_8, [1, 2, 2, generator_dim*8], scope="deconv_8")#(1, 2, 2, 512)
image_deconv_7 = deconv2d(image_deconv_8, [1, 4, 4, generator_dim*8], scope="deconv_7")#(1, 4, 4, 512)
image_deconv_6 = deconv2d(image_deconv_7, [1, 8, 8, generator_dim*8], scope="deconv_6")#(1, 8, 8, 512)
image_deconv_5 = deconv2d(image_deconv_6, [1, 16, 16, generator_dim*8], scope="deconv_5")#(1, 16, 16, 512)
image_deconv_4 = deconv2d(image_deconv_5, [1, 32, 32, generator_dim*4], scope="deconv_4")#(1, 32, 32, 256)
image_deconv_3 = deconv2d(image_deconv_4, [1, 64, 64, generator_dim*2], scope="deconv_3")#(1, 64, 64, 128)
image_deconv_2 = deconv2d(image_deconv_3, [1, 128, 128, generator_dim], scope="deconv_2")#(1, 128, 128, 64)
image_deconv_1 = deconv2d(image_deconv_2, [1, 256, 256, 3], scope="deconv_1")
#print(image_deconv_1.shape)
|
python
|
from flask_wtf import FlaskForm
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError
from wtforms import TextField, TextAreaField, SubmitField, validators, ValidationError,StringField, PasswordField, SubmitField, BooleanField
class LoginForm(FlaskForm):
    """Login form: required email + password, with a submit button.

    Field attribute names double as the HTML input names rendered by WTForms.
    """
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    submit = SubmitField('Login')
|
python
|
#
# Copyright (c) 2020 Carsten Igel.
#
# This file is part of puckdb
# (see https://github.com/carstencodes/puckdb).
#
# License: 3-clause BSD, see https://opensource.org/licenses/BSD-3-Clause
#
import unittest
import tempfile
import os
import time
import puckdb
class BasicTest(unittest.TestCase):
    """Smoke test: a value written by one PuckDB instance is readable by another."""
    def test_no_crash(self):
        """Write 'test' -> 1, wait for the background worker, read it back."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            file: str = os.path.join(str(tmp_dir), "test.db")
            # NOTE(review): the two positional booleans to PuckDB are opaque
            # here -- presumably create/flush flags; confirm against puckdb docs.
            db = puckdb.PuckDB(file, True, False)
            db.set("test", 1)
            time.sleep(2)  # Wait for worker to complete (fixed delay -- inherently timing-sensitive)
            db2 = puckdb.PuckDB(file, False, False)
            value = db2.get("test")
            print(db2.getall())
            self.assertEqual(value, 1)
if __name__ == "__main__":
    unittest.main()
|
python
|
#!/usr/bin/env python
"""
Inherits the stuff from tests.csvk – i.e. csvkit.tests.utils
"""
from tests.csvk import *
from tests.csvk import CSVKitTestCase as BaseCsvkitTestCase
import unittest
from unittest.mock import patch
from unittest import skip as skiptest
from unittest import TestCase
import warnings
from io import StringIO
from parameterized import parameterized
from subprocess import Popen, PIPE # soon to be deprecated
from subprocess import check_output as sub_check_output
import sys
from typing import List as ListType, Optional as OptionalType
from csvmedkit import agate
from csvmedkit.exceptions import *
warnings.filterwarnings("ignore", category=DeprecationWarning)
class CmkTestCase(BaseCsvkitTestCase):
    """Shared helpers for csvmedkit CLI tests: run shell commands (optionally
    piped) and compare their stdout line-by-line against expected rows."""
    def cmd_output(self, command: str) -> str:
        """Run *command* through the shell and return stdout decoded as UTF-8."""
        output = sub_check_output(command, shell=True, stderr=sys.stderr)
        return output.decode("utf-8")
    def assertCmdLines(self, command: str, rows, newline_at_eof=True):
        """Assert that *command*'s stdout equals *rows*, line for line.

        NOTE(review): mutates the caller's *rows* list when newline_at_eof is
        True (appends "" to account for the trailing newline).
        """
        lines = self.cmd_output(command).split("\n")
        if newline_at_eof:
            rows.append("")
        for i, row in enumerate(rows):
            self.assertEqual(lines[i], row)
        self.assertEqual(len(lines), len(rows))
    # TODO: probably will deprecate pipe_output and assertPipedLines for being too
    # clunky for my tastes
    def pipe_output(self, commands: ListType[ListType[str]]) -> OptionalType[str]:
        """
        Run *commands* as a shell-style pipeline and return the final stdout.

        each command is a list of strings, representing a command and argument, e.g.
            ['head', '-n', '5', 'examples/dummy.csv'],
            ['csvflatten', '-P'],
        """
        output = None  # StringIO()
        cmdcount = len(commands)
        pipes = []
        for i, cmd in enumerate(commands, 1):
            if i == 1:
                p = Popen(cmd, stdout=PIPE)
            elif i == cmdcount:
                pass  # manually instantiate last command with context manager
            # NOTE(review): on the i == cmdcount iteration the append below
            # re-appends the previous process object; harmless since wait()
            # and stdout.close() are idempotent, but worth confirming intent.
            else:
                p = Popen(cmd, stdin=pipes[-1].stdout, stdout=PIPE)
            pipes.append(p)
        with Popen(commands[-1], stdin=pipes[-1].stdout, stdout=PIPE) as foo:
            output = foo.communicate()[0].decode("utf-8")
            foo.kill()
        # pipes[0].stdout.close()
        for p in pipes:
            p.wait()
            p.stdout.close()
            # p.kill()
        return output
    def pipe_output_as_list(self, commands) -> ListType[str]:
        """Like pipe_output, but split the final stdout into lines."""
        return self.pipe_output(commands).split("\n")
    def assertPipedLines(self, commands, rows, newline_at_eof=True):
        """Pipeline analogue of assertCmdLines (same rows-mutation caveat)."""
        lines = self.pipe_output_as_list(commands)
        if newline_at_eof:
            rows.append("")
        for i, row in enumerate(rows):
            self.assertEqual(lines[i], row)
        self.assertEqual(len(lines), len(rows))
|
python
|
from src.gui.alert import alert
def show_statistics(app):
    """Creates an alert that displays all statistics of the user

    Args:
        app (rumps.App): The App object of the main app
    """
    # Bug fix: the original unpacked (k, i) pairs from ``.values()``, which
    # yields values only and would raise at runtime; use ``.items()`` so each
    # line reads "<name> <value>".
    # NOTE(review): assumes ``app.statistics`` is a dict -- confirm.
    message_string = "\n".join(f"{k} {v}" for k, v in app.statistics.items())
    alert(
        title="Statistics",
        message=message_string,
    )
|
python
|
#!/usr/bin/env ipython
import numpy as np
import ipdb
import matplotlib.pyplot as plt
import seaborn as sns
import derived_results
import results_utils
from results_utils import ExperimentIdentifier
plt.style.use('ggplot')
def run_checks(cfg_name, model, diffinit, data_privacy='all', convergence_point=None):
    """Run consistency checks over stored experiment results.

    Checks that (1) no experiment stops before the convergence point and
    (2) runs sharing a seed share the same initialisation.

    Returns:
        'Pass' if both checks succeed, otherwise 'Fail'.
    """
    fail = False
    if convergence_point is None:
        print('Computing convergence point...')
        metric = 'binary_crossentropy' # TODO make work for multiclass
        convergence_point, _ = derived_results.find_convergence_point(cfg_name, model, diffinit, tolerance=3, metric=metric, data_privacy=data_privacy)
        print('convergence point:', convergence_point)
    print('Checking for incomplete experiments...')
    incomplete = check_for_incomplete_experiments(cfg_name, model, t=convergence_point, diffinit=diffinit, data_privacy=data_privacy)
    # check_for_incomplete_experiments returns True when all runs are complete,
    # otherwise a list of (replace_index, seed) pairs.
    # NOTE(review): "expeirments" typo below is in the runtime output strings;
    # left as-is here since a doc-only pass must not alter printed text.
    if incomplete is True:
        print('[debug] Passed check for incomplete expeirments')
    else:
        print('[debug] Failed check for incomplete expeirments')
        fail = True
        print(incomplete)
    # make sure the same seed always has the same initialisation
    print('Checking for initialisation violations...')
    init_violations = check_for_different_initialisations_with_same_seed(cfg_name, model, diffinit=diffinit)
    if init_violations is True:
        print('[debug] Passed check for different initialisations')
    else:
        print('[debug] Failed check for different initialisations')
        fail = True
    if fail:
        result = 'Fail'
    else:
        result = 'Pass'
    return result
def check_for_incomplete_experiments(cfg_name, model, t=5, diffinit=True, data_privacy='all'):
    """
    find experiments where data does not reach time t
    if t is None, we're just looking for experiments where the file is empty

    Returns:
        True when every experiment reaches iteration t; otherwise a list of
        (replace_index, seed) pairs for the incomplete runs.
    """
    exp_df = results_utils.get_available_results(cfg_name, model, diffinit=diffinit, data_privacy=data_privacy)
    print('Found', exp_df.shape[0], 'experiments!')
    incomplete = []
    for i, row in exp_df.iterrows():
        replace_index = row['replace']
        seed = row['seed']
        exp = ExperimentIdentifier(cfg_name, model, replace_index=replace_index, seed=seed, diffinit=diffinit, data_privacy=data_privacy)
        if not exp.exists():
            print(f'WARNING: Experiment {exp.path_stub()} doesn\'t exist?')
            continue
        loss = exp.load_loss(verbose=False)
        # NOTE(review): contrary to the docstring, t=None would raise a
        # TypeError in this comparison -- confirm intended usage.
        if np.nanmax(loss['t']) < t:
            incomplete.append((replace_index, seed))
    if len(incomplete) == 0:
        print('Found no issues')
        return True
    else:
        print('Found', len(incomplete), 'incomplete experiments')
        return incomplete
def check_for_different_initialisations_with_same_seed(cfg_name, model, diffinit=True):
    """
    same seed should always imply same initialisation

    Returns:
        True when every experiment sharing a seed also shares its initial
        weights; otherwise a list of violating identifier-sets.
    """
    exp_df = results_utils.get_available_results(cfg_name, model, diffinit=diffinit, data_privacy='all')
    # Shuffle the rows so violations aren't masked by file ordering.
    exp_df = exp_df.iloc[np.random.permutation(exp_df.shape[0]), :]
    if diffinit:
        seeds = set(exp_df['seed'].unique())
        print('Found seeds:', seeds)
    else:
        # With fixed init, every run shares a single logical seed bucket.
        seeds = set(['any'])
    # seed -> ([sets of identifiers per distinct init], [the distinct inits])
    seed_weights = dict()
    violations = []
    for i, row in exp_df.iterrows():
        replace_index = row['replace']
        seed_identifier = row['seed']
        exp = ExperimentIdentifier(cfg_name, model, replace_index=replace_index,
                                   seed=seed_identifier, diffinit=diffinit,
                                   data_privacy='all')
        identifier = exp.path_stub()
        if not exp.exists():
            print(f'WARNING: Experiment {identifier} doesn\'t exist - skipping')
            continue
        if diffinit:
            seed = seed_identifier
        else:
            seed = 'any'
        try:
            assert seed in seeds
        except AssertionError:
            # NOTE(review): drops into an interactive debugger on failure --
            # acceptable for a hand-run checking script, hostile in automation.
            ipdb.set_trace()
        # only care about starting weights
        weights = exp.load_weights(iter_range=(0, 0), verbose=False, sort=False)
        if seed in seed_weights:
            known_weights = seed_weights[seed][1]
            # NOTE(review): this ``i`` shadows the outer iterrows() index;
            # harmless here since the outer value isn't used afterwards.
            for i, w in enumerate(known_weights):
                if np.array_equal(weights, w):
                    seed_weights[seed][0][i].add(identifier)
                    # stop iterating
                    break
            else:
                print('WARNING! Found new initial setting in experiment', identifier, 'for seed', seed_identifier)
                violations.append(set([identifier]))
                seed_weights[seed][0].append(set([identifier]))
                seed_weights[seed][1].append(weights)
        else:
            print('First instance of weights for seed', seed_identifier, 'in experiment', identifier)
            seed_weights[seed] = ([set([identifier])], [weights])
    if len(violations) > 0:
        print('Violations found')
        return violations
    else:
        print('all experiments with the same seed have the same initialisation')
        return True
def _collect_vali_losses(cfg_name: str, model: str, diffinit: bool, t: int, metric: str) -> list:
    """Collect the validation-set loss at iteration t for every available run."""
    exp_df = results_utils.get_available_results(cfg_name, model, diffinit=diffinit, data_privacy='all')
    losses = []
    for idx, row in exp_df.iterrows():
        exp = ExperimentIdentifier(cfg_name, model, replace_index=row['replace'],
                                   seed=row['seed'], diffinit=diffinit,
                                   data_privacy='all')
        loss = exp.load_loss(iter_range=(t, t+1), verbose=False)
        try:
            losses.append(float(loss.loc[loss['minibatch_id'] == 'VALI'][metric]))
        except ValueError:
            # Rows whose metric cell isn't a single convertible value are skipped.
            print(f'skipping {row} due to can\'t convert to float')
    return losses
def compare_loss_with_without_diffinit(cfg_name: str, model: str, t: int = 2000):
    """Plot the distribution of validation losses at iteration t for runs with
    differing vs fixed initialisation; saves to '{cfg_name}_losses.png'.

    Refactor: the two copy-pasted collection loops of the original are now a
    single private helper, called once per initialisation mode.
    """
    if 'cifar10' in cfg_name:
        metric = 'ce'
    else:
        metric = 'binary_crossentropy'
    loss_diff = _collect_vali_losses(cfg_name, model, diffinit=True, t=t, metric=metric)
    loss_fix = _collect_vali_losses(cfg_name, model, diffinit=False, t=t, metric=metric)
    fig, axarr = plt.subplots(nrows=1, ncols=1)
    sns.distplot(loss_fix, label='fixed init', ax=axarr)
    sns.distplot(loss_diff, label='diff init', ax=axarr)
    axarr.set_xlabel('loss')
    axarr.legend()
    plt.savefig(f'{cfg_name}_losses.png')
    plt.clf()
    plt.close()
def compare_learning_curves(cfg_name: str, model: str):
    """Plot mean +/- std training (top) and validation (bottom) learning curves
    for differing vs fixed initialisation; saves '{cfg_name}_learning_curves.png'."""
    agg_diff = derived_results.AggregatedLoss(cfg_name, model, 'all').load(diffinit=True, generate_if_needed=True)
    agg_fix = derived_results.AggregatedLoss(cfg_name, model, 'all').load(diffinit=False, generate_if_needed=True)
    # Metric naming follows the convention used elsewhere in this module.
    if 'cifar10' in cfg_name:
        metric = 'ce'
    else:
        metric = 'binary_crossentropy'
    print(agg_diff.head())
    fig, axarr = plt.subplots(nrows=2, ncols=1)
    # Top panel: training curves; shaded band is +/- one std across runs.
    axarr[0].plot(agg_diff.index, agg_diff[f'{metric}_mean_train'], label='diff init', color='blue')
    axarr[0].fill_between(agg_diff.index, agg_diff[f'{metric}_mean_train'] - agg_diff[f'{metric}_std_train'], agg_diff[f'{metric}_mean_train'] + agg_diff[f'{metric}_std_train'], alpha=0.1, color='blue', label='_nolegend_')
    axarr[0].plot(agg_fix.index, agg_fix[f'{metric}_mean_train'], label='fixed init', color='green')
    axarr[0].fill_between(agg_fix.index, agg_fix[f'{metric}_mean_train'] - agg_fix[f'{metric}_std_train'], agg_fix[f'{metric}_mean_train'] + agg_fix[f'{metric}_std_train'], alpha=0.1, color='lightgreen', label='_nolegend_')
    # Bottom panel: validation curves.
    axarr[1].plot(agg_diff.index, agg_diff[f'{metric}_mean_vali'], label='diff init', color='blue', linestyle='--')
    axarr[1].fill_between(agg_diff.index, agg_diff[f'{metric}_mean_vali'] - agg_diff[f'{metric}_std_vali'], agg_diff[f'{metric}_mean_vali'] + agg_diff[f'{metric}_std_vali'], alpha=0.1, color='blue', label='_nolegend_')
    axarr[1].plot(agg_fix.index, agg_fix[f'{metric}_mean_vali'], label='fix init', color='green', linestyle='--')
    axarr[1].fill_between(agg_fix.index, agg_fix[f'{metric}_mean_vali'] - agg_fix[f'{metric}_std_vali'], agg_fix[f'{metric}_mean_vali'] + agg_fix[f'{metric}_std_vali'], alpha=0.1, color='green', label='_nolegend_')
    axarr[0].set_ylabel('ce train')
    axarr[1].set_ylabel('ce vali')
    for ax in axarr:
        ax.legend()
    plt.savefig(f'{cfg_name}_learning_curves.png')
    plt.clf()
    plt.close()
|
python
|
from room import Room
from player import Player
from item import Item
import sys
import os
# Declare all the rooms
# Declare all the rooms, keyed by a short id used throughout the game loop.
room = {
    'outside':  Room("Outside Cave Entrance", "North of you, the cave mount beckons"),
    'foyer':    Room("Foyer", """Dim light filters in from the south. Dusty
passages run north and east."""),
    'overlook': Room("Grand Overlook", """A steep cliff appears before you, falling
into the darkness. Ahead to the north, a light flickers in
the distance, but there is no way across the chasm."""),
    'narrow':   Room("Narrow Passage", """The narrow passage bends here from west
to north. The smell of gold permeates the air."""),
    'treasure': Room("Treasure Chamber", """You've found the long-lost treasure
chamber! Sadly, it has already been completely emptied by
earlier adventurers. The only exit is to the south."""),
    'dungeon': Room("Unlocked Creepy Dungeon", """You've found an unlocked prison cell where someone perished."""),
    'hall': Room("Decrepit Royal Hall", """You've found a large hall that might have once held royal parties."""),
}
# Link rooms together via directional attributes (n_to/s_to/e_to/w_to).
room['outside'].n_to = room['foyer']
room['foyer'].s_to = room['outside']
room['foyer'].n_to = room['overlook']
room['foyer'].e_to = room['narrow']
room['foyer'].w_to = room['hall']
room['overlook'].s_to = room['foyer']
room['overlook'].n_to = room['hall']
room['narrow'].w_to = room['foyer']
room['narrow'].n_to = room['treasure']
room['narrow'].e_to = room['dungeon']
room['narrow'].s_to = room['hall']
room['treasure'].s_to = room['narrow']
room['treasure'].n_to = room['dungeon']
room['dungeon'].e_to = room['narrow']
room['hall'].s_to = room['overlook']
room['hall'].e_to = room['foyer']
# Item prototypes; note the same Item instance is shared between rooms below.
item = {
    'weapon': Item('sword', 'Sharp blade used to slash horrifying creatures.'),
    'light': Item('lantern', 'illuminates whatever room the traveler is in.'),
    'potion': Item('potion', 'life-giving serum of undetermined age in an old glass bottle.'),
}
room['overlook'].AddItem(item['weapon'])
room['overlook'].AddItem(item['potion'])
room['foyer'].AddItem(item['light'])
room['foyer'].AddItem(item['potion'])
room['dungeon'].AddItem(item['weapon'])
room['narrow'].AddItem(item['light'])
room['hall'].AddItem(item['weapon'])
room['hall'].AddItem(item['potion'])
# Make a new player object that is currently in the 'outside' room.
player_name = input("What's your name, explorer?")
player = Player(player_name, room['outside'])
# Developer convenience snippet for running this file from a REPL.
'''
import os
os.chdir('E:\\projects\\LambdaSchool\\m6\\61b1\\src')
exec(open('adv.py').read())
'''
# Write a loop that:
# Main game loop: describe the current room, read a command, act on it.
while True:
    # Print the current room's name and description.
    current_room_name = player.CurrentRoomName()
    current_room_description = player.CurrentRoomDescription()
    # Map the room's display name back to its key in the ``room`` dict.
    current_room_key = None
    for key, value in room.items():
        if current_room_name == value.room_name:
            current_room_key = key
    print(
        f"Explorer {player_name}, you now find yourself in the {current_room_name} room. {current_room_description}.\n")
    # Wait for user input and decide what to do.
    direction = input(
        f"Which direction do you want to go, Traveler {player_name}? \n\nMenu: \nn for north\ns for south\nw for west\ne for east\ni or inventory to list your inventory\nsearch to look for items in {current_room_name}\nq to quit\n\nCommand:")
    print('----------------------------------')
    user_input = direction.split(' ')
    if len(user_input) == 1:
        if direction == 'q':
            # Quit the game.
            print('You ran away from the cave, terrified.')
            sys.exit()
        elif direction in ('i', 'inventory'):
            player.ListInventory()
        elif direction == 'search':
            room[current_room_key].ListItems()
        elif direction in ('n', 's', 'e', 'w'):
            # Bug fix: the original per-room checks used conditions like
            # ``direction != 'n' or direction != 's'`` which are always true,
            # so the "cannot move" error printed even after a valid move, and
            # a dangling else sent 'i'/'inventory' into the movement code too.
            # Exits are attributes named n_to/s_to/e_to/w_to; a missing
            # attribute means there is no exit in that direction.
            destination = getattr(room[current_room_key], f'{direction}_to', None)
            if destination is not None:
                player.MoveToRoom(destination)
            else:
                print("You cannot move that direction. Select another direction to travel in.")
        else:
            print("You cannot move that direction. Select another direction to travel in.")
    elif len(user_input) == 2:
        # Two-word commands: "<verb> <item>".
        current_command, current_item = user_input
        rooms_items = room[current_room_key].items
        print()
        if current_command == 'drop':
            print(f'You try to drop a {current_item}.')
            # Iterate over a copy: DropInventoryItem mutates the inventory.
            for item in list(player.inventory):
                if item.item_name == current_item:
                    player.DropInventoryItem(item)
                    room[current_room_key].AddItem(item)
                    # Notify the user the item was dropped.
                    player.ItemDropped(current_item)
        elif current_command in ('get', 'take'):
            print(f'You try to pick up a {current_item}.')
            # Bug fix: the original reset its "found" marker on every
            # non-matching item (so a successful pickup could still report
            # "not found") and raised NameError in an empty room.
            found_item = None
            for item in list(rooms_items):
                if item.item_name == current_item:
                    found_item = item
                    break
            if found_item is not None:
                player.AddInventoryItem(found_item)
                # Notify the user the item was picked up, and remove it from the room.
                player.ItemFound(found_item.item_name)
                room[current_room_key].ItemPickedUp(found_item)
            else:
                room[current_room_key].ItemNotFound(current_item)
        print()
|
python
|
from .employee import *
|
python
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from scipy.stats import geom
import matplotlib.pyplot as plt
import numpy as np
def testGeom():# {{{
    """
    Geometric Distribution (discrete) demo.

    Notes
    -----
    Repeated Bernoulli trials; k is the number of trials up to and including
    the first success.  It is called "geometric" because the pmf terms form a
    geometric progression (constant ratio 1-p).

    p: probability of success of a single trial
    q: probability of failure (1-p)
    k: number of trials until the first success (the first k-1 trials fail)
    geom.pmf(k, p) == (1-p)**(k-1)*p

    Plots the pmf (both via scipy and via the closed form), the cdf, and a
    pmf estimated from random samples, in a 2x2 figure grid.
    """
    # Prepare data: p is known.
    # X axis: the trial index k on which the first success occurs
    # Y axis: probability
    p = 0.4
    xs = np.arange(geom.ppf(0.01, p), geom.ppf(0.99, p), step = 1)
    # E(X) = 1/p, D(X) = (1-p)/p**2
    mean, var, skew, kurt = geom.stats(p, moments='mvsk')
    print("mean: %.2f, var: %.2f, skew: %.2f, kurt: %.2f" % (mean, var, skew, kurt))
    fig, axs = plt.subplots(2, 2)
    # Plot pmf #1: scipy's geom.pmf
    ys = geom.pmf(xs, p)
    axs[0][0].plot(xs, ys, 'bo', markersize=5, label='geom pmf')
    axs[0][0].vlines(xs, 0, ys, colors='b', linewidth=5, alpha=0.5, label='vline pmf')
    axs[0][0].legend(loc='best', frameon=False)
    # Plot pmf #2: the closed-form expression, should match plot #1
    ys = (1-p)**(xs-1)*p
    axs[0][1].plot(xs, ys, 'bo', markersize=5, label='geom pmf')
    axs[0][1].vlines(xs, 0, ys, colors='b', linewidth=5, alpha=0.5, label='vline pmf')
    axs[0][1].legend(loc='best', frameon=False)
    axs[0][1].set_title('ys = (1-p)**(xs-1)*p')
    # Plot cdf: P(X<=x)
    ys = geom.cdf(xs, p)
    axs[1][0].plot(xs, ys, 'bo', markersize=5, label='geom cdf')
    axs[1][0].legend(loc='best', frameon=False)
    print(np.allclose(xs, geom.ppf(ys, p)))  # ppf:y-->x cdf:x-->y (inverse pair)
    # Generate random data (random variables) and estimate the pmf from it
    data = geom.rvs(p, size=1000)
    import sys
    sys.path.append("../../thinkstats")
    import Pmf
    pmf = Pmf.MakePmfFromList(data)
    xs, ys = pmf.Render()
    axs[1][1].plot(xs, ys, 'bo', markersize=5, label='rvs-pmf')
    plt.show()
# }}}
# Script entry point: run the geometric-distribution demo when executed directly.
if __name__ == "__main__":
    testGeom()
|
python
|
import base64
import json
import os
import twitter
import boto3
from time import sleep
from src.sam_quest import handle_game_state
# Constants
AWS_REGION = 'AWS_REGION'  # name of the environment variable holding the region
# Records processed since the last throttle pause; module-level so the counter
# survives across warm Lambda invocations of lambda_handler.
total_processed = 0
# Environment Variables
aws_region = os.environ.get(AWS_REGION, 'us-west-2')
dynamodb_table_name = os.environ.get('TABLE_NAME', 'test-twitter-table')
def get_api_credentials():
    """Build the keyword arguments for ``twitter.Api`` from the environment.

    Returns:
        dict: consumer/access credentials; entries are ``None`` for any
        environment variable that is unset.
    """
    credentials = {}
    credentials['consumer_key'] = os.getenv('CONSUMER_KEY')
    credentials['consumer_secret'] = os.getenv('CONSUMER_SECRET')
    credentials['access_token_key'] = os.getenv('ACCESS_TOKEN_KEY')
    credentials['access_token_secret'] = os.getenv('ACCESS_TOKEN_SECRET')
    return credentials
# Clients
# Created at import time so warm Lambda invocations reuse the same
# DynamoDB connection and twitter client.
print('Setting up dynamodb table connection.')
dynamodb_table = boto3.resource('dynamodb', region_name=aws_region).Table(dynamodb_table_name)
print('Setting up twitter client.')
twitter_api = twitter.Api(**get_api_credentials())
def lambda_handler(event, context):
    """AWS Lambda entry point: decode Kinesis records and advance game state.

    Args:
        event: Lambda event dict; ``event['Records']`` holds Kinesis records
            whose ``['kinesis']['data']`` payload is base64-encoded JSON.
        context: Lambda context object (unused).
    """
    # global dynamodb_table
    global total_processed
    posts = [json.loads(base64.b64decode(record['kinesis']['data'])) for record in event['Records']]
    # To avoid twitter throttling/locking, sleep 10 seconds after 10 records have been processed
    if total_processed >= 10:
        sleep(10)
        total_processed = 0
    handle_game_state(posts, twitter_api, dynamodb_table)
    total_processed += len(posts)
|
python
|
import math
import re
from termcolor import colored
from sympy import Interval
import locale
locale.setlocale(locale.LC_ALL, '')
def round_sig(number, precision=4):
    """Round *number* to ``precision`` significant digits.

    Args:
        number (number): value to round
        precision (int): number of significant digits to keep

    Returns:
        the rounded value; ``0.0`` is returned unchanged, since its
        magnitude is undefined on a log scale
    """
    if number == 0.0:
        return number
    magnitude = int(math.floor(math.log10(abs(number))))
    return round(number, precision - magnitude - 1)
def parse_numbers(text: str):
    """Extract a list of floats from an arbitrary string.

    Every character that cannot be part of a number is turned into a
    separator, then the remaining tokens are parsed as floats.
    Note: some nasty notations will not pass, such as "-e7".

    Args:
        text (string): input string

    Returns:
        list of floats found in *text*
    """
    digits = '0123456789'
    prev = ""
    kept = []
    for ch in text:
        if ch in digits:
            kept.append(ch)
        # a minus is fine unless it directly follows another minus
        elif ch == "-" and prev != "-":
            kept.append(ch)
        # a dot only between digits or right after a separator
        elif ch == "." and (prev == " " or prev in digits):
            kept.append(ch)
        # scientific "e" only after a digit or a minus
        elif ch == "e" and (prev == "-" or prev in digits):
            kept.append(ch)
        else:
            kept.append(" ")
        prev = ch
    return [float(token) for token in "".join(kept).split()]
def is_float(value):
    """Return True when *value* can be converted to ``float``, else False."""
    try:
        float(value)
    except Exception:
        return False
    return True
def to_sympy_intervals(intervals: list):
    """Convert a list of pairs (or ready-made Intervals) into sympy Intervals.

    Items that already are ``Interval`` instances are passed through untouched.
    """
    return [item if isinstance(item, Interval) else Interval(item[0], item[1])
            for item in intervals]
def ineq_to_constraints(functions: list, intervals: list, decoupled=True, silent: bool = True):
    """ Converts expressions and intervals into constraints
    list of expressions, list of intervals -> constraints

    Args:
        functions: (list of strings) array of functions
        intervals (list of intervals): array of pairs, low and high bound
        decoupled (bool): if True returns 2 constraints for a single interval
        silent (bool): if silent printed output is set to minimum

    Example:
        ["x+3"],[[0,1]] -> ["0 <= x+3 <= 1"]

    Returns:
        (list) of constraints

    Raises:
        Exception: when lengths differ or an interval is empty/inverted
    """
    ## BUGFIX: compare lengths with !=, not `is not` -- identity of ints is an
    ## interning artefact and is unreliable for values outside the small-int cache
    if len(functions) != len(intervals):
        if not silent:
            print(colored(f"len of functions {len(functions)} and intervals {len(intervals)} does not correspond", "red"))
        raise Exception(f"Constraints cannot be computed. len of functions {len(functions)} and intervals {len(intervals)} does not correspond.")
    ## Catching wrong interval errors
    try:
        spam = []
        for index in range(len(functions)):
            ## name interval bounds
            if isinstance(intervals[index], Interval):
                low = intervals[index].start
                high = intervals[index].end
            else:
                low = intervals[index][0]
                high = intervals[index][1]
            ## sympy expressions always need the decoupled (two-item) form
            if decoupled or not isinstance(functions[index], str):
                if not isinstance(functions[index], str):
                    if not silent:
                        print("SYMPY")
                    spam.append(functions[index] >= low)
                    spam.append(functions[index] <= high)
                else:
                    spam.append(functions[index] + " >= " + str(low))
                    spam.append(functions[index] + " <= " + str(high))
            else:
                ## coupled form: "low <= f <= high"
                spam.append(str(low) + " <= " + functions[index] + " <= " + str(high))
        return spam
    except TypeError as error:
        if "EmptySet" in str(error):
            ## BUGFIX: fixed "lover" typo in the error message
            raise Exception("ineq_to_constraints", "Some intervals are incorrect (lower bound > upper bound)")
        elif "FiniteSet" in str(error):
            raise Exception("ineq_to_constraints", "Some intervals are incorrect (empty)")
        else:
            ## ROBUSTNESS: re-raise unrecognised TypeErrors instead of
            ## silently falling through and returning None
            raise
    except Exception as err:
        print("Unhandled exception", err)
        raise err
def constraints_to_ineq(constraints: list, silent: bool = True, debug: bool = False):
    """ Converts constraints to inequalities if possible
    constraints -> list of expressions, list of intervals

    Args:
        constraints (list of strings): properties to be converted
        silent (bool): if silent printed output is set to minimum
        debug (bool): if True extensive print will be used

    Example:
        ["x+3>=0","x+3<=1"] -> ["x+3"],[[0,1]]

    Returns:
        (funcs, intervals) tuple on success, False when a constraint cannot be
        interpreted as an inequality.
    """
    if debug:
        silent = False
    ## Constraints must come in lower/upper-bound pairs, one pair per function
    if len(constraints) % 2:
        if not silent:
            print(colored("Number of constraints is not even, some intervals would be invalid", "red"))
        raise Exception(f"Number of constraints is not even, some intervals would be invalid")
    funcs = []
    intervals = []
    is_odd = False
    index = 0
    for prop in constraints:
        spam = "None"
        if debug:
            print(f"Constraint {index + 1} before splitting", prop)
        try:
            ## Uniformise every (in)equality sign to "<" so a single split works
            spam = prop.replace("<=", "<").replace(">=", "<").replace("=>", "<").replace("=<", "<").replace(">", "<")
            spam = spam.split("<")
        except AttributeError:
            ## non-string constraint (e.g. sympy expression) -- leave spam as "None"
            print()
        if debug:
            print(f"Constraint {index+1} after splitting", spam)
        if len(spam) <= 1:
            if not silent:
                print(colored(f"Constraint {index+1} is not in a form of inequality", "red"))
            return False
        elif len(spam) > 2:
            if not silent:
                print(colored(f"Constraint {index+1} has more than one inequality sign", "red"))
            ## Re-glue the surplus pieces onto the expression side, restoring the
            ## original comparison characters from `prop` by position
            if spam[0].replace('.', '', 1).isdigit():
                egg = [spam[0], spam[1]]
                for indexx in range(2, len(spam)):
                    egg[1] = egg[1] + "".join(filter(lambda x: x in ["=", "<", ">"], prop[len(egg[0]):len(egg[0]) + 2])) + spam[indexx]
                spam = egg
                print(spam)
            elif spam[-1].replace('.', '', 1).isdigit():
                egg = [spam[0]]
                for indexx in range(1, len(spam)-1):
                    egg[0] = egg[0] + "".join(filter(lambda x: x in ["=", "<", ">"], prop[len(egg[0]):len(egg[0]) + 2])) + spam[indexx]
                egg.append(spam[-1])
                spam = egg
                print(spam)
            else:
                ## neither side is a plain number -- cannot repair the split
                return False
        # ## Searching for < > which were in brackets
        # brackets = []
        # for part in spam:
        #     brackets.append(part.count("(") - part.count(")"))
        # brackets_count = 0
        # ## more right brackets in the first part
        # if brackets[0] < 0:
        #     return False
        # ## sum the brackets until I find balance
        # for index, part in enumerate(brackets):
        #     brackets_count = brackets_count + part
        #     ## found the split
        #     if brackets_count == 0 and sum(brackets[index +1:]):
        # TODO
        try:
            ## The right-hand-side is number
            float(spam[1])
            if debug:
                print("right-hand-side ", float(spam[1]))
        except ValueError:
            ## right side is symbolic: move it to the left and bound by 0
            spam = [f"{spam[0]} -( {spam[1]})", 0]
        ## If we are at odd position check
        if is_odd:
            if debug:
                print("is odd")
                print("funcs[-1]", funcs[-1])
                print("spam[0]", spam[0])
            ## whether the previous function is the same as the new one
            if funcs[-1] == spam[0]:
                # if yes, add the other value of the interval
                if debug:
                    print("Adding value")
                    print("len(funcs)", len(funcs))
                    print("[intervals[len(funcs)-1], spam[1]]", [intervals[len(funcs)-1], spam[1]])
                intervals[len(funcs)-1] = [intervals[len(funcs)-1], spam[1]]
        else:
            funcs.append(spam[0])
            intervals.append(spam[1])
        is_odd = not is_odd
        index = index + 1
    ## Sort the intervals
    index = 0
    for interval_index in range(len(intervals)):
        if len(intervals[interval_index]) != 2:
            if not silent:
                print(colored(f"Constraint {index + 1} does not have proper number of boundaries", "red"))
            raise Exception(f"Constraint {index + 1} does not have proper number of boundaries")
        if debug:
            print("sorted([float(intervals[interval_index][0]), float(intervals[interval_index][1])])", sorted([float(intervals[interval_index][0]), float(intervals[interval_index][1])]))
        intervals[interval_index] = sorted([float(intervals[interval_index][0]), float(intervals[interval_index][1])])
        if debug:
            print("Interval(intervals[interval_index][0], intervals[interval_index][1]) ", Interval(intervals[interval_index][0], intervals[interval_index][1]))
        intervals[interval_index] = Interval(intervals[interval_index][0], intervals[interval_index][1])
        index = index + 1
    if debug:
        print("funcs: ", funcs)
        print("intervals: ", intervals)
    return funcs, intervals
def decouple_constraints(constraints: list, silent: bool = True, debug: bool = False):
    """ Decouples constraints with two inequalities into two separate constraints.

    Args:
        constraints (list of strings): properties to be converted
        silent (bool): if silent printed output is set to minimum
        debug (bool): if True extensive print will be used

    Example:
        ["-8 <= x+3 <= 0"] -> ["-8 <= x+3", "x+3 <= 0"]
    """
    result = []
    for constraint in constraints:
        result.extend(decouple_constraint(constraint, silent=silent, debug=debug))
    return result
def decouple_constraint(constraint: str, silent: bool = True, debug: bool = False):
    """ Decouples a constraint with two inequalities into two separate constraints.

    Args:
        constraint (string): property to be converted
        silent (bool): if silent printed output is set to minimum
        debug (bool): if True extensive print will be used

    Example:
        "-8 <= x+3 <= 0" -> ["-8 <= x+3", "x+3 <= 0"]

    Raises:
        Exception: when the constraint contains no, or more than two,
            (in)equality signs
    """
    new_constraints = []
    pattern = r" < | > | >= | <= | = | => | =<"
    match = re.findall(pattern, constraint)
    if debug:
        print("constraint", constraint)
        print("match", match)
    if len(match) == 0:
        raise Exception(f"No <,>,>=, <=,= symbols in constrain")
    elif len(match) == 1:
        new_constraints.append(constraint)
    elif len(match) == 2:
        parts = re.split(pattern, constraint)
        new_constraints.append(match[0].join(parts[:2]))
        ## BUGFIX: the second piece must keep its own operator (match[1]);
        ## using match[0] silently replaced e.g. " < " with " <= " in
        ## mixed constraints such as "-8 <= x < 0"
        new_constraints.append(match[1].join(parts[1:]))
    else:
        raise Exception(f"More than two <,>,>=, <=,= symbols in constrain!")
    return new_constraints
# def couple_constraints(constraints: str, silent: bool = True, debug: bool = False):
# """ couples constrains with the same internal parts into a single constraints
#
# Args:
# constraints (string): properties to be converted
# silent (bool): if silent printed output is set to minimum
# debug (bool): if True extensive print will be used
#
# Example:
# ["-8 <= x+3", "x+3 <= 0"] -> "-8 <= x+3 <= 0"
# """
# new_constraints = []
# pattern = r" < | > | >= | <= | = | => | =<"
# match = re.findall(pattern, constraint)
# if debug:
# print("constraint", constraint)
# print("match", match)
# if len(match) == 0:
# raise Exception(f"No <,>,>=, <=,= symbols in constrain")
# elif len(match) == 1:
# new_constraints.append(constraint)
# elif len(match) == 2:
# parts = re.split(pattern, constraint)
# new_constraints.append(match[0].join(parts[:2]))
# new_constraints.append(match[0].join(parts[1:]))
# else:
# raise Exception(f"More than two <,>,>=, <=,= symbols in constrain!")
# return new_constraints
def add_white_spaces(expression):
    """ Adds white spaces in between <, >, =, <=, and >= so the expression
    can be easily parsed by the regexes used elsewhere in this module.

    Args:
        expression (string): expression to pad

    Example:
        "0.2>=p*q >=0.1" ---> "0.2 >= p*q >= 0.1"
    """
    ## a bare "=" (not part of <=, >=, =<, =>) hugging a non-space character
    just_equal = r"[^\s<>]=[^<>]|[^<>]=[^\s<>]"
    match = re.findall(just_equal, expression)
    if match:
        ## BUGFIX: the original called expression.replace("=", " = ") without
        ## assigning the result, so a bare "=" was never padded.  Pad only a
        ## standalone "=" so that <=, >=, =<, => are left for the branches below.
        expression = re.sub(r"(?<![<>=])=(?![<>=])", " = ", expression)
    ## two-character comparison operators glued to a non-space neighbour
    with_equal = r"[^\s]>=|[^\s]<=|[^\s]=>|[^\s]=<|>=[^\s]|<=[^\s]|=>[^\s]|=<[^\s]"
    match = re.findall(with_equal, expression)
    if match:
        ## each operator is replaced at most once (replace() hits all
        ## occurrences), the flags prevent re-padding already padded text
        greater_eq_check = True
        smaller_eq_check = True
        eq_greater_check = True
        eq_smaller_check = True
        for item in match:
            if ">=" in item and greater_eq_check:
                expression = expression.replace(">=", " >= ")
                greater_eq_check = False
            if "<=" in item and smaller_eq_check:
                expression = expression.replace("<=", " <= ")
                smaller_eq_check = False
            if "=>" in item and eq_greater_check:
                ## normalise "=>" to ">="
                expression = expression.replace("=>", " >= ")
                greater_eq_check = False
            if "=<" in item and eq_smaller_check:
                ## normalise "=<" to "<="
                expression = expression.replace("=<", " <= ")
                smaller_eq_check = False
    ## single-character < or > glued to a non-space, non-"=" neighbour
    without_equal = r"<[^\s=]|>[^\s=]|[^\s=]<|[^\s=]>"
    match = re.findall(without_equal, expression)
    if match:
        greater_check = True
        smaller_check = True
        for item in match:
            if ">" in item and greater_check:
                expression = expression.replace(">", " > ")
                greater_check = False
            if "<" in item and smaller_check:
                expression = expression.replace("<", " < ")
                smaller_check = False
    ## collapse any doubled-up spacing introduced above
    expression = re.sub(r' +', ' ', expression).strip()
    return expression
def normalise_constraint(constraint: str, silent: bool = True, debug: bool = False):
    """ Transforms the constraint into normalised form: the expression in the
    middle/left and bounds compared with "<"/"<=" from the left.

    Args:
        constraint (string): constraint to be normalised
        silent (bool): if silent printed output is set to minimum
        debug (bool): if True extensive print will be used

    Example:
        "0.2 >= p >= 0.1" ---> "0.1 <= p <= 0.2"
        "0.2 >= p" ---> "p <= 0.2"
    """
    constraint = add_white_spaces(constraint)
    pattern = r" < | > | >= | <= | = | => | =<"
    signs = re.findall(pattern, constraint)
    parts = re.split(pattern, constraint)
    if debug:
        print("constraint", constraint)
        print("match", signs)
        print("split", parts)
    ## single-sign constraints: flip so the expression ends up on the left
    if signs == [' >= ']:
        return f"{parts[1]} <= {parts[0]}"
    if signs == [' > ']:
        return f"{parts[1]} < {parts[0]}"
    if signs == [' = '] and is_float(parts[0]):
        return f"{parts[1]} = {parts[0]}"
    ## double-sign constraints: reverse into "low <op> expr <op> high"
    flipped = {
        (' >= ', ' >= '): ('<=', '<='),
        (' > ', ' > '): ('<', '<'),
        (' > ', ' >= '): ('<=', '<'),
        (' >= ', ' > '): ('<', '<='),
    }
    operators = flipped.get(tuple(signs))
    if operators is not None:
        left_op, right_op = operators
        return f"{parts[2]} {left_op} {parts[1]} {right_op} {parts[0]}"
    ## already normalised (or unrecognised) -- return unchanged
    return constraint
def put_exp_left(constraint: str, silent: bool = True, debug: bool = False):
    """ Puts the expression of a single-sign constraint on the left side.

    Args:
        constraint (string): constraint to be transformed
        silent (bool): if silent printed output is set to minimum
        debug (bool): if True extensive print will be used

    Example:
        "0.2 <= p" ---> "p >= 0.2"

    Returns:
        the flipped constraint, or the original constraint unchanged when the
        left side is not a number or the constraint has more than one sign
    """
    pattern = r" < | > | >= | <= | = | => | =<"
    match = re.findall(pattern, constraint)
    spam = re.split(pattern, constraint)
    try:
        ## only flip when the left-hand side is a plain number
        float(spam[0])
        ## NOTE: the redundant is_float(spam[0]) checks were dropped -- the
        ## float() call above already guarantees it
        if match == [' >= ']:
            return f"{spam[1]} <= {spam[0]}"
        elif match == [' > ']:
            return f"{spam[1]} < {spam[0]}"
        elif match == [' = ']:
            return f"{spam[1]} = {spam[0]}"
        elif match == [' <= ']:
            return f"{spam[1]} >= {spam[0]}"
        elif match == [' < ']:
            return f"{spam[1]} > {spam[0]}"
    except ValueError:
        return constraint
    ## BUGFIX: previously this fell through and returned None for numeric
    ## left sides with an unmatched sign list (e.g. double-sign constraints)
    return constraint
def split_constraints(constraints):
    """ Splits each normalised constraint into parts divided by its
    (in)equality signs.

    Example:
        ["0.7 < p+q < 0.8"] --> [("0.7", "p+q", "0.8")]
        ["0.7 < p+q"] --> [("0.7", "p+q", None)]
    """
    return [split_constraint(constraint) for constraint in constraints]
def split_constraint(constraint):
    """ Splits a normalised constraint into the parts divided by its
    (in)equality signs.

    Example:
        "0.7 < p+q < 0.8" --> ["0.7", "p+q", "0.8"]
        "0.7 < p+q" --> ["0.7", "p+q", None]
    """
    ## uniformise every (in)equality sign to a bare "<" and drop spacing
    constraint = re.sub(r"\s*(<=|>=|=>|=<)\s*", "<", constraint)
    constraint = re.sub(r"\s*[<>=]\s*", "<", constraint)
    sign_count = constraint.count("<")
    if sign_count == 2:
        ## double interval bound: low < expr < high
        low, middle, high = constraint.split("<")
        return [low, middle, high]
    if sign_count == 1:
        ## single interval bound: the numeric side is the bound,
        ## the other side is the expression
        left, right = constraint.split("<")
        if is_float(left):
            return [left, right, None]
        return [None, left, right]
    raise Exception("Given constrain more than two (in)equality signs")
def parse_interval_bounds(line: str, parse_param=False):
    """ Parses interval bounds of a list of inequalities separated by ,/;

    Args:
        line (str): line to parse
        parse_param (bool): if True return the parameter names instead

    Returns:
        list of [low, high] pairs (``None`` for a missing bound), or the list
        of parameter names when *parse_param* is True

    Example:
        "0<=p<=1/2;" --> [[0, 0.5]]
        "1/4<=q<=0.75, 0<=p<=1/2" --> [[0.25, 0.75], [0, 0.5]]
        "0<=p;" --> [[0, None]]
    """
    line = line.replace(";", ",")
    ## keep only nonempty inequalities
    inequalities = [part for part in line.split(",") if part != ""]
    triples = [split_constraint(inequality) for inequality in inequalities]
    if parse_param:
        ## the middle element of each triple is the parameter name
        return [triple[1] for triple in triples]
    ## BUGFIX: the middle element (the parameter name) is now always dropped;
    ## previously it was removed only when parse_param was set, so this path
    ## eval'ed the parameter name itself and raised a NameError.
    ## NOTE: eval supports fraction bounds such as "1/2"; do not feed this
    ## function untrusted input.
    return [[eval(low) if low is not None else None,
             eval(high) if high is not None else None]
            for low, _, high in triples]
def to_interval(points: list):
    """ Transforms a set of points into a set of intervals - the orthogonal hull.

    Args:
        points (list of tuples): the points, one tuple per point

    Example:
        POINT INTERVALS
          A       B          X       Y
        [(0, 2), (1, 3)] --> [[0, 1], [2, 3]]

    Example 2:
          A       B       C          X       Y
        [(0, 2), (1, 5), (4, 3)] --> [[0, 4], [2, 5]]

    Example 3:
          A          B          C             X       Y       Z
        [(0, 2, 9), (1, 5, 0), (4, 3, 6)] --> [[0, 4], [2, 5], [0, 9]]
    """
    hull = []
    for dimension in range(len(points[0])):
        ## per-dimension extremes across all points
        values = [point[dimension] for point in points]
        hull.append([min(values), max(values)])
    return hull
|
python
|
import copy
import json
import unittest
from typing import Any, Dict
import avro.schema # type: ignore
from wicker import schema
from wicker.core.errors import WickerSchemaException
from wicker.schema import dataloading, dataparsing, serialization
from wicker.schema.schema import PRIMARY_KEYS_TAG
from wicker.testing.codecs import Vector, VectorCodec
TEST_SCHEMA = schema.DatasetSchema(
fields=[
schema.IntField("label", description="Label of the example"),
schema.RecordField(
"lidar_point_cloud",
fields=[
schema.RecordField(
"lidar_metadata",
fields=[
schema.StringField(
"lidar_model",
description="Model of lidar used to generate data",
),
schema.DoubleField(
"lidar_calibration_error",
description="Some lidar calibration metric",
required=False,
),
],
description="Some metadata about the lidar",
),
],
description="Lidar point cloud data",
),
schema.LongField("timestamp_ns", description="Some timestamp field in ns"),
schema.FloatField("ego_speed", description="Absolute speed of ego"),
schema.BoolField("qc", description="A quality control field", required=False),
schema.RecordField(
"extra_metadata",
description="Extra metadata",
fields=[
schema.IntField("meta_1", description="Metadata 1"),
schema.IntField("meta_2", description="Metadata 2", required=False),
],
required=False,
),
schema.ArrayField(
schema.StringField("array_stringfield", description="some array"),
),
],
primary_keys=["timestamp_ns"],
)
TEST_EXAMPLE_REQUIRED: Dict[str, Any] = {
"label": 0,
"lidar_point_cloud": {
"lidar_metadata": {
"lidar_model": "harambe",
},
},
"timestamp_ns": 1337,
"ego_speed": 1337.1337,
"array_stringfield": ["foo", "bar", "baz"],
}
# When we load the example all the keys for any unset non-required fields will are added
TEST_EXAMPLE_LOAD_REQUIRED = copy.deepcopy(TEST_EXAMPLE_REQUIRED)
TEST_EXAMPLE_LOAD_REQUIRED.update(
{
"lidar_point_cloud": {
"lidar_metadata": {
"lidar_model": "harambe",
"lidar_calibration_error": None,
},
},
"qc": None,
"extra_metadata": None,
}
)
# Example with everything field set
TEST_EXAMPLE_FULL = copy.deepcopy(TEST_EXAMPLE_REQUIRED)
TEST_EXAMPLE_FULL.update(
{
"lidar_point_cloud": {
"lidar_metadata": {
"lidar_model": "harambe",
"lidar_calibration_error": 1.337,
},
},
"qc": True,
"extra_metadata": {
"meta_1": 1,
"meta_2": 2,
},
}
)
TEST_SERIALIZED_JSON_V2 = {
"_description": "",
"_json_version": 2,
PRIMARY_KEYS_TAG: '["timestamp_ns"]',
"fields": [
{"_description": "Label of the example", "name": "label", "type": "int"},
{
"_description": "Lidar point cloud data",
"name": "lidar_point_cloud",
"type": {
"fields": [
{
"_description": "Some metadata about the lidar",
"name": "lidar_metadata",
"type": {
"fields": [
{
"_description": "Model of lidar used to generate data",
"name": "lidar_model",
"type": "string",
},
{
"_description": "Some lidar calibration metric",
"name": "lidar_calibration_error",
"type": ["null", "double"],
},
],
"name": "lidar_metadata",
"type": "record",
},
}
],
"name": "lidar_point_cloud",
"type": "record",
},
},
{"_description": "Some timestamp field in ns", "name": "timestamp_ns", "type": "long"},
{"_description": "Absolute speed of ego", "name": "ego_speed", "type": "float"},
{"_description": "A quality control field", "name": "qc", "type": ["null", "boolean"]},
{
"_description": "Extra metadata",
"name": "extra_metadata",
"type": [
"null",
{
"fields": [
{"_description": "Metadata 1", "name": "meta_1", "type": "int"},
{"_description": "Metadata 2", "name": "meta_2", "type": ["null", "int"]},
],
"name": "extra_metadata",
"type": "record",
},
],
},
{
"name": "array_stringfield",
"type": {"_description": "some array", "items": "string", "name": "array_stringfield", "type": "array"},
},
],
"name": "fields",
"type": "record",
}
class TestSchemaParseExample(unittest.TestCase):
    """Tests for dataparsing.parse_example against TEST_SCHEMA: happy paths
    plus per-field type mismatches that must raise WickerSchemaException
    with a path-qualified message."""
    def setUp(self) -> None:
        # Show full diffs on assertion failures (examples are deeply nested).
        self.maxDiff = None
    def test_parse_full_example(self) -> None:
        """An example with every optional field set parses to itself."""
        parsed_example = dataparsing.parse_example(TEST_EXAMPLE_FULL, TEST_SCHEMA)
        self.assertEqual(parsed_example, TEST_EXAMPLE_FULL)
    def test_parse_required_fields(self) -> None:
        """Parsing fills unset non-required fields with None keys."""
        parsed_example = dataparsing.parse_example(TEST_EXAMPLE_REQUIRED, TEST_SCHEMA)
        self.assertEqual(parsed_example, TEST_EXAMPLE_LOAD_REQUIRED)
    def test_fail_required(self) -> None:
        """Dropping a required field (top-level or record) must fail."""
        example = copy.deepcopy(TEST_EXAMPLE_REQUIRED)
        del example["label"]
        with self.assertRaises(WickerSchemaException) as e:
            dataparsing.parse_example(example, TEST_SCHEMA)
        self.assertIn("Example missing keys", str(e.exception))
        example = copy.deepcopy(TEST_EXAMPLE_REQUIRED)
        del example["lidar_point_cloud"]
        with self.assertRaises(WickerSchemaException) as e:
            dataparsing.parse_example(example, TEST_SCHEMA)
        self.assertIn("Example missing keys", str(e.exception))
    def test_fail_type_int(self) -> None:
        """A non-int value for an IntField must fail with the field path."""
        example = copy.deepcopy(TEST_EXAMPLE_FULL)
        example["label"] = "SHOULD_BE_INT"
        with self.assertRaises(WickerSchemaException) as e:
            dataparsing.parse_example(example, TEST_SCHEMA)
        self.assertIn("Error at path label", str(e.exception))
    def test_fail_type_long(self) -> None:
        """A non-long value for a LongField must fail with the field path."""
        example = copy.deepcopy(TEST_EXAMPLE_FULL)
        example["timestamp_ns"] = "SHOULD_BE_LONG"
        with self.assertRaises(WickerSchemaException) as e:
            dataparsing.parse_example(example, TEST_SCHEMA)
        self.assertIn("Error at path timestamp_ns", str(e.exception))
    def test_fail_type_bool(self) -> None:
        """A non-bool value for a BoolField must fail with the field path."""
        example = copy.deepcopy(TEST_EXAMPLE_FULL)
        example["qc"] = "SHOULD_BE_BOOL"
        with self.assertRaises(WickerSchemaException) as e:
            dataparsing.parse_example(example, TEST_SCHEMA)
        self.assertIn("Error at path qc", str(e.exception))
    def test_fail_type_float(self) -> None:
        """A non-float value for a FloatField must fail with the field path."""
        example = copy.deepcopy(TEST_EXAMPLE_FULL)
        example["ego_speed"] = "SHOULD_BE_FLOAT"
        with self.assertRaises(WickerSchemaException) as e:
            dataparsing.parse_example(example, TEST_SCHEMA)
        self.assertIn("Error at path ego_speed", str(e.exception))
    def test_fail_type_double(self) -> None:
        """A nested DoubleField mismatch must report the full dotted path."""
        example: Dict[str, Any] = copy.deepcopy(TEST_EXAMPLE_FULL)
        example["lidar_point_cloud"]["lidar_metadata"]["lidar_calibration_error"] = "SHOULD_BE_DOUBLE"
        with self.assertRaises(WickerSchemaException) as e:
            dataparsing.parse_example(example, TEST_SCHEMA)
        self.assertIn(
            "Error at path lidar_point_cloud.lidar_metadata.lidar_calibration_error",
            str(e.exception),
        )
    def test_fail_type_string(self) -> None:
        """A nested StringField mismatch must report the full dotted path."""
        example: Dict[str, Any] = copy.deepcopy(TEST_EXAMPLE_FULL)
        example["lidar_point_cloud"]["lidar_metadata"]["lidar_model"] = 1337
        with self.assertRaises(WickerSchemaException) as e:
            dataparsing.parse_example(example, TEST_SCHEMA)
        self.assertIn(
            "Error at path lidar_point_cloud.lidar_metadata.lidar_model",
            str(e.exception),
        )
    def test_fail_type_record(self) -> None:
        """A non-dict value for a RecordField must fail with the field path."""
        example = copy.deepcopy(TEST_EXAMPLE_FULL)
        example["lidar_point_cloud"] = "SHOULD_BE_DICT"
        with self.assertRaises(WickerSchemaException) as e:
            dataparsing.parse_example(example, TEST_SCHEMA)
        self.assertIn("Error at path lidar_point_cloud", str(e.exception))
    def test_fail_keys_record(self) -> None:
        """A missing key inside the top-level record reports the root path."""
        example = copy.deepcopy(TEST_EXAMPLE_FULL)
        del example["label"]
        with self.assertRaises(WickerSchemaException) as e:
            dataparsing.parse_example(example, TEST_SCHEMA)
        self.assertIn("Error at path :", str(e.exception))
    def test_fail_type_array(self) -> None:
        """A non-list value for an ArrayField must fail with the field path."""
        example = copy.deepcopy(TEST_EXAMPLE_FULL)
        example["array_stringfield"] = "foo"
        with self.assertRaises(WickerSchemaException) as e:
            dataparsing.parse_example(example, TEST_SCHEMA)
        self.assertIn("Error at path array_stringfield:", str(e.exception))
    def test_fail_element_type_array(self) -> None:
        """A wrong element type reports the offending element index."""
        example = copy.deepcopy(TEST_EXAMPLE_FULL)
        example["array_stringfield"] = [1, 2, 3]
        with self.assertRaises(WickerSchemaException) as e:
            dataparsing.parse_example(example, TEST_SCHEMA)
        self.assertIn("Error at path array_stringfield.elem[0]:", str(e.exception))
class TestSchemaValidation(unittest.TestCase):
    """Tests that DatasetSchema rejects invalid primary-key configurations."""
    def test_schema_no_primary_keys(self) -> None:
        """An empty primary_keys list must be rejected."""
        with self.assertRaises(WickerSchemaException) as e:
            schema.DatasetSchema(fields=[], primary_keys=[])
        self.assertIn("The primary_keys attribute can not be empty", str(e.exception))
    def test_schema_invalid_primary_keys(self) -> None:
        """A primary key that names no declared field must be rejected."""
        with self.assertRaises(WickerSchemaException) as e:
            schema.DatasetSchema(fields=[], primary_keys=["unknown_key"])
        self.assertIn("'unknown_key' not found", str(e.exception))
    def test_schema_required_primary_keys(self) -> None:
        """Primary-key fields must carry the 'required' tag."""
        schema.DatasetSchema(fields=[schema.StringField("car_id")], primary_keys=["car_id"])
        with self.assertRaises(WickerSchemaException) as e:
            schema.DatasetSchema(
                fields=[schema.StringField("car_id", required=False)],
                primary_keys=["car_id"],
            )
        self.assertIn("must have the 'required' tag, but 'car_id' doesn't", str(e.exception))
    def test_schema_invalid_primary_keys_type(self) -> None:
        """Float, double, and record fields cannot serve as primary keys."""
        bad_fields = [
            schema.FloatField("float_key"),
            schema.DoubleField("double_key"),
            schema.RecordField("record_key", fields=[]),
        ]
        for f in bad_fields:
            with self.assertRaises(WickerSchemaException, msg=f"field.name={f.name}") as e:
                schema.DatasetSchema(fields=[f], primary_keys=[f.name])
            self.assertIn(f"'{f.name}' cannot be a primary key", str(e.exception), msg=f"field.name={f.name}")
class TestSchemaSerialization(unittest.TestCase):
    """Tests for schema (de)serialization: field-name validation,
    dumps/loads round-trips against TEST_SERIALIZED_JSON_V2, and
    rejection of unknown field types."""
    def setUp(self) -> None:
        # Show full diffs when large serialized schemas mismatch.
        self.maxDiff = None
    def test_schema_name_error_dashes(self) -> None:
        """Field names containing dashes are invalid."""
        with self.assertRaises(ValueError):
            schema.StringField(name="name-with-dashes")
    def test_schema_name_error_start_with_number(self) -> None:
        """Field names must not start with a digit."""
        with self.assertRaises(ValueError):
            schema.StringField(name="0foo")
        with self.assertRaises(ValueError):
            schema.StringField(name="0")
    def test_schema_name_single_char(self) -> None:
        """A single-letter field name is valid."""
        schema.StringField(name="q")
    def test_serialize_to_str(self) -> None:
        """dumps() output is valid Avro and matches the reference JSON."""
        serialized = serialization.dumps(TEST_SCHEMA)
        avro.schema.parse(serialized)
        self.assertEqual(json.loads(serialized), TEST_SERIALIZED_JSON_V2)
    def test_serialize_to_str_pretty(self) -> None:
        """pretty=True produces key-sorted, 4-space-indented JSON."""
        serialized = serialization.dumps(TEST_SCHEMA, pretty=True)
        avro.schema.parse(serialized)
        self.assertEqual(serialized, json.dumps(TEST_SERIALIZED_JSON_V2, sort_keys=True, indent=4))
    def test_loads(self) -> None:
        """loads() of the reference JSON reconstructs TEST_SCHEMA."""
        self.assertEqual(
            TEST_SCHEMA,
            serialization.loads(json.dumps(TEST_SERIALIZED_JSON_V2)),
        )
    def test_loads_bad_type(self) -> None:
        """An unknown field type string must be rejected."""
        with self.assertRaises(WickerSchemaException):
            serialized_bad_type = copy.deepcopy(TEST_SERIALIZED_JSON_V2)
            serialized_bad_type["fields"][0]["type"] = "BAD_TYPE_123"  # type: ignore
            serialization.loads(json.dumps(serialized_bad_type))
    def test_loads_bad_type_nullable(self) -> None:
        """An unknown type inside a nullable union must be rejected."""
        with self.assertRaises(WickerSchemaException):
            serialized_bad_type = copy.deepcopy(TEST_SERIALIZED_JSON_V2)
            serialized_bad_type["fields"][0]["type"] = ["null", "BAD_TYPE_123"]  # type: ignore
            serialization.loads(json.dumps(serialized_bad_type))
class TestSchemaLoading(unittest.TestCase):
    """Tests for dataloading.load_example: value/container identity semantics,
    column subsets, extra-key handling, and records given as key/value
    tuple lists."""
    def test_load(self) -> None:
        """Loading preserves scalar object identity but rebuilds containers."""
        loaded_example = dataloading.load_example(TEST_EXAMPLE_FULL, TEST_SCHEMA)
        # Assert that values are semantically equal
        self.assertEqual(TEST_EXAMPLE_FULL, loaded_example)
        # Assert that the IDs of the values are also equal (no additional copies of the data were created)
        self.assertEqual(id(TEST_EXAMPLE_FULL["label"]), id(loaded_example["label"]))
        self.assertEqual(
            id(TEST_EXAMPLE_FULL["lidar_point_cloud"]["lidar_metadata"]["lidar_model"]),
            id(loaded_example["lidar_point_cloud"]["lidar_metadata"]["lidar_model"]),
        )
        self.assertEqual(
            id(TEST_EXAMPLE_FULL["lidar_point_cloud"]["lidar_metadata"]["lidar_calibration_error"]),
            id(loaded_example["lidar_point_cloud"]["lidar_metadata"]["lidar_calibration_error"]),
        )
        self.assertEqual(id(TEST_EXAMPLE_FULL["timestamp_ns"]), id(loaded_example["timestamp_ns"]))
        self.assertEqual(id(TEST_EXAMPLE_FULL["ego_speed"]), id(loaded_example["ego_speed"]))
        self.assertEqual(id(TEST_EXAMPLE_FULL["qc"]), id(loaded_example["qc"]))
        # Assert that the IDs of dictionaries and lists are not equal (data is not modified in-place)
        self.assertNotEqual(id(TEST_EXAMPLE_FULL), id(loaded_example))
        self.assertNotEqual(
            id(TEST_EXAMPLE_FULL["lidar_point_cloud"]),
            id(loaded_example["lidar_point_cloud"]),
        )
        self.assertNotEqual(
            id(TEST_EXAMPLE_FULL["lidar_point_cloud"]["lidar_metadata"]),
            id(loaded_example["lidar_point_cloud"]["lidar_metadata"]),
        )
        self.assertNotEqual(id(TEST_EXAMPLE_FULL["extra_metadata"]), id(loaded_example["extra_metadata"]))
        self.assertNotEqual(id(TEST_EXAMPLE_FULL["array_stringfield"]), id(loaded_example["array_stringfield"]))
    def test_load_columns_required(self) -> None:
        """Loading a subset of columns yields just that subset."""
        subset_example = copy.deepcopy(TEST_EXAMPLE_REQUIRED)
        del subset_example["label"]
        loaded_example = dataloading.load_example(subset_example, TEST_SCHEMA)
        # Assert that values are semantically equal
        self.assertEqual(subset_example, loaded_example)
    def test_load_extra_keys_ignored(self) -> None:
        """Keys not declared in the schema are silently dropped."""
        extra_keys_example = copy.deepcopy(TEST_EXAMPLE_FULL)
        extra_keys_example["extra_key_foo"] = 1
        loaded_example = dataloading.load_example(extra_keys_example, TEST_SCHEMA)
        # Assert that values are semantically equal
        self.assertEqual(TEST_EXAMPLE_FULL, loaded_example)
    def test_load_record_from_list_kv_tuples(self) -> None:
        """Records supplied as lists of (key, value) tuples load as dicts."""
        example = copy.deepcopy(TEST_EXAMPLE_FULL)
        example["lidar_point_cloud"]["lidar_metadata"] = [
            (k, v) for k, v in example["lidar_point_cloud"]["lidar_metadata"].items()
        ]
        example["lidar_point_cloud"] = [(k, v) for k, v in example["lidar_point_cloud"].items()]
        parsed_example = dataloading.load_example(example, TEST_SCHEMA)
        self.assertEqual(parsed_example, TEST_EXAMPLE_FULL)
def test_schema(testcase: unittest.TestCase, schema_to_test: schema.DatasetSchema) -> None:
    """Round-trip helper: dumps -> loads must reproduce *schema_to_test*, and
    the serialized form must be valid JSON and a valid Avro schema."""
    serialized = serialization.dumps(schema_to_test, pretty=True)
    loaded = serialization.loads(serialized)
    # NOTE(review): this re-dumps schema_to_test, so the failure message below
    # compares two identical strings -- dumping `loaded` looks intended; confirm.
    loaded_serialized = serialization.dumps(schema_to_test, pretty=True)
    testcase.assertEqual(loaded, schema_to_test, msg=f"{serialized} vs {loaded_serialized}")
    testcase.assertTrue(json.loads(serialized))
    testcase.assertTrue(avro.schema.parse(serialized))
class TestObjectSchemas(unittest.TestCase):
    """Tests for ObjectField schemas backed by a custom codec (VectorCodec)."""

    # compression_method=12 is arbitrary but recognizable, so round-trips can
    # assert the codec parameters survive serialization.
    OBJECT_FIELD = schema.ObjectField("encoded_vector", VectorCodec(compression_method=12), required=False)
    SCHEMA = schema.DatasetSchema(
        fields=[OBJECT_FIELD, schema.StringField("sample_id")],
        primary_keys=["sample_id"],
    )
    EXAMPLE = {"sample_id": "sample000", "encoded_vector": Vector([1, 2, 3, 4])}
    EXAMPLE_BAD_TYPE = {"sample_id": "sample000", "encoded_vector": [1, 2, 3, 4]}
    EXAMPLE_NONE = {"sample_id": "sample000"}

    def test_serialization(self) -> None:
        """Schema round-trips through dumps/loads and stays valid JSON/Avro."""
        serialized = serialization.dumps(TestObjectSchemas.SCHEMA, pretty=True)
        loaded = serialization.loads(serialized)
        # Fix: serialize the *loaded* schema. The original re-serialized
        # SCHEMA, so the assertion's failure message always compared two
        # identical strings.
        loaded_serialized = serialization.dumps(loaded, pretty=True)
        self.assertEqual(loaded, TestObjectSchemas.SCHEMA, msg=f"{serialized} vs {loaded_serialized}")
        self.assertTrue(json.loads(serialized))
        self.assertTrue(avro.schema.parse(serialized))

    def test_deserialization_unknown_codec(self) -> None:
        """Deserializing with a missing codec fails unless objects are kept as bytes."""
        # Test the case where we want to deserialize a schema and we don't have the necessary codec.
        serialized = serialization.dumps(TestObjectSchemas.SCHEMA, pretty=True)
        serialized = serialized.replace("VectorCodec", "UnknownCodec")
        # By default we should fail
        with self.assertRaises(WickerSchemaException):
            loaded = serialization.loads(serialized)
        # But with the treat_objects_as_bytes=True we should be able to do it.
        loaded = serialization.loads(serialized, treat_objects_as_bytes=True)
        serialized2 = serialization.dumps(loaded, pretty=True)
        # Make sure that if we reserialize the schema that got loaded with treat_objects_as_bytes=True, we get the
        # same thing as the original schema.
        self.assertEqual(serialized, serialized2)

    def test_good_example1(self) -> None:
        """A valid object example parses to bytes and loads back to the codec type."""
        # Check parsing and loading
        parsed_example = dataparsing.parse_example(TestObjectSchemas.EXAMPLE, TestObjectSchemas.SCHEMA)
        self.assertTrue(isinstance(parsed_example["encoded_vector"], bytes))
        loaded_schema = serialization.loads(serialization.dumps(TestObjectSchemas.SCHEMA, pretty=True))
        loaded_example = dataloading.load_example(parsed_example, loaded_schema)
        self.assertEqual(loaded_example, TestObjectSchemas.EXAMPLE)
        assert isinstance(loaded_schema.schema_record.fields[0], schema.ObjectField)  # Make mypy happy
        assert isinstance(loaded_schema.schema_record.fields[0].codec, VectorCodec)
        # Codec parameters must survive the serialization round trip.
        self.assertEqual(loaded_schema.schema_record.fields[0].codec.compression_method, 12)

    def test_example_none(self) -> None:
        """Omitting the optional object field round-trips cleanly."""
        parsed_example = dataparsing.parse_example(TestObjectSchemas.EXAMPLE_NONE, TestObjectSchemas.SCHEMA)
        loaded_example = dataloading.load_example(parsed_example, TestObjectSchemas.SCHEMA)
        self.assertEqual(loaded_example, TestObjectSchemas.EXAMPLE_NONE)

    def test_bad_validation(self) -> None:
        """A raw list where the codec type is expected is rejected."""
        with self.assertRaises(WickerSchemaException):
            dataparsing.parse_example(TestObjectSchemas.EXAMPLE_BAD_TYPE, TestObjectSchemas.SCHEMA)

    def test_loads_bad_l5ml_metatype(self) -> None:
        """An unknown _l5ml_metatype in the serialized schema raises a clear error."""
        with self.assertRaises(WickerSchemaException) as err:
            serialized_bad_type = json.loads(serialization.dumps(TestObjectSchemas.SCHEMA))
            serialized_bad_type["fields"][0]["_l5ml_metatype"] = "BAD_TYPE_123"  # type: ignore
            serialization.loads(json.dumps(serialized_bad_type))
        self.assertIn("Unhandled _l5ml_metatype for avro bytes type: BAD_TYPE_123", str(err.exception))
|
python
|
from oslo_config import cfg
from oslo_log import log as logging
from nca47.common.i18n import _
from nca47.common.i18n import _LI
from nca47.common.exception_zdns import ZdnsErrMessage
from nca47.common.exception import NonExistDevices
from nca47.api.controllers.v1 import tools
import requests
import json
CONF = cfg.CONF
LOG = logging.getLogger(__name__)

# Module-level singleton, populated lazily by fake_dns_driver.get_instance().
DNS_DRIVER = None

# oslo.config options describing how to reach the (fake) ZDNS backend.
ZONES_AGENT_OPTS = [
    cfg.StrOpt('host_ip',
               default='0.0.0.0',
               help=_('The IP address on which nca47-zdns_driver listens.')),
    cfg.PortOpt('port',
                default=20120,
                help=_('The TCP port on which nca47-zdns_driver listens.')),
    cfg.StrOpt('view_id',
               default='telecom',
               help=_('The TCP view_id on which nca47-zdns_driver listens.')),
    cfg.StrOpt('auth_name',
               default='admin',
               help=_('The TCP auth_name on which nca47-zdns_driver'
                      'listens.')),
    cfg.StrOpt('auth_pw',
               default='zdns',
               help=_('The TCP auth_pw on which nca47-zdns_driver listens.')),
]

# NOTE(review): CONF is re-assigned here; this duplicates the assignment at
# the top of the module — confirm before removing either one.
CONF = cfg.CONF
# Register the options under the [zdns] group so they are read from config.
opt_group = cfg.OptGroup(name='zdns',
                         title='Options for the nca47-zdns_driver service')
CONF.register_group(opt_group)
CONF.register_opts(ZONES_AGENT_OPTS, opt_group)
class fake_dns_driver():
    """Fake ZDNS REST driver used for testing.

    Each method builds the same URL the real driver would call — but only for
    logging — and returns a canned response dict; no HTTP request is ever
    issued.
    """

    def __init__(self):
        # Fake endpoint host; the real driver would use CONF.zdns.host_ip.
        self.host = 'https://fake_ip'
        self.port = CONF.zdns.port
        self.view_id = CONF.zdns.view_id
        self.auth_name = CONF.zdns.auth_name
        self.auth_pw = CONF.zdns.auth_pw

    @classmethod
    def get_instance(cls):
        """Return the module-level singleton, creating it on first use."""
        global DNS_DRIVER
        if not DNS_DRIVER:
            DNS_DRIVER = cls()
        return DNS_DRIVER

    # --- zone operations --------------------------------------------------

    def create_zone(self, context, zone):
        # URL is built only for the log line; no request is sent (fake driver).
        url = (self.host + ":" + str(self.port) +
               '/views/' + self.view_id + '/zones')
        LOG.info(_LI("create zones:" + url))
        return {" fake create zone": "success"}

    def update_zone_owners(self, context, zone, zone_id):
        url = (self.host + ":" + str(self.port) + '/views/' +
               self.view_id + '/zones/' + zone_id + '/owners')
        LOG.info(_LI("update_zone_owners:" + url))
        return {"fake update zone owners zone": "success"}

    def update_zone(self, context, zone, zone_id):
        url = (self.host + ":" + str(self.port) +
               '/views/' + self.view_id + '/zones/' + zone_id)
        LOG.info(_LI("update zones :" + url))
        return {"fake update_zone zone": "success"}

    def delete_zone(self, context, zone_id):
        url = (self.host + ":" + str(self.port) + '/views/' + self.view_id +
               '/zones/' + zone_id)
        LOG.info(_LI("delete zones :" + url))
        return {"fake delete_zone zone": "success"}

    # --- resource-record (rrs) operations ---------------------------------

    def create_rrs(self, context, rrs, zone_id):
        url = (str(self.host) + ":" + str(self.port) + '/views/' +
               self.view_id + '/zones/' + str(zone_id) + '/rrs')
        LOG.info(_LI("create rrs:" + url))
        # Canned response shaped like a real ZDNS rrs record.
        res = {
            "fake comment": "", "name": "www.baidu.", "type": "A",
            "ttl": 1200, "state": "",
            "href": "/views/default/zones/www.baidu/rrs/"
                    "www.baidu.$1200$A$MTk4LjIwMi4zOC40OA==",
            "klass": "IN", "rdata": "198.202.38.48",
            "reverse_name": "baidu.www",
            "id": "www.baidu.$1200$A$MTk4LjIwMi4zOC40OA==",
            "is_shared": ""
        }
        return res

    def update_rrs(self, context, rrs, zone_id, rrs_id):
        url = (self.host + ":" + str(self.port) + '/views/' + self.view_id +
               '/zones/' + zone_id + '/rrs/' + rrs_id)
        LOG.info(_LI("update rrs:" + url))
        return {"id": "update_rrs", "ttl": "100",
                "name": "www.baidu.com", "type": "A"}

    def delete_rrs(self, context, zone_id, rrs_id):
        url = (self.host + ":" + str(self.port) + '/views/' + self.view_id +
               '/zones/' + zone_id + '/rrs/' + rrs_id)
        LOG.info(_LI("delete rrs :" + url))
        return {"fake delete_rss": "success"}

    def del_cache(self, context, cache_dic):
        url = (self.host + ":" + str(self.port) + '/cache/clean')
        LOG.info(_LI("delete cache :" + url))
        return {"fake clean cache": "success"}

    def get_zone_one(self, context, zone_id):
        url = (self.host + ":" + str(self.port) +
               '/views/' + self.view_id + '/zones/' + zone_id)
        LOG.info(_LI("view one zone :" + url))
        return {"fake get_zone_one": "success"}

    def get_zones(self, context):
        url = (self.host + ":" + str(self.port) +
               '/views/' + self.view_id + '/zones')
        LOG.info(_LI("view all zone :" + url))
        return {"fake get_zones": "success"}

    def get_rrs(self, context, zone_id):
        url = (self.host + ":" + str(self.port) + '/views/' + self.view_id +
               '/zones/' + zone_id + '/rrs')
        LOG.info(_LI("get_rrs :" + url))
        # Canned paginated listing with two records (an NS and an A record).
        res = {
            "total_size": 2, "page_num": 1,
            "resources":
            [
                {
                    "comment": "", "name": "www.baidu.",
                    "type": "NS", "ttl": 3600, "state": "",
                    "href": "/views/default/zones/www.baidu/rrs/"
                            "www.baidu.$3600$NS$bnMud3d3LmJhaWR1Lg==",
                    "klass": "IN", "rdata": "ns.www.baidu.",
                    "reverse_name": "baidu.www",
                    "id": "www.baidu.$3600$NS$bnMud3d3LmJhaWR1Lg==",
                    "is_shared": ""
                },
                {
                    "comment": "", "name": "ns.www.baidu.",
                    "type": "A", "ttl": 3600, "state": "",
                    "href": "/views/default/zones/www.baidu/rrs/"
                            "ns.www.baidu.$3600$A$MTI3LjAuMC4x",
                    "klass": "IN", "rdata": "127.0.0.1",
                    "reverse_name": "baidu.www.ns",
                    "id": "ns.www.baidu.$3600$A$MTI3LjAuMC4x",
                    "is_shared": ""
                }
            ],
            "page_size": 2
        }
        return res

    # --- GSLB region / member operations ----------------------------------

    def create_region(self, context, region):
        LOG.info(_LI("create regions..."))
        return {"region_id": "123456", "refcnt": "123456"}

    def delete_region(self, context, region):
        LOG.info(_LI("delete regions :"))
        return {"fake delete_region region": "success"}

    def create_member(self, context, member):
        LOG.info(_LI("create members..."))
        return {"id": "member123456"}

    def delete_member(self, context, member):
        LOG.info(_LI("delete members :"))
        return {"fake delete_member member": "success"}

    def create_sp_policy(self, context, policy):
        LOG.info(_LI("create policys..."))
        return {"sp_policy_id": "policy123456"}

    def delete_sp_policy(self, context, policy):
        LOG.info(_LI("delete policys :"))
        return {"fake delete_sp_policy policy": "success"}

    def update_sp_policy(self, context, policy):
        LOG.info(_LI("update policys :"))
        return {"fake update_sp_policy policy": "success"}

    # this is a gmember operation
    def create_gmember(self, context, obj_dic):
        values = ["ip", "port", "enable", "name"]
        driver_dic = tools.input_dic(values, obj_dic)
        gslb_obj = {}
        gslb_obj["gmember_name"] = obj_dic['name']
        gslb_obj["current_user"] = self.auth_name
        # Merge request fields with the gslb payload; 'name' is renamed to
        # 'gmember_name' above, so drop the original key.
        dic = tools.dict_merge(driver_dic, gslb_obj)
        dic.pop('name')
        LOG.info(_LI("create the gmember values with dic format"
                     "is %(json)s of dervice"), {"json": dic})
        url = (self.host + ":" + str(self.port) +
               '/dc/' + obj_dic["gslb_zone_name"] + "/gmember")
        LOG.info(_LI("create gmember url:" + url))
        return {"refcnt": "10", "id": "test_gmember_id"}

    def delete_gmember(self, context, obj_dic):
        gmember_id = obj_dic["gmember_id"]
        driver_dic = {}
        driver_dic["current_user"] = self.auth_name
        url = (self.host + ":" + str(self.port) + '/dc/' +
               obj_dic["gslb_zone_name"] + "/gmember/" + gmember_id)
        return {"result": "successed"}

    def update_gmember(self, context, obj_dic):
        name = obj_dic["gmember_name"]
        gslb_obj = ["enable"]
        driver_dic = tools.input_dic(gslb_obj, obj_dic)
        driver_dic["current_user"] = self.auth_name
        LOG.info(_LI("update the gmember values with dic format\
                     is %(json)s of dervice"),
                 {"json": driver_dic})
        url = (self.host + ":" + str(self.port) + '/dc/' +
               obj_dic["gslb_zone_name"] + "/gmember/" + name)
        return {"update": "successed"}

    # this is a hm_template operation
    def create_hm_template(self, context, obj_dic):
        # NOTE(review): "max_retries" appears twice in this list — one entry
        # looks like a copy/paste slip for another option name; confirm.
        gslb_obj = ["name", "types", "check_interval", "timeout",
                    "max_retries", "max_retries", "sendstring",
                    "recvstring", "username", "password"]
        driver_dic = tools.input_dic(gslb_obj, obj_dic)
        driver_dic["current_user"] = self.auth_name
        LOG.info(_LI("create the hm_template values with dic format"
                     "is %(json)s of dervice"), {"json": driver_dic})
        url = (self.host + ":" + str(self.port) +
               '/hm_template')
        return {"refcnt": "10", "id": "test_hm_template_id"}

    def delete_hm_template(self, context, obj_dic):
        name = obj_dic["hm_template_id"]
        driver_dic = {}
        driver_dic["current_user"] = self.auth_name
        url = (self.host + ":" + str(self.port) + '/hm_template/' +
               name)
        return {"result": "successed"}

    def update_hm_template(self, context, obj_dic):
        name = obj_dic["hm_template_id"]
        # NOTE(review): "max_retries" duplicated here as well — confirm.
        gslb_obj = ["check_interval", "timeout",
                    "max_retries", "max_retries", "sendstring",
                    "recvstring", "username", "password"]
        driver_dic = tools.input_dic(gslb_obj, obj_dic)
        driver_dic["current_user"] = self.auth_name
        driver_dic["username"] = self.auth_name
        driver_dic["password"] = self.auth_pw
        LOG.info(_LI("update the hm_template values with dic format\
                     is %(json)s of dervice"),
                 {"json": driver_dic})
        url = (self.host + ":" + str(self.port) +
               '/hm_template/' + name)
        return {"update": "successed"}

    # --- syngroup operations ----------------------------------------------

    def create_syngroup(self, context, obj_dic):
        gslb_obj = ["name", "dcs", "probe_range", "pass"]
        driver_dic = tools.input_dic(gslb_obj, obj_dic)
        driver_dic["current_user"] = self.auth_name
        LOG.info(_LI("create the syngroup values with dic format\
                     is %(json)s of dervice"),
                 {"json": driver_dic})
        url = (self.host + ":" + str(self.port) +
               '/syngroup')
        # Request plumbing is prepared but unused: this fake never sends it.
        headers = {'Content-type': 'application/json'}
        data = json.dumps(driver_dic)
        auth = (self.auth_name, self.auth_pw)
        LOG.info(_LI("create syngroup url:" + url))
        obj_dic['id'] = obj_dic['name']
        return obj_dic

    def delete_syngroup(self, context, obj_dic):
        name = obj_dic["name"]
        driver_dic = {}
        driver_dic["current_user"] = self.auth_name
        url = (self.host + ":" + str(self.port) + '/syngroup/' +
               name)
        headers = {'Content-type': 'application/json'}
        data = json.dumps(driver_dic)
        auth = (self.auth_name, self.auth_pw)
        LOG.info(_LI("delete syngroup url :" + url))
        obj_dic['id'] = obj_dic['name']
        return obj_dic

    def update_syngroup(self, context, obj_dic):
        name = obj_dic["name"]
        gslb_obj = ["dcs", "probe_range", "pass"]
        driver_dic = tools.input_dic(gslb_obj, obj_dic)
        driver_dic["current_user"] = self.auth_name
        LOG.info(_LI("update the syngroup values with dic format\
                     is %(json)s of dervice"),
                 {"json": driver_dic})
        url = (self.host + ":" + str(self.port) + '/syngroup/' +
               name)
        headers = {'Content-type': 'application/json'}
        data = json.dumps(driver_dic)
        auth = (self.auth_name, self.auth_pw)
        LOG.info(_LI("create syngroup url:" + url))
        obj_dic['id'] = obj_dic['name']
        return obj_dic

    # --- gpool operations ---------------------------------------------------

    def create_gpool(self, context, obj_dic):
        gslb_obj = ["name", "enable", "ttl", "max_addr_ret", "cname",
                    "first_algorithm", "second_algorithm", "fallback_ip",
                    "hms", "pass", "gmember_list", "warning"]
        driver_dic = tools.input_dic(gslb_obj, obj_dic)
        driver_dic["current_user"] = self.auth_name
        LOG.info(_LI("create the gpool values with dic format\
                     is %(json)s of dervice"),
                 {"json": driver_dic})
        url = (self.host + ":" + str(self.port) +
               '/gpool')
        headers = {'Content-type': 'application/json'}
        data = json.dumps(driver_dic)
        auth = (self.auth_name, self.auth_pw)
        LOG.info(_LI("create gpool url:" + url))
        obj_dic['refcnt'] = 12
        obj_dic['id'] = obj_dic['name']
        return obj_dic

    def update_gpool(self, context, obj_dic):
        name = obj_dic["name"]
        gslb_obj = ["enable", "ttl", "max_addr_ret", "cname",
                    "first_algorithm", "second_algorithm", "fallback_ip",
                    "hms", "pass", "gmember_list", "warning"]
        driver_dic = tools.input_dic(gslb_obj, obj_dic)
        driver_dic["current_user"] = self.auth_name
        LOG.info(_LI("update the gpool values with dic format\
                     is %(json)s of dervice"),
                 {"json": driver_dic})
        url = (self.host + ":" + str(self.port) + '/gpool/' +
               name)
        headers = {'Content-type': 'application/json'}
        data = json.dumps(driver_dic)
        auth = (self.auth_name, self.auth_pw)
        LOG.info(_LI("create gpool url:" + url))
        obj_dic['refcnt'] = 12
        obj_dic['id'] = obj_dic['name']
        return obj_dic

    def delete_gpool(self, context, obj_dic):
        name = obj_dic["name"]
        driver_dic = {}
        driver_dic["current_user"] = self.auth_name
        url = (self.host + ":" + str(self.port) + '/gpool/' +
               name)
        headers = {'Content-type': 'application/json'}
        data = json.dumps(driver_dic)
        auth = (self.auth_name, self.auth_pw)
        LOG.info(_LI("delete gpool url :" + url))
        return obj_dic

    # --- gmap operations ----------------------------------------------------

    def create_gmap(self, context, obj_dic):
        gslb_obj = ["name", "enable", "algorithm", "last_resort_pool",
                    "gpool_list"]
        driver_dic = tools.input_dic(gslb_obj, obj_dic)
        driver_dic["current_user"] = self.auth_name
        LOG.info(_LI("create the gmap values with dic format\
                     is %(json)s of dervice"),
                 {"json": driver_dic})
        url = (self.host + ":" + str(self.port) +
               '/gmap')
        headers = {'Content-type': 'application/json'}
        data = json.dumps(driver_dic)
        auth = (self.auth_name, self.auth_pw)
        LOG.info(_LI("create gmap url:" + url))
        obj_dic['id'] = obj_dic['name']
        return obj_dic

    def delete_gmap(self, context, obj_dic):
        name = obj_dic["name"]
        driver_dic = {}
        driver_dic["current_user"] = self.auth_name
        url = (self.host + ":" + str(self.port) + '/gmap/' +
               name)
        headers = {'Content-type': 'application/json'}
        data = json.dumps(driver_dic)
        auth = (self.auth_name, self.auth_pw)
        LOG.info(_LI("delete gmap url :" + url))
        return obj_dic

    def update_gmap(self, context, obj_dic):
        name = obj_dic["name"]
        gslb_obj = ["enable", "algorithm", "last_resort_pool",
                    "gpool_list"]
        driver_dic = tools.input_dic(gslb_obj, obj_dic)
        driver_dic["current_user"] = self.auth_name
        LOG.info(_LI("update the gmap values with dic format\
                     is %(json)s of dervice"),
                 {"json": driver_dic})
        url = (self.host + ":" + str(self.port) + '/gmap/' +
               name)
        headers = {'Content-type': 'application/json'}
        data = json.dumps(driver_dic)
        auth = (self.auth_name, self.auth_pw)
        LOG.info(_LI("create gmap url:" + url))
        return obj_dic
|
python
|
from flask import Flask, render_template, make_response, abort, jsonify, request, url_for
import os
import matplotlib.pyplot as plt
from io import BytesIO
from utils import *
import json
# Flask app serving the TP4 templates and static assets.
app = Flask(__name__, template_folder="tp4/templates", static_folder="tp4/static")
# Working directory captured at import time.
app_dir = os.getcwd()
# SQLite database with the simplified Ensembl (hs63) dump.
db_ensembl = "tp4/data/ensembl_hs63_simple.sqlite"
@app.route("/")
def cover_page():
    """Landing page: list every atlas entry found in the database."""
    return render_template("cover_page.html", atlas_list=get_atlas(db_ensembl))
@app.route("/parts/<part>/genes")
def genes_by_part(part):
    """List the genes expressed in the given atlas part."""
    return render_template("genes_list.html", part=part, genes=get_genes(db_ensembl, part))
@app.route("/genes/<gene_id>")
def gene_page(gene_id):
    """Detail page for one gene: core data, transcripts, and atlas parts."""
    context = {
        "gene_data": get_gene_data(db_ensembl, gene_id),
        "data_transcript": get_transcipts_data(db_ensembl, gene_id),
        "data_part": get_part_by_gene(db_ensembl, gene_id),
    }
    return render_template("gene_page.html", **context)
@app.route("/genes/<gene_id>/parts.png")
def build_hist(gene_id):
    """Render a PNG bar chart of transcript counts per atlas part for *gene_id*.

    Returns the image bytes with an image/png content type.
    """
    data_part = get_part_by_gene(db_ensembl, gene_id)
    count_dict = {}
    for part in data_part:
        transcript_count = get_transcipts_count(db_ensembl, gene_id, part)
        # First row / first column of the query result holds the count.
        count_dict[part] = transcript_count[0][0]
    fig, ax = plt.subplots()
    # Fix: draw on the created axes directly instead of rebinding `ax` to the
    # return of plt.bar() (which silently used the implicit current axes).
    ax.bar(list(count_dict.keys()), list(count_dict.values()))
    fig.autofmt_xdate(rotation=45)
    fig.tight_layout()
    b = BytesIO()
    fig.savefig(b, format="png")
    # Fix: close the figure — without this, every request leaks a live figure
    # inside the long-running server process.
    plt.close(fig)
    resp = make_response(b.getvalue())
    resp.headers['content-type'] = 'image/png'
    return resp
@app.route("/api/genes/<gene_id>", methods=["GET"])
def gene_json(gene_id):
    """Return one gene (with its transcripts) as JSON, or a 404 error payload."""
    try:
        gene = unwrap_gene(db_ensembl, gene_id)
        transcripts_array = unwrap_transcript(db_ensembl, gene_id)
        gene["transcripts"] = transcripts_array
        return jsonify(gene), 200
    # Fix: narrow the bare `except:` — it also swallowed SystemExit and
    # KeyboardInterrupt. Any lookup failure still maps to a 404 response.
    except Exception:
        error = {"error": "This gene doesn't exist in the database."}
        return jsonify(error), 404
@app.route("/api/genes/", methods=["GET"])
def genes_list():
    """Return up to 100 genes as JSON, starting at the optional ?offset= index.

    Each entry carries the gene fields plus its transcript count and API href.
    """
    all_genes = get_all_genes(db_ensembl)
    offset = request.args.get('offset')
    if offset:
        all_genes = all_genes[int(offset):]
    # Fix: the original wrapped this slice in `try/except: pass`, but slicing
    # a sequence never raises — the guard only hid real errors.
    all_genes = all_genes[:100]
    gene_list = []
    for gene in all_genes:
        gene_id = gene[0]
        gene_dict = unwrap_gene(db_ensembl, gene_id)
        transcripts_array = unwrap_transcript(db_ensembl, gene_id)
        gene_dict["transcript_count"] = len(transcripts_array)
        gene_dict["href"] = url_for("gene_json", gene_id=gene_id)
        gene_list.append(gene_dict)
    return jsonify(gene_list)
@app.route("/api/genes/", methods=["POST"])
# curl -X POST -H "Content-Type: application/json" -d @test.json http://127.0.0.1:5000/api/genes/
def gene_post():
    """Validate a posted JSON gene payload: reject any unknown key with a 400."""
    req = request.get_json()
    allowed_keys = ["Ensembl_Gene_ID", "Associated_Gene_Name", "Chromosome_Name", "Band", "Strand", "Gene_End", "Gene_Start"]
    for key in req.keys():
        # Guard clause: the first unexpected key aborts with a 400 error.
        if key not in allowed_keys:
            error = {"error": "la clé {key} n'existe pas".format(key=key)}
            return jsonify(error), 400
    return "test"
|
python
|
# -*- coding: utf-8 -*-
# @Author: JanKinCai
# @Date: 2019-12-26 23:15:03
# @Last Modified by: JanKinCai
# @Last Modified time: 2019-12-28 00:02:51
from interact import interacts
# Interactive prompt definitions consumed by interact.interacts().
config = {
    "ipv4": {
        "type": "string",
        # Fix: the dots must be escaped — an unescaped "." matches any
        # character, so inputs like "192a168b166c12" previously validated.
        "regex": r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$",
        "default": "192.168.166.12",
        "description": "IPv4 address"
    }
}
if __name__ == "__main__":
    # Example session:
    #   IPv4 address [192.168.166.12]: 22
    #   Error: Invalided `22`
    #   IPv4 address [192.168.166.12]: 192.168.166.2
    answers = interacts(config)
    print(answers.ipv4)
|
python
|
# -*- coding: utf-8 -*-
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
CLI logger
"""
import logging
import sys
import os
import io
# Scan raw argv once at import time: any "-v..." flag enables verbose console
# output; any "-x..." flag marks an XML-reporting run (changes what emit()
# buffers and prints — see StreamHandler below).
VERBOSE = any(arg.startswith("-v") for arg in sys.argv)
XML_REPORTING = any(arg.startswith("-x") for arg in sys.argv)
class StreamHandler(logging.StreamHandler):
    # Class-level buffer shared by every instance: formatted records are kept
    # here so Logger.getvalue() can replay the whole session's output.
    _buffer = []

    def emit(self, record):
        """
        Emit a record.
        If a formatter is specified, it is used to format the record.
        The record is then written to the stream with a trailing newline. If
        exception information is present, it is formatted using
        traceback.print_exception and appended to the stream. If the stream
        has an 'encoding' attribute, it is used to determine how to do the
        output to the stream.
        """
        try:
            msg = self.format(record)
            # Buffer everything except DEBUG noise during quiet XML runs.
            if VERBOSE or not XML_REPORTING or record.levelno > logging.DEBUG:
                self._buffer.append(msg)
            stream = self.stream
            # Write to the console only for records above DEBUG, unless -v.
            if VERBOSE or record.levelno > logging.DEBUG:
                stream.write(msg)
                stream.write(self.terminator)
                self.flush()
        except Exception:
            self.handleError(record)
class Formatter(logging.Formatter):
    """This class inherits from the logging.Formatter class and is a custom
    formatter used to enable different formats based on log level.
    """

    fmt = "%(asctime)s - %(levelname)s - %(message)s"
    datefmt = "%m-%d-%Y %H:%M:%S"

    # Bare-message format used for RESULT records. The logging module changed
    # its internal style representation across versions, hence the shims.
    # NOTE(review): logging._STYLES is a private API — verify on upgrades.
    if sys.version_info < (3, 0, 0):
        result_fmt = "%(message)s"
    elif sys.version_info < (3, 3, 4):
        result_fmt = logging._STYLES['{']("{message}")
    else:
        result_fmt = logging._STYLES['{'][0]("{message}")

    # Custom numeric level for plain "result" output (see Logger.result()).
    RESULT = 70

    def __init__(self, fmt=None):
        # The `fmt` parameter is accepted but ignored; the class-level format
        # and date format are always used.
        logging.Formatter.__init__(self, fmt=Formatter.fmt, datefmt=Formatter.datefmt)

    def format(self, record):
        # Temporarily swap in the bare-message format for RESULT records,
        # then restore the original so other records are unaffected.
        if sys.version_info < (3, 0, 0):
            format_orig = self._fmt
        else:
            format_orig = self._style
        if record.levelno == Formatter.RESULT:
            if sys.version_info < (3, 0, 0):
                self._fmt = Formatter.result_fmt
            else:
                self._style = Formatter.result_fmt
        result = logging.Formatter.format(self, record)
        if sys.version_info < (3, 0, 0):
            self._fmt = format_orig
        else:
            self._style = format_orig
        return result
class Logger(io.TextIOBase):
    """This is a wrapper for Python's logging class that simplifies the setup and teardown of logging.
    This is a singleton design, meaning that only one logger is ever instantiated, even if the constructor is
    used in multiple places.
    The Logger class extends the TextIOBase class and can be used as an output stream
    for redirection. This is used elsewhere to reassign stdout and stderr to the logger
    object. This allows the logger to capture ``print()`` statements.
    """

    def write(self, *args, **kwargs):
        # Stream interface: route writes through the RESULT level.
        self.__print(*args, **kwargs)
        self.flush()

    def flush(self, *args, **kwargs):
        Logger.sh.flush()

    def read(self, *args, **kwargs):
        # Delegate reads to the real stdin so interactive input still works.
        return sys.__stdin__.read(*args, **kwargs)

    def readline(self, *args, **kwargs):
        return sys.__stdin__.readline(*args, **kwargs)

    def getvalue(self):
        # Everything the shared StreamHandler buffered during this session.
        return "".join(Logger.sh._buffer)

    # Singleton bookkeeping and stream configuration.
    instances = 0
    logger = None
    filename = "ucs_cli"
    term = "\n"
    encoding = "utf8"

    # Custom level values registered in __init__ via logging.addLevelName().
    SUCCESS_LEVEL = 60
    RESULT_LEVEL = 70

    def __print(self, message):
        Logger.logger.log(70, message)

    def __init__(self, level="NOTSET"):
        """Creates an internal logger object from the logging library.
        **USAGE**
        >>> lgr = Logger()
        >>> lgr = Logger("DEBUG")
        :param level: The logging level which determines what messages get output.
        The default is "NOTSET" which enables all messages.
        :type level: String
        :returns: Logger object
        :rtype: Logger
        """
        if Logger.instances < 1:
            path = os.getenv("HOME")
            Logger.logger = logging.getLogger('')
            self.setLevel(level)
            Logger.formatter = Formatter()
            Logger.sh = StreamHandler(sys.stdout)
            Logger.sh.terminator = ""
            Logger.fhLog = logging.FileHandler("{0}/{1}.log".format(path, Logger.filename), mode='a')
            Logger.fhLog.terminator = ""
            Logger.sh.setFormatter(Logger.formatter)
            Logger.fhLog.setFormatter(Logger.formatter)
            Logger.logger.addHandler(Logger.sh)
            Logger.logger.addHandler(Logger.fhLog)
            logging.addLevelName(Logger.SUCCESS_LEVEL, "SUCCESS")
            logging.addLevelName(Logger.RESULT_LEVEL, "RESULT")
        Logger.instances += 1

    def setLevel(self, level):
        """Sets the logging level
        The level determines what messages get output
        The levels are:
        ============ ======
        **Level**    **# val**
        ------------ ------
        RESULT       70
        SUCCESS      60
        ERROR        40
        WARNING      30
        INFO         20
        DEBUG        10
        NOTSET       0
        ============ ======
        :param level: The logging level minimum to output
        :type level: String
        :returns: Nothing
        :rtype: None
        """
        level = level.upper()
        # Fix: the original referenced logging.SUCCESS / logging.RESULT, which
        # do not exist in the logging module and raised AttributeError for
        # those level names. Map them to the numeric values this class
        # registers with logging.addLevelName() in __init__.
        levels = {
            "DEBUG": logging.DEBUG,
            "INFO": logging.INFO,
            "WARNING": logging.WARNING,
            "ERROR": logging.ERROR,
            "SUCCESS": Logger.SUCCESS_LEVEL,
            "RESULT": Logger.RESULT_LEVEL,
        }
        Logger.logger.setLevel(levels.get(level, logging.NOTSET))

    def tearDown(self):
        """Safely deletes the logger object"""
        Logger.logger.removeHandler(Logger.sh)
        Logger.sh.flush()
        Logger.sh.close()
        Logger.instances -= 1

    def info(self, message):
        """Logs a message at the info level
        **USAGE**
        >>> lgr = Logger()
        >>> lgr.info("Your message here")
        01-15-2016 15:13:22 - INFO - Your message here
        :param message: The message to be logged
        :type message: String
        """
        message = str(message)
        Logger.logger.info(message+Logger.term)

    def debug(self, message):
        """Logs a message at the debug level
        **USAGE**
        >>> lgr = Logger()
        >>> lgr.debug("Your message here")
        01-15-2016 15:14:20 - DEBUG - Your message here
        :param message: The message to be logged
        :type message: String
        """
        message = str(message)
        Logger.logger.debug(message+Logger.term)

    def warning(self, message):
        """Logs a message at the warning level using the non-deprecated warning() method
        Args:
            message (str): The message to be logged
        """
        message = str(message)
        Logger.logger.warning(message+Logger.term)

    def error(self, message):
        """Logs a message at the error level
        **USAGE**
        >>> lgr = Logger()
        >>> lgr.error("Your message here")
        01-15-2016 15:16:24 - ERROR - Your message here
        :param message: The message to be logged
        :type message: String
        """
        message = str(message)
        Logger.logger.error(message+Logger.term)

    def exception(self, message):
        """Logs a message and prints full traceback at the error level.
        Leverages the python logging class' automatic traceback printing functionality when logging.exception is
        called in an except clause.
        Args:
            message (str): User message to be logged before traceback
        """
        message = str(message)
        Logger.logger.exception(message+Logger.term)

    def log(self, level, message):
        """Logs a message at the level passed to it
        **USAGE**
        >>> lgr = Logger()
        >>> lgr.log(9001, "His log level is over 9000!")
        01-15-2016 15:18:10 - Level 9001 - His log level is over 9000!
        :param level: The log level of the message
        :type level: Integer
        :param message: The message to be logged
        :type message: String
        """
        message = str(message)
        Logger.logger.log(level, message+Logger.term)

    def success(self, message):
        """Logs a message at the success level
        **USAGE**
        >>> lgr = Logger()
        >>> lgr.success("Your message here")
        01-15-2016 15:19:16 - SUCCESS - Your message here
        :param message: The message to be logged
        :type message: String
        """
        message = str(message)
        Logger.logger.log(60, message+Logger.term)

    def result(self, message):
        """Logs a message at the result level. The alternate
        name for this function is log_print. They are mapped
        to each other.
        **USAGE**
        >>> lgr = Logger()
        >>> lgr.result("Your message here")
        Your message here
        >>> lgr.log_print("Your message here")
        Your message here
        :param message: The message to be logged
        :type message: String
        """
        message = str(message)
        Logger.logger.log(70, message+Logger.term)

    # Alias kept for backward compatibility with existing callers.
    log_print = result
|
python
|
#
# WSGI entry point for RD
#
from rdr_service.main import app as application
if __name__ == "__main__":
    # Local development entry point; production WSGI servers import
    # `application` directly and never execute this branch.
    application.run()
|
python
|
import logging
from spaceone.core.base import CoreObject
from spaceone.core.transaction import Transaction
_LOGGER = logging.getLogger(__name__)
class BaseConnector(CoreObject):
    """Base class for connectors: stores config, wires optional method
    interceptors declared in config, and exposes extra kwargs as attributes.
    """

    def __init__(self, transaction: Transaction = None, config: dict = None, **kwargs):
        super().__init__(transaction=transaction)
        self.config = config or {}
        self._load_interceptors()
        # Every extra keyword argument becomes an instance attribute as-is.
        for key, value in kwargs.items():
            setattr(self, key, value)

    def _load_interceptors(self):
        # Config shape: {'interceptors': [{'backend': 'pkg.mod.func',
        # 'method': 'method_name', 'options': {...}}, ...]}.
        # Entries missing 'backend' or 'method' are silently skipped.
        interceptors = self.config.get('interceptors', [])
        for interceptor in interceptors:
            backend = interceptor.get('backend')
            method = interceptor.get('method')
            options = interceptor.get('options', {})
            if backend and method:
                self._set_interceptor(backend, method, options)

    def _set_interceptor(self, backend, method, options):
        # Wrap self.<method> with the backend factory:
        # new_method = factory(original_method, options). Any failure is
        # logged and the original method is left untouched.
        try:
            interceptor_module, func_name = self._get_interceptor_module(backend)
            if hasattr(self, method):
                interceptor_func = getattr(self, method)
                setattr(self, method,
                        getattr(interceptor_module, func_name)(interceptor_func, options))
        except Exception as e:
            _LOGGER.error(f'{self.__class__.__name__} Interceptor Load Error: {e}')

    @staticmethod
    def _get_interceptor_module(backend):
        # Split 'pkg.mod.func' into the imported module and the factory name.
        module_name, func_name = backend.rsplit('.', 1)
        interceptor_module = __import__(module_name, fromlist=[func_name])
        return interceptor_module, func_name
|
python
|
import string
from api.create_sync_video_job import UAICensorCreateSyncVideoJobApi
from api.create_async_video_job import UAICensorCreateAsyncVideoJobApi
from operation.utils import parse_unrequired_args
from operation.base_datastream_operation import UAICensorBaseDatastreamOperation
class UAICensorCreateVideoJobOp(UAICensorBaseDatastreamOperation):
    """CLI operation that submits a video censor job, sync or async."""

    def __init__(self, parser):
        super(UAICensorCreateVideoJobOp, self).__init__(parser)

    def _add_video_job_args(self, parser):
        """Register the video-job specific command line arguments."""
        args_parser = parser.add_argument_group(
            'Video-Params', 'Video Censor Job Parameters'
        )
        args_parser.add_argument(
            '--type',
            type=str,
            required=True,
            choices=['sync', 'async'],
            help='Censor type for current job, '
                 'choose from "sync, async"'
        )
        args_parser.add_argument(
            '--scenes',
            type=str,
            required=True,
            help='Scenes for current job, '
                 'choose from "porn, politician, terror",'
                 'join with "," if with more than one scene'
        )
        args_parser.add_argument(
            '--url',
            type=str,
            required=True,
            help='Url of censor video'
        )
        args_parser.add_argument(
            '--interval',
            type=int,
            required=False,
            default=25,
            help='Frame interval for current video, default is 25'
        )
        args_parser.add_argument(
            '--callback',
            type=str,
            required=False,
            help='User callback url'
        )

    def _parse_video_job_args(self, args):
        """Extract and normalize the video-job arguments into attributes."""
        self.censor_type = args['type']
        scenes = args['scenes']
        # Fix: string.split(scenes, ",") only exists on Python 2 (the
        # function form was removed from the string module in Python 3).
        # The str method is equivalent on both versions.
        self.scenes = scenes.split(",")
        self.url = args['url']
        self.interval = args['interval']
        self.callback = parse_unrequired_args(args, 'callback')

    def _add_args(self):
        super(UAICensorCreateVideoJobOp, self)._add_args()
        self._add_video_job_args(self.parser)

    def _parse_args(self, args):
        super(UAICensorCreateVideoJobOp, self)._parse_args(args)
        self._parse_video_job_args(args)

    def cmd_run(self, args):
        """Parse the CLI args and call the sync or async job API."""
        self._parse_args(args)
        if self.censor_type == "sync":
            caller = UAICensorCreateSyncVideoJobApi(signature=self.signature,
                                                    public_key=self.public_key,
                                                    resource_id=self.resource_id,
                                                    timestamp=self.timestamp,
                                                    scenes=self.scenes,
                                                    url=self.url,
                                                    interval=self.interval)
        else:
            caller = UAICensorCreateAsyncVideoJobApi(signature=self.signature,
                                                     public_key=self.public_key,
                                                     resource_id=self.resource_id,
                                                     timestamp=self.timestamp,
                                                     scenes=self.scenes,
                                                     url=self.url,
                                                     interval=self.interval,
                                                     callback=self.callback)
        return caller.call_api()
|
python
|
# -*- coding: utf-8 -*-
from dart_fss import api, auth, corp, errors, filings, fs, utils, xbrl
from dart_fss.auth import set_api_key, get_api_key
from dart_fss.corp import get_corp_list
from dart_fss.filings import search
from dart_fss.fs import extract
from dart_fss.xbrl import get_xbrl_from_file
# Public API of the dart_fss package.
__all__ = [
    'api',
    'auth', 'set_api_key', 'get_api_key',
    'corp', 'get_corp_list',
    'errors',
    'filings', 'search',
    'fs', 'extract',
    'utils',
    'xbrl', 'get_xbrl_from_file'
]

# Version string is produced by the generated _version module (versioneer
# layout); the helper is removed from the namespace after use.
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
|
python
|
from random import randint as rand

# ASCII codes of the three supported operators: 42 '*', 43 '+', 45 '-'.
Map = {0:42, 1:43, 2:45}
# Random expression size; nesting rand() biases n towards small values.
n = rand(0, rand(0, int(input())))
with open("in", "w+") as f:
    # Always start with one digit so the expression has an operand first.
    f.write(chr(rand(48, 57)))
    # i counts digits written inside the loop (total digits = i + 1),
    # operators counts operators written so far.
    i, operators = 0, 0
    while i < n:
        if i == operators:
            # Writing an operator now would make operators exceed the
            # available operands, so force a digit to keep the postfix
            # (RPN) expression valid at every prefix.
            f.write(chr(rand(48, 57)))
            i += 1
            continue
        op = rand(0, 1)
        if op:
            f.write(chr(Map[rand(0, 2)]))
            operators += 1
        else:
            f.write(chr(rand(48, 57)))
            i += 1
    # Pad with operators until there are exactly n of them; with n + 1
    # digits this yields a complete postfix expression.
    while operators < n:
        f.write(chr(Map[rand(0, 2)]))
        operators += 1
# Evaluate the generated postfix expression with a stack.
stack = []
with open("in", "r") as f:
    for c in f.readline():
        char = ord(c)
        if char < 48:
            # Operator: pop the right operand, apply in place to the left.
            a = stack.pop()
            if char == 42:
                stack[-1] *= a
            elif char == 43:
                stack[-1] += a
            else:
                stack[-1] -= a
        else:
            # Digit character: push its numeric value.
            stack.append(char - 48)
print(stack[0])
|
python
|
#!/usr/bin/env python
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2008-2020 German Aerospace Center (DLR) and others.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file Assignment.py
# @author Jakob Erdmann
# @author Yun-Pang Floetteroed
# @author Michael Behrisch
# @date 2008-03-28
"""
This script is for executing the traffic assignment.
Three assignment models are available:
- incremental
- c-logit
- lohse
The c-logit model are set as default.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import datetime
import math
import operator
from xml.sax import make_parser
from optparse import OptionParser
sys.path.append(os.path.join(os.environ["SUMO_HOME"], 'tools'))
import sumolib.net # noqa
from network import Net, DistrictsReader, ExtraSignalInformationReader # noqa
from dijkstra import dijkstraBoost, dijkstraPlain, dijkstra # noqa
from inputs import getMatrix, getConnectionTravelTime # noqa
from outputs import timeForInput, outputODZone, outputNetwork, outputStatistics, sortedVehOutput, linkChoicesOutput # noqa
from assign import doSUEAssign, doLohseStopCheck, doSUEVehAssign, doIncAssign # noqa
from tables import updateCurveTable # noqa
def initLinkChoiceMap(net, startVertices, endVertices, matrixPshort, linkChoiceMap, odPairsMap):
    """Number the OD pairs with positive demand and seed the link-choice map.

    Each origin/destination pair whose labels differ and whose short-trip
    demand is positive gets a 1-based index, recorded in odPairsMap
    (origin label -> destination label -> index).  Every detected edge then
    receives an entry in linkChoiceMap keyed by the final pair count and
    initialised to zero.

    Returns the (mutated) linkChoiceMap; odPairsMap is mutated in place.
    """
    pair_index = 0
    for row, origin in enumerate(startVertices):
        odPairsMap[origin.label] = {}
        for col, destination in enumerate(endVertices):
            has_demand = matrixPshort[row][col] > 0.
            if origin.label != destination.label and has_demand:
                pair_index += 1
                odPairsMap[origin.label][destination.label] = pair_index
    for detected_edge in net._detectedEdges:
        # setdefault replaces the original "if not in: create" dance.
        linkChoiceMap.setdefault(detected_edge.detected, {})[pair_index] = 0.
    return linkChoiceMap
def main():
    """Run the traffic assignment (incremental, c-logit or lohse).

    Reads the network, districts and OD matrices named in the module-level
    `options`, executes the chosen assignment model per matrix, and writes
    routes (routes.rou.xml) or link-choice output plus a '<type>_log.txt'
    log file.
    """
    # for measuring the required time for reading input files
    inputreaderstart = datetime.datetime.now()
    foutlog = open('%s_log.txt' % options.type, 'w')
    foutlog.write(
        'The stochastic user equilibrium traffic assignment will be executed with the %s model.\n' % options.type)
    foutlog.write(
        'All vehicular releasing times are determined randomly(uniform).\n')
    matrices = options.mtxpsfile.split(",")
    parser = make_parser()
    if options.verbose:
        print("Reading net")
        print('net file:', options.netfile)
    net = Net()
    sumolib.net.readNet(options.netfile, net=net)
    parser.setContentHandler(DistrictsReader(net))
    parser.parse(options.confile)
    if options.sigfile:
        parser.setContentHandler(ExtraSignalInformationReader(net))
        parser.parse(options.sigfile)
    foutlog.write('- Reading network: done.\n')
    foutlog.write('number of total startVertices:%s\n' % net.getstartCounts())
    foutlog.write('number of total endVertices:%s\n' % net.getendCounts())
    if options.verbose:
        print(net.getfullEdgeCounts(), "edges read (internal edges included)")
    if options.curvefile:
        updateCurveTable(options.curvefile)
    # NOTE(review): a 24h analysis period is capped to 16 assignment
    # hours here — presumably to exclude low-traffic night hours; confirm.
    if options.hours == 24.:
        assignHours = 16.
    else:
        assignHours = options.hours
    for edge in net.getEdges():
        if edge._lanes:
            edge.getCapacity()
            edge.getAdjustedCapacity(net)
            # Scale hourly capacity to the whole assignment period.
            edge.estcapacity *= assignHours
            edge.getConflictLink()
    if options.dijkstra == 'boost':
        net.createBoostGraph()
    if options.verbose:
        print("after link reduction:", net.getfullEdgeCounts(), "edges read")
    # calculate link travel time for all district connectors
    getConnectionTravelTime(net._startVertices, net._endVertices)
    foutlog.write('- Initial calculation of link parameters : done.\n')
    # the required time for reading the network
    timeForInput(inputreaderstart)
    if options.debug:
        outputNetwork(net)
    # initialize the map for recording the number of the assigned vehicles
    AssignedVeh = {}
    # initialize the map for recording the number of the assigned trips
    AssignedTrip = {}
    smallDemand = []
    linkChoiceMap = {}
    odPairsMap = {}
    for start, startVertex in enumerate(net._startVertices):
        AssignedVeh[startVertex] = {}
        AssignedTrip[startVertex] = {}
        smallDemand.append([])
        for end, endVertex in enumerate(net._endVertices):
            AssignedVeh[startVertex][endVertex] = 0
            AssignedTrip[startVertex][endVertex] = 0.
            smallDemand[-1].append(0.)
    # initialization
    vehID = 0
    matrixSum = 0.0
    lohse = (options.type == "lohse")
    incremental = (options.type == "incremental")
    checkKPaths = False
    if not incremental and options.kPaths > 1:
        checkKPaths = True
    if not incremental:
        net.initialPathSet()
    starttime = datetime.datetime.now()
    # initialize the file for recording the routes
    if options.odestimation:
        net.getDetectedEdges(options.outputdir)
    else:
        foutroute = open('routes.rou.xml', 'w')
        print('<?xml version="1.0"?>\n<!-- generated on %s by $Id: Assignment.py v1_3_1+0411-36956f96df [email protected] 2019-01-23 11:12:48 +0000 $ -->\n<routes>' % starttime, file=foutroute) # noqa
    # for counter in range (0, len(matrices)):
    # One assignment round per OD matrix (time interval).
    for counter, matrix in enumerate(matrices):
        # delete all vehicle information related to the last matrix for saving
        # the disk space
        vehicles = []
        iterInterval = 0
        matrixPshort, startVertices, endVertices, CurrentMatrixSum, begintime, assignPeriod, Pshort_EffCells, \
            matrixSum, smallDemandRatio = getMatrix(net, options.verbose, matrix, matrixSum, options.demandscale)
        options.hours = float(assignPeriod)
        # Small OD demands (< 1 veh) are only assigned every
        # iterInterval-th iteration, spread over smallDemandPortion slots.
        smallDemandPortion = math.ceil(
            float(options.maxiteration) / 2. * smallDemandRatio)
        if float(smallDemandPortion) != 0.:
            iterInterval = math.ceil(
                float(options.maxiteration) / float(smallDemandPortion))
        departtime = begintime * 3600
        if options.verbose:
            print('the analyzed matrices:', counter)
            print('Begintime:', begintime, "O'Clock")
            print('departtime', departtime)
            print('Matrix und OD Zone already read for Interval', counter)
            print('CurrentMatrixSum:', CurrentMatrixSum)
        foutlog.write('Reading matrix and O-D zones: done.\n')
        foutlog.write(
            'Matrix und OD Zone already read for Interval:%s\n' % counter)
        foutlog.write('CurrentMatrixSum:%s\n' % CurrentMatrixSum)
        foutlog.write('number of current startVertices:%s\n' %
                      len(startVertices))
        foutlog.write('number of current endVertices:%s\n' % len(endVertices))
        if options.odestimation:
            linkChoiceMap.clear()
            odPairsMap.clear()
            linkChoiceMap = initLinkChoiceMap(
                net, startVertices, endVertices, matrixPshort, linkChoiceMap, odPairsMap)
        # Reset per-edge flow/time state before this interval's assignment.
        for edge in net.getEdges():
            edge.flow = 0.
            edge.helpflow = 0.
            edge.actualtime = edge.freeflowtime
            edge.helpacttime = edge.freeflowtime
            edge.fTT = 0.
            edge.TT = 0.
            edge.delta = 0.
            edge.helpacttimeEx = 0.
        # the number of origins, the umber of destinations and the number of
        # the OD pairs
        # NOTE(review): these two len() calls are no-ops (results unused);
        # likely leftovers from removed debug output.
        len(startVertices)
        len(endVertices)
        # output the origin and destination zones and the number of effective
        # OD pairs
        if options.debug:
            # matrixCounter)
            outputODZone(startVertices, endVertices, Pshort_EffCells, counter)
        if incremental:
            print('begin the incremental assignment!')
            # NOTE: 'iter' shadows the builtin; kept as-is.
            iter = 0
            options.lamda = 0.
            while iter < options.maxiteration:
                foutlog.write(
                    '- Current iteration(not executed yet):%s\n' % iter)
                iter += 1
                if iterInterval != 0 and operator.mod(iter, iterInterval) == 0:
                    assignSmallDemand = True
                else:
                    assignSmallDemand = False
                for start, startVertex in enumerate(startVertices):
                    targets = set()
                    for end, endVertex in enumerate(endVertices):
                        if assignSmallDemand and matrixPshort[start][end] > 0. and matrixPshort[start][end] < 1.:
                            smallDemand[start][end] = matrixPshort[
                                start][end] / float(smallDemandPortion)
                        if matrixPshort[start][end] > 1. or (assignSmallDemand and smallDemand[start][end] > 0.):
                            targets.add(endVertex)
                    if len(targets) > 0:
                        # Shortest-path tree from this origin with the
                        # selected dijkstra implementation.
                        if options.dijkstra == 'boost':
                            D, P = dijkstraBoost(
                                net._boostGraph, startVertex.boost)
                        elif options.dijkstra == 'plain':
                            D, P = dijkstraPlain(startVertex, targets)
                        elif options.dijkstra == 'extend':
                            D, P = dijkstra(startVertex, targets)
                        vehID, smallDemand, linkChoiceMap = doIncAssign(
                            net, vehicles, options.verbose, options.maxiteration, options.odestimation,
                            endVertices, start, startVertex, matrixPshort, smallDemand,
                            D, P, AssignedVeh, AssignedTrip, vehID, assignSmallDemand, linkChoiceMap, odPairsMap)
                if options.dijkstra != 'extend':
                    linkMap = net._fullEdges
                else:
                    linkMap = net._edges
                # NOTE(review): itervalues() is Python-2-only; this module
                # appears to predate a full Python 3 port.
                for edge in linkMap.itervalues():
                    edge.getActualTravelTime(options, False)
                    if options.dijkstra == 'boost':
                        edge.boost.weight = edge.helpacttime
        else:
            print('begin the', options.type, " assignment!")
            # initialization for the clogit and the lohse assignment model
            iter_outside = 1
            newRoutes = 1
            stable = False
            first = True
            # begin the traffic Assignment
            while newRoutes > 0:
                foutlog.write('- SUE iteration:%s\n' % iter_outside)
                # Generate the effective routes als intital path solutions,
                # when considering k shortest paths (k is defined by the user.)
                if checkKPaths:
                    checkPathStart = datetime.datetime.now()
                    newRoutes = net.calcKPaths(
                        options.verbose, options.kPaths, newRoutes, startVertices, endVertices, matrixPshort,
                        options.gamma)
                    checkPathEnd = datetime.datetime.now() - checkPathStart
                    foutlog.write(
                        '- Time for finding the k-shortest paths: %s\n' % checkPathEnd)
                    foutlog.write(
                        '- Finding the k-shortest paths for each OD pair: done.\n')
                    if options.verbose:
                        print('iter_outside:', iter_outside)
                        print('number of k shortest paths:', options.kPaths)
                        print('number of new routes:', newRoutes)
                elif not checkKPaths and iter_outside == 1 and counter == 0:
                    print('search for the new path')
                    newRoutes = net.findNewPath(
                        startVertices, endVertices, newRoutes, matrixPshort, options.gamma, lohse, options.dijkstra)
                # k-shortest-path seeding happens at most once.
                checkKPaths = False
                if options.verbose:
                    print('iter_outside:', iter_outside)
                    print('number of new routes:', newRoutes)
                stable = False
                iter_inside = 1
                # Inner loop: iterate flows until the SUE is stable.
                while not stable:
                    if options.verbose:
                        print('iter_inside:', iter_inside)
                    stable = doSUEAssign(
                        net, options, startVertices, endVertices, matrixPshort, iter_inside, lohse, first)
                    # The matrixPlong and the matrixTruck should be added when
                    # considering the long-distance trips and the truck trips.
                    if lohse:
                        stable = doLohseStopCheck(
                            net, options, stable, iter_inside, options.maxiteration, foutlog)
                    iter_inside += 1
                    if options.verbose:
                        print('stable:', stable)
                newRoutes = net.findNewPath(
                    startVertices, endVertices, newRoutes, matrixPshort, options.gamma, lohse, options.dijkstra)
                first = False
                iter_outside += 1
                # Stop early when few new routes appear late in the run.
                if newRoutes < 3 and iter_outside > int((options.maxiteration) / 2):
                    newRoutes = 0
                if iter_outside > options.maxiteration:
                    print('The max. number of iterations is reached!')
                    foutlog.write(
                        'The max. number of iterations is reached!\n')
                    foutlog.write(
                        'The number of new routes and the parameter stable will be set to zero and ' +
                        'True respectively.\n')
                    print('newRoutes:', newRoutes)
                    stable = True
                    newRoutes = 0
            # update the path choice probability and the path flows as well as
            # generate vehicle data
            vehID = doSUEVehAssign(net, vehicles, options, counter, matrixPshort,
                                   startVertices, endVertices, AssignedVeh, AssignedTrip, vehID, lohse)
        # output the generated vehicular releasing times and routes, based on
        # the current matrix
        print('done with the assignment')  # debug
        if options.odestimation:
            linkChoicesOutput(net, startVertices, endVertices, matrixPshort,
                              linkChoiceMap, odPairsMap, options.outputdir, starttime)
        else:
            sortedVehOutput(vehicles, departtime, options, foutroute)
    if not options.odestimation:
        foutroute.write('</routes>\n')
        foutroute.close()
    # output the global performance indices
    assigntime = outputStatistics(net, starttime, len(matrices))
    foutlog.write(
        '- Assignment is completed and all required information is generated. ')
    foutlog.close()
    if options.verbose:
        print('Duration for traffic assignment:', assigntime)
        print('Total assigned vehicles:', vehID)
        print('Total number of the assigned trips:', matrixSum)
# Command-line interface: options for input files, assignment model
# parameters and runtime behaviour.
optParser = OptionParser()
optParser.add_option("-m", "--matrix-file", dest="mtxpsfile",
                     help="read OD matrix for passenger vehicles from FILE (mandatory)", metavar="FILE")
optParser.add_option("-G", "--globalmatrix-file", dest="glbmtxfile",
                     help="read daily OD matrix for passenger vehicles from FILE (mandatory)", metavar="FILE")
optParser.add_option("-n", "--net-file", dest="netfile",
                     help="read SUMO network from FILE (mandatory)", metavar="FILE")
optParser.add_option("-d", "--district-file", dest="confile",
                     help="read OD Zones from FILE (mandatory)", metavar="FILE")
optParser.add_option("-s", "--extrasignal-file", dest="sigfile",
                     help="read extra/updated signal timing plans from FILE", metavar="FILE")
optParser.add_option("-u", "--crCurve-file", dest="curvefile",
                     help="read parameters used in cost functions from FILE", metavar="FILE")
optParser.add_option("-k", "--k-shortest-paths", dest="kPaths", type="int",
                     default=8, help="number of the paths should be found at the first iteration")
optParser.add_option("-i", "--max-sue-iteration", dest="maxiteration", type="int",
                     default=20, help="maximum number of the assignment iterations")
optParser.add_option("-t", "--sue-tolerance", dest="sueTolerance", type="float",
                     default=0.001, help="difference tolerance for the convergence in the c-logit model")
optParser.add_option("-a", "--alpha", dest="alpha", type="float",
                     default=0.15, help="alpha value to determine the commonality factor")
optParser.add_option("-g", "--gamma", dest="gamma", type="float",
                     default=1., help="gamma value to determine the commonality factor")
optParser.add_option("-l", "--lambda", dest="lamda", type="float",
                     default=0.3, help="lambda value to determine the penalty time due to queue")
optParser.add_option("-U", "--under-value", dest="under", type="float",
                     default=0.15, help="parameter 'under' to determine auxiliary link cost")
optParser.add_option("-p", "--upper-value", dest="upper", type="float",
                     default=0.5, help="parameter 'upper' to determine auxiliary link cost")
optParser.add_option("-X", "--parameter-1", dest="v1", type="float",
                     default=2.5, help="parameter 'v1' to determine auxiliary link cost in the lohse model")
optParser.add_option("-y", "--parameter-2", dest="v2", type="float",
                     default=4., help="parameter 'v2' to determine auxiliary link cost in the lohse model")
optParser.add_option("-z", "--parameter-3", dest="v3", type="float",
                     default=0.002, help="parameter 'v3' to determine auxiliary link cost in the lohse model")
optParser.add_option("-c", "--convergence-parameter-1", dest="cvg1", type="float",
                     default=1., help="parameter 'cvg1' to calculate the convergence value in the lohse model")
optParser.add_option("-o", "--convergence-parameter-2", dest="cvg2", type="float",
                     default=1., help="parameter 'cvg2' to calculate the convergence value in the lohse model")
optParser.add_option("-q", "--convergence-parameter-3", dest="cvg3", type="float",
                     default=10., help="parameter 'cvg3' to calculate the convergence value in the lohse model")
optParser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                     default=False, help="tell me what you are doing")
optParser.add_option("-b", "--debug", action="store_true", dest="debug",
                     default=False, help="debug the program")
optParser.add_option("-e", "--type", dest="type", type="choice",
                     choices=('clogit', 'lohse', 'incremental'),
                     default="clogit", help="type of assignment [default: %default]")
optParser.add_option("-H", "--hours", dest="hours", type="float",
                     default=1., help="the analysing period(hours)")
optParser.add_option("-r", "--profile", action="store_true", dest="profile",
                     default=False, help="writing profiling info")
optParser.add_option("-+", "--dijkstra", dest="dijkstra", type="choice",
                     choices=('extend', 'plain', 'boost'),
                     default="plain", help="use penalty, plain(original) or boost in dijkstra implementation " +
                                           "[default: %default]")
optParser.add_option("-x", "--odestimation", action="store_true", dest="odestimation",
                     default=False, help="generate trips for OD estimation")
optParser.add_option("-f", "--scale-factor", dest="demandscale",
                     type="float", default=1., help="scale demand by ")
optParser.add_option("-O", "--output-dir", dest="outputdir",
                     default=os.getcwd(), help="define the output directory name and path")
(options, args) = optParser.parse_args()
# The three mandatory inputs must all be present.
if not options.netfile or not options.confile or not options.mtxpsfile:
    optParser.print_help()
    sys.exit()
if options.profile:
    # hotshot is a Python-2-only profiler; imported lazily so normal runs
    # do not require it.
    import hotshot
    import hotshot.stats
    hotshotFile = "hotshot_%s_stats" % options.type
    prof = hotshot.Profile(hotshotFile)
    prof.runcall(main)
    prof.close()
    s = hotshot.stats.load(hotshotFile)
    s.strip_dirs().sort_stats("time").print_stats(20)
else:
    main()
|
python
|
# Fine-tune a pre-trained wide-ResNet encoder on the labelled support set
# using the SUNCET loss, then serialize the resulting model.

# Imports
from utils import labeled_loader, suncet_fine_tune, config
import tensorflow as tf
import time

# Constants
STEPS_PER_EPOCH = int(config.SUPPORT_SAMPLES // config.SUPPORT_BS)
TOTAL_STEPS = config.FINETUNING_EPOCHS * STEPS_PER_EPOCH

# Prepare Dataset object for the support samples
# Note - no augmentation
support_ds = labeled_loader.get_support_ds(aug=False, bs=config.SUPPORT_BS)
print("Data loaders prepared.")

# Initialize encoder and optimizer
wide_resnet_enc = tf.keras.models.load_model(config.PRETRAINED_MODEL)
# Freeze only the BatchNorm layers; everything else stays trainable.
for layer in wide_resnet_enc.layers:
    if isinstance(layer, tf.keras.layers.BatchNormalization):
        layer.trainable = False
    else:
        layer.trainable = True

# Cosine-decayed SGD over the full fine-tuning schedule.
scheduled_lr = tf.keras.experimental.CosineDecay(
    initial_learning_rate=0.1, decay_steps=TOTAL_STEPS
)
optimizer = tf.keras.optimizers.SGD(learning_rate=scheduled_lr, momentum=0.9)
print("Model and optimizer initialized.")

############## Training ##############
for e in range(config.FINETUNING_EPOCHS):
    print(f"=======Starting epoch: {e}=======")
    epoch_suncet_loss_avg = tf.keras.metrics.Mean()
    start_time = time.time()

    for i, (set_one, set_two) in enumerate(support_ds):
        # Cap each epoch at STEPS_PER_EPOCH batches.
        if i == STEPS_PER_EPOCH:
            break
        # Concat the 2x views from the support set.
        support_images = tf.concat([set_one[0], set_two[0]], axis=0)
        support_labels = tf.concat([set_one[1], set_two[1]], axis=0)
        # Note: no label-smoothing: https://git.io/Jskgu
        # depth=10 assumes a 10-class dataset — TODO confirm.
        support_labels = tf.one_hot(support_labels, depth=10)

        # Perform training step
        batch_suncet_loss, gradients = suncet_fine_tune.train_step(
            (support_images, support_labels), wide_resnet_enc
        )

        # Update the parameters of the encoder
        optimizer.apply_gradients(zip(gradients, wide_resnet_enc.trainable_variables))
        epoch_suncet_loss_avg.update_state(batch_suncet_loss)

    print(
        f"Epoch: {e} SUNCET Loss: "
        f"{epoch_suncet_loss_avg.result():.3f}"
        f" Time elapsed: {time.time() - start_time:.2f} secs"
    )
    print("")

# Serialize model
wide_resnet_enc.save(config.FINETUNED_MODEL)
print(f"Encoder serialized to : {config.FINETUNED_MODEL}")
|
python
|
"""
The MIT License (MIT)
Copyright (c) 2016 Jake Lussier (Stanford University)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
TODO: License info
Program to stream (and possibly record) live or recorded data from kitchen.
Works by spawning sensor-specific streaming threads based on the
specified FridgeConfig's recording_streams attributes.
Each streaming thread constantly puts data onto the data queue.
The writeData() function gets this data from the queue.
Record events are registered through user input or specific environment
changes (eg, when lights come ON). In such cases, writeData()
writes data to disk. User input is received through the
image window or the terminal and handled by handleKeyPress().
"""
from kitchen import *
import argparse, sys, os, time, inspect, logging, threading, cv2, Queue, json, socket, requests
from os.path import *
from utils.general_utils import *
from utils.cv_utils import *
from utils.logging_utils import *
from config.fridge_config import *
from audio.audio_writer import *
from data_stream.stream_utils import *
def handleCharacter(ch, time_str, caller_name, config):
    """Dispatch a single keyboard character.

    Only 'q' is meaningful today: it requests a global shutdown by
    setting quit_all.  All other characters are ignored.

    Args:
        ch: Character.
        time_str: Time string (currently unused).
        caller_name: Calling function's name, used in log messages.
        config: Config the keypress came from (currently unused).
    """
    global quit_all
    name = inspect.stack()[0][3]
    try:
        if ch != 'q':
            return
        logging.info("%s:%s Quitting." % (caller_name, name,))
        quit_all = True
    except Exception as e:
        # Any failure also shuts the pipeline down.
        handleException("%s:%s"%(caller_name,name), e)
        quit_all = True
def streamKeyboardStreams(config_sensor_streams):
    """Poll keyboard streams, dispatch keypresses and optionally record them.

    Runs until the global quit_all flag is set or all streams are done.
    Each character is routed to handleCharacter(); when --record is on,
    it is also enqueued on the global write queue for writeData().

    Args:
        config_sensor_streams: dict mapping config name -> keyboard streams.
    """
    global write_q, quit_all
    name = inspect.stack()[0][3]
    try:
        logger = Logger(name, interval=60*5, updates_per_second=True)
        # Flatten all streams so the loop condition can watch them at once.
        all_kb_streams = [w for v in config_sensor_streams.values() for w in v]
        if not all_kb_streams: return
        while not quit_all and streamsNotDone(all_kb_streams):
            logger.update()
            for (config, streams) in config_sensor_streams.items():
                for stream in streams:
                    # passStream consults the global all_streams (defined at
                    # module bottom) — presumably for time-sync; confirm.
                    if passStream(stream, all_streams): continue
                    t = stream.getCurrentTime()
                    tstr = dateTimeToTimeString(t)
                    data = stream.getCurrentData()
                    if data:
                        handleCharacter(data, tstr, name, config)
                        if args.record: write_q.put((stream, t, data+"\n"))
                    stream.updateCurrent()
            # Short sleep keeps the poll loop from spinning at 100% CPU.
            time.sleep(0.001)
    except Exception as e:
        handleException(name, e)
        quit_all = True
def writeData():
    """Removes data from the data queue and writes to appropriate stream.

    Calls get() on the data queue. Sleeps if the data is too new.
    When data is far enough in the past, writes to stream.

    Runs until quit_all is set AND the queue has been drained, so queued
    samples are not lost on shutdown.  Writing happens under the global
    writing_lock so writer creation/teardown elsewhere cannot race it.
    """
    global write_q, quit_all
    name = inspect.stack()[0][3]
    last_closed = currentTime(all_streams)
    try:
        logger = Logger(name, interval=60*5)
        while not (quit_all and write_q.empty()):
            logger.update()
            try:
                (stream, t, data) = write_q.get(timeout=writer_buffer_time)
                now = currentTime(all_streams)
                # Too-fresh data is delayed so writers see ordered input.
                if (now-t).total_seconds() < writer_buffer_time:
                    time.sleep(writer_buffer_time)
            # NOTE(review): bare except is presumably meant to catch the
            # queue-timeout (Queue.Empty) — it also hides any other error
            # in the block above; consider narrowing.
            except:
                continue
            with writing_lock:
                stream.write(t, data)
                # Periodically close writers whose end time has passed.
                if (now-last_closed).total_seconds() > 1.0:
                    [v.close(t) for v in all_streams]
                    last_closed = now
    except Exception as e:
        handleException(name, e)
        quit_all = True
def streamAudioStreams(config_sensor_streams):
    """Poll every audio stream and enqueue its samples for recording.

    Args:
        config_sensor_streams: dict mapping config name -> audio streams.
    """
    global write_q, quit_all
    name = inspect.stack()[0][3]
    try:
        status_logger = Logger(name, interval=60*5, updates_per_second=True)
        flat_streams = [s for group in config_sensor_streams.values() for s in group]
        if not flat_streams:
            return
        while not quit_all and streamsNotDone(flat_streams):
            status_logger.update()
            for group in config_sensor_streams.values():
                for audio_stream in group:
                    if passStream(audio_stream, all_streams):
                        continue
                    stamp = audio_stream.getCurrentTime()
                    sample = audio_stream.getCurrentData()
                    if args.record:
                        write_q.put((audio_stream, stamp, sample))
                    audio_stream.updateCurrent()
            # Brief sleep to avoid busy-waiting.
            time.sleep(0.001)
    except Exception as e:
        handleException(name, e)
        quit_all = True
def streamCameraStreams(config_sensor_streams):
    """Stream camera data, setup visualization, and handle uncovering events.

    Stream camera data. Also display the vis image and handle user input.
    In the event of an uncovering (light=ON), create writers for
    appropriate setup. For a covering (light=OFF), close the writers.
    Note that this function is run in the main thread.

    Args:
        config_sensor_streams: dict mapping config name -> camera streams.
    """
    global write_q, quit_all
    name = inspect.stack()[0][3]
    ##writer = Cv2VideoWriter("demo.mp4", 25)
    try:
        logger = Logger(name, interval=60*5, updates_per_second=True, memory_usage=True)
        all_camera_streams = [w for v in config_sensor_streams.values() for w in v]
        if not all_camera_streams: return
        # For each config, whether a camera is uncovered and how many frames recorded in this interval.
        uncovered, recorded = dict([(v, False) for v in config_sensor_streams.keys()]), {}
        vis_str, vis_shape = "vis", None
        vis_ims = dict([(v[0], [None for w in range(len(v[1]))]) for v in config_sensor_streams.items()])
        if args.display: cv2.namedWindow(vis_str)
        while (not quit_all and streamsNotDone(all_camera_streams)):
            logger.update()
            for (config, streams) in config_sensor_streams.items():
                if not streams: continue
                for (i, stream) in enumerate(streams):
                    # Read data and add to data queue.
                    if passStream(stream, all_streams): continue
                    t = stream.getCurrentTime()
                    #if uncovered[config]: print "Open", (t-t_open).total_seconds(), "secs"
                    tstr = dateTimeToTimeString(t)
                    im = stream.getCurrentData()
                    sm_im = resize(im, 1.0/args.display_downscale)
                    # BUGFIX: use identity, not ==, when testing for None —
                    # '== None' on a numpy array compares elementwise.
                    if vis_shape is None: vis_shape = sm_im.shape[:2]
                    if args.record:
                        write_q.put((stream,t,im))
                    stream.updateCurrent()
                    # Update the visualization for this stream.
                    c = (255,0,0) if config=="fridge" else (0,0,255)
                    #cv2.rectangle(sm_im, (0,0), (sm_im.shape[1]-1, sm_im.shape[0]-1), c, 10)
                    vis_ims[config][i] = sm_im
                # Brightness proxy: max over cameras of the mean per-pixel
                # max-channel value.  BUGFIX: 'None in <list of arrays>'
                # triggers elementwise array comparison; test identity.
                light = 0 if any(v is None for v in vis_ims[config]) else \
                    max([np.mean(np.max(v, axis=2)) for v in vis_ims[config]])
                # Handle state changes
                definitely_closed, definitely_open = light<50, light>100
                if not uncovered[config] and definitely_open:
                    t_open = t
                    logging.info("%s OPEN %s." % (name, tstr))
                    # Create writers.
                    if args.record:
                        with writing_lock:
                            [v.createWriter(t, time_buffer=writer_buffer_time, fps=30)
                             for v in config_streams[config]]
                    # Send pulse command to expiring containers.
                    if args.msg_containers and api_config:
                        auth = ""
                        url_base = api_config["base-url"] + str(api_config["app-api-port"])
                        url = url_base + "/inventory"
                        irs = requests.get(url, headers={"Authorization":auth}).json()
                        alert_iids = [v["item_id"] for v in irs if v["remaining_time"] <= 0
                                      and "item_beacon_id" in v]
                        warning_iids = [v["item_id"] for v in irs
                                        if v["remaining_time"] > 0 and v["remaining_time"] < 7
                                        and "item_beacon_id" in v]
                        # Red pulse for expired items, orange for expiring.
                        data = {"item_ids": alert_iids, "animation": "ledOn",
                                "duration": 3000, "color": "ff0000"}
                        url = url_base + "/containerAnimation?" + json.dumps(data)
                        requests.post(url, headers={"Authorization":auth})
                        data = {"item_ids": warning_iids, "animation": "ledOn",
                                "duration": 3000, "color": "ff7700"}
                        url = url_base + "/containerAnimation?" + json.dumps(data)
                        requests.post(url, headers={"Authorization":auth})
                    uncovered[config], start_time, recorded[config] = True, time.time(), 0
                elif uncovered[config] and definitely_closed:
                    logging.info("%s CLOSED." % (name,))
                    fps = recorded[config] / (time.time()-start_time)
                    logging.info("%s video fps = %.1f." % (name, fps))
                    # Close writers.
                    if args.record:
                        with writing_lock:
                            [v.setWriterEndTimes(t) for v in config_streams[config]]
                    uncovered[config] = False
                if uncovered[config]: recorded[config] += 1
            # BUGFIX: 'w is not None' instead of 'w != None' (numpy arrays).
            vis_im = tileImages([cv2.resize(w, vis_shape[::-1]) if w is not None else np.zeros(vis_shape)
                                 for v in vis_ims.values() for w in v])
            if args.display:
                cv2.imshow(vis_str, vis_im)
                # NOTE: tstr/config intentionally carry the values from the
                # last iterated stream/config.
                handleCharacter(getKey(cv2.waitKey(1 if args.live else 10)), tstr, name, config)
            ##writer.write(vis_im)
    except Exception as e:
        handleException(name, e)
        quit_all = True
def streamLoadCellStreams(config_sensor_streams):
    """Poll every load-cell stream and enqueue weight readings as text lines.

    Args:
        config_sensor_streams: dict mapping config name -> load-cell streams.
    """
    global write_q, quit_all
    name = inspect.stack()[0][3]
    try:
        status_logger = Logger(name, interval=60*5, updates_per_second=True)
        flat_streams = [s for group in config_sensor_streams.values() for s in group]
        if not flat_streams:
            return
        while not quit_all and streamsNotDone(flat_streams):
            status_logger.update()
            for (config, group) in config_sensor_streams.items():
                for cell_stream in group:
                    if passStream(cell_stream, all_streams):
                        continue
                    stamp = cell_stream.getCurrentTime()
                    weight = cell_stream.getCurrentData()
                    if args.record:
                        write_q.put((cell_stream, stamp, "%f\n"%weight))
                    cell_stream.updateCurrent()
            # Load cells are polled a bit less aggressively than the others.
            time.sleep(0.005)
    except Exception as e:
        handleException(name, e)
        quit_all = True
def streamRfidAntennaStreams(config_sensor_streams):
    """Poll RFID antennas, record tag reads and maintain a recent-tag cache.

    Keeps config_tags[config] as the unique tags read in the last ~2
    seconds, most recent read per EPC, sorted by descending RSSI.

    Args:
        config_sensor_streams: dict mapping config name -> RFID antenna streams.
    """
    global write_q, quit_all, config_tags
    name = inspect.stack()[0][3]
    try:
        logger = Logger(name, interval=60*5, updates_per_second=True)
        all_antenna_streams = [w for v in config_sensor_streams.values() for w in v]
        if not all_antenna_streams: return
        while not quit_all and streamsNotDone(all_antenna_streams):
            logger.update()
            for (config, streams) in config_sensor_streams.items():
                for stream in streams:
                    if passStream(stream, all_streams): continue
                    t = stream.getCurrentTime()
                    data = stream.getCurrentData()
                    if args.record:
                        # One queue entry per tag read, serialized as text.
                        [write_q.put((stream, v.time,str(v)+"\n")) for v in data]
                    # Update tags. List has unique tags with most recent
                    # reads and is in descending RSSI order.
                    # Sort by time so the dict keeps the newest read per EPC,
                    # dropping reads older than 2 seconds.
                    tt = sorted(config_tags[config] + data, key=lambda x: x.time)
                    tt = dict([(v.epc, v) for v in tt if (t-v.time).total_seconds()<2]).values()
                    config_tags[config] = sorted(tt, key=lambda x: -x.rssi)
                    stream.updateCurrent()
            time.sleep(0.001)
    except Exception as e:
        handleException(name, e)
        quit_all = True
def streamBarcodeStreams(config_sensor_streams):
    """Poll every barcode scanner stream and enqueue successful scans.

    Args:
        config_sensor_streams: dict mapping config name -> barcode streams.
    """
    global write_q, quit_all
    name = inspect.stack()[0][3]
    try:
        status_logger = Logger(name, interval=60*5, updates_per_second=True)
        flat_streams = [s for group in config_sensor_streams.values() for s in group]
        if not flat_streams:
            return
        while not quit_all and streamsNotDone(flat_streams):
            status_logger.update()
            for (config, group) in config_sensor_streams.items():
                for scanner_stream in group:
                    if passStream(scanner_stream, all_streams):
                        continue
                    stamp = scanner_stream.getCurrentTime()
                    stamp_str = dateTimeToTimeString(stamp)
                    barcode = scanner_stream.getCurrentData()
                    if args.record and barcode:
                        # Scans are stamped with the scanner's own read time.
                        write_q.put((scanner_stream,barcode.time,str(barcode)+"\n"))
                    scanner_stream.updateCurrent()
            time.sleep(0.001)
    except Exception as e:
        handleException(name, e)
        quit_all = True
def streamBleStreams(config_sensor_streams):
    """Poll every BLE stream and enqueue each observed beacon advertisement.

    Args:
        config_sensor_streams: dict mapping config name -> BLE streams.
    """
    global write_q, quit_all
    name = inspect.stack()[0][3]
    try:
        status_logger = Logger(name, interval=60*5, updates_per_second=True)
        flat_streams = [s for group in config_sensor_streams.values() for s in group]
        if not flat_streams:
            return
        while not quit_all and streamsNotDone(flat_streams):
            status_logger.update()
            for (config, group) in config_sensor_streams.items():
                for ble_stream in group:
                    if passStream(ble_stream, all_streams):
                        continue
                    stamp = ble_stream.getCurrentTime()
                    stamp_str = dateTimeToTimeString(stamp)
                    beacons = ble_stream.getCurrentData()
                    if args.record:
                        # One queue entry per beacon, stamped with its
                        # own observation time.
                        for beacon in beacons:
                            write_q.put((ble_stream,beacon.time,str(beacon)+"\n"))
                    ble_stream.updateCurrent()
            time.sleep(0.001)
    except Exception as e:
        handleException(name, e)
        quit_all = True
def getKitchenInfo():
    """Look up this machine's kitchen id and data directory.

    Matches the local hostname (lower-cased) against the "Kitchen" entries
    of the database config file.

    Returns:
        Tuple (kid, path): the kitchen id and its data directory under DATA.

    Raises:
        KeyError: if the hostname is not a known kitchen name.
    """
    with open(DB_CONFIG) as f:
        db_config = json.load(f)
    name_to_kid = {entry["name"].lower(): entry["id"]
                   for entry in db_config["entries"]["Kitchen"]}
    kid = name_to_kid[socket.gethostname().lower()]
    return kid, join(DATA, "Kitchen%07d" % kid)
if __name__ == "__main__":
    # NOTE(review): `Queue.Queue` and the bare `map()` fed to `+=` below
    # imply Python 2 -- under Python 3 `map` returns an iterator and the
    # list concatenations would fail; confirm target interpreter.
    parser = argparse.ArgumentParser(description='Kitchen streaming.')
    parser.add_argument("--output", help="Output directory.")
    parser.add_argument("--fridge-config", help="Fridge configuration.")
    parser.add_argument("--api-config", help="Service configuration.")
    parser.add_argument("--frame-width", help="Frame width.", type=int)
    parser.add_argument("--frame-height", help="Frame height.", type=int)
    parser.add_argument("--display-downscale", help="Display downscale.", type=int, default=1)
    parser.add_argument("--live", help="Live stream.", action="store_true")
    parser.add_argument("--record", help="Record data.", action="store_true")
    parser.add_argument("--msg-containers", help="Msg containers.", action="store_true")
    parser.add_argument("--display", help="Display video.", action="store_true")
    args = parser.parse_args()
    # Recording only makes sense from a live source.
    if args.record and not args.live:
        print("Error: cannot record recorded stream.")
        parser.print_help()
        sys.exit(89)
    try:
        kitchen_id, output = getKitchenInfo()
    except Exception as e:
        print ("Error: must record from known machine.")
        sys.exit(89)
    if not args.output: args.output = output
    # Frame shape must be fully specified or fully defaulted.
    if args.frame_width and args.frame_height:
        shape = (args.frame_width, args.frame_height)
    elif not args.frame_width and not args.frame_height:
        shape = None
    else:
        print("Error: must specify both width and height or neither.")
        parser.print_help()
        sys.exit(89)
    configureLogging("%s.log" % args.output.rstrip("/"))
    api_config = None if not args.api_config else json.loads(open(args.api_config).read())
    configs = [FridgeConfig(args.fridge_config)]
    # TODO(jake): remove hack
    ble_addrs = ["f86aa431ba22"]
    # Initialize global list of config tags and all streams. The former is a dict
    # from config (eg, "fridge") to list of tags.
    # Latter is simply a list of streams.
    config_streams, config_tags = [dict([(w.name,[]) for w in configs]) \
        for v in range(2)]
    all_streams = []
    # Initialize control and stream threads.
    control_threads = [threading.Thread(target=v) for v in [writeData]]
    stream_threads = []
    # For each stream type (eg, AudioStream, BarcodeStream), for each config,
    # initialize and store the streams. Append to stream threads.
    stream_names = set([w for v in configs for w in v.recording_streams.keys()])
    for stream_name in stream_names:
        config_sensor_streams = {}
        for config in configs:
            sensor_names = config.recording_streams[stream_name]
            sensor_paths = [join(args.output, "%s%s%s"%(config.name,stream_name,v)) \
                for v in sensor_names]
            for p in sensor_paths:
                if args.record and not exists(p): os.makedirs(p)
            # Stream classes are resolved by name, e.g. "BarcodeStreamLive";
            # eval here is on trusted, config-derived names only.
            if args.live: # Initialize StreamLive
                config_sensor_streams[config.name] = [eval(stream_name+"Live")(n, p, shape=shape, addrs=ble_addrs) \
                    for (n, p) in zip(sensor_names, sensor_paths)]
            else: # initialize StreamRecorded
                config_sensor_streams[config.name] = map(eval(stream_name+"Recorded"), sensor_paths)
            config_streams[config.name] += config_sensor_streams[config.name]
            all_streams += config_sensor_streams[config.name]
        # Camera streams run in the main thread (below); everything else gets
        # its own thread driving the matching stream<Name>s() function.
        if stream_name == "CameraStream":
            camera_config_streams = config_sensor_streams
        else:
            f = eval("stream%ss"%stream_name)
            stream_threads.append(threading.Thread(target=f, args=(config_sensor_streams,)))
    # Initialize global writer and control variables.
    write_q, writing_lock, writer_buffer_time = Queue.Queue(), threading.Lock(), 3.0
    quit_all = False
    # Kick off threads
    for t in control_threads+stream_threads:
        t.daemon = True
        t.start()
    # Stream camera data in main thread.
    streamCameraStreams(camera_config_streams)
    # Kick off and then join threads.
    [t.join() for t in stream_threads]
    quit_all = True
    [t.join() for t in control_threads]
    # Release all streams.
    [v.release() for v in all_streams]
|
python
|
'''
Exercise 05: read an integer and print its predecessor and successor.
'''
print('===== Exercício 05 =====')
numero = int(input('Digite um número: '))
antecessor = numero - 1
sucessor = numero + 1
print(f'O antecessor de {numero} é {antecessor} e o sucessor é {sucessor}')
|
python
|
"""
1265, print immutable linked list reverse
Difficulty: medium
You are given an immutable linked list, print out all values of each node in reverse with the help of the following interface:
ImmutableListNode: An interface of immutable linked list, you are given the head of the list.
You need to use the following functions to access the linked list (you can't access the ImmutableListNode directly):
ImmutableListNode.printValue(): Print value of the current node.
ImmutableListNode.getNext(): Return the next node.
The input is only given to initialize the linked list internally. You must solve this problem without modifying the linked list. In other words, you must operate the linked list using only the mentioned APIs.
Example 1:
Input: head = [1,2,3,4]
Output: [4,3,2,1]
"""
# """
# This is the ImmutableListNode's API interface.
# You should not implement it, or speculate about its implementation.
# """
class ImmutableListNode:
    """Linked-list node exposing only the problem's "immutable" API.

    Only printValue() and getNext() may be used by solutions; printValue
    routes through a registry object so tests can observe print order
    instead of capturing stdout.
    """
    def __init__(self, registry, val=None, next=None):
        # Stored in plain attributes; the immutability is by convention only.
        self.val = val
        self.next = next
        self.registry = registry
    def printValue(self) -> None:
        """Report this node's value through the registry (stand-in for print)."""
        self.registry.print(self.val)
    def getNext(self) -> 'ImmutableListNode':
        """Return the successor node (None at the end of the list)."""
        return self.next
class Solution:
    def printLinkedListInReverse(self, head: 'ImmutableListNode') -> None:
        """Print every node's value in reverse order via the immutable API.

        The original recursed once per node, which raises RecursionError on
        lists longer than Python's recursion limit (~1000) and crashes with
        AttributeError on an empty list (head is None).  This version walks
        the list once collecting node references, then prints them back to
        front: O(n) time, O(n) extra space, no recursion, and a no-op for an
        empty list.
        """
        nodes = []
        node = head
        while node is not None:
            nodes.append(node)
            node = node.getNext()
        for node in reversed(nodes):
            node.printValue()
class Registry():
    """Test double that records "printed" values so order can be asserted."""
    def __init__(self):
        # Values in the order they were printed.
        self.print_order = list()
    def print(self, val):
        """Capture val instead of writing to stdout."""
        self.print_order.append(val)
def list_to_linked_list(a, registry):
    """Build an ImmutableListNode chain from list a.

    Every node shares the same registry so print calls are recorded in one
    place.  Raises IndexError for an empty list, like the original.
    """
    nodes = [ImmutableListNode(registry, value) for value in a]
    # Link each node to its successor; the last node keeps next=None.
    for node, successor in zip(nodes, nodes[1:]):
        node.next = successor
    return nodes[0]
if __name__ == "__main__":
    # Smoke test: printing 1..6 in reverse must record 6..1.
    registry = Registry()
    values = [1, 2, 3, 4, 5, 6]
    linked = list_to_linked_list(values, registry)
    Solution().printLinkedListInReverse(linked)
    assert registry.print_order == [6, 5, 4, 3, 2, 1]
|
python
|
"""
Module providing AES256 symmetric encryption services
If run as a Python command-line script, module will interactively prompt
for a password, then print out the corresponding encoded db_config.ini
password parameters suitable for cut/pasting into API .ini config files.
Copyright (C) 2016 ERT Inc.
"""
import getpass
from Crypto.Cipher import AES
from Crypto.Util import Padding #Requires PyCrypto v2.7+
from Crypto import Random
__author__ = "Brandon J. Van Vaerenbergh <[email protected]>, "
def encode(plaintext_string, key, salt):
    """
    AES256-CBC encipher a string using the provided key and 128bit salt

    The first 128 bits of the returned ciphertext are an exact copy of
    the provided salt (the AES initialization vector), so decode() needs
    no separate salt parameter.  To store the salt apart from the true
    ciphertext, simply split off the first 128 bits.

    Keyword Parameter:
    plaintext_string -- String (or bytes) to be enciphered.
    key -- Bytes, representing an 256bit AES private key.
    salt -- Bytes, representing a randomly generated (one-time-use),
    128bit long AES Initialization Vector which salts the ciphertext.

    >>> key = (b'\\x02\\xd2d\\xfb\\x84Q\\xed?\\x92\\xda\\xcd\\x9a/)'
    ...        b'\\x15\\xdc\\xb5~\\\\\\x03\\xeby\\xa7\\xfb&#\\xb8'
    ...        b'\\xd1y+a\\x86')
    >>> s = b'\\x94\\x99$y\\x83B\\x85N\\x94E\\x01L\\xe5\\xba\\xea\\xdf'
    >>> encode("Hello, World!", key, s)
    b'\\x94\\x99$y\\x83B\\x85N\\x94E\\x01L\\xe5\\xba\\xea\\xdf\\t\\xbc\\x84\\xf8L\\xd5adz\\x1bl\\x9f\\x9c\\x1db\\xb1'
    """
    assert len(key) >= 256/8, "Private key must be 256bit, minimum"
    assert len(salt) == 128/8, "Expected (exactly) 128bit long salt"
    # CBC-mode cipher seeded with the one-time IV.
    # See dlitz.net/software/pycrypto/api/current/Crypto.Cipher.AES-module.html
    cipher = AES.new(key, AES.MODE_CBC, IV=salt)
    # Accept str or bytes: str is UTF-8 encoded, bytes pass straight through.
    try:
        plaintext_bytes = plaintext_string.encode('utf-8')
    except AttributeError:
        plaintext_bytes = plaintext_string  # already bytes-like
    # AES requires input padded to the 16-byte block size (PKCS#7 / CMS).
    padded_plaintext = Padding.pad(plaintext_bytes, 16, style='pkcs7')
    # Prepending the known-length IV is safe: its only job is to defeat
    # rainbow tables.
    return salt + cipher.encrypt(padded_plaintext)
def decode(salted_ciphertext_bytes, key):
    """
    Decipher an AES256-encoded String using the provided key

    Keyword Parameter:
    salted_ciphertext_bytes -- Bytes whose first 128 bits are the
    one-time AES Initialization Vector used during encryption and whose
    remainder is the actual AES256 ciphertext (the layout produced by
    encode()).
    key -- Bytes, representing an 256bit AES private key.

    >>> key = (b'\\x02\\xd2d\\xfb\\x84Q\\xed?\\x92\\xda\\xcd\\x9a/)'
    ...        b'\\x15\\xdc\\xb5~\\\\\\x03\\xeby\\xa7\\xfb&#\\xb8'
    ...        b'\\xd1y+a\\x86')
    >>> ciphertext = (b'\\x94\\x99$y\\x83B\\x85N\\x94E\\x01L\\xe5\\xba'
    ...               b'\\xea\\xdf\\t\\xbc\\x84\\xf8L\\xd5adz\\x1bl'
    ...               b'\\x9f\\x9c\\x1db\\xb1')
    >>> decode(ciphertext, key)
    'Hello, World!'
    """
    assert len(key) >= 256/8, "Private key must be 256bit, minimum"
    # Per this module's convention the leading 128 bits are the IV that was
    # randomly generated at encryption time.
    iv_length = int(128/8)
    iv = salted_ciphertext_bytes[:iv_length]
    ciphertext = salted_ciphertext_bytes[iv_length:]
    # See dlitz.net/software/pycrypto/api/current/Crypto.Cipher.AES-module.html
    cipher = AES.new(key, AES.MODE_CBC, IV=iv)
    padded_plaintext = cipher.decrypt(ciphertext)
    plaintext_bytes = Padding.unpad(padded_plaintext, 16, style='pkcs7')
    return plaintext_bytes.decode('utf-8')
def interactive_mode():
    """
    Command-line password entry+confirmation prompt, prints encoded form

    Loops until both entries match (the original recursed on mismatch,
    growing the stack and relying on the innermost call's exit(0) to stop
    the outer frames from continuing), then generates a one-time key and
    salt, encrypts the password, prints the two .ini parameters, and exits
    the process with status 0.
    """
    while True:
        pw1 = getpass.getpass("Enter db connection password: ")
        pw2 = getpass.getpass("Re-enter password, to confirm: ")
        if pw1 == pw2:
            break
        print("ERROR: Passwords do not match, try again.")
    # User has accurately entered the pw. Now encrypt.
    random_bytes = Random.new()
    key_length_bytes = int(256/8)
    salt_length_bytes = int(128/8)
    one_time_key = random_bytes.read(key_length_bytes)
    one_time_salt = random_bytes.read(salt_length_bytes)
    ciphertext = encode(pw1, one_time_key, one_time_salt)
    msg = 'Warehouse API .ini password parameters (paste both into .ini file):'
    print(msg)
    print('ciphertext_key = {}'.format(one_time_key))
    print('ciphertext = {}'.format(ciphertext))
    exit(0)
if __name__ == '__main__':
    # Run the interactive password encoder when executed as a script.
    interactive_mode()
|
python
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Regression tests for Endpoints server in devappserver2."""
import json
import os.path
from google.testing.pybase import googletest
from google.appengine.tools.devappserver2 import regtest_utils
from google.appengine.tools.devappserver2.endpoints import endpoints_server
class EndpointsServerRegtest(regtest_utils.BaseTestCase):
  """Integration tests for the Endpoints server in devappserver2.

  Starts a dev server for the endpoints test app, then exercises REST, RPC,
  CORS, datetime round-tripping and discovery through real HTTP fetches.
  (The original docstring, about serving static content, was a copy-paste
  from another test.)
  """

  def setUp(self):
    # Boot the dev server once per test with the endpoints test app.
    super(EndpointsServerRegtest, self).setUp()
    server_path = os.path.join(self.devappserver2_path,
                               'endpoints/testdata/app.yaml')
    self.start_server([server_path])

  def test_rest_get(self):
    """Test that a GET request to a REST API works."""
    status, content, headers = self.fetch_url('default', 'GET',
                                              '/_ah/api/test_service/v1/test')
    self.assertEqual(200, status)
    self.assertEqual('application/json', headers['Content-Type'])
    response_json = json.loads(content)
    self.assertEqual({'text': 'Test response'}, response_json)

  def test_rest_post(self):
    """Test that a POST request to a REST API works."""
    body = json.dumps({'name': 'MyName', 'number': 23})
    send_headers = {'content-type': 'application/json'}
    status, content, headers = self.fetch_url('default', 'POST',
                                              '/_ah/api/test_service/v1/t2path',
                                              body, send_headers)
    self.assertEqual(200, status)
    self.assertEqual('application/json', headers['Content-Type'])
    response_json = json.loads(content)
    self.assertEqual({'text': 'MyName 23'}, response_json)

  def test_cors(self):
    """Test that CORS headers are handled properly."""
    # Mixed header-name casing is deliberate: header lookup is
    # case-insensitive on the server side.
    send_headers = {'Origin': 'test.com',
                    'Access-control-request-method': 'GET',
                    'Access-Control-Request-Headers': 'Date,Expires'}
    status, _, headers = self.fetch_url('default', 'GET',
                                        '/_ah/api/test_service/v1/test',
                                        headers=send_headers)
    self.assertEqual(200, status)
    self.assertEqual(headers[endpoints_server._CORS_HEADER_ALLOW_ORIGIN],
                     'test.com')
    self.assertIn('GET',
                  headers[endpoints_server._CORS_HEADER_ALLOW_METHODS].split(
                      ','))
    self.assertEqual(headers[endpoints_server._CORS_HEADER_ALLOW_HEADERS],
                     'Date,Expires')

  def test_rpc(self):
    """Test that an RPC request works."""
    body = json.dumps([{'jsonrpc': '2.0',
                        'id': 'gapiRpc',
                        'method': 'testservice.t2name',
                        'params': {'name': 'MyName', 'number': 23},
                        'apiVersion': 'v1'}])
    send_headers = {'content-type': 'application-rpc'}
    status, content, headers = self.fetch_url('default', 'POST',
                                              '/_ah/api/rpc',
                                              body, send_headers)
    self.assertEqual(200, status)
    self.assertEqual('application/json', headers['Content-Type'])
    response_json = json.loads(content)
    self.assertEqual([{'result': {'text': 'MyName 23'},
                       'id': 'gapiRpc'}], response_json)

  def test_echo_datetime_message(self):
    """Test sending and receiving a datetime."""
    body = json.dumps({'milliseconds': 5000, 'time_zone_offset': 60})
    send_headers = {'content-type': 'application/json'}
    status, content, headers = self.fetch_url(
        'default', 'POST', '/_ah/api/test_service/v1/echo_datetime_message',
        body, send_headers)
    self.assertEqual(200, status)
    self.assertEqual('application/json', headers['Content-Type'])
    response_json = json.loads(content)
    self.assertEqual({'milliseconds': 5000, 'time_zone_offset': 60},
                     response_json)

  def test_echo_datetime_field(self):
    """Test sending and receiving a message that includes a datetime."""
    body_json = {'datetime_value': '2013-03-13T15:29:37.883000+08:00'}
    body = json.dumps(body_json)
    send_headers = {'content-type': 'application/json'}
    status, content, headers = self.fetch_url(
        'default', 'POST', '/_ah/api/test_service/v1/echo_datetime_field',
        body, send_headers)
    self.assertEqual(200, status)
    self.assertEqual('application/json', headers['Content-Type'])
    response_json = json.loads(content)
    self.assertEqual(body_json, response_json)

  def test_discovery_config(self):
    """Test that the discovery configuration looks right."""
    status, content, headers = self.fetch_url(
        'default', 'GET', '/_ah/api/discovery/v1/apis/test_service/v1/rest')
    self.assertEqual(200, status)
    self.assertEqual('application/json; charset=UTF-8', headers['Content-Type'])
    response_json = json.loads(content)
    # Port is dynamic, so match the URLs with a regex.
    # NOTE(review): assertRegexpMatches is the deprecated Py2-era alias of
    # assertRegex.
    self.assertRegexpMatches(
        response_json['baseUrl'],
        r'^http://localhost(:\d+)?/_ah/api/test_service/v1/$')
    self.assertRegexpMatches(response_json['rootUrl'],
                             r'^http://localhost(:\d+)?/_ah/api/$')
if __name__ == '__main__':
  # Run the regression suite under Google's googletest runner.
  googletest.main()
|
python
|
import psycopg2
# printStackTrace prints as follows for postgres connection error
# --------------------------------------------------------------------------------
# Error connecting postgres database:
# --------------------------------------------------------------------------------
# Traceback (most recent call last):
# File "C:/Users/padma/github/beginners-py-learn/src/advance_stuff/exception_handling_logging.py", line 12, in <module>
# conn_postgresql = psycopg2.connect(database="abc", user="abc", password="abc", host="127.0.0.1", port="5432")
# File "C:\Users\padma\github\beginners-py-learn\venv\lib\site-packages\psycopg2\__init__.py", line 122, in connect
# conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
# psycopg2.OperationalError: connection to server at "127.0.0.1", port 5432 failed: Connection refused (0x0000274D/10061)
# Is the server running on that host and accepting TCP/IP connections?
# --------------------------------------------------------------------------------
#
import traceback
def printStackTrace(message: str) -> None:
    """Print message plus the current traceback, framed by 80-dash rules."""
    rule = 80 * "-"
    trace = traceback.format_exc()
    print(f'{rule}\n{message}:\n{rule}\n{trace}{rule}')
# Connection handle; module scope so the tail of the script can close it
# after a successful run.
conn_postgresql = None
try :
    # NOTE(review): credentials/host look like placeholders -- this connect
    # is expected to fail (and demonstrate printStackTrace) unless a matching
    # local PostgreSQL server exists; see the sample traceback above.
    conn_postgresql = psycopg2.connect(database="abc123", user="abc123", password="abc123", host="127.0.0.1", port="5432")
    print("Database opened successfully")
    cur = conn_postgresql.cursor()
    # Full name plus class id for every student row.
    cur.execute("SELECT fname || ' ' || lname as StudentName, cid as ClassId from students")
    rows = cur.fetchall()
    for row in rows:
        print("StudentName =", row[0])
        print("ClassId =", row[1], "\n")
except Exception:
    '''
    Printing stack trace
    '''
    # Dump the traceback in the framed format, then exit non-zero.
    printStackTrace('Error connecting postgres database')
    import sys
    sys.exit(-1)
# Only reached on success (the except path exits the process).
if conn_postgresql:
    conn_postgresql.close()
|
python
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration creating the initial ``precon``
    tables (Participant, Panelist, PanelProposal, PanelProposalResponse,
    Panel, Schedule, Day, Slot, Room, Change, SiteConfig) and their M2M join
    tables.

    NOTE(review): generated code -- the ``nonce`` defaults ('sc7mha',
    'y8wm3c') were captured from a random default callable at generation
    time and carry no meaning.  Edit by regenerating, not by hand.
    """

    def forwards(self, orm):
        """Create every precon table and M2M join table."""
        # Adding model 'Participant'
        db.create_table(u'precon_participant', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('creation_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modification_time', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('nonce', self.gf('django.db.models.fields.CharField')(default='sc7mha', unique=True, max_length=6)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=50)),
            ('phone', self.gf('django.db.models.fields.CharField')(max_length=15, null=True, blank=True)),
            ('anything_else', self.gf('django.db.models.fields.TextField')(max_length=1000, null=True, blank=True)),
            ('max_panels', self.gf('django.db.models.fields.CharField')(default='0', max_length=10)),
        ))
        db.send_create_signal(u'precon', ['Participant'])
        # Adding M2M table for field slots_attending on 'Participant'
        m2m_table_name = db.shorten_name(u'precon_participant_slots_attending')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('participant', models.ForeignKey(orm[u'precon.participant'], null=False)),
            ('slot', models.ForeignKey(orm[u'precon.slot'], null=False))
        ))
        db.create_unique(m2m_table_name, ['participant_id', 'slot_id'])
        # Adding M2M table for field slots_available on 'Participant'
        m2m_table_name = db.shorten_name(u'precon_participant_slots_available')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('participant', models.ForeignKey(orm[u'precon.participant'], null=False)),
            ('slot', models.ForeignKey(orm[u'precon.slot'], null=False))
        ))
        db.create_unique(m2m_table_name, ['participant_id', 'slot_id'])
        # Adding M2M table for field slots_maybe on 'Participant'
        m2m_table_name = db.shorten_name(u'precon_participant_slots_maybe')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('participant', models.ForeignKey(orm[u'precon.participant'], null=False)),
            ('slot', models.ForeignKey(orm[u'precon.slot'], null=False))
        ))
        db.create_unique(m2m_table_name, ['participant_id', 'slot_id'])
        # Adding model 'Panelist'
        db.create_table(u'precon_panelist', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
            ('participant', self.gf('django.db.models.fields.related.ForeignKey')(related_name='panelists', on_delete=models.SET_NULL, default=None, to=orm['precon.Participant'], blank=True, null=True)),
        ))
        db.send_create_signal(u'precon', ['Panelist'])
        # Adding model 'PanelProposal'
        db.create_table(u'precon_panelproposal', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),
            ('type', self.gf('django.db.models.fields.CharField')(default='Panel', max_length=50)),
            ('blurb', self.gf('django.db.models.fields.TextField')(max_length=4000)),
            ('needs_panelists', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('suggested_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='panelproposals_suggested', null=True, to=orm['precon.Panelist'])),
        ))
        db.send_create_signal(u'precon', ['PanelProposal'])
        # Adding M2M table for field panelists on 'PanelProposal'
        m2m_table_name = db.shorten_name(u'precon_panelproposal_panelists')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('panelproposal', models.ForeignKey(orm[u'precon.panelproposal'], null=False)),
            ('panelist', models.ForeignKey(orm[u'precon.panelist'], null=False))
        ))
        db.create_unique(m2m_table_name, ['panelproposal_id', 'panelist_id'])
        # Adding model 'PanelProposalResponse'
        db.create_table(u'precon_panelproposalresponse', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('creation_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modification_time', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('participant', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['precon.Participant'])),
            ('panel_proposal', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['precon.PanelProposal'])),
            ('attending_interest', self.gf('django.db.models.fields.CharField')(default='not interested in attending', max_length=50)),
            ('presenting_interest', self.gf('django.db.models.fields.CharField')(default='not interested in presenting', max_length=50)),
            ('presenting_comments', self.gf('django.db.models.fields.TextField')(max_length=1000, null=True, blank=True)),
            ('attending_comments', self.gf('django.db.models.fields.TextField')(max_length=1000, null=True, blank=True)),
        ))
        db.send_create_signal(u'precon', ['PanelProposalResponse'])
        # Adding model 'Panel'
        db.create_table(u'precon_panel', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('type', self.gf('django.db.models.fields.CharField')(default='Panel', max_length=50)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),
            ('blurb', self.gf('django.db.models.fields.TextField')(max_length=4000)),
            ('room', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='panels', null=True, to=orm['precon.Room'])),
            ('panel_proposal', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='panels_accepted', null=True, to=orm['precon.PanelProposal'])),
            ('moderator', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='panels_moderating', null=True, to=orm['precon.Panelist'])),
            ('needs_projector', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal(u'precon', ['Panel'])
        # Adding M2M table for field panelists on 'Panel'
        m2m_table_name = db.shorten_name(u'precon_panel_panelists')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('panel', models.ForeignKey(orm[u'precon.panel'], null=False)),
            ('panelist', models.ForeignKey(orm[u'precon.panelist'], null=False))
        ))
        db.create_unique(m2m_table_name, ['panel_id', 'panelist_id'])
        # Adding M2M table for field slot on 'Panel'
        m2m_table_name = db.shorten_name(u'precon_panel_slot')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('panel', models.ForeignKey(orm[u'precon.panel'], null=False)),
            ('slot', models.ForeignKey(orm[u'precon.slot'], null=False))
        ))
        db.create_unique(m2m_table_name, ['panel_id', 'slot_id'])
        # Adding model 'Schedule'
        db.create_table(u'precon_schedule', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=20)),
        ))
        db.send_create_signal(u'precon', ['Schedule'])
        # Adding model 'Day'
        db.create_table(u'precon_day', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=20)),
        ))
        db.send_create_signal(u'precon', ['Day'])
        # Adding model 'Slot'
        db.create_table(u'precon_slot', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('schedule', self.gf('django.db.models.fields.related.ForeignKey')(related_name='slots', to=orm['precon.Schedule'])),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=20)),
            ('day', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='slots', null=True, to=orm['precon.Day'])),
        ))
        db.send_create_signal(u'precon', ['Slot'])
        # Adding model 'Room'
        db.create_table(u'precon_room', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('schedule', self.gf('django.db.models.fields.related.ForeignKey')(related_name='rooms', to=orm['precon.Schedule'])),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=20)),
        ))
        db.send_create_signal(u'precon', ['Room'])
        # Adding model 'Change'
        db.create_table(u'precon_change', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('description', self.gf('django.db.models.fields.TextField')(max_length=4000)),
        ))
        db.send_create_signal(u'precon', ['Change'])
        # Adding model 'SiteConfig'
        db.create_table(u'precon_siteconfig', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('current_schedule', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['precon.Schedule'], null=True, on_delete=models.SET_NULL, blank=True)),
        ))
        db.send_create_signal(u'precon', ['SiteConfig'])

    def backwards(self, orm):
        """Drop every table created by forwards(), in reverse dependency order."""
        # Deleting model 'Participant'
        db.delete_table(u'precon_participant')
        # Removing M2M table for field slots_attending on 'Participant'
        db.delete_table(db.shorten_name(u'precon_participant_slots_attending'))
        # Removing M2M table for field slots_available on 'Participant'
        db.delete_table(db.shorten_name(u'precon_participant_slots_available'))
        # Removing M2M table for field slots_maybe on 'Participant'
        db.delete_table(db.shorten_name(u'precon_participant_slots_maybe'))
        # Deleting model 'Panelist'
        db.delete_table(u'precon_panelist')
        # Deleting model 'PanelProposal'
        db.delete_table(u'precon_panelproposal')
        # Removing M2M table for field panelists on 'PanelProposal'
        db.delete_table(db.shorten_name(u'precon_panelproposal_panelists'))
        # Deleting model 'PanelProposalResponse'
        db.delete_table(u'precon_panelproposalresponse')
        # Deleting model 'Panel'
        db.delete_table(u'precon_panel')
        # Removing M2M table for field panelists on 'Panel'
        db.delete_table(db.shorten_name(u'precon_panel_panelists'))
        # Removing M2M table for field slot on 'Panel'
        db.delete_table(db.shorten_name(u'precon_panel_slot'))
        # Deleting model 'Schedule'
        db.delete_table(u'precon_schedule')
        # Deleting model 'Day'
        db.delete_table(u'precon_day')
        # Deleting model 'Slot'
        db.delete_table(u'precon_slot')
        # Deleting model 'Room'
        db.delete_table(u'precon_room')
        # Deleting model 'Change'
        db.delete_table(u'precon_change')
        # Deleting model 'SiteConfig'
        db.delete_table(u'precon_siteconfig')

    # Frozen ORM snapshot used by South to materialize `orm[...]` above.
    models = {
        u'precon.change': {
            'Meta': {'ordering': "['-id']", 'object_name': 'Change'},
            'description': ('django.db.models.fields.TextField', [], {'max_length': '4000'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'precon.day': {
            'Meta': {'object_name': 'Day'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        },
        u'precon.panel': {
            'Meta': {'ordering': "['name']", 'object_name': 'Panel'},
            'blurb': ('django.db.models.fields.TextField', [], {'max_length': '4000'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'moderator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'panels_moderating'", 'null': 'True', 'to': u"orm['precon.Panelist']"}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'needs_projector': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'panel_proposal': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'panels_accepted'", 'null': 'True', 'to': u"orm['precon.PanelProposal']"}),
            'panelists': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'panels'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['precon.Panelist']"}),
            'room': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'panels'", 'null': 'True', 'to': u"orm['precon.Room']"}),
            'slot': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'panels'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['precon.Slot']"}),
            'type': ('django.db.models.fields.CharField', [], {'default': "'Panel'", 'max_length': '50'})
        },
        u'precon.panelist': {
            'Meta': {'ordering': "['name']", 'object_name': 'Panelist'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
            'participant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'panelists'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': u"orm['precon.Participant']", 'blank': 'True', 'null': 'True'})
        },
        u'precon.panelproposal': {
            'Meta': {'ordering': "['name']", 'object_name': 'PanelProposal'},
            'blurb': ('django.db.models.fields.TextField', [], {'max_length': '4000'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'needs_panelists': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'panelists': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'panelproposals_panelist'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['precon.Panelist']"}),
            'suggested_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'panelproposals_suggested'", 'null': 'True', 'to': u"orm['precon.Panelist']"}),
            'type': ('django.db.models.fields.CharField', [], {'default': "'Panel'", 'max_length': '50'})
        },
        u'precon.panelproposalresponse': {
            'Meta': {'object_name': 'PanelProposalResponse'},
            'attending_comments': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'attending_interest': ('django.db.models.fields.CharField', [], {'default': "'not interested in attending'", 'max_length': '50'}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modification_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'panel_proposal': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['precon.PanelProposal']"}),
            'participant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['precon.Participant']"}),
            'presenting_comments': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'presenting_interest': ('django.db.models.fields.CharField', [], {'default': "'not interested in presenting'", 'max_length': '50'})
        },
        u'precon.participant': {
            'Meta': {'ordering': "['name']", 'object_name': 'Participant'},
            'anything_else': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '50'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'max_panels': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '10'}),
            'modification_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'nonce': ('django.db.models.fields.CharField', [], {'default': "'y8wm3c'", 'unique': 'True', 'max_length': '6'}),
            'panel_proposals_responded': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'participants_responded'", 'to': u"orm['precon.PanelProposal']", 'through': u"orm['precon.PanelProposalResponse']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'slots_attending': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'participants_attending'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['precon.Slot']"}),
            'slots_available': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'participants_available'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['precon.Slot']"}),
            'slots_maybe': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'participants_maybe'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['precon.Slot']"})
        },
        u'precon.room': {
            'Meta': {'object_name': 'Room'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'schedule': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rooms'", 'to': u"orm['precon.Schedule']"})
        },
        u'precon.schedule': {
            'Meta': {'object_name': 'Schedule'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'})
        },
        u'precon.siteconfig': {
            'Meta': {'object_name': 'SiteConfig'},
            'current_schedule': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['precon.Schedule']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'precon.slot': {
            'Meta': {'object_name': 'Slot'},
            'day': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'slots'", 'null': 'True', 'to': u"orm['precon.Day']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'schedule': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'slots'", 'to': u"orm['precon.Schedule']"})
        }
    }

    complete_apps = ['precon']
|
python
|
import pathlib
from . import UMRLogging
from .UMRType import ChatType, ForwardTypeEnum, DefaultForwardTypeEnum, LogLevel
from pydantic import BaseModel, validator
from typing import Dict, List, Union, Type, Optional, Generic, AnyStr, DefaultDict
from typing_extensions import Literal
import importlib
import yaml
import json
# load config from home directory
# Public API of this module.
# Fix: this was previously spelled `__ALL__`, which Python ignores entirely --
# only the lowercase `__all__` controls `from module import *`.
__all__ = [
    'config',
    'register_driver_config',
    'register_extension_config',
    'reload_config',
    'save_config',
    'load_extensions'
]

logger = UMRLogging.get_logger('Config')
def load_extensions():
    """
    Shared logic for loading both drivers and extensions (import and register only).

    Each name listed in ``config.Extensions`` is imported and bound into this
    module's globals so later code can reference the loaded module by name.
    """
    for ext_name in (config.Extensions or []):
        globals()[ext_name] = importlib.import_module(ext_name)
class BaseDriverConfig(BaseModel):
    """Minimal schema for a driver section: `Base` names the driver type.

    Concrete drivers register richer models via register_driver_config();
    reload_config() then re-validates against the union of those models.
    """
    Base: str
class BaseExtensionConfig(BaseModel):
    """Minimal schema for an extension section: `Extension` names the extension.

    Extensions register richer models via register_extension_config();
    reload_config() then re-validates against the union of those models.
    """
    Extension: str
class Default(BaseModel):
    """Catch-all forwarding rule: messages arriving on driver `From` go to
    chat `ToChat` (of `ToChatType`) on driver `To`, per `ForwardType`."""
    From: str
    To: str
    ToChat: Union[int, str]
    ToChatType: ChatType
    ForwardType: DefaultForwardTypeEnum
class Topology(BaseModel):
    """Point-to-point forwarding rule: messages from a specific chat on one
    driver are forwarded to a specific chat on another driver."""
    From: str
    FromChat: Union[int, str]
    FromChatType: ChatType
    To: str
    ToChat: Union[int, str]
    ToChatType: ChatType
    ForwardType: ForwardTypeEnum
class ForwardList(BaseModel):
    """The `ForwardList` config section: explicit Topology rules, Default
    (catch-all) rules, and a driver-name -> account-id mapping."""
    Topology: Optional[List[Topology]]
    Default: Optional[List[Default]]
    Accounts: Dict[str, Union[int, str]] = {}

    # Normalise a missing/None Topology section to an empty list so callers
    # can iterate without None checks.
    @validator('Topology')
    def generate_empty_list_if_none(cls, v):
        if not v:
            return []
        else:
            return v

    # Same normalisation for the Default section.
    @validator('Default')
    def generate_empty_list_if_none2(cls, v):
        if not v:
            return []
        else:
            return v
def construct_union(modules: List, names):
    """Build a ``typing.Union`` of all model classes in *modules*, plus the
    optional fallback class *names* (falsy to omit).

    Fix: the previous implementation assembled a source string of
    fully-qualified class names and ran it through ``eval``, which required
    every module path to be resolvable in this namespace and executed dynamic
    code needlessly. ``Union`` accepts subscription with a tuple of types
    directly, so we build the tuple and subscript it.

    Note: ``Union[int]`` collapses to ``int``; calling with no modules and no
    fallback still raises (as the eval-based version did).
    """
    members = tuple(modules)
    if names:
        members += (names,)
    return Union[members]
class BasicConfig(BaseModel):
    """First-pass schema for ~/.umr/config.yaml.

    Driver/ExtensionConfig entries are only validated against the base models
    here; reload_config() re-validates them against the concrete registered
    models once drivers and extensions have been imported.
    """
    DataRoot: str = '/root/coolq/data/image'
    LogRoot: str = '/var/log/umr'
    CommandPrefix: str = '!!'
    Extensions: Optional[List[str]]
    BotAdmin: Optional[Dict[str, List[Union[int, str]]]]
    LogLevel: Optional[Dict[str, LogLevel]]
    ForwardList: ForwardList
    Driver: Optional[Dict[str, BaseDriverConfig]]
    ExtensionConfig: Optional[Dict[str, BaseExtensionConfig]]

    # The validators below replace missing (None) sections with empty
    # containers so downstream code can iterate/index without None checks.
    @validator('Extensions', pre=True, always=True)
    def generate_empty_list_if_none(cls, v):
        return v or []
    @validator('Driver', pre=True, always=True)
    def generate_empty_dict_if_none(cls, v):
        return v or {}
    @validator('ExtensionConfig', pre=True, always=True)
    def generate_empty_dict_if_none2(cls, v):
        return v or {}
    @validator('BotAdmin', pre=True, always=True)
    def generate_empty_dict_if_none3(cls, v):
        return v or {}
    @validator('LogLevel', pre=True, always=True)
    def generate_empty_dict_if_none4(cls, v):
        return v or {}
# Parse ~/.umr/config.yaml at import time against the minimal schema; the
# driver/extension sections are re-validated later by reload_config().
home = str(pathlib.Path.home())
config = BasicConfig(**yaml.load(open(f'{home}/.umr/config.yaml'), yaml.FullLoader))
# Custom config models registered by drivers/extensions; consumed when
# reload_config() rebuilds the full schema.
driver_config = []
extension_config = []
def register_driver_config(custom_config):
    """Register a driver's config model class; it becomes part of the Driver
    section's Union schema on the next reload_config() call."""
    driver_config.append(custom_config)
def register_extension_config(custom_config):
    """Register an extension's config model class; it becomes part of the
    ExtensionConfig section's Union schema on the next reload_config() call."""
    extension_config.append(custom_config)
def reload_config():
    """Re-parse ~/.umr/config.yaml against the *full* schema.

    The Driver/ExtensionConfig sections are validated against the Union of
    every model registered via register_driver_config() /
    register_extension_config(), so this must run after load_extensions().
    Rebinds the module-level `config`.
    """
    global config

    # Same shape as BasicConfig, but with the Driver/ExtensionConfig value
    # types widened to the dynamically-registered unions.
    class FullConfig(BaseModel):
        DataRoot: str = '/root/coolq/data/image'
        LogRoot: str = '/var/log/umr'
        CommandPrefix: str = '!!'
        Extensions: Optional[List[str]]
        BotAdmin: Optional[Dict[str, List[Union[int, str]]]]
        LogLevel: Optional[Dict[str, LogLevel]]
        ForwardList: ForwardList
        Driver: Optional[Dict[str, construct_union(driver_config, BaseDriverConfig)]]
        ExtensionConfig: Optional[Dict[str, construct_union(extension_config, BaseExtensionConfig)]]
        @validator('Extensions', pre=True, always=True)
        def generate_empty_list_if_none(cls, v):
            return v or []
        @validator('Driver', pre=True, always=True)
        def generate_empty_dict_if_none(cls, v):
            return v or {}
        @validator('ExtensionConfig', pre=True, always=True)
        def generate_empty_dict_if_none2(cls, v):
            return v or {}
        @validator('BotAdmin', pre=True, always=True)
        def generate_empty_dict_if_none3(cls, v):
            return v or {}
        @validator('LogLevel', pre=True, always=True)
        def generate_empty_dict_if_none4(cls, v):
            return v or {}
    config = FullConfig(**yaml.load(open(f'{home}/.umr/config.yaml'), yaml.FullLoader))
def save_config():
    """Serialize the current `config` back to ~/.umr/config.yaml.

    Round-trips through JSON so pydantic values collapse to plain
    YAML-serializable types.
    """
    yaml.dump(json.loads(config.json()), open(f'{home}/.umr/config.yaml', 'w'), default_flow_style=False)
|
python
|
import torch.nn as nn
import torch.nn.functional as F
from CGS.gnn.IGNN.IGNNLayer import ImplicitGraph
from CGS.gnn.IGNN.utils import get_spectral_rad
from CGS.nn.MLP import MLP
from CGS.nn.MPNN import AttnMPNN
class IGNN(nn.Module):
    """Implicit GNN regressor: MPNN encoder -> implicit fixed-point layer -> MLP decoder.

    Args:
        node_dim: input node feature dimension
        edge_dim: input edge feature dimension
        lifted_dim: bias function input dim (encoder output size per node)
        hidden_dim: hidden state dim (state of the fixed-point equation)
        output_dim: decoder output dimension
        activation: activation name used by the internal MLPs
        num_hidden_gn: number of hidden graph-network blocks in the encoder
        mlp_num_neurons: encoder MLP hidden layer sizes (defaults to [128])
        reg_num_neurons: decoder MLP hidden layer sizes (defaults to [64, 32])
    """

    def __init__(self,
                 node_dim: int,
                 edge_dim: int,
                 lifted_dim: int,  # bias function input dim
                 hidden_dim: int,  # hidden state dim (state of the fp equation)
                 output_dim: int,
                 activation: str,
                 num_hidden_gn: int,
                 mlp_num_neurons: list = None,
                 reg_num_neurons: list = None):
        super(IGNN, self).__init__()

        # Fix: None sentinels instead of mutable default arguments
        # (previously `= [128]` / `= [64, 32]`, shared across all calls).
        if mlp_num_neurons is None:
            mlp_num_neurons = [128]
        if reg_num_neurons is None:
            reg_num_neurons = [64, 32]

        self.encoder = AttnMPNN(node_in_dim=node_dim,
                                edge_in_dim=edge_dim,
                                node_hidden_dim=64,
                                edge_hidden_dim=64,
                                node_out_dim=lifted_dim,
                                edge_out_dim=1,  # will be ignored
                                num_hidden_gn=num_hidden_gn,
                                node_aggregator='sum',
                                mlp_params={'num_neurons': mlp_num_neurons,
                                            'hidden_act': activation,
                                            'out_act': activation})

        self.ignn = ImplicitGraph(lifted_dim, hidden_dim, None, kappa=0.9)
        self.decoder = MLP(hidden_dim, output_dim,
                           hidden_act=activation,
                           num_neurons=reg_num_neurons)

    def forward(self, g, nf, ef):
        """
        1. Transform input graph with node/edge features to the bias terms of the fixed point equations
        2. Solve fixed point eq
        3. Decode the solution with MLP.
        """
        unf, _ = self.encoder(g, nf, ef)
        # Spectral radius of the adjacency is required by the implicit layer
        # to keep the fixed-point iteration contractive.
        adj = g.adj().to(nf.device)
        adj_rho = get_spectral_rad(adj)
        z = self.ignn(None, adj, unf.T, F.relu, adj_rho, A_orig=None).T
        pred = self.decoder(z)
        return pred
|
python
|
def interative_test_xdev_embed():
    """
    Manual/interactive check: raising inside xdev's context manager should
    drop the user into an embedded debugger shell at the raise site.

    CommandLine:
        xdoctest -m dev/interactive_embed_tests.py interative_test_xdev_embed

    Example:
        >>> interative_test_xdev_embed()
    """
    import xdev
    with xdev.embed_on_exception_context:
        raise Exception
def interative_test_ipdb_embed():
    """
    Manual/interactive check: raising inside ipdb's context manager should
    launch an ipdb post-mortem session at the raise site.

    CommandLine:
        xdoctest -m dev/interactive_embed_tests.py interative_test_ipdb_embed

    Example:
        >>> interative_test_ipdb_embed()
    """
    import ipdb
    with ipdb.launch_ipdb_on_exception():
        raise Exception
|
python
|
import unittest
from maturin_sample import hello
class SampleTestCase(unittest.TestCase):
    """Checks maturin_sample.hello() against a table of known inputs."""

    def test_hello(self):
        # (input list, expected return value)
        cases = (
            ([], 0),
            ([5], 1),
            ([9, 1, 5, 2, 3], 5),
        )
        for data, expected in cases:
            self.assertEqual(hello(data), expected)
# Run the suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
|
python
|
import pytest
from optional import Optional
from optional.something import Something
class TestSomething(object):
    """Tests for optional.something.Something construction rules."""

    def test_can_not_instantiate_with_a_none_value(self):
        # Constructing Something with value=None must raise ValueError with
        # this exact message (anchored with \A...\Z).
        with pytest.raises(ValueError, match='\\AInvalid value for Something: None\\Z'):
            Something(value=None, optional=Optional)

    def test_can_instantiate_with_any_other_value(self):
        # A non-None value wraps into a Something equal to Optional.of(value).
        assert Something(value=23, optional=Optional) == Optional.of(23)
|
python
|
from board import Board,MoveRecommendation
from math import inf as infinity
import random
def __player_turn(board: Board):
    """Prompt the human for a field number in [1, size^2] and play it.

    An empty input is silently ignored (no move is made). A non-numeric
    input raises ValueError from int().
    """
    set_field = input('Enter the number of the field you want to play?[1:' + str(board.size**2) + ']')
    print('field id', set_field)
    if(set_field!=''):
        board.player_set(int(set_field))
def negamax(board_state: Board, depth: int, is_human_turn: bool):
    """Negamax search: simulate every empty cell, recurse with negated score,
    and return the best MoveRecommendation found.

    NOTE(review): canonical negamax maximises at *every* node starting from
    -infinity, but here `best.score` starts at +infinity on the human's turn,
    so the `>` update below can never fire for that player. Whether this is a
    bug depends on Board.check_game_finished's score sign convention -- confirm.
    """
    best = MoveRecommendation(None, None, +infinity if is_human_turn else -infinity, False)
    # we have arrived at a simulated state where the game is done, or the last available cell was reached
    # now returning the score of this final move
    game_over, score = board_state.check_game_finished(is_human_turn)
    if depth == 0 or game_over:
        if(game_over): best.score = score
        return best # MoveRecommendation(-1, -1, score, game_over)
    # Loop all available cells, if we are here
    for cell in board_state.get_empty_cells():
        # Try and set all combinations
        row, col = cell[0], cell[1]
        # Simulate the next available move
        board_state.set_cell(row, col, is_human_turn)
        # recurse the function with the board and the new state set
        negamax_move = negamax(board_state, depth - 1, not(is_human_turn))
        # negamax: a position's value is the negation of its value to the opponent
        negamax_move.score = -negamax_move.score
        # we get the score of this move and all possible combinations after that
        # Undo the move, to reset the board to the actual state
        board_state.free_cell(row, col)
        if negamax_move.score > best.score:
            best = negamax_move
            best.column = col
            best.row = row
    return best
def minimax(board_state: Board, depth: int, is_human_turn: bool):
    """Classic minimax: the computer maximises the score, the human minimises.

    Returns a MoveRecommendation carrying the chosen (row, column) and score.
    The board is mutated during simulation but restored via free_cell before
    returning.
    """
    # Start from the worst value for whichever player moves at this node.
    best = MoveRecommendation(None, None, +infinity if is_human_turn else -infinity, False)
    # we have arrived at a simulated state where the game is done, or the last available cell was reached
    # now returning the score of this final move
    game_over, score = board_state.check_game_finished(is_human_turn)
    if depth == 0 or game_over:
        if(game_over): best.score = score
        return best # MoveRecommendation(-1, -1, score, game_over)
    # Loop all available cells, if we are here
    for cell in board_state.get_empty_cells():
        row, col = cell[0], cell[1]
        # Try and set all combinations
        # Simulate the next available move
        board_state.set_cell(row, col, is_human_turn)
        # recurse the function with the board and the new state set
        minimax_move = minimax(board_state, depth - 1, not(is_human_turn))
        # Undo the move, to reset the board to the actual state
        board_state.free_cell(row, col)
        if not(is_human_turn):
            # pick the max score/move when computers turn
            if minimax_move.score > best.score:
                best = minimax_move
                best.column = col
                best.row = row
        else:
            # pick the lowest score/move when players turn
            if minimax_move.score < best.score:
                best = minimax_move
                best.column = col
                best.row = row
    return best
def __computer_turn(board: Board):
    """Choose and play the computer's move.

    On an empty board the first move is picked at random (skips the search);
    otherwise minimax selects the move. Does nothing if the game is already
    finished or the board is full.
    """
    depth = len(board.get_empty_cells())
    game_over, score = board.check_game_finished(False)
    if depth == 0 or game_over:
        return
    if depth == board.size**2: # when a new board is started (first move), pick random
        row = random.randint(0,board.size-1)
        column = random.randint(0,board.size-1)
    else:
        move = minimax(board, depth, False)
        row, column = move.row, move.column
    print('Computer decided for following move:', row, '-', column)
    board.set_cell(row, column, False)
def main():
    """Interactive tic-tac-toe loop: human vs. minimax computer on a 3x3 board."""
    board = Board(3)
    # Debugging aid: preload a mid-game position.
    # board.set_state([
    #     [1 ,1 ,-1],
    #     [-1 ,1 ,1],
    #     [0 ,0 ,1]
    #     ])
    board.render()
    # check who should start
    is_humans_turn = (input('Player to start first? [y/n]: ').upper()=='Y')
    # Main loop of this game
    while not(board.board_full()) and not(board.check_game_finished(is_humans_turn)[0]): #reversing the is_humans_turn, because we want to check the previous turn
        if is_humans_turn:
            __player_turn(board)
        else:
            __computer_turn(board)
        is_humans_turn = not(is_humans_turn)
        board.render()
    # Announce the outcome.
    if(board.player_won()):
        print("The player won !!! Congratulations")
    elif(board.computer_won()):
        print("The computer won !!! He's too strong")
    elif(board.board_full()):
        print("It's a draw !")
if __name__ == "__main__": main()
|
python
|
#!/usr/bin/env python3
import os, re, json
import sqlite3
from markov import Markov
SQLITE_DATABASE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "chains.db")
CHAT_HISTORY_DIRECTORY = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "..", "@history")
def get_metadata():
    """Load Slack user/channel metadata exported alongside the chat history.

    Returns three dicts keyed by Slack ID: user display names, user real
    names, and channel names.
    """
    with open(os.path.join(CHAT_HISTORY_DIRECTORY, "metadata", "users.json"), "r") as f:
        entries = json.load(f)
        user_names = {entry["id"]: entry["name"] for entry in entries}
        user_real_names = {entry["id"]: entry["profile"]["real_name"] for entry in entries}
    with open(os.path.join(CHAT_HISTORY_DIRECTORY, "metadata", "channels.json"), "r") as f:
        entries = json.load(f)
        channel_names = {entry["id"]: entry["name"] for entry in entries}
    return user_names, user_real_names, channel_names

# Loaded once at import time; used by server_text_to_text() to resolve
# #channel / @user references.
USER_NAMES_BY_ID, USER_REAL_NAMES_BY_ID, CHANNEL_NAMES_BY_ID = get_metadata()
def server_text_to_text(server_text):
    """Returns `server_text`, a string in Slack server message format, converted into a plain text string. The transformation can lose some information for escape sequences, such as link labels."""
    assert isinstance(server_text, str), "`server_text` must be a string rather than \"{}\"".format(server_text)
    text_without_special_sequences = re.sub(r"<[^<>]*>", "", server_text)
    # Fix: the message previously contained a bare "{}" placeholder with no
    # .format() call, so the offending text was never shown.
    assert "<" not in text_without_special_sequences and ">" not in text_without_special_sequences, "Invalid special sequence in server text \"{}\", perhaps some text needs to be escaped".format(server_text)
    # process link references
    def process_special_sequence(match):
        original, body = match.group(0), match.group(1).split("|")[0]
        if body.startswith("#"): # channel reference
            return "#" + CHANNEL_NAMES_BY_ID[body[1:]] if body[1:] in CHANNEL_NAMES_BY_ID else original
        if body.startswith("@"): # user reference
            return "@" + USER_NAMES_BY_ID[body[1:]] if body[1:] in USER_NAMES_BY_ID else original
        if body.startswith("!"): # special command
            if body == "!channel": return "@channel"
            if body == "!group": return "@group"
            if body == "!everyone": return "@everyone"
        return body # link, should remove angle brackets and label in order to allow it to linkify
    raw_text = re.sub(r"<(.*?)>", process_special_sequence, server_text)
    # Fix: unescape the three HTML entities Slack encodes. The previous calls
    # were identity no-ops (e.g. replace("<", "<")) -- the entity literals had
    # evidently been lost. "&amp;" must be unescaped last so that e.g.
    # "&amp;lt;" round-trips to "&lt;" and not "<".
    return raw_text.replace("&lt;", "<").replace("&gt;", ">").replace("&amp;", "&")
def get_message_text(message):
    """Returns the text value of `message` if it is a valid text message, or `None` otherwise"""
    # Every usable message has type "message" and a string timestamp.
    if message.get("type") != "message" or not isinstance(message.get("ts"), str):
        return None
    # Normal message: text and user directly on the entry.
    if isinstance(message.get("text"), str) and isinstance(message.get("user"), str):
        return server_text_to_text(message["text"])
    # Edited message: the payload lives in a nested "message" dict.
    if message.get("subtype") == "message_changed":
        inner = message.get("message")
        if isinstance(inner, dict) and isinstance(inner.get("user"), str) and isinstance(inner.get("text"), str):
            return server_text_to_text(inner["text"])
    return None
def get_history_files(root_directory=None):
    """Returns a mapping from channel IDs to absolute file paths of their history entries.

    Scans *root_directory* (defaults to CHAT_HISTORY_DIRECTORY) recursively
    for ``*.json`` files; each file's base name is taken as the channel ID.

    Fix: the previous version created `result` and returned it *inside* the
    ``os.walk`` loop, so only the first directory visited was ever scanned and
    all subdirectories were silently ignored. The accumulator now lives
    outside the loop and is returned after the walk completes.
    """
    if root_directory is None:
        root_directory = CHAT_HISTORY_DIRECTORY
    result = {}
    for dirpath, _, filenames in os.walk(root_directory):
        for history_file in filenames:
            channel_id, extension = os.path.splitext(os.path.basename(history_file))
            if extension != ".json":
                continue
            result[channel_id] = os.path.join(dirpath, history_file)
    return result
# Rebuild the chains database from scratch: drop and recreate both tables,
# train a Markov model on every history file, then bulk-insert the model.
connection = sqlite3.connect(SQLITE_DATABASE)
connection.execute("DROP TABLE IF EXISTS counts")
connection.execute("DROP TABLE IF EXISTS chain")
connection.execute("CREATE TABLE counts (key TEXT PRIMARY KEY, count INTEGER)")
connection.execute("CREATE TABLE chain (key TEXT, next_word TEXT, occurrences INTEGER)")
connection.execute("CREATE INDEX chain_key_index ON chain (key)")
markov = Markov(2) # Markov model with 2 word look-behind
# Each history file holds one JSON message entry per line.
for channel_id, history_file in get_history_files().items():
    with open(history_file, "r") as f:
        for entry in f:
            text = get_message_text(json.loads(entry))
            if text is not None:
                markov.train(Markov.tokenize_text(text))
# Keys are word tuples; join with "\n" to get a single TEXT column value.
connection.executemany(
    "INSERT INTO counts VALUES (?, ?)",
    (("\n".join(key), occurrences) for key, occurrences in markov.counts.items())
)
connection.executemany(
    "INSERT INTO chain VALUES (?, ?, ?)",
    (("\n".join(key), next_word, occurrences) for key, next_mapping in markov.chain.items()
        for next_word, occurrences in next_mapping.items())
)
connection.commit()
connection.close()
|
python
|
#!/usr/bin/python
# Day 6 puzzle, part 1: groups are blank-line-separated paragraphs; for each
# group, count the distinct question letters answered by anyone, and sum.
fichier = open('day6_input.txt')
total = 0
for groupe in fichier.read().split('\n\n'):
    # Union of every character in the group, minus the line separators --
    # equivalent to adding each answer letter of each person to a set.
    reponses = set(groupe) - {'\n'}
    print(reponses)
    total += len(reponses)
print('fin', total)
|
python
|
from cereal import car
from common.realtime import DT_CTRL
from common.numpy_fast import interp, clip
from selfdrive.config import Conversions as CV
from selfdrive.car import apply_std_steer_torque_limits, create_gas_command
from selfdrive.car.gm import gmcan
from selfdrive.car.gm.values import DBC, CanBus, CarControllerParams, REGEN_CARS
from opendbc.can.packer import CANPacker
VisualAlert = car.CarControl.HUDControl.VisualAlert
def actuator_hystereses(final_pedal, pedal_steady):
    """Apply hysteresis to the pedal command.

    Oscillations of the requested pedal within +/- the hysteresis gap around
    the current steady value leave the command unchanged; larger moves drag
    the steady value along (offset by the gap), and a zero request snaps it
    to zero. Returns the steady value twice: (command, new steady state).
    """
    # hyst params... TODO: move these to VehicleParams
    gap = 0.01  # don't change pedal command for small oscillations within this value
    if final_pedal == 0.:
        pedal_steady = 0.
    elif final_pedal > pedal_steady + gap:
        pedal_steady = final_pedal - gap
    elif final_pedal < pedal_steady - gap:
        pedal_steady = final_pedal + gap
    # The command sent out is always the (possibly updated) steady value.
    return pedal_steady, pedal_steady
last_logged_pedal = 0.0
class CarController():
  """Builds the CAN messages for a GM car each control frame.

  update() is called once per frame and returns the list of CAN messages to
  send: steering torque, pedal interceptor gas/regen, dashboard/HUD status,
  and periodic keepalives.
  """
  def __init__(self, dbc_name, CP, VM):
    self.pedal_steady = 0.
    self.start_time = 0.
    self.apply_steer_last = 0
    self.lka_icon_status_last = (False, False)
    self.steer_rate_limited = False
    self.car_fingerprint = CP.carFingerprint
    self.params = CarControllerParams()
    # One packer per CAN bus DBC.
    self.packer_pt = CANPacker(DBC[CP.carFingerprint]['pt'])
    self.packer_obj = CANPacker(DBC[CP.carFingerprint]['radar'])
    self.packer_ch = CANPacker(DBC[CP.carFingerprint]['chassis'])
  def update(self, enabled, CS, frame, actuators,
             hud_v_cruise, hud_show_lanes, hud_show_car, hud_alert):
    """Assemble and return this frame's CAN messages (list of can_sends)."""
    P = self.params
    # Send CAN commands.
    can_sends = []
    # STEER
    lkas_enabled = enabled and not CS.out.steerWarning and CS.out.vEgo > P.MIN_STEER_SPEED
    if (frame % P.STEER_STEP) == 0:
      if lkas_enabled:
        new_steer = actuators.steer * P.STEER_MAX
        # Rate/torque limiting; steer_rate_limited flags when the request was clipped.
        apply_steer = apply_std_steer_torque_limits(new_steer, self.apply_steer_last, CS.out.steeringTorque, P)
        self.steer_rate_limited = new_steer != apply_steer
      else:
        apply_steer = 0
      self.apply_steer_last = apply_steer
      idx = (frame // P.STEER_STEP) % 4  # rolling 2-bit message counter
      can_sends.append(gmcan.create_steering_control(self.packer_pt, CanBus.POWERTRAIN, apply_steer, idx, lkas_enabled))
    ### GAS/BRAKE ###
    # no output if not enabled, but keep sending keepalive messages
    # treat pedals as one
    if CS.CP.enableGasInterceptor and self.car_fingerprint in REGEN_CARS:
      #It seems in L mode, accel / decel point is around 1/5
      #-1-------AEB------0----regen---0.15-------accel----------+1
      # Shrink gas request to 0.85, have it start at 0.2
      # Shrink brake request to 0.85, first 0.15 gives regen, rest gives AEB
      zero = 40/256
      gas = (1-zero) * actuators.gas + zero
      regen = clip(actuators.brake, 0., zero) # Make brake the same size as gas, but clip to regen
      # aeb = actuators.brake*(1-zero)-regen # For use later, braking more than regen
      final_pedal = gas - regen
      if not enabled:
        # Since no input technically maps to 0.15, send 0.0 when not enabled to avoid
        # controls mismatch.
        final_pedal = 0.0
      #TODO: Use friction brake via AEB for harder braking
      # apply pedal hysteresis and clip the final output to valid values.
      final_pedal, self.pedal_steady = actuator_hystereses(final_pedal, self.pedal_steady)
      pedal_gas = clip(final_pedal, 0., 1.)
      if (frame % 4) == 0:
        idx = (frame // 4) % 4
        # send exactly zero if apply_gas is zero. Interceptor will send the max between read value and apply_gas.
        # This prevents unexpected pedal range rescaling
        can_sends.append(create_gas_command(self.packer_pt, pedal_gas, idx))
    # Send dashboard UI commands (ACC status), 25hz
    if (frame % 4) == 0:
      send_fcw = hud_alert == VisualAlert.fcw
      can_sends.append(gmcan.create_acc_dashboard_command(self.packer_pt, CanBus.POWERTRAIN, enabled, hud_v_cruise * CV.MS_TO_KPH, hud_show_car, send_fcw))
    # Radar needs to know current speed and yaw rate (50hz),
    # and that ADAS is alive (10hz)
    time_and_headlights_step = 10
    # tt = frame * DT_CTRL
    # The obstacle-bus messages below are currently disabled (commented out);
    # the idx bookkeeping is kept for when they are re-enabled.
    if frame % time_and_headlights_step == 0:
      idx = (frame // time_and_headlights_step) % 4
      #can_sends.append(gmcan.create_adas_time_status(CanBus.OBSTACLE, int((tt - self.start_time) * 60), idx))
      #can_sends.append(gmcan.create_adas_headlights_status(self.packer_obj, CanBus.OBSTACLE))
    speed_and_accelerometer_step = 2
    if frame % speed_and_accelerometer_step == 0:
      idx = (frame // speed_and_accelerometer_step) % 4
      #can_sends.append(gmcan.create_adas_steering_status(CanBus.OBSTACLE, idx))
      #can_sends.append(gmcan.create_adas_accelerometer_speed_status(CanBus.OBSTACLE, CS.out.vEgo, idx))
    if frame % P.ADAS_KEEPALIVE_STEP == 0:
      can_sends += gmcan.create_adas_keepalive(CanBus.POWERTRAIN)
    # Show green icon when LKA torque is applied, and
    # alarming orange icon when approaching torque limit.
    # If not sent again, LKA icon disappears in about 5 seconds.
    # Conveniently, sending camera message periodically also works as a keepalive.
    lka_active = lkas_enabled == 1
    lka_critical = lka_active and abs(actuators.steer) > 0.9
    lka_icon_status = (lka_active, lka_critical)
    if frame % P.CAMERA_KEEPALIVE_STEP == 0 or lka_icon_status != self.lka_icon_status_last:
      steer_alert = hud_alert == VisualAlert.steerRequired
      can_sends.append(gmcan.create_lka_icon_command(CanBus.SW_GMLAN, lka_active, lka_critical, steer_alert))
      self.lka_icon_status_last = lka_icon_status
    return can_sends
|
python
|
import json
import pytest
from common.assertions import equal_json_strings
from common.methods import anonymize, anonymizers, decrypt
@pytest.mark.api
def test_given_anonymize_called_with_valid_request_then_expected_valid_response_returned():
    """A valid request returns 200 with masked phone and replaced name items."""
    request_body = """
    {
        "text": "hello world, my name is Jane Doe. My number is: 034453334",
        "anonymizers": {
            "DEFAULT": { "type": "replace", "new_value": "ANONYMIZED" },
            "PHONE_NUMBER": { "type": "mask", "masking_char": "*", "chars_to_mask": 4, "from_end": true }
        },
        "analyzer_results": [
            { "start": 24, "end": 32, "score": 0.8, "entity_type": "NAME" },
            { "start": 24, "end": 28, "score": 0.8, "entity_type": "FIRST_NAME" },
            { "start": 29, "end": 32, "score": 0.6, "entity_type": "LAST_NAME" },
            { "start": 48, "end": 57, "score": 0.95,
                "entity_type": "PHONE_NUMBER" }
        ]
    }
    """
    response_status, response_content = anonymize(request_body)
    expected_response = (
        """{"text": "hello world, my name is ANONYMIZED. My number is: 03445****", "items": [{"anonymizer": "mask", "entity_type": "PHONE_NUMBER", "start": 50, "end": 59, "anonymized_text": "03445****"}, {"anonymizer": "replace", "entity_type": "NAME", "start": 24, "end": 34, "anonymized_text": "ANONYMIZED"}]}"""
    )
    assert response_status == 200
    assert equal_json_strings(expected_response, response_content)
@pytest.mark.api
def test_given_anonymize_called_with_empty_text_then_invalid_input_message_returned():
    """An empty `text` field is rejected with 422 and a specific error message."""
    request_body = """
    {
        "text": "",
        "anonymizers": {
            "DEFAULT": { "type": "replace", "new_value": "ANONYMIZED" }
        },
        "analyzer_results": [
            { "start": 24, "end": 32, "score": 0.8, "entity_type": "NAME" }
        ]
    }
    """
    response_status, response_content = anonymize(request_body)
    expected_response = '{"error": "Invalid input, text can not be empty"}'
    assert response_status == 422
    assert equal_json_strings(expected_response, response_content)
@pytest.mark.api
def test_given_anonymize_called_with_empty_analyzer_results_then_unchanged_text_is_returned():
    """With no analyzer results there is nothing to anonymize: text passes through."""
    request_body = """
    {
        "text": "hello world! nice to meet you!",
        "anonymizers": {
            "DEFAULT": { "type": "replace", "new_value": "ANONYMIZED" },
            "PHONE_NUMBER": { "type": "mask", "masking_char": "*", "chars_to_mask": 4, "from_end": true }
        },
        "analyzer_results": [
        ]
    }
    """
    response_status, response_content = anonymize(request_body)
    expected_response = """{"text": "hello world! nice to meet you!", "items": []}"""
    assert response_status == 200
    assert equal_json_strings(expected_response, response_content)
@pytest.mark.api
def test_given_anonymize_called_with_deformed_body_then_internal_server_error_returned():
    """Malformed JSON (trailing commas) yields a 500 internal server error."""
    request_body = """
    {
        "text": "hello world, my name is Jane Doe. My number is: 034453334",
        "anonymizers": {
            "DEFAULT": {"type": "replace", "new_value": "ANONYMIZED"},
        },
        "analyzer_results": [
            {"start": 24, "end": 32, "score": 0.8, "entity_type": "NAME"},
            {"start": 24, "end": 28, "score": 0.8, "entity_type": "FIRST_NAME"},
        ]
    }
    """
    response_status, response_content = anonymize(request_body)
    expected_response = '{"error": "Internal server error"}'
    assert response_status == 500
    assert equal_json_strings(expected_response, response_content)
@pytest.mark.api
def test_given_anonymizers_called_then_expected_anonymizers_list_returned():
    """The anonymizers endpoint lists every built-in anonymizer type."""
    response_status, response_content = anonymizers()
    expected_response = """
        ["hash", "mask", "redact", "replace", "encrypt"]
    """
    assert response_status == 200
    assert equal_json_strings(expected_response, response_content)
@pytest.mark.api
def test_given_decrypt_called_with_encrypted_text_then_decrypted_text_returned():
    """Decrypting a known ciphertext with the matching 128-bit key succeeds."""
    request_body = """
    {
        "key": "1111111111111111",
        "text": "e6HnOMnIxbd4a8Qea44LshQDnjvxwzBIaAz+YqHNnMW2mC5r3AWoay8Spsoajyyy"
    }
    """
    response_status, response_content = decrypt(request_body)
    expected_response = """
    {
        "result": "text_for_encryption"
    }
    """
    assert response_status == 200
    assert equal_json_strings(expected_response, response_content)
@pytest.mark.api
def test_given_decrypt_called_with_invalid_key_then_invalid_input_response_returned():
    """A key of the wrong length is rejected with 422."""
    request_body = """
    {
        "key": "invalidkey",
        "text": "e6HnOMnIxbd4a8Qea44LshQDnjvxwzBIaAz+YqHNnMW2mC5r3AWoay8Spsoajyyy"
    }
    """
    response_status, response_content = decrypt(request_body)
    expected_response = """
    {
        "error": "Invalid input, key must be of length 128, 192 or 256 bits"
    }
    """
    assert response_status == 422
    assert equal_json_strings(expected_response, response_content)
@pytest.mark.api
def test_given_decrypt_called_with_missing_key_then_invalid_input_response_returned():
    """Omitting the `key` parameter is rejected with 422."""
    request_body = """
    {
        "text": "e6HnOMnIxbd4a8Qea44LshQDnjvxwzBIaAz+YqHNnMW2mC5r3AWoay8Spsoajyyy"
    }
    """
    response_status, response_content = decrypt(request_body)
    expected_response = """
    {
        "error": "Expected parameter key"
    }
    """
    assert response_status == 422
    assert equal_json_strings(expected_response, response_content)
@pytest.mark.api
def test_given_decrypt_called_with_missing_text_then_invalid_input_response_returned():
    """Omitting the `text` parameter is rejected with 422."""
    request_body = """
    {
        "key": "1111111111111111"
    }
    """
    response_status, response_content = decrypt(request_body)
    expected_response = """
    {
        "error": "Expected parameter text"
    }
    """
    assert response_status == 422
    assert equal_json_strings(expected_response, response_content)
@pytest.mark.api
def test_given_decrypt_called_with_missing_payload_then_bad_request_response_returned():
    """An empty JSON payload is rejected with 400."""
    request_body = """
    { }
    """
    response_status, response_content = decrypt(request_body)
    expected_response = """
    {
        "error": "Invalid request json"
    }
    """
    assert response_status == 400
    assert equal_json_strings(expected_response, response_content)
@pytest.mark.api
def test_given_encrypt_called_then_decrypt_returns_the_original_encrypted_text():
    """Round-trip: encrypt via anonymize, then decrypt back to the original text."""
    text_for_encryption = "text_for_encryption"
    key = "1111111111111111"
    anonymize_request = {
        "text": text_for_encryption,
        "anonymizers": {"DEFAULT": {"type": "encrypt", "key": key}},
        "analyzer_results": [
            {
                "start": 0,
                "end": len(text_for_encryption),
                "score": 0.8,
                "entity_type": "NAME",
            }
        ],
    }
    _, anonymize_response_content = anonymize(json.dumps(anonymize_request))
    encrypted_text = json.loads(anonymize_response_content)["text"]
    decrypt_request = {"text": encrypted_text, "key": key}
    _, decrypt_response_content = decrypt(json.dumps(decrypt_request))
    decrypted_text = json.loads(decrypt_response_content)["result"]
    # The ciphertext must differ from the plaintext, and decrypt must invert it.
    assert encrypted_text != text_for_encryption
    assert decrypted_text == text_for_encryption
|
python
|
import sys
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
import maya.cmds as cmds
__author__ = 'Haarm-Pieter Duiker'
__copyright__ = 'Copyright (C) 2016 - Duiker Research Corp'
__license__ = ''
__maintainer__ = 'Haarm-Pieter Duiker'
__email__ = '[email protected]'
__status__ = 'Production'
__major_version__ = '1'
__minor_version__ = '0'
__change_version__ = '0'
__version__ = '.'.join((__major_version__,
__minor_version__,
__change_version__))
kPluginNodeName = "PBRTSubSurfaceMaterial"
kPluginNodeClassify = "shader/surface"
kPluginNodeId = OpenMaya.MTypeId(0x8704D)
class subsurface(OpenMayaMPx.MPxNode):
    """Maya dependency node exposing PBRT "subsurface" material parameters.

    The attribute MObjects (mName, mG, ...) are installed as *class*
    attributes by nodeInitializer(). compute() only writes black to outColor;
    it exists so the node plays nicely in the shading graph.
    """
    def __init__(self):
        OpenMayaMPx.MPxNode.__init__(self)

        # NOTE(review): the assignments below create *local* variables that are
        # discarded when __init__ returns -- the attributes actually used are
        # the class attributes set in nodeInitializer(). Looks like dead code;
        # confirm before removing.
        mName = OpenMaya.MObject()
        mG = OpenMaya.MObject()
        mScale = OpenMaya.MObject()
        mEta = OpenMaya.MObject()
        mSigmaA = OpenMaya.MObject()
        mSigmaS = OpenMaya.MObject()
        mKr = OpenMaya.MObject()
        mKt = OpenMaya.MObject()
        mRemapRoughness = OpenMaya.MObject()
        mURoughness = OpenMaya.MObject()
        mVRoughness = OpenMaya.MObject()
        mBump = OpenMaya.MObject()
        mOutColor = OpenMaya.MObject()

    def compute(self, plug, block):
        # Only outColor is computed; every other plug is unknown to this node.
        if plug == subsurface.mOutColor:
            resultColor = OpenMaya.MFloatVector(0.0,0.0,0.0)
            outColorHandle = block.outputValue( subsurface.mOutColor )
            outColorHandle.setMFloatVector(resultColor)
            outColorHandle.setClean()
        else:
            return OpenMaya.kUnknownParameter
def nodeCreator():
    """Factory handed to Maya's registerNode; returns a fresh subsurface node."""
    return subsurface()
def nodeInitializer():
    """Create and register all attributes of the subsurface node.

    Three stages, each with its own try block so failures are reported
    separately: (1) create the attribute MObjects and store them on the
    subsurface class, (2) addAttribute() each one, (3) declare that Kr
    affects outColor so the node participates in dirty propagation.
    """
    nAttr = OpenMaya.MFnNumericAttribute()
    eAttr = OpenMaya.MFnEnumAttribute()
    sAttr = OpenMaya.MFnTypedAttribute()
    try:
        # Preset enum: named measured materials for the scattering parameters.
        subsurface.mName = eAttr.create("Preset", "pre" )
        eAttr.setKeyable(1)
        eAttr.setStorable(1)
        eAttr.setReadable(1)
        eAttr.setWritable(1)
        Materials = ["None",
                     "Apple",
                     "Chicken1",
                     "Chicken2",
                     "Cream",
                     "Ketchup",
                     "Marble",
                     "Potato",
                     "Skimmilk",
                     "Skin1",
                     "Skin2",
                     "Spectralon",
                     "Wholemilk",
                     "Lowfat Milk",
                     "Reduced Milk",
                     "Regular Milk",
                     "Espresso",
                     "Mint Mocha Coffee",
                     "Lowfat Soy Milk",
                     "Regular Soy Milk",
                     "Lowfat Chocolate Milk",
                     "Regular Chocolate Milk",
                     "Coke",
                     "Pepsi",
                     "Sprite",
                     "Gatorade",
                     "Chardonnay",
                     "White Zinfandel",
                     "Merlot",
                     "Budweiser Beer",
                     "Coors Light Beer",
                     "Clorox",
                     "Apple Juice",
                     "Cranberry Juice",
                     "Grape Juice",
                     "Ruby Grapefruit Juice",
                     "White Grapefruit Juice",
                     "Shampoo",
                     "Strawberry Shampoo",
                     "Head & Shoulders Shampoo",
                     "Lemon Tea Powder",
                     "Orange Powder",
                     "Pink Lemonade Powder",
                     "Cappuccino Powder",
                     "Salt Powder",
                     "Sugar Powder",
                     "Suisse Mocha Powder",
                     "Pacific Ocean Surface Water"]
        # Enum field index doubles as the preset id.
        for i in range(len(Materials)):
            eAttr.addField(Materials[i], i)
        # Default to Skin1
        eAttr.setDefault(9)
        subsurface.mG = nAttr.create("g","g", OpenMaya.MFnNumericData.kFloat)
        nAttr.setKeyable(1)
        nAttr.setStorable(1)
        nAttr.setReadable(1)
        nAttr.setWritable(1)
        nAttr.setConnectable(0)
        nAttr.setDefault(0.0)
        subsurface.mScale = nAttr.create("scale","sc", OpenMaya.MFnNumericData.kFloat)
        nAttr.setKeyable(1)
        nAttr.setStorable(1)
        nAttr.setReadable(1)
        nAttr.setWritable(1)
        nAttr.setConnectable(0)
        nAttr.setDefault(1.0)
        subsurface.mEta = nAttr.create("eta","eta", OpenMaya.MFnNumericData.kFloat)
        nAttr.setKeyable(1)
        nAttr.setStorable(1)
        nAttr.setReadable(1)
        nAttr.setWritable(1)
        nAttr.setConnectable(0)
        nAttr.setDefault(1.33)
        # Absorption / scattering coefficients (per-channel colors).
        subsurface.mSigmaA = nAttr.createColor("sigma_a", "sa")
        nAttr.setKeyable(1)
        nAttr.setStorable(1)
        nAttr.setReadable(1)
        nAttr.setWritable(1)
        nAttr.setDefault(0.0011, 0.0024, 0.014)
        subsurface.mSigmaS = nAttr.createColor("sigma_s", "ss")
        nAttr.setKeyable(1)
        nAttr.setStorable(1)
        nAttr.setReadable(1)
        nAttr.setWritable(1)
        nAttr.setDefault(2.55, 3.21, 3.77)
        subsurface.mKr = nAttr.createColor("Kr", "kr")
        nAttr.setKeyable(1)
        nAttr.setStorable(1)
        nAttr.setReadable(1)
        nAttr.setWritable(1)
        nAttr.setDefault(1.0, 1.0, 1.0)
        subsurface.mKt = nAttr.createColor("Kt", "kt")
        nAttr.setKeyable(1)
        nAttr.setStorable(1)
        nAttr.setReadable(1)
        nAttr.setWritable(1)
        nAttr.setDefault(1.0, 1.0, 1.0)
        subsurface.mRemapRoughness = nAttr.create("remapRoughness", "rr", OpenMaya.MFnNumericData.kBoolean, True)
        nAttr.setKeyable(1)
        nAttr.setStorable(1)
        nAttr.setReadable(1)
        nAttr.setWritable(1)
        nAttr.setConnectable(0)
        subsurface.mURoughness = nAttr.create("uRoughness","ur", OpenMaya.MFnNumericData.kFloat)
        nAttr.setKeyable(1)
        nAttr.setStorable(1)
        nAttr.setReadable(1)
        nAttr.setWritable(1)
        nAttr.setDefault(-1.0)
        subsurface.mVRoughness = nAttr.create("vRoughness","vr", OpenMaya.MFnNumericData.kFloat)
        nAttr.setKeyable(1)
        nAttr.setStorable(1)
        nAttr.setReadable(1)
        nAttr.setWritable(1)
        nAttr.setDefault(-1.0)
        subsurface.mBump = nAttr.create("bumpmap", "b", OpenMaya.MFnNumericData.kFloat)
        nAttr.setKeyable(1)
        nAttr.setStorable(1)
        nAttr.setReadable(1)
        nAttr.setWritable(1)
        nAttr.setDefault(-1.0)
        # Output: not storable/writable, computed by compute().
        subsurface.mOutColor = nAttr.createColor("outColor", "oc")
        nAttr.setStorable(0)
        nAttr.setHidden(0)
        nAttr.setReadable(1)
        nAttr.setWritable(0)
    except:
        sys.stderr.write("Failed to create attributes\n")
        raise
    try:
        subsurface.addAttribute(subsurface.mName)
        subsurface.addAttribute(subsurface.mG)
        subsurface.addAttribute(subsurface.mScale)
        subsurface.addAttribute(subsurface.mEta)
        subsurface.addAttribute(subsurface.mSigmaA)
        subsurface.addAttribute(subsurface.mSigmaS)
        subsurface.addAttribute(subsurface.mKr)
        subsurface.addAttribute(subsurface.mKt)
        subsurface.addAttribute(subsurface.mRemapRoughness)
        subsurface.addAttribute(subsurface.mURoughness)
        subsurface.addAttribute(subsurface.mVRoughness)
        subsurface.addAttribute(subsurface.mBump)
        subsurface.addAttribute(subsurface.mOutColor)
    except:
        sys.stderr.write("Failed to add attributes\n")
        raise
    try:
        subsurface.attributeAffects (subsurface.mKr, subsurface.mOutColor)
    except:
        sys.stderr.write("Failed in setting attributeAffects\n")
        raise
# initialize the script plug-in
def initializePlugin(mobject):
    """Maya entry point: register the subsurface node as a surface shader."""
    mplugin = OpenMayaMPx.MFnPlugin(mobject)
    try:
        mplugin.registerNode( kPluginNodeName, kPluginNodeId, nodeCreator,
                              nodeInitializer, OpenMayaMPx.MPxNode.kDependNode, kPluginNodeClassify )
    except:
        sys.stderr.write( "Failed to register node: %s" % kPluginNodeName )
        raise
# uninitialize the script plug-in
def uninitializePlugin(mobject):
    """Entry point Maya calls when the plug-in is unloaded: deregister the node type."""
    plugin = OpenMayaMPx.MFnPlugin(mobject)
    try:
        plugin.deregisterNode(kPluginNodeId)
    except:
        # Bare except kept deliberately: report and re-raise whatever Maya threw.
        sys.stderr.write( "Failed to deregister node: %s" % kPluginNodeName )
        raise
|
python
|
import os
import subprocess
from multiprocessing import Pool, cpu_count
import numpy as np
from energy_demand.read_write import read_weather_data
def my_function(simulation_number):
    """Launch the smif model run selected by *simulation_number* (0-11)."""
    print('simulation_number ' + str(simulation_number))
    # Build the 12 run commands: scenarios A-D, each in three variants
    # (base, domestic hot water, domestic hot water + space heating).
    scenarios = ("A", "B", "C", "D")
    variants = ("", "_dm_water", "_dm_water_space")
    run_commands = [
        "smif run energy_demand_constrained_{0}{1}".format(scenario, variant)
        for scenario in scenarios
        for variant in variants
    ]
    # Run smif through the shell, blocking until the run finishes.
    os.system(run_commands[simulation_number])
# NOTE(review): range(1) only executes simulation 0, yet the trailing comment
# says "all scenarios"; my_function defines 12 commands, so range(12) may be
# intended — confirm before changing.
simulation_number = range(1) #all scenarios
if __name__ == "__main__":
    # Use half the available cores; each worker runs one blocking smif call.
    with Pool(int(cpu_count()/2)) as pool:
        pool.map(
            my_function,
            simulation_number,
            chunksize=1)
'''
for i in range(2):
# Activate virtual environement
bashCommand = "activate ed"
os.system(bashCommand)
# Run smif
bashCommand = "smif -v run ed_constrained_pop-baseline16_econ-c16_fuel-c16"
os.system(bashCommand)
#process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
#output, error = process.communicate()
'''
'''
#import this
from multiprocessing import Pool, cpu_count
#import any other packages
import numpy as np
def my_function(simulation_number):
print('simulation_number')
return
simulation_list = [1,2,3,4,5,6,7,8,9,10]
if __name__ == "__main__":
with Pool(int(cpu_count()/2)) as pool:
pool.map(my_function,simulation_list,chunksize=1)
'''
'''
#import sh
#sh.cd('C:/Users/cenv0553/ed')
#print(sh.pwd())
#stream = os.popen("cd C:/Users/cenv0553/ed")
'''
|
python
|
'''Netconf implementation for IOSXE devices'''
from ncclient import manager
from ncclient.transport.errors import TransportError
from ncclient.operations.rpc import RPCError
import xmltodict
def compare_proposed_to_running(proposed_config, running_config):
    '''Return diff between *proposed_config* and *running_config*.

    The result is the list of commands to commit to the device: every
    proposed command not already present in the running config, plus only
    those "no " commands whose target line actually exists in the running
    config (all other "no " commands are disregarded).

    Note: *proposed_config* is mutated in place (empty lines removed),
    matching the original behaviour.
    '''
    # Remove empty lines from the playbook input. The original removed
    # items while iterating, which skips consecutive empty lines; the
    # slice-assignment keeps the caller-visible in-place mutation.
    proposed_config[:] = [line for line in proposed_config if len(line) != 0]
    final_config = proposed_config[:]
    # all commands starting with "no "
    no_commands = [
        line.strip() for line in final_config if line.startswith('no ')
    ]
    # all other commands
    commands = [
        line.strip() for line in final_config if not line.startswith('no ')
    ]
    # Keep a "no " command only when the line it negates is present in the
    # running config; drop proposed commands that are already configured.
    no_commands_real = []
    for line in running_config:
        for no_line in no_commands:
            # BUG FIX: the original used no_line.lstrip('no '), which strips
            # the *characters* 'n', 'o' and ' ' (mangling e.g. "no ntp server"
            # into "tp server"); slice off the three-character prefix instead.
            if line == no_line[3:]:
                no_commands_real.append(no_line)
        if line in commands:
            commands.remove(line)
    return commands + no_commands_real
def reconnect_device(func):
    '''Decorator for IOSXEDevice methods.

    When ``self.reconnect`` is truthy, retry *func* once after calling
    ``self.connect()`` if ncclient raises a TransportError. This typically
    happens when the router has dropped the session due to inactivity.
    '''
    from functools import wraps  # preserve the wrapped method's metadata

    @wraps(func)
    def inner(self, *args, **kwargs):
        '''Wrap the decorated method, reconnecting on a dropped session.'''
        # Truthiness test instead of the original "== True" comparison.
        if not self.reconnect:
            return func(self, *args, **kwargs)
        try:
            return func(self, *args, **kwargs)
        except TransportError:
            self.connect()
            return func(self, *args, **kwargs)
    return inner
class IfMissingError(Exception):
    '''Raised when the requested interface is absent from the device's running config.'''
    pass
class BGPMissingError(Exception):
    '''Raised when no "router bgp" block exists in the device's running config.'''
    pass
class VRFMissingError(Exception):
    '''Raised when the requested VRF definition is absent from the running config.'''
    pass
class ConfigDeployError(Exception):
    '''Raised when a configuration payload is rejected by the router (RPCError).'''
    pass
class IOSXEDevice(object):
    '''Implements methods for configuration retrieval and update.

    All NETCONF traffic goes through an ncclient session stored in
    ``self.handle``; call :meth:`connect` before any other method.
    '''

    def __init__(self, hostname, username, password, reconnect=True, port=22):
        # reconnect=True lets decorated methods transparently re-establish
        # the session if the router dropped it (see reconnect_device).
        self.hostname = hostname
        self.username = username
        self.password = password
        self.reconnect = reconnect
        self.port = port
        self.handle = None

    def connect(self):
        '''Open a NETCONF-over-SSH session and store it in ``self.handle``.

        (The original docstring claimed a True return; the method returns None.)
        '''
        self.handle = manager.connect_ssh(
            self.hostname,
            username=self.username,
            password=self.password,
            port=self.port,
            hostkey_verify=False)

    def disconnect(self):
        '''Close the NETCONF session.

        Returns True when the session was already disconnected; otherwise None.
        '''
        try:
            self.handle.close_session()
        except TransportError:
            return True  # already disconnected

    @reconnect_device
    def get_config(self):
        '''Returns running config in device as a list of lines.'''
        response = xmltodict.parse(self.handle.get().xml)
        return response['rpc-reply']['data']['cli-config-data-block'].split(
            '\n')

    @reconnect_device
    def exec_command(self, command):
        '''Returns output of executed *command* as a list of lines.'''
        netconf_filter = """
        <filter>
            <config-format-text-block>
                <text-filter-spec>| begin ^end </text-filter-spec>
            </config-format-text-block>
            <oper-data-format-text-block>
                <exec>{command}</exec>
            </oper-data-format-text-block>
        </filter>""".format(command=command)
        response = xmltodict.parse(self.handle.get(netconf_filter).xml)
        return response['rpc-reply'] \
            ['data']['cli-oper-data-block']['item']['response'].split('\n')

    @reconnect_device
    def edit_config(self, commands):
        '''Returns True if commit of *commands* to the running configuration
        is successful.

        Raises ConfigDeployError when the device rejects the payload.
        '''
        config = """
        <config>
        <cli-config-data-block>
        {commands}
        </cli-config-data-block>
        </config>""".format(commands=commands)
        try:
            response = xmltodict.parse(
                self.handle.edit_config(
                    target='running', config=config).xml)
            return 'ok' in response['rpc-reply']  # Got <ok /> tag
        except RPCError:
            raise ConfigDeployError

    @reconnect_device
    def save_config(self):
        '''Returns True if saving the running configuration to startup succeeds.'''
        return '[OK]' in self.exec_command('copy running startup')

    def _get_config_section(self, prefix, missing_error):
        '''Return the running-config block whose first line starts with *prefix*.

        The block runs until the next "!" terminator line. Raises
        *missing_error* when no such block exists. Shared by the
        get_*_config methods, which previously duplicated this scan.
        '''
        section = None
        in_section = False
        for line in self.get_config():
            if not in_section:
                if line.startswith(prefix):
                    section = [line]
                    in_section = True
            elif line.startswith('!'):  # end of block
                break
            else:
                section.append(line)
        if section is None:
            raise missing_error
        return [x.strip('\n') for x in section]

    def get_interface_config(self, interface_name):
        '''Return configuration lines for *interface_name*.

        Raises IfMissingError when the interface does not exist.
        '''
        return self._get_config_section(
            'interface {interface_name}'.format(interface_name=interface_name),
            IfMissingError)

    def get_bgp_config(self):
        '''Return the "router bgp" configuration block.

        Raises BGPMissingError when BGP is not configured.
        '''
        return self._get_config_section('router bgp', BGPMissingError)

    def get_vrf_definition_config(self, vrf_name):
        '''Return the "vrf definition" block for *vrf_name*.

        Raises VRFMissingError when the VRF is not configured.
        '''
        return self._get_config_section(
            'vrf definition {0}'.format(vrf_name), VRFMissingError)
|
python
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/03_model_splits.ipynb (unless otherwise specified).
__all__ = ['bert_SeqClassification_split', 'roberta_SeqClassification_split', 'gpt2_lmhead_split',
'distilbert_SeqClassification_split', 'albert_SeqClassification_split']
# Cell
#export
from fastcore.all import *
from fastai2.basics import *
from transformers import AutoModelForSequenceClassification, AutoModelWithLMHead
# Cell
''' Print the model, look at its architecture, then write down the split '''
def bert_SeqClassification_split(m:nn.Module):
    "Group a BERT sequence-classifier into layer groups for discriminative learning rates."
    # 12 layers, 110M params
    layer_groups = L(m.bert.embeddings, *m.bert.encoder.layer, m.bert.pooler, m.classifier)
    return layer_groups.map(params)
# Cell
def roberta_SeqClassification_split(m:nn.Module):
    "Group a RoBERTa sequence-classifier into layer groups for discriminative learning rates."
    layer_groups = L(m.roberta.embeddings, *m.roberta.encoder.layer, m.roberta.pooler, m.classifier)
    return layer_groups.map(params)
# Cell
def gpt2_lmhead_split(m:nn.Module):
    "Group a GPT-2 LM-head model into layer groups for discriminative learning rates."
    # 12-layer, 768-hidden, 12-heads, 117M parameters.
    layer_groups = L(m.transformer.wte, m.transformer.wpe, *m.transformer.h, m.lm_head)
    return layer_groups.map(params)
# Cell
def distilbert_SeqClassification_split(m:nn.Module):
    "Group a DistilBERT sequence-classifier into layer groups for discriminative learning rates."
    # 6 layers, 66M params
    layer_groups = L(m.distilbert.embeddings, *m.distilbert.transformer.layer, m.pre_classifier, m.classifier)
    return layer_groups.map(params)
def albert_SeqClassification_split(m: nn.Module):
    "Group an ALBERT sequence-classifier into layer groups for discriminative learning rates."
    layer_groups = L(m.albert.embeddings, *m.albert.encoder.albert_layer_groups, m.albert.pooler, m.classifier)
    return layer_groups.map(params)
|
python
|
from .main import Wav2Vec2STTTorch
|
python
|
import subprocess
import logging
from subprocess import PIPE
import tempfile
import json, os, re
from github import Github, GithubException
from datetime import datetime
"""
search-demo-mkdocs-material Submodule Update PR for Uncle Archie
Notes:
- search-demo is private-www
- fake-docs is submodule
- install webhooks for uncle archie
- make changes to fake-docs (submodule) and make a pull request
- merge the pull request to trigger the hook function
Description:
This is a bit of an odd "CI test" because it isn't exactly a CI test, but it is
part of a step-by-step CI workflow.
This hook listens for incoming push to master events from fake-docs
When this type of event occurs, the hook opens an "update submodules" PR
in the search-demo-mkdocs-material repo
(At that point, a new webhook is triggered and Uncle Archie will run a continuous
integration test on the newly-opened pull request.)
"""
# Web-server document root; CI output files are written under <HTDOCS>/output
# (see record_and_check_output below).
HTDOCS="/www/archie.nihdatacommons.us/htdocs"
def process_payload(payload, meta, config):
    """
    Look for any push events to the repositories
    that are search-demo-mkdocs-material submodules.
    When we get a push event, we should figure out
    whether it is on the master branch, and if so,
    we open a new pull request in search-demo-mkdocs-material
    that updates this submodule.
    Strategy:
    - use the shell, because it will work
    - clone a local copy of private-www
    - create a new branch
    - update submodules
    - push new branch to github
    - use hub (https://hub.github.com/) to create PR from command line
    $ GITHUB_TOKEN="XXXXX" hub pull-request -b charlesreid1:master -h charlesreid1:fix-readme -m 'Fix readme'
    """
    # Set parameters for the submodule update PR opener
    params = {
        'repo_whitelist' : ['charlesreid1/fake-docs'],
        'task_name' : 'Uncle Archie search-demo-mkdocs-material Submodules Update PR',
        'pass_msg' : 'The search-demo-mkdocs-material submodules update PR passed!',
        'fail_msg' : 'The search-demo-mkdocs-material submodules update PR failed.',
    }
    repo_name = payload['repository']['name']
    full_repo_name = payload['repository']['full_name']
    sub_name = payload['repository']['name']
    full_sub_name = payload['repository']['full_name']
    # This must be a whitelisted submodule repo
    if full_repo_name not in params['repo_whitelist']:
        logging.debug("Skipping search demo submodule PR: this is not the search-demo-mkdocs-material repo")
        return
    # This must be a pull request
    if 'pull_request' not in payload.keys():
        logging.debug("Skipping search demo submodule PR: this is not a pull request")
        return
    if 'action' not in payload.keys():
        logging.debug("Skipping search demo submodule PR: this is not a pull request")
        return
    if payload['action']!='closed':
        logging.debug("Skipping search demo submodule PR: this pull request has not been closed yet")
        return
    # We want PRs that are being merged
    if 'merge_commit_sha' not in payload['pull_request']:
        logging.debug("Skipping search demo submodule PR: this pull request was not merged")
        # BUG FIX: the original logged this case but fell through and
        # processed closed-without-merge pull requests anyway.
        return
    # -----------------------------------------------
    # start private-www submodule update PR
    logging.info("Starting search demo submodule PR for %s"%(full_repo_name))
    unique = datetime.now().strftime("%Y%m%d%H%M%S")
    unique_filename = "search_demo_update_submodules_%s"%(unique)
    ######################
    # make space.
    ######################
    # NOTE(review): this scratch directory is never cleaned up; consider
    # shutil.rmtree() in a finally block. (The original also opened an
    # os.devnull handle that was never used or closed; removed.)
    scratch_dir = tempfile.mkdtemp()
    ######################
    # clone.
    ######################
    # Remember: you can only read() the output
    # of a PIPEd process once.
    abort = False
    parent_repo_name = "search-demo-mkdocs-material"
    # This is always the repo we clone
    parent_repo_url = "[email protected]:charlesreid1/%s"%(parent_repo_name)
    # get the API token
    token = config['github_access_token']
    clonecmd = ['git','clone','--recursive','-b','master',parent_repo_url]
    logging.debug("Running clone cmd %s"%(' '.join(clonecmd)))
    cloneproc = subprocess.Popen(
            clonecmd,
            stdout=PIPE,
            stderr=PIPE,
            cwd=scratch_dir
    )
    status_failed, status_file = record_and_check_output(cloneproc,"git clone",unique_filename)
    if status_failed:
        abort = True
    ######################
    # unique branch name
    ######################
    now = datetime.now().strftime("%Y%m%d_%H%M%S")
    branch_name = "update_submodules_%s"%(now)
    ######################
    # Create new branch
    # from master branch HEAD
    ######################
    if not abort:
        repo_dir = os.path.join(scratch_dir, parent_repo_name)
        cocmd = ['git','checkout','-b',branch_name]
        coproc = subprocess.Popen(
                cocmd,
                stdout=PIPE,
                stderr=PIPE,
                cwd=repo_dir
        )
        status_failed, status_file = record_and_check_output(coproc,"git checkout",unique_filename)
        if status_failed:
            abort = True
    ######################
    # Check out the master branch of the submodule
    # and pull the latest changes from upstream
    ######################
    if not abort:
        submodule_dir_relative = os.path.join('docs', repo_name)
        submodule_dir = os.path.join(repo_dir, submodule_dir_relative)
        subcocmd = ['git','checkout','master']
        subcoproc = subprocess.Popen(
                subcocmd,
                stdout=PIPE,
                stderr=PIPE,
                cwd=submodule_dir
        )
        status_failed, status_file = record_and_check_output(subcoproc,"git checkout submodule",unique_filename)
        if status_failed:
            abort = True
    # FIX: each subsequent git step now runs only if the previous one
    # succeeded; the original ran pull/add/commit/push unconditionally
    # within each section even after a failure.
    if not abort:
        pullcmd = ['git','pull','origin','master']
        pullproc = subprocess.Popen(
                pullcmd,
                stdout=PIPE,
                stderr=PIPE,
                cwd=submodule_dir
        )
        status_failed, status_file = record_and_check_output(pullproc,"git pull submodule",unique_filename)
        if status_failed:
            abort = True
    ######################
    # Add commit push the new submodule
    ######################
    commit_msg = '[Uncle Archie] Updating submodule %s'%(full_sub_name)
    pr_msg = commit_msg
    if not abort:
        # Add the submodule
        addcmd = ['git','add',submodule_dir_relative]
        addproc = subprocess.Popen(
                addcmd,
                stdout=PIPE,
                stderr=PIPE,
                cwd=repo_dir
        )
        status_failed, status_file = record_and_check_output(addproc,"git add submodule",unique_filename)
        if status_failed:
            abort = True
    if not abort:
        # Commit the new submodule
        commitcmd = ['git','commit',submodule_dir_relative,'-m',commit_msg]
        commitproc = subprocess.Popen(
                commitcmd,
                stdout=PIPE,
                stderr=PIPE,
                cwd=repo_dir
        )
        status_failed, status_file = record_and_check_output(commitproc,"git commit submodule",unique_filename)
        if status_failed:
            abort = True
    if not abort:
        pushcmd = ['git','push','origin',branch_name]
        pushproc = subprocess.Popen(
                pushcmd,
                stdout=PIPE,
                stderr=PIPE,
                cwd=repo_dir
        )
        status_failed, status_file = record_and_check_output(pushproc,"git push origin branch",unique_filename)
        if status_failed:
            abort = True
    ######################
    # New pull request
    ######################
    if not abort:
        # Store the github token in an environment var for hub
        os.environ['GITHUB_TOKEN'] = token
        hubcmd = ['hub','pull-request',
                '-b','charlesreid1:master',
                '-h',branch_name,
                '-m',pr_msg]
        hubproc = subprocess.Popen(
                hubcmd,
                stdout=PIPE,
                stderr=PIPE,
                cwd=repo_dir
        )
        status_failed, status_file = record_and_check_output(hubproc,"create pull request",unique_filename)
        if status_failed:
            abort = True
    ######################
    # Clean up github token
    ######################
    os.environ['GITHUB_TOKEN'] = ""
    # end private-www submodule update PR
    # -----------------------------------------------
    if not abort:
        logging.info("search demo submodule PR succeeded for submodule %s"%(full_repo_name))
    else:
        logging.info("search demo submodule PR failed for submodule %s"%(full_repo_name))
    return
def record_and_check_output(proc, label, unique_filename):
    """
    Capture a process's stdout/stderr into an output file under the web
    root (so users can be handed a link), log both streams, and report
    whether either stream looks like a failure.
    Run this function on the last/most important step in your CI test.
    Returns (failed, unique_filename).
    """
    output_file = os.path.join(HTDOCS, 'output', unique_filename)
    out = proc.stdout.read().decode('utf-8').lower()
    err = proc.stderr.read().decode('utf-8').lower()
    report = "".join([
        "======================\n",
        "======= STDOUT =======\n",
        out,
        "\n\n",
        "======================\n",
        "======= STDERR =======\n",
        err,
        "\n\n",
    ])
    with open(output_file, 'w') as f:
        f.write(report)
    logging.info("Results from process %s:"%(label))
    logging.info("%s"%(out))
    logging.info("%s"%(err))
    logging.info("Recorded in file %s"%(output_file))
    # Failure heuristic: either stream mentions "exception" or "error".
    failed = any(token in stream
                 for stream in (out, err)
                 for token in ("exception", "error"))
    return failed, unique_filename
def check_for_errors(proc, label):
    """
    Inspect a finished process's stdout/stderr for the words "exception"
    or "error" (case-insensitive) and report whether either appeared.
    """
    stdout_text = proc.stdout.read().decode('utf-8').lower()
    stderr_text = proc.stderr.read().decode('utf-8').lower()
    logging.info("Results from process %s:"%(label))
    logging.info("%s"%(stdout_text))
    logging.info("%s"%(stderr_text))
    return any(token in stream
               for stream in (stdout_text, stderr_text)
               for token in ("exception", "error"))
if __name__=="__main__":
    # Manual smoke test. BUG FIX: process_payload() takes (payload, meta,
    # config); the original call passed only two arguments (TypeError) and
    # a payload without a 'repository' key (KeyError). This payload lets
    # the whitelist guard run and exit early without touching git/GitHub.
    process_payload(
        {'repository': {'name': 'private_www', 'full_name': 'test/private_www'}},
        {'a': 1, 'b': 2},
        {'github_access_token': ''})
|
python
|
#
# DMG 136
#
import os
import csv
import random
import numpy as np
from .dice import dice
from .utils import csv2dict
from .utils import filterDictList
# Resolve the CSV data directory relative to this module's location.
data_dir = os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    "data"
)
# The four DMG lookup tables, loaded once at import time.
_TABLE_NAMES = (
    "ART_AND_GEMSTONES",
    "MAGIC_ITEMS",
    "INDIVIDUAL_TREASURE",
    "HOARD_TREASURE",
)
files = {name: os.path.join(data_dir, name + ".csv") for name in _TABLE_NAMES}
tables = {name: csv2dict(path) for name, path in files.items()}
def newTreasure():
    """Return a fresh, empty treasure record: zeroed coin counts plus empty
    gemstone/art-object/magic-item lists (new list objects per call)."""
    treasure = {coin: 0 for coin in ('cp', 'sp', 'ep', 'gp', 'pp')}
    treasure.update(gemstones=[], art_objects=[], magic_items=[])
    return treasure
def makeTreasure(opts):
    """Roll a treasure dict from a treasure-table row *opts*.

    Coin keys roll <DENOM>_n d<DENOM>_d times <DENOM>_m; optional gemstone,
    art-object and magic-item columns add rolled lists of descriptions.
    """
    treasure = newTreasure()
    # Coins are rolled in cp/sp/ep/gp/pp order (preserves RNG call order).
    for denom in ('CP', 'SP', 'EP', 'GP', 'PP'):
        rolled = dice.rollDice(int(opts[denom + '_n']), int(opts[denom + '_d']))
        treasure[denom.lower()] = rolled * int(opts[denom + '_m'])
    if 'GEMSTONES_n' in opts and 0 != int(opts['GEMSTONES_n']):
        count = dice.rollDice(int(opts['GEMSTONES_n']), int(opts['GEMSTONES_d']))
        treasure['gemstones'] = [getGemstone(opts['GEMSTONES_c']) for _ in range(count)]
    if 'ARTOBJECTS_n' in opts and 0 != int(opts['ARTOBJECTS_n']):
        count = dice.rollDice(int(opts['ARTOBJECTS_n']), int(opts['ARTOBJECTS_d']))
        treasure['art_objects'] = [getArtObject(opts['ARTOBJECTS_c']) for _ in range(count)]
    if 'MAGIC_ITEMS_n' in opts:
        # Semicolon-separated parallel lists: counts, dice sizes, table names.
        magic_items = []
        parts_n = opts['MAGIC_ITEMS_n'].split(';')
        parts_d = opts['MAGIC_ITEMS_d'].split(';')
        parts_t = opts['MAGIC_ITEMS_t'].split(';')
        for i in range(len(parts_n)):
            if int(parts_n[i]) != 0:
                count = dice.rollDice(int(parts_n[i]), int(parts_d[i]))
                magic_items += [getMagicItem(parts_t[i]) for _ in range(count)]
        treasure['magic_items'] = magic_items
    return treasure
# ART_AND_GEMSTONES
def _getArtOrGemstone(_cost, _type):
    """Pick a random ART_AND_GEMSTONES row matching *_type* and *_cost*.

    Returns "NAME (COSTgp)" or None when no row matches.
    """
    # Normalize the cost to the table's "<n>gp" format.
    cost = str(_cost)
    if "gp" not in cost:
        cost = '{0}gp'.format(cost)
    matches = [row for row in tables['ART_AND_GEMSTONES']
               if row['TYPE'] == _type and row['COST'] == cost]
    if not matches:
        return None
    return "{0} ({1})".format(random.choice(matches)['NAME'], cost)
def getGemstone(gp):
    '''Return a random gemstone name formatted with its cost for the given gp
    value, or None when the table has no gemstone at that cost.'''
    return _getArtOrGemstone(gp, 'gemstone')
def getArtObject(gp):
    '''Return a random art-object name formatted with its cost for the given gp
    value, or None when the table has no art object at that cost.'''
    return _getArtOrGemstone(gp, 'art_object')
# MAGIC_ITEMS
def getMagicItem(table_name):
    """Pick one random magic item from the named DMG magic-item table,
    weighted by each row's d100 PROBABILITY range."""
    table = filterDictList(tables['MAGIC_ITEMS'], 'TABLE', table_name)
    names = []
    weights = []
    for row in table:
        names.append(row['MAGIC_ITEM'])
        parts = row['PROBABILITY'].split('-')
        if len(parts) == 1:
            # A single d100 number: fixed 1% weight.
            weights.append(0.01)
        else:
            # "00" represents 100 on a d100 roll.
            low = 100 if parts[0] == "00" else int(parts[0]) - 1
            high = 100 if parts[1] == "00" else int(parts[1])
            weights.append((high - low) / 100)
    return np.random.choice(
        names,
        1,
        p=weights
    )[0]
# INDIVIDUAL_TREASURE
def individual(cr):
    '''Roll "individual treasure" (DMG p.136) for challenge rating *cr*.

    Returns a treasure dict as produced by makeTreasure(). A CR below 0
    (or between buckets, e.g. 4.5) leaves the bucket as None, matching
    no table rows — preserved from the original behaviour.
    '''
    # Bucket the challenge rating (chained comparisons replace the
    # original "0 <= cr and 4 >= cr" style).
    CR = None
    if 0 <= cr <= 4:
        CR = '0-4'
    elif 5 <= cr <= 10:
        CR = '5-10'
    elif 11 <= cr <= 16:
        CR = '11-16'
    elif cr >= 17:
        CR = '17-20'
    table = filterDictList(tables['INDIVIDUAL_TREASURE'], 'CR', CR)
    rows = []
    propabilities = []
    for elem in table:
        rows.append(elem)
        parts = elem['PROBABILITY'].split('-')
        if len(parts) == 1:
            # A single d100 number: fixed 1% weight.
            propabilities.append(0.01)
        else:
            # "00" represents 100 on a d100 roll.
            min_value = 100 if parts[0] == "00" else int(parts[0]) - 1
            max_value = 100 if parts[1] == "00" else int(parts[1])
            propabilities.append((max_value - min_value) / 100)
    # NOTE: np.random.choice requires the weights to sum to 1; a malformed
    # table raises ValueError here.
    row = np.random.choice(
        rows,
        1,
        p=propabilities
    )[0]
    return makeTreasure(row)
# HOARD_TREASURE
def hoard(cr):
    '''Roll "hoard treasure" (DMG p.136) for challenge rating *cr*.

    Returns a treasure dict as produced by makeTreasure(). A CR below 0
    (or between buckets, e.g. 4.5) leaves the bucket as None, matching
    no table rows — preserved from the original behaviour.
    '''
    # Bucket the challenge rating (chained comparisons replace the
    # original "0 <= cr and 4 >= cr" style).
    CR = None
    if 0 <= cr <= 4:
        CR = '0-4'
    elif 5 <= cr <= 10:
        CR = '5-10'
    elif 11 <= cr <= 16:
        CR = '11-16'
    elif cr >= 17:
        CR = '17-20'
    table = filterDictList(tables['HOARD_TREASURE'], 'CR', CR)
    rows = []
    propabilities = []
    for elem in table:
        rows.append(elem)
        parts = elem['PROBABILITY'].split('-')
        if len(parts) == 1:
            # A single d100 number: fixed 1% weight.
            propabilities.append(0.01)
        else:
            # "00" represents 100 on a d100 roll.
            min_value = 100 if parts[0] == "00" else int(parts[0]) - 1
            max_value = 100 if parts[1] == "00" else int(parts[1])
            propabilities.append((max_value - min_value) / 100)
    # NOTE: np.random.choice requires the weights to sum to 1; a malformed
    # table raises ValueError here.
    row = np.random.choice(
        rows,
        1,
        p=propabilities
    )[0]
    return makeTreasure(row)
|
python
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2019 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy import (
Boolean,
Column,
ForeignKey,
)
from sqlalchemy.orm import (
backref,
relation,
)
from aquilon.aqdb.model import (
Fqdn,
Resource,
)
# Table name and polymorphic identity for the SharedServiceName resource.
_TN = 'shared_sn'
class SharedServiceName(Resource):
    """Shared service name resources.

    Polymorphic subclass of Resource (rows joined via resource_id) that
    attaches a chosen FQDN to a resource holder.
    """
    __tablename__ = _TN
    # Primary key doubles as a cascading FK to the parent Resource row.
    resource_id = Column(ForeignKey(Resource.id, ondelete='CASCADE',
                                    name='shared_sn_resource_id_fk'),
                         primary_key=True)
    # if true, indicates that address-aliases should be created from the FQDN
    # to particular service addresses in the same resourcegroup.
    sa_aliases = Column(Boolean, nullable=False)
    # FQDN is the 'shared service name' that is chosen -- must be a valid name
    # that the address-alias records can be created against
    fqdn_id = Column(ForeignKey(Fqdn.id), nullable=False, index=True)
    # Eagerly loaded with an inner join: every row must reference an Fqdn.
    fqdn = relation(Fqdn, lazy=False, innerjoin=True,
                    backref=backref('shared_service_names'))
    __table_args__ = ({'info': {'unique_fields': ['name', 'holder']}},)
    __mapper_args__ = {'polymorphic_identity': _TN}
|
python
|
import sys
import os.path
import subprocess
import sublime
try:
from .sublime_connection import SublimeConnection
from .common import msg, shared as G, utils, flooui
from .common.exc_fmt import str_e
assert G and G and utils and msg
except ImportError:
from sublime_connection import SublimeConnection
from common.exc_fmt import str_e
from common import msg, shared as G, utils, flooui
PY2 = sys.version_info < (3, 0)
def get_workspace_window(abs_path):
    '''Return the Sublime window that has *abs_path* among its project
    folders, or None when no window matches.'''
    unfucked_path = utils.unfuck_path(abs_path)
    # Early return replaces the original found-flag / double-break pattern.
    for w in sublime.windows():
        for f in w.folders():
            if utils.unfuck_path(f) == unfucked_path:
                return w
    return None
def open_workspace_window2(abs_path, cb):
    """Open *abs_path* in Sublime Text 2 by spawning the `subl` executable.

    Locates the executable per platform, adds the project path to a (possibly
    new) window, then invokes *cb* once the subprocess has been started —
    not when the window is actually ready.
    """
    if sublime.platform() == 'linux':
        # argv[0] of the current process is the running sublime binary.
        subl = open('/proc/self/cmdline').read().split(chr(0))[0]
    elif sublime.platform() == 'osx':
        # Prefer an explicit SUBLIME_EXECUTABLE from ~/.floorc.json, then the
        # plugin setting, then the default ST2 install location.
        floorc = utils.load_floorc_json()
        subl = floorc.get('SUBLIME_EXECUTABLE')
        if not subl:
            settings = sublime.load_settings('Floobits.sublime-settings')
            subl = settings.get('sublime_executable', '/Applications/Sublime Text 2.app/Contents/SharedSupport/bin/subl')
        if not os.path.exists(subl):
            return sublime.error_message('''Can't find your Sublime Text executable at %s.
Please add "sublime_executable": "/path/to/subl" to your ~/.floorc.json and restart Sublime Text''' % subl)
    elif sublime.platform() == 'windows':
        # sys.executable is the Sublime process itself on Windows —
        # presumably intended as the launcher; confirm on ST2.
        subl = sys.executable
    else:
        raise Exception('WHAT PLATFORM ARE WE ON?!?!?')
    command = [subl]
    # Only request a new window when no existing window has this path.
    if get_workspace_window(abs_path) is None:
        command.append('--new-window')
    command.append('--add')
    command.append(G.PROJECT_PATH)
    msg.debug('command:', command)
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # poll() is informational only; the subprocess is not waited on.
    poll_result = p.poll()
    msg.debug('poll:', poll_result)
    cb()
def open_workspace_window3(abs_path, cb):
    '''Open (or reuse) a Sublime Text 3 window for *abs_path*, add the path
    to the window's project folders, and invoke *cb* with the window.'''
    def finish(w):
        '''Add *abs_path* to window *w*'s project data and call *cb*.'''
        folders = []
        project_data = w.project_data() or {}
        try:
            folders = project_data.get('folders', [])
        except Exception:
            pass
        p = utils.unfuck_path(abs_path)
        # BUG FIX: the original tested `not [ ...list comprehension... ]`,
        # which is False for ANY non-empty folder list, so the path was never
        # added to windows that already had folders. any() checks for a match.
        if not any(utils.unfuck_path(f.get('path')) == p for f in folders):
            folders.insert(0, {'path': abs_path})
            project_data['folders'] = folders
            w.set_project_data(project_data)
        cb(w)
    def get_empty_window():
        '''Return a window with no project folders to co-opt, else None.'''
        for w in sublime.windows():
            project_data = w.project_data()
            try:
                folders = project_data.get('folders', [])
                if len(folders) == 0 or not folders[0].get('path'):
                    # no project data. co-opt this window
                    return w
            except Exception as e:
                print('project_data.get():', str_e(e))
            try:
                folders = w.folders()
                if len(folders) == 0:
                    # no project data. co-opt this window
                    return w
            except Exception as e:
                print(str_e(e))
    def wait_empty_window(i):
        '''Poll (up to ~10 tries) for an empty window after new_window.'''
        if i > 10:
            print('Too many failures trying to find an empty window. Using active window.')
            return finish(sublime.active_window())
        w = get_empty_window()
        if w:
            return finish(w)
        return utils.set_timeout(wait_empty_window, 50, i + 1)
    w = get_workspace_window(abs_path) or get_empty_window()
    if w:
        return finish(w)
    sublime.run_command('new_window')
    wait_empty_window(0)
class SublimeUI(flooui.FlooUI):
    # Sublime Text front-end for the Floobits UI abstraction: every user
    # interaction is funnelled through Sublime's dialog/panel APIs and
    # delivered asynchronously via callbacks.
    def _make_agent(self, context, owner, workspace, auth, join_action):
        """@returns new Agent() — a SublimeConnection for this workspace."""
        return SublimeConnection(owner, workspace, context, auth, join_action)
    def user_y_or_n(self, context, prompt, affirmation_txt, cb):
        """@returns True/False via cb, from a modal ok/cancel dialog."""
        # TODO: optionally use Sublime 3's new yes_no_cancel_dialog
        return cb(bool(sublime.ok_cancel_dialog(prompt, affirmation_txt)))
    def user_select(self, context, prompt, choices_big, choices_small, cb):
        """@returns (choice, index) via cb; (None, -1) when dismissed."""
        choices = choices_big
        if choices_small:
            # Pair each main choice with its secondary line for the panel.
            choices = [list(x) for x in zip(choices_big, choices_small)]
        def _cb(i):
            # A negative index means the quick panel was dismissed.
            if i >= 0:
                return cb(choices_big[i], i)
            return cb(None, -1)
        flags = 0
        # Flag only exists on newer Sublime builds; feature-detect it.
        if hasattr(sublime, 'KEEP_OPEN_ON_FOCUS_LOST'):
            flags |= sublime.KEEP_OPEN_ON_FOCUS_LOST
        utils.set_timeout(context.show_quick_panel, 1, choices, _cb, flags)
    def user_dir(self, context, prompt, initial, cb):
        """@returns a String directory (probably not expanded) via cb."""
        # Sublime has no directory picker; fall back to a text input panel.
        self.user_charfield(context, prompt, initial, cb)
    def user_charfield(self, context, prompt, initial, cb):
        """@returns String via cb, from an input panel."""
        utils.set_timeout(context.show_input_panel, 1, prompt, initial, cb, None, None)
    @utils.inlined_callbacks
    def get_a_window(self, abs_path, cb):
        """opens a project in a window or something

        Generator-based flow (driven by utils.inlined_callbacks): each yield
        hands a callback-taking callable to the driver.
        """
        if PY2:
            yield open_workspace_window2, abs_path
        else:
            yield open_workspace_window3, abs_path
        while True:
            # Poll every 50ms until the workspace window actually exists.
            workspace_window = get_workspace_window(abs_path)
            if workspace_window is not None:
                break
            yield lambda cb: utils.set_timeout(cb, 50)
        # TODO: calling focus_view() on a view in the window doesn't focus the window :(
        cb(workspace_window)
|
python
|
# Generated by Django 3.2.9 on 2022-02-15 08:57
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: links Staff to common.Address/common.Contact
    # and clients.Identifier via many-to-many tables. Prefer generating a
    # follow-up migration over hand-editing these operations.
    dependencies = [
        ('clients', '0026_servicerequest_facility'),
        ('common', '0015_faq'),
        ('staff', '0002_alter_staff_default_facility'),
    ]
    operations = [
        migrations.AddField(
            model_name='staff',
            name='addresses',
            field=models.ManyToManyField(blank=True, related_name='staff_addresses', to='common.Address'),
        ),
        migrations.AddField(
            model_name='staff',
            name='contacts',
            field=models.ManyToManyField(blank=True, related_name='staff_contacts', to='common.Contact'),
        ),
        # NOTE(review): unlike addresses/contacts, identifiers is not
        # blank=True, making it required in forms — confirm this is intended.
        migrations.AddField(
            model_name='staff',
            name='identifiers',
            field=models.ManyToManyField(related_name='staff_identifiers', to='clients.Identifier'),
        ),
    ]
|
python
|
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from datetime import datetime
# Minimal example DAG containing a single no-op task.
# NOTE(review): start_date=datetime.now() is a known Airflow anti-pattern
# (the value changes at every scheduler parse); a fixed date is usually
# intended — confirm before relying on this for scheduling.
with DAG('example_dag', start_date=datetime.now()) as dag:
    op = DummyOperator(task_id='op')
|
python
|
import os
from PIL import Image, ImageQt
import tkinter as tk
import sys
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QVBoxLayout, QLabel, QSlider, QStackedLayout, QPushButton, QFileDialog
from PyQt5.QtGui import QFont, QImage, QPixmap
from PyQt5.QtCore import Qt
from copy import deepcopy
class ImageViewer(QMainWindow):
front_idx = None
slider_flag = True
height = 400
width =400
    def __init__(self):
        """Build the main window and show the drag-and-drop landing view."""
        super().__init__()
        self.initUI()
    def initUI(self):
        """Create the stacked layout and the drag-and-drop input view."""
        self.setAcceptDrops(True)
        # Stacked layout: page 0 is the input view; add_output_widget
        # appends the result view later.
        self.layout_for_wids = QStackedLayout()
        self.central_wid = QWidget()
        ## INPUT WIDGET
        self.input_wid = QWidget()
        input_label = QLabel("<font size=20>↓</font><br/>Drag and Drop<br/>Image here!")
        # NOTE(review): border-radius is set twice (10px then 30px);
        # the later value wins.
        input_label.setStyleSheet("color: gray;"
                                  "font: bold;"
                                  "qproperty-alignment: AlignCenter;"
                                  "border-style: solid;"
                                  "border-radius: 10px;"
                                  "border-width: 2px;"
                                  "border-color: gray;"
                                  "border-radius: 30px")
        input_vbox = QVBoxLayout()
        input_vbox.addWidget(input_label)
        self.layout_for_wids.addWidget(self.input_wid)
        self.input_wid.setLayout(input_vbox)
        self.central_wid.setLayout(self.layout_for_wids)
        self.setCentralWidget(self.central_wid)
        self.setWindowTitle('White to Transparent')
        self.setGeometry(300, 300, self.width, self.height)
        # NOTE(review): front_wid is written here but not read in the
        # visible methods — confirm whether it is still needed.
        self.front_wid = 1
def add_output_widget(self):
global output_label, img, main_data, slider
self.output_wid = QWidget()
output_label = QLabel(self)
## Slider
slider = QSlider(Qt.Horizontal, self)
slider.setRange(0, 100)
slider.setSingleStep(2)
slider.setValue(100)
value = slider.value()
## Save Button
btn = QPushButton('Save!', self)
## Main Image
img = Image.open(file_path)
img = img.convert('RGBA')
main_data = img.getdata()
## Display Default Image
pixmap = QPixmap.fromImage(self.get_transparent_image(value))
scaled_pixmap=pixmap.scaled(int(self.width*0.7), int(self.height*0.7), Qt.KeepAspectRatio)
output_label.setPixmap(scaled_pixmap)
output_label.setAlignment(Qt.AlignHCenter | Qt.AlignTop)
## Connection
slider.sliderMoved.connect(self.sliderMoved)
btn.clicked.connect(self.Save_clicked)
output_vbox = QVBoxLayout()
output_vbox.addWidget(output_label)
output_vbox.addWidget(btn)
output_vbox.addWidget(slider)
self.layout_for_wids.addWidget(self.output_wid)
self.output_wid.setLayout(output_vbox)
self.output_wid.resize(self.width, self.height)
def Save_clicked(self):
fname = QFileDialog.getSaveFileName(self)[0]
print(fname)
threshold = 255/100*(slider.value())
cur_data = []
cur_img = deepcopy(img)
for item in main_data:
if item[0] > threshold and item[1] > threshold and item[2] > threshold:
cur_data.append((255, 255, 255, 0))
else:
cur_data.append(item)
cur_img.putdata(cur_data)
if not fname.endswith("png") or fname.endswith("PNG"):
fname+='.png'
cur_img.save(os.path.join(fname), "PNG")
def sliderMoved(self, val):
if val > 0:
pixmap = QPixmap.fromImage(self.get_transparent_image(val))
scaled_pixmap=pixmap.scaled(int(self.width*0.7), int(self.height*0.7), Qt.KeepAspectRatio)
output_label.setPixmap(scaled_pixmap)
# reference : https://gist.github.com/peace098beat/db8ef7161508e6500ebe
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
global file_path
files = [u.toLocalFile() for u in event.mimeData().urls()]
for f in files:
print(f)
file_path = f
self.add_output_widget()
self.output_wid.show()
self.input_wid.hide()
def get_transparent_image(self, scale):
threshold = 255/100*scale
cur_data = []
cur_img = deepcopy(img)
for item in main_data:
if item[0] > threshold and item[1] > threshold and item[2] > threshold:
cur_data.append((255, 255, 255, 0))
else:
cur_data.append(item)
cur_img.putdata(cur_data)
qim = ImageQt.ImageQt(cur_img)
return qim
if __name__ == "__main__":
    # Start the Qt event loop with the viewer as the top-level window.
    application = QApplication([__file__])
    viewer = ImageViewer()
    viewer.show()
    sys.exit(application.exec_())
|
python
|
from common.remote_execution.SSHConf import sshConfig
import socket
from Constants import *
import json
from dataModels.KubeCluster import KubeCluster
# In-memory registry mapping a Kubernetes cluster name to its management object.
cluster_management_objects = {}


def get_cluster_management_object(kube_cluster_name):
    """Return the cached management object for *kube_cluster_name*, or None."""
    return cluster_management_objects.get(kube_cluster_name)


def push_cluster_management_object(kube_cluster_name, cluster_management_object):
    """Cache *cluster_management_object* under *kube_cluster_name*."""
    cluster_management_objects[kube_cluster_name] = cluster_management_object
def get_ssh_conf_object(machine):
    """Build an sshConfig for *machine* (a dict of connection details).

    Prefers key-file authentication when "sshFilePath" is set, otherwise
    falls back to password authentication.
    """
    if machine["sshFilePath"] is not None:
        return sshConfig(ssh_host=machine["ipAddress"],
                         ssh_user=machine["userName"],
                         ssh_pass_file=machine["sshFilePath"],
                         ssh_pass=None)
    return sshConfig(ssh_host=machine["ipAddress"],
                     ssh_user=machine["userName"],
                     ssh_pass=machine["password"])
def get_join_token(remote_executor):
    """Return the `kubeadm join` command for the cluster behind *remote_executor*.

    Raises:
        Exception: if the remote `kubeadm token create` invocation fails.
    """
    output = remote_executor.executeRemoteCommand("kubeadm token create --print-join-command ")
    if output.errCode != 0:
        # BUGFIX: the original passed errString as a second Exception
        # argument instead of interpolating it into the message.
        raise Exception("Not able to fetch join token. %s" % output.errString)
    return output.outString.strip()
def validate_error(output, message):
    """Raise if *output* (a remote-command result) carries a non-zero errCode."""
    if output.errCode == 0:
        return
    raise Exception("%s,%s " % (message, output.errString))
def validate_output(output, text):
    """Raise unless *text* occurs in the command's captured stdout."""
    if text in output.outString:
        return
    raise Exception("%s is not in %s" % (text, output.outString))
def get_available_port(host, count):
    """Return *count* TCP ports from the Polyaxon node-port range that do not
    currently accept connections on *host*.

    Raises:
        Exception: if fewer than *count* free ports are found in the range.
    """
    port_list = []
    for port in range(POLYAXON_NODE_PORT_RANGE_START, POLYAXON_NODE_PORT_RANGE_END):
        # connect_ex returns 0 when something is listening; any other code
        # means the port is treated as free.
        # BUGFIX: the original created the socket *inside* the try block, so
        # a failing socket() raised NameError in the finally clause; the
        # context manager both fixes that and guarantees closure.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            res = sock.connect_ex((host, port))
        if res != 0:
            port_list.append(port)
            if len(port_list) == count:
                break
    if len(port_list) != count:
        # BUGFIX: typo "availablwe" in the original message.
        raise Exception("Ports are not available")
    return port_list
def get_volume_usage(volume_name, cluster_management):
    """Return the free space (GB, as a string) of a Gluster volume.

    Sums the "Disk Space Free" figures reported by
    `gluster volume status <vol> detail` across bricks, then divides by the
    replication factor since replicated bricks report the same space.
    """
    disk_space_free_txt = "Disk Space Free"
    total_disk_space_txt = "Total Disk Space"
    disk_space_free = 0.0
    total_disk_space = 0.0  # parsed but not returned; kept for future use
    executor = cluster_management.master_machine[EXECUTOR]
    output = executor.executeRemoteCommand("gluster volume status " + volume_name + " detail")
    for line in output.outString.split("\n"):
        line = line.strip()
        if disk_space_free_txt in line:
            disk_space_free += float(line.split(":")[1].replace("GB", ""))
        # BUGFIX: the original nested the same `total_disk_space_txt in line`
        # test twice; the inner duplicate was redundant.
        if total_disk_space_txt in line:
            total_disk_space += float(line.split(":")[1].replace("GB", ""))
    disk_space_free = disk_space_free / GLUSTER_DEFAULT_REP_FACTOR
    return str(disk_space_free)
def object_to_json(obj):
    """Serialize *obj* to JSON, falling back to its __dict__ for custom types."""
    serialize_fallback = lambda o: o.__dict__
    return json.dumps(obj, default=serialize_fallback)
def get_cluster_id(cluster_name,user_id):
    # Fetch the KubeCluster row matching cluster_name for this user.
    # NOTE(review): despite the name, this returns the whole KubeCluster
    # record, not just its id — confirm callers expect the full object.
    # Presumably .get() raises when no row matches — verify ORM semantics.
    cluster=KubeCluster.select().where((KubeCluster.name==cluster_name)&(KubeCluster.userId==user_id)).get()
    return cluster
def mount_gf_volume(cluster_management, volume_name, mount_path):
    # Mount a GlusterFS volume on the cluster's master machine and persist
    # the mount in /etc/fstab so it survives reboots.
    executor=cluster_management.master_machine[EXECUTOR]
    # Ensure the mount point exists (result deliberately not validated).
    executor.executeRemoteCommand("mkdir -p "+mount_path)
    # validate_error(output,"Mount dataset path creation.Path is {0}".format(mount_path))
    # Best-effort unmount of any stale mount at the same path.
    cmd="umount {0}".format(mount_path)
    executor.executeRemoteCommand(cmd)
    cmd = "mount -o acl,rw -t glusterfs {0}:/{1} {2}".format(DEFAULT_GLUSTER_SERVER,volume_name,mount_path)
    output=executor.executeRemoteCommand(cmd)
    # Only the actual mount is validated; failure raises via validate_error.
    validate_error(output,"Mount dataset path "+mount_path)
    fstab_entry="{0}:/{1} {2} glusterfs acl,rw,defaults,_netdev,x-systemd.automount 0 0".format(DEFAULT_GLUSTER_SERVER,
                                                                                               volume_name,
                                                                                               mount_path)
    # fstab_entry = DEFAULT_GLUSTER_SERVER + ":/" + volume_name + " " + mount_path + " glusterfs acl,rw,defaults,_netdev,x-systemd.automount 0 0"
    # NOTE(review): this appends unconditionally — repeated calls duplicate
    # the fstab line; confirm whether dedup is needed.
    cmd="echo {0} >> /etc/fstab".format(fstab_entry)
    executor.executeRemoteCommand(cmd)
def get_gfs_endpoint_name(cluster_management, namespace_name):
    """Return the name of the first dynamic GlusterFS endpoint found in the
    namespace, or None when there is none."""
    api_response = cluster_management.kube_api.list_namespaced_endpoints(namespace_name)
    return next(
        (ep.metadata.name
         for ep in api_response.items
         if DYNAMIC_GLUSTERFS_ENDPOINT_STARTS_WITH in ep.metadata.name),
        None,
    )
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" build dict interface """
import argparse
import os
from collections import defaultdict
import six
def build_dict(input_path,
               output_path,
               col_nums,
               feq_threshold=5,
               sep=' ',
               extra_words=None,
               stop_words=None):
    """Build a vocabulary file ("vocab.txt") from tab-separated corpus files.

    Counts tokens taken from the 1-based columns *col_nums* of every regular
    file under *input_path*, writes tokens with count >= *feq_threshold*
    (most frequent first) as "<token>\\t<id>" lines, then appends the
    built-in special tokens plus any *extra_words* not already written.

    Args:
        input_path: directory containing the corpus files.
        output_path: directory that receives "vocab.txt".
        col_nums: 1-based column indices to read tokens from.
        feq_threshold: minimum count for a token to enter the vocabulary.
        sep: token separator within the selected columns.
        extra_words: extra tokens to append (mutated: special tokens are
            prepended, preserving the original behaviour).
        stop_words: tokens excluded from the vocabulary.
    """
    # BUGFIX: the original crashed with TypeError when the documented
    # defaults (None) were used; normalize them to empty lists.
    if extra_words is None:
        extra_words = []
    if stop_words is None:
        stop_words = []
    values = defaultdict(int)
    for file_name in os.listdir(input_path):
        file_path = os.path.join(input_path, file_name)
        if not os.path.isfile(file_path):
            continue
        # NOTE: the original branched on six.PY3, but the writer below
        # already uses open(..., encoding=...), which requires Python 3 —
        # so the Python 2 branch was dead. The file is also now closed
        # deterministically (the original leaked the handle).
        with open(file_path, 'r', encoding='utf8') as input_file:
            for l in input_file:
                cols = l.strip().split('\t')
                selected_cols = ""
                for j in col_nums:
                    selected_cols += cols[j - 1]
                for w in selected_cols.split(sep):
                    values[w] += 1
    output_file_path = os.path.join(output_path, "vocab.txt")
    id_index = 0
    with open(output_file_path, "w", encoding='utf8') as f:
        for v, count in sorted(values.items(), key=lambda x: x[1], reverse=True):
            if count < feq_threshold:
                # counts are sorted descending — nothing later qualifies
                break
            if v in stop_words:
                # BUGFIX: the original `break`-ed on a stop word, truncating
                # the whole vocabulary instead of skipping the one token.
                continue
            f.write("%s\t%d\n" % (v, id_index))
            id_index += 1
        build_in_vocab = ["[PAD]", "[CLS]", "[SEP]", "[MASK]", "[UNK]"]
        for vocab in build_in_vocab:
            extra_words.insert(0, vocab)
        for w in extra_words:
            # Append only words that did not already make the vocabulary.
            if w not in values or values[w] < feq_threshold:
                f.write(u"%s\t%d\n" % (w, id_index))
                id_index += 1
def main():
    """CLI entry point: parse arguments and build a vocabulary from a corpus
    directory."""
    parser = argparse.ArgumentParser(description='main')
    parser.add_argument("-i", "--input", type=str)
    parser.add_argument("-o", "--output", type=str)
    parser.add_argument("-sep", "--seperator", type=str, default=' ')
    parser.add_argument("-c", "--column_number", type=str, default='1')
    parser.add_argument("-thr", "--feq_threshold", type=int, default='5')
    parser.add_argument("-ew", "--extra_words", type=str, nargs='+', default=[])
    parser.add_argument("-sw", "--stop_words", type=str, nargs='+', default=[])  # stop words
    args = parser.parse_args()
    # "1,3" -> [1, 3]: 1-based column indices.
    col_nums = [int(c) for c in args.column_number.split(',')]
    data_files = os.listdir(args.input)
    assert len(data_files) > 0, "%s is an empty directory" % args.input
    os.makedirs(args.output, exist_ok=True)
    build_dict(
        input_path=args.input,
        output_path=args.output,
        feq_threshold=args.feq_threshold,
        # BUGFIX: the original hard-coded sep=' ', silently ignoring the
        # user-supplied --seperator option.
        sep=args.seperator,
        col_nums=col_nums,
        extra_words=args.extra_words,
        stop_words=args.stop_words)


if __name__ == '__main__':
    main()
|
python
|
# Databricks notebook source
# MAGIC %md # Python REST Client using Wrapper Module
# COMMAND ----------
# MAGIC %md ####Load REST Class
# COMMAND ----------
# MAGIC %run ./008-REST_API_Py_Requests_Lib
# COMMAND ----------
# MAGIC %md ####Initialize REST Object
# COMMAND ----------
import datetime
import json

# initialize the DBC_Rest Object (defined by the %run-included library above)
rest = DBC_REST_API("https://eastus2.azuredatabricks.net", "replaceWithYourToken")
# COMMAND ----------
# MAGIC %md ####1) Need to get cluster ID of cluster we want job to run on
# MAGIC When running a job that creates its own cluster the cluster name will be of the form: job-[job_id]-run-[run_id]
# COMMAND ----------
cluster_name = 'Tritium'
# Get cluster ID based on cluster name
runnerClusterId = rest.get_clusterID(cluster_name, comp_type = 'eq', state = 'RUNNING')
# BUGFIX: converted the bare Python-2 print statement to print() — the rest
# of this notebook already uses print() calls, and the statement form is a
# syntax error on Python 3.
print('Runner Cluster Id: %s' % runnerClusterId)
# COMMAND ----------
# MAGIC %md #### 2) Get Job_ID of job you want to run on the cluster
# MAGIC Job ID can be found in the jobs tab next to the job name, alternatively we can look it up by name as well
# COMMAND ----------
# look up by name
job_name = 'test_dbutils_context'
job_id = rest.get_jobID(job_name)
if job_id < 0:
    raise ValueError("Error retrieving Job ID. Verify job name '{0}' exists.".format(job_name))
print("Job ID is {0}".format(job_id))
# COMMAND ----------
# MAGIC %md ####3) Update existing Job with Cluster ID
# MAGIC Now that we have the Cluster ID, the job to run on the cluster need to be updated with the correct cluster ID
# MAGIC (this assumes job was initially configured to run on 'Existing cluster')
# COMMAND ----------
new_jobsettings = {"existing_cluster_id":runnerClusterId}
# BUGFIX: same Python-2 print statement converted to a print() call.
print(new_jobsettings)
# Reset Job's cluster id
rq1 = rest.reset_job(job_id, new_jobsettings)
if rq1.status_code != 200:
    raise ValueError('Trouble resetting job cluster id for '+job_name, rq1.status_code)
# COMMAND ----------
# MAGIC %md ####4) Run Job with parameters
# COMMAND ----------
source = "databricks-johndoe/test-input/"
test_target = "databricks-johndoe/test-output/"
run_mode = 'test'
params = {'source': source, 'target': test_target, 'run_mode': run_mode}
# run the job
req = rest.run_job(job_id, params)
# Verify job started ok
if req.status_code == 200:
    # Run job returns run_id and number_in_job. Save for polling and logging of data
    number_in_job=req.json()['number_in_job']
    print('Job Number: {0}'.format(number_in_job))
    run_id=req.json()['run_id']
    print('Run ID: {0}'.format(run_id))
python
|
#!/usr/bin/python3
import sys
import time
sys.path.append("./shared")
#from sbmloader import SBMObject # location of sbm file format loader
from ktxloader import KTXObject # location of ktx file format loader
#from sbmath import m3dDegToRad, m3dRadToDeg, m3dTranslateMatrix44, m3dRotationMatrix44, m3dMultiply, m3dOrtho, m3dPerspective, rotation_matrix, translate, m3dScaleMatrix44
fullscreen = True
import numpy.matlib
import numpy as np
import math
try:
from OpenGL.GLUT import *
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.raw.GL.ARB.vertex_array_object import glGenVertexArrays, glBindVertexArray
except:
print ('''
ERROR: PyOpenGL not installed properly.
''')
sys.exit()
# 4x4 identity matrix in flat row-major form (kept for reference; unused below).
identityMatrix = [1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1]
# GL object handles, populated by Scene.__init__.
render_prog = GLuint(0)
render_vao = GLuint(0)
tex_alien_array = GLuint(0)
rain_buffer = GLuint(0)
# Per-droplet animation parameters, one entry per droplet (filled at setup).
droplet_x_offset = []
droplet_rot_speed = []
droplet_fall_speed = []
# Seed of the original C LCG; kept for reference, mirrored in random.seed below.
seed = 0x13371337
import random
import ctypes
random.seed (0x13371337)
def random_float():
    """Return a pseudo-random float in [-1.0, 0.0).

    Replaces the original C-style integer LCG from the SuperBible sample
    with Python's random module (seeded once at import time).
    """
    return random.random() - 1.0
class Scene:
    """Renders 256 spinning "alien" sprites falling like rain.

    The quad geometry is generated in the vertex shader from gl_VertexID;
    per-droplet position/rotation lives in a std140 uniform buffer that is
    rewritten every frame in display().
    """

    def __init__(self, width, height):
        # Compile/link the shader program, load the alien texture array and
        # create the droplet uniform buffer plus per-droplet parameters.
        global render_prog
        global render_vao
        global tex_alien_array
        global rain_buffer
        global droplet_x_offset, droplet_rot_speed, droplet_fall_speed
        self.width = width
        self.height = height
        vs = GLuint(0)
        fs = GLuint(0)
        vs_source = '''
#version 410 core
layout (location = 0) in int alien_index;
out VS_OUT
{
    flat int alien;
    vec2 tc;
} vs_out;
struct droplet_t
{
    float x_offset;
    float y_offset;
    float orientation;
    float unused;
};
layout (std140) uniform droplets
{
    droplet_t droplet[256];
};
void main(void)
{
    const vec2[4] position = vec2[4](vec2(-0.5, -0.5),
                                     vec2( 0.5, -0.5),
                                     vec2(-0.5, 0.5),
                                     vec2( 0.5, 0.5));
    vs_out.tc = position[gl_VertexID].xy + vec2(0.5);
    float co = cos(droplet[alien_index].orientation);
    float so = sin(droplet[alien_index].orientation);
    mat2 rot = mat2(vec2(co, so),
                    vec2(-so, co));
    vec2 pos = 0.25 * rot * position[gl_VertexID];
    gl_Position = vec4(pos.x + droplet[alien_index].x_offset,
                       pos.y + droplet[alien_index].y_offset,
                       0.5, 1.0);
    vs_out.alien = alien_index % 64;
}
'''
        fs_source = '''
#version 410 core
layout (location = 0) out vec4 color;
in VS_OUT
{
    flat int alien;
    vec2 tc;
} fs_in;
uniform sampler2DArray tex_aliens;
void main(void)
{
    color = texture(tex_aliens, vec3(fs_in.tc, float(fs_in.alien)));
}
'''
        # Compile both shaders; info logs are fetched for debugging.
        vs = glCreateShader(GL_VERTEX_SHADER)
        glShaderSource(vs, vs_source)
        glCompileShader(vs)
        glGetShaderInfoLog(vs)
        fs = glCreateShader(GL_FRAGMENT_SHADER)
        glShaderSource(fs, fs_source)
        glCompileShader(fs)
        # NOTE(review): this queries the vertex-shader log again — probably
        # meant glGetShaderInfoLog(fs); confirm.
        glGetShaderInfoLog(vs)
        render_prog = glCreateProgram()
        glAttachShader(render_prog, vs)
        glAttachShader(render_prog, fs)
        glLinkProgram(render_prog)
        glDeleteShader(vs)
        glDeleteShader(fs)
        glGetProgramInfoLog(render_prog)
        glGenVertexArrays(1, render_vao)
        glBindVertexArray(render_vao)
        # 2D array texture: one layer per alien sprite.
        ktxobj = KTXObject()
        tex_alien_array = ktxobj.ktx_load("aliens.ktx")
        glBindTexture(GL_TEXTURE_2D_ARRAY, tex_alien_array)
        glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR)
        # Uniform buffer: 256 droplets x 4 floats x 4 bytes, rewritten per frame.
        glGenBuffers(1, rain_buffer)
        glBindBuffer(GL_UNIFORM_BUFFER, rain_buffer)
        glBufferData(GL_UNIFORM_BUFFER, 256*4*4, None, GL_DYNAMIC_DRAW)
        for i in range(0, 256):
            droplet_x_offset.append(random_float() * 2.0 - 1.0)
            droplet_rot_speed.append( (random_float() + 0.5) * (-3.0 if (i & 1) else 3.0) )
            droplet_fall_speed.append ( random_float() + 0.2 )
        glBindVertexArray(render_vao);
        # Alpha blending so sprite edges composite over the background.
        glEnable(GL_BLEND);
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);

    def display(self):
        # Per-frame render: update the droplet uniform buffer in place via a
        # mapped pointer, then draw each droplet as a 4-vertex strip.
        global rain_buffer
        green = [ 0.0, 0.1, 0.0, 0.0 ]
        currentTime = time.time()
        t = currentTime
        glViewport(0, 0, self.width, self.height)
        glClearBufferfv(GL_COLOR, 0, green)
        glUseProgram(render_prog);
        glBindBufferBase(GL_UNIFORM_BUFFER, 0, rain_buffer);
        droplet = glMapBufferRange(GL_UNIFORM_BUFFER, 0, 256*4*4, GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT)
        # View the mapped buffer as 256 rows of 4 C floats (x, y, angle, pad).
        float_array = ((ctypes.c_float * 4) * 256).from_address(droplet)
        for i in range(0, 256):
            float_array[i][0] = droplet_x_offset[i] + 2
            float_array[i][1] = 2.0-math.fmod((t + float(i)) * droplet_fall_speed[i], 4.31 ) * random_float()
            float_array[i][2] = droplet_rot_speed[i] * t * random_float() * math.pi
            float_array[i][3] = 0.0
        glUnmapBuffer(GL_UNIFORM_BUFFER);
        for alien_index in range(0, 256):
            glVertexAttribI1i(0, alien_index);
            glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
        glutSwapBuffers()

    def reshape(self, width, height):
        # Track the window size for the next frame's viewport.
        self.width = width
        self.height = height

    def keyboard(self, key, x, y ):
        # ESC quits; 'f'/'F' toggles fullscreen.
        global fullscreen
        print ('key:' , key)
        if key == b'\x1b': # ESC
            sys.exit()
        elif key == b'f' or key == b'F': #fullscreen toggle
            if (fullscreen == True):
                glutReshapeWindow(512, 512)
                glutPositionWindow(int((1360/2)-(512/2)), int((768/2)-(512/2)))
                fullscreen = False
            else:
                glutFullScreen()
                fullscreen = True
        print('done')

    def init(self):
        # No extra initialization needed; kept for interface symmetry.
        pass

    def timer(self, blah):
        # Redisplay roughly at 60 Hz when registered with glutTimerFunc.
        glutPostRedisplay()
        glutTimerFunc( int(1/60), self.timer, 0)
        time.sleep(1/60.0)
if __name__ == '__main__':
    start = time.time()
    glutInit()
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
    glutInitWindowSize(512, 512)
    w1 = glutCreateWindow('OpenGL SuperBible - Alien Rain')
    # NOTE(review): glutInitWindowPosition is called *after* the window is
    # created, so it only affects subsequently created windows — confirm intent.
    glutInitWindowPosition(int((1360/2)-(512/2)), int((768/2)-(512/2)))
    fullscreen = False
    many_cubes = False  # unused in this demo
    #glutFullScreen()
    scene = Scene(512,512)
    glutReshapeFunc(scene.reshape)
    glutDisplayFunc(scene.display)
    # Idle-driven redraw; the timer-based alternative below is disabled.
    glutKeyboardFunc(scene.keyboard)
    glutIdleFunc(scene.display)
    #glutTimerFunc( int(1/60), scene.timer, 0)
    scene.init()
    glutMainLoop()
|
python
|
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from pandas import DataFrame, Index, Series, Timestamp
from pandas.util.testing import assert_almost_equal
def _assert_almost_equal_both(a, b, **kwargs):
    """
    Check that two objects are approximately equal.

    This check is performed commutatively.

    Parameters
    ----------
    a : object
        The first object to compare.
    b : object
        The second object to compare.
    kwargs : dict
        The arguments passed to `assert_almost_equal`.
    """
    for left, right in ((a, b), (b, a)):
        assert_almost_equal(left, right, **kwargs)
def _assert_not_almost_equal(a, b, **kwargs):
    """
    Check that two objects are not approximately equal.

    Parameters
    ----------
    a : object
        The first object to compare.
    b : object
        The second object to compare.
    kwargs : dict
        The arguments passed to `assert_almost_equal`.
    """
    try:
        assert_almost_equal(a, b, **kwargs)
    except AssertionError:
        # The comparison failed, which is exactly what we expect.
        return
    pytest.fail(msg=("{a} and {b} were approximately equal "
                     "when they shouldn't have been").format(a=a, b=b))
def _assert_not_almost_equal_both(a, b, **kwargs):
    """
    Check that two objects are not approximately equal.

    This check is performed commutatively.

    Parameters
    ----------
    a : object
        The first object to compare.
    b : object
        The second object to compare.
    kwargs : dict
        The arguments passed to `tm.assert_almost_equal`.
    """
    for left, right in ((a, b), (b, a)):
        _assert_not_almost_equal(left, right, **kwargs)
# Numeric comparisons: values within the default tolerance compare equal in
# both directions; clearly different values (or numeric/bool mixes) do not.
@pytest.mark.parametrize("a,b", [
    (1.1, 1.1), (1.1, 1.100001), (np.int16(1), 1.000001),
    (np.float64(1.1), 1.1), (np.uint32(5), 5),
])
def test_assert_almost_equal_numbers(a, b):
    _assert_almost_equal_both(a, b)


@pytest.mark.parametrize("a,b", [
    (1.1, 1), (1.1, True), (1, 2), (1.0001, np.int16(1)),
])
def test_assert_not_almost_equal_numbers(a, b):
    _assert_not_almost_equal_both(a, b)


# Zero needs special care: tiny values compare equal to 0, larger ones don't.
@pytest.mark.parametrize("a,b", [
    (0, 0), (0, 0.0), (0, np.float64(0)), (0.000001, 0),
])
def test_assert_almost_equal_numbers_with_zeros(a, b):
    _assert_almost_equal_both(a, b)


@pytest.mark.parametrize("a,b", [
    (0.001, 0), (1, 0),
])
def test_assert_not_almost_equal_numbers_with_zeros(a, b):
    _assert_not_almost_equal_both(a, b)


@pytest.mark.parametrize("a,b", [
    (1, "abc"), (1, [1, ]), (1, object()),
])
def test_assert_not_almost_equal_numbers_with_mixed(a, b):
    _assert_not_almost_equal_both(a, b)
# Empty arrays of any dtype pair compare equal when dtype checking is off.
@pytest.mark.parametrize(
    "left_dtype", ["M8[ns]", "m8[ns]", "float64", "int64", "object"])
@pytest.mark.parametrize(
    "right_dtype", ["M8[ns]", "m8[ns]", "float64", "int64", "object"])
def test_assert_almost_equal_edge_case_ndarrays(left_dtype, right_dtype):
    # Empty compare.
    _assert_almost_equal_both(np.array([], dtype=left_dtype),
                              np.array([], dtype=right_dtype),
                              check_dtype=False)


def test_assert_almost_equal_dicts():
    _assert_almost_equal_both({"a": 1, "b": 2}, {"a": 1, "b": 2})


@pytest.mark.parametrize("a,b", [
    ({"a": 1, "b": 2}, {"a": 1, "b": 3}),
    ({"a": 1, "b": 2}, {"a": 1, "b": 2, "c": 3}),
    ({"a": 1}, 1), ({"a": 1}, "abc"), ({"a": 1}, [1, ]),
])
def test_assert_not_almost_equal_dicts(a, b):
    _assert_not_almost_equal_both(a, b)


# A dict-like object (keys/__getitem__) should compare like a real dict.
@pytest.mark.parametrize("val", [1, 2])
def test_assert_almost_equal_dict_like_object(val):
    dict_val = 1
    real_dict = dict(a=val)

    class DictLikeObj:
        def keys(self):
            return "a",

        def __getitem__(self, item):
            if item == "a":
                return dict_val

    func = (_assert_almost_equal_both if val == dict_val
            else _assert_not_almost_equal_both)
    func(real_dict, DictLikeObj(), check_dtype=False)


def test_assert_almost_equal_strings():
    _assert_almost_equal_both("abc", "abc")


@pytest.mark.parametrize("a,b", [
    ("abc", "abcd"), ("abc", "abd"), ("abc", 1), ("abc", [1, ]),
])
def test_assert_not_almost_equal_strings(a, b):
    _assert_not_almost_equal_both(a, b)
# Iterables compare elementwise; class/dtype mismatches and generators fail.
@pytest.mark.parametrize("a,b", [
    ([1, 2, 3], [1, 2, 3]), (np.array([1, 2, 3]), np.array([1, 2, 3])),
])
def test_assert_almost_equal_iterables(a, b):
    _assert_almost_equal_both(a, b)


@pytest.mark.parametrize("a,b", [
    # Class is different.
    (np.array([1, 2, 3]), [1, 2, 3]),
    # Dtype is different.
    (np.array([1, 2, 3]), np.array([1., 2., 3.])),
    # Can't compare generators.
    (iter([1, 2, 3]), [1, 2, 3]), ([1, 2, 3], [1, 2, 4]),
    ([1, 2, 3], [1, 2, 3, 4]), ([1, 2, 3], 1),
])
def test_assert_not_almost_equal_iterables(a, b):
    _assert_not_almost_equal(a, b)


def test_assert_almost_equal_null():
    _assert_almost_equal_both(None, None)


@pytest.mark.parametrize("a,b", [
    (None, np.NaN), (None, 0), (np.NaN, 0),
])
def test_assert_not_almost_equal_null(a, b):
    _assert_not_almost_equal(a, b)


# Infinities are equal to themselves; None matches NaN only in object arrays.
@pytest.mark.parametrize("a,b", [
    (np.inf, np.inf), (np.inf, float("inf")),
    (np.array([np.inf, np.nan, -np.inf]),
     np.array([np.inf, np.nan, -np.inf])),
    (np.array([np.inf, None, -np.inf], dtype=np.object_),
     np.array([np.inf, np.nan, -np.inf], dtype=np.object_)),
])
def test_assert_almost_equal_inf(a, b):
    _assert_almost_equal_both(a, b)


def test_assert_not_almost_equal_inf():
    _assert_not_almost_equal_both(np.inf, 0)


@pytest.mark.parametrize("a,b", [
    (Index([1., 1.1]), Index([1., 1.100001])),
    (Series([1., 1.1]), Series([1., 1.100001])),
    (np.array([1.1, 2.000001]), np.array([1.1, 2.0])),
    (DataFrame({"a": [1., 1.1]}), DataFrame({"a": [1., 1.100001]}))
])
def test_assert_almost_equal_pandas(a, b):
    _assert_almost_equal_both(a, b)


def test_assert_almost_equal_object():
    a = [Timestamp("2011-01-01"), Timestamp("2011-01-01")]
    b = [Timestamp("2011-01-01"), Timestamp("2011-01-01")]
    _assert_almost_equal_both(a, b)
# The tests below pin the exact (regex-escaped) failure messages emitted by
# assert_almost_equal for various mismatch scenarios.
def test_assert_almost_equal_value_mismatch():
    msg = "expected 2\\.00000 but got 1\\.00000, with decimal 5"
    with pytest.raises(AssertionError, match=msg):
        assert_almost_equal(1, 2)


@pytest.mark.parametrize("a,b,klass1,klass2", [
    (np.array([1]), 1, "ndarray", "int"),
    (1, np.array([1]), "int", "ndarray"),
])
def test_assert_almost_equal_class_mismatch(a, b, klass1, klass2):
    msg = """numpy array are different

numpy array classes are different
\\[left\\]:  {klass1}
\\[right\\]: {klass2}""".format(klass1=klass1, klass2=klass2)
    with pytest.raises(AssertionError, match=msg):
        assert_almost_equal(a, b)


def test_assert_almost_equal_value_mismatch1():
    msg = """numpy array are different

numpy array values are different \\(66\\.66667 %\\)
\\[left\\]:  \\[nan, 2\\.0, 3\\.0\\]
\\[right\\]: \\[1\\.0, nan, 3\\.0\\]"""
    with pytest.raises(AssertionError, match=msg):
        assert_almost_equal(np.array([np.nan, 2, 3]),
                            np.array([1, np.nan, 3]))


def test_assert_almost_equal_value_mismatch2():
    msg = """numpy array are different

numpy array values are different \\(50\\.0 %\\)
\\[left\\]:  \\[1, 2\\]
\\[right\\]: \\[1, 3\\]"""
    with pytest.raises(AssertionError, match=msg):
        assert_almost_equal(np.array([1, 2]), np.array([1, 3]))


def test_assert_almost_equal_value_mismatch3():
    msg = """numpy array are different

numpy array values are different \\(16\\.66667 %\\)
\\[left\\]:  \\[\\[1, 2\\], \\[3, 4\\], \\[5, 6\\]\\]
\\[right\\]: \\[\\[1, 3\\], \\[3, 4\\], \\[5, 6\\]\\]"""
    with pytest.raises(AssertionError, match=msg):
        assert_almost_equal(np.array([[1, 2], [3, 4], [5, 6]]),
                            np.array([[1, 3], [3, 4], [5, 6]]))


def test_assert_almost_equal_value_mismatch4():
    msg = """numpy array are different

numpy array values are different \\(25\\.0 %\\)
\\[left\\]:  \\[\\[1, 2\\], \\[3, 4\\]\\]
\\[right\\]: \\[\\[1, 3\\], \\[3, 4\\]\\]"""
    with pytest.raises(AssertionError, match=msg):
        assert_almost_equal(np.array([[1, 2], [3, 4]]),
                            np.array([[1, 3], [3, 4]]))


def test_assert_almost_equal_shape_mismatch_override():
    # "L" suffix tolerated for Python 2 long reprs.
    msg = """Index are different

Index shapes are different
\\[left\\]:  \\(2L*,\\)
\\[right\\]: \\(3L*,\\)"""
    with pytest.raises(AssertionError, match=msg):
        assert_almost_equal(np.array([1, 2]),
                            np.array([3, 4, 5]),
                            obj="Index")


def test_assert_almost_equal_unicode():
    # see gh-20503
    msg = """numpy array are different

numpy array values are different \\(33\\.33333 %\\)
\\[left\\]:  \\[á, à, ä\\]
\\[right\\]: \\[á, à, å\\]"""
    with pytest.raises(AssertionError, match=msg):
        assert_almost_equal(np.array(["á", "à", "ä"]),
                            np.array(["á", "à", "å"]))


def test_assert_almost_equal_timestamp():
    a = np.array([Timestamp("2011-01-01"), Timestamp("2011-01-01")])
    b = np.array([Timestamp("2011-01-01"), Timestamp("2011-01-02")])

    msg = """numpy array are different

numpy array values are different \\(50\\.0 %\\)
\\[left\\]:  \\[2011-01-01 00:00:00, 2011-01-01 00:00:00\\]
\\[right\\]: \\[2011-01-01 00:00:00, 2011-01-02 00:00:00\\]"""
    with pytest.raises(AssertionError, match=msg):
        assert_almost_equal(a, b)


def test_assert_almost_equal_iterable_length_mismatch():
    msg = """Iterable are different

Iterable length are different
\\[left\\]:  2
\\[right\\]: 3"""
    with pytest.raises(AssertionError, match=msg):
        assert_almost_equal([1, 2], [3, 4, 5])


def test_assert_almost_equal_iterable_values_mismatch():
    msg = """Iterable are different

Iterable values are different \\(50\\.0 %\\)
\\[left\\]:  \\[1, 2\\]
\\[right\\]: \\[1, 3\\]"""
    with pytest.raises(AssertionError, match=msg):
        assert_almost_equal([1, 2], [1, 3])
|
python
|
"""Register sample tanuki records in the database."""
import pandas as pd
import sqlalchemy.sql.functions as func
from setting import session
from create_db import *
def main():
    """Insert the sample tanuki records into the database in one bulk save."""
    names = [
        "ポンタ",
        "たぬまる",
        "たぬ子",
        "タヌ太郎",
        "たぬ次郎",
        "タヌキチ",
        "たぬ三郎",
        "たぬ衛門",
        "ぽん助",
        "アライグマ",
    ]
    ages = [5, 3, 8, 4, 5, 8, 1, 5, 9, 10]
    types = [
        "ウスリータヌキ",
        "タイリクタヌキ",
        "エゾタヌキ",
        "ホンドタヌキ",
        "コウライタヌキ",
        "ホンドタヌキ",
        "ウスリータヌキ",
        "エゾタヌキ",
        "ホンドタヌキ",
        "アライグマ",
    ]
    df = pd.DataFrame({"id": list(range(10)),
                       "name": names,
                       "age": ages,
                       "type": types})
    # Cast the numpy scalars back to plain ints for the ORM columns.
    records = [Tanuki(id=int(df["id"][i]),
                      name=df["name"][i],
                      age=int(df["age"][i]),
                      type=df["type"][i])
               for i in range(len(df))]
    session.bulk_save_objects(records)
    session.commit()


if __name__ == "__main__":
    main()
|
python
|
import requests
from datetime import datetime
from .get_client_id import get_client_id
from ._client_keys import _sign_message_as_client, _ephemeral_mode
from ._kachery_cloud_api_url import _kachery_cloud_api_url
def _kacherycloud_request(request_payload: dict):
    """POST a signed (or ephemeral) request to the kachery-cloud API.

    Adds a millisecond timestamp to the payload, signs it with the client
    key unless running in ephemeral mode, and returns the decoded JSON
    response.

    Raises:
        Exception: on a non-200 HTTP response, or when an operation that
            requires a client identity is attempted in ephemeral mode.
    """
    client_id = get_client_id()
    url = f'{_kachery_cloud_api_url}/api/kacherycloud'
    timestamp = int(datetime.timestamp(datetime.now()) * 1000)
    payload = dict(request_payload)
    payload['timestamp'] = timestamp
    if _ephemeral_mode():
        # Only read-style operations are allowed without a client identity.
        allowed_types = [
            'getProjectBucketBaseUrl', 'findIpfsFile', 'getMutable',
            'subscribeToPubsubChannel', 'publishToPubsubChannel',
            'getFeedInfo', 'getFeedMessages',
        ]
        if payload['type'] not in allowed_types:
            raise Exception(f'Unable to perform operation in ephemeral mode: {payload["type"]}')
        req = {'payload': payload}
    else:
        req = {
            'payload': payload,
            'fromClientId': client_id,
            'signature': _sign_message_as_client(payload),
        }
    resp = requests.post(url, json=req)
    if resp.status_code != 200:
        raise Exception(f'Error in {payload["type"]} ({resp.status_code}) {resp.reason}: {resp.text}')
    return resp.json()
|
python
|
from conans import ConanFile, tools, AutoToolsBuildEnvironment, MSBuild
import os
class YASMConan(ConanFile):
    """Conan recipe that builds the yasm assembler from source.

    Builds with MSBuild under Visual Studio and autotools everywhere else;
    the package ships only the executable, so compiler settings are erased
    from the package id.
    """

    name = "yasm"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/yasm/yasm"
    description = "Yasm is a complete rewrite of the NASM assembler under the 'new' BSD License"
    topics = ("conan", "yasm", "installer", "assembler")
    license = "BSD-2-Clause"
    settings = "os", "arch", "compiler", "build_type"
    _autotools = None  # cached AutoToolsBuildEnvironment, built lazily

    @property
    def _source_subfolder(self):
        return "source_subfolder"

    def configure(self):
        # Pure C project: C++ settings are irrelevant.
        del self.settings.compiler.libcxx
        del self.settings.compiler.cppstd

    def package_id(self):
        # The produced binary does not depend on which compiler built it.
        del self.info.settings.compiler

    def source(self):
        tools.get(**self.conan_data["sources"][self.version])
        extracted_dir = "yasm-%s" % self.version
        os.rename(extracted_dir, self._source_subfolder)
        # Version-stamp script needed by the VS build but absent from the tarball.
        tools.download("https://raw.githubusercontent.com/yasm/yasm/bcc01c59d8196f857989e6ae718458c296ca20e3/YASM-VERSION-GEN.bat",
                       os.path.join(self._source_subfolder, "YASM-VERSION-GEN.bat"))

    def _build_vs(self):
        # Build the bundled VS solution with per-arch linker flags.
        with tools.chdir(os.path.join(self._source_subfolder, "Mkfiles", "vc10")):
            with tools.vcvars(self.settings, force=True):
                msbuild = MSBuild(self)
                if self.settings.arch== "x86":
                    msbuild.build_env.link_flags.append("/MACHINE:X86")
                elif self.settings.arch== "x86_64":
                    # NOTE(review): two flags appended as a single string —
                    # confirm MSBuild splits it; otherwise they may be passed
                    # as one malformed flag.
                    msbuild.build_env.link_flags.append("/SAFESEH:NO /MACHINE:X64")
                msbuild.build(project_file="yasm.sln",
                              targets=["yasm"], platforms={"x86": "Win32"}, force_vcvars=True)

    def _configure_autotools(self):
        # Lazily create and cache the configured autotools environment.
        if self._autotools:
            return self._autotools
        self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
        yes_no = lambda v: "yes" if v else "no"
        conf_args = [
            "--enable-debug={}".format(yes_no(self.settings.build_type == "Debug")),
            "--disable-rpath",
            "--disable-nls",
        ]
        self._autotools.configure(args=conf_args, configure_dir=self._source_subfolder)
        return self._autotools

    def build(self):
        if self.settings.compiler == "Visual Studio":
            self._build_vs()
        else:
            autotools = self._configure_autotools()
            autotools.make()

    def package(self):
        self.copy(pattern="BSD.txt", dst="licenses", src=self._source_subfolder)
        self.copy(pattern="COPYING", dst="licenses", src=self._source_subfolder)
        if self.settings.compiler == "Visual Studio":
            self.copy(pattern="*.exe", src=self._source_subfolder, dst="bin", keep_path=False)
        else:
            autotools = self._configure_autotools()
            autotools.install()
            # Man pages etc. are not needed in the binary package.
            tools.rmdir(os.path.join(self.package_folder, "share"))

    def package_info(self):
        # Expose the packaged yasm executable on consumers' PATH.
        bin_path = os.path.join(self.package_folder, "bin")
        self.output.info("Appending PATH environment variable: {}".format(bin_path))
        self.env_info.PATH.append(bin_path)
|
python
|
""" Convert a PCF file into a VPR io.place file. """
from __future__ import print_function
import argparse
import csv
import json
import sys
import os
import vpr_io_place
from lib.parse_pcf import parse_simple_pcf
def main():
    """Convert PCF pin constraints into a VPR io.place file.

    Reads the eBLIF netlist and top.net for the design's I/O nets, a CSV pin
    map for pad -> (x, y, z) locations, and (optionally) a PCF file with
    net -> pad constraints.  Optionally also writes a JSON file of per-IOB
    IOSTANDARD/DRIVE definitions for downstream tools.  Exits with status 1
    on conflicting or unsatisfiable constraints.
    """
    parser = argparse.ArgumentParser(
        description='Convert a PCF file into a VPR io.place file.'
    )
    parser.add_argument(
        "--pcf",
        '-p',
        "-P",
        type=argparse.FileType('r'),
        required=False,
        help='PCF input file'
    )
    parser.add_argument(
        "--blif",
        '-b',
        type=argparse.FileType('r'),
        required=True,
        help='BLIF / eBLIF file'
    )
    parser.add_argument(
        "--map",
        '-m',
        "-M",
        type=argparse.FileType('r'),
        required=True,
        help='Pin map CSV file'
    )
    parser.add_argument(
        "--output",
        '-o',
        "-O",
        type=argparse.FileType('w'),
        default=sys.stdout,
        help='The output io.place file'
    )
    parser.add_argument(
        "--iostandard_defs", help='(optional) Output IOSTANDARD def file'
    )
    parser.add_argument(
        "--iostandard",
        default="LVCMOS33",
        help='Default IOSTANDARD to use for pins',
    )
    parser.add_argument(
        "--drive",
        type=int,
        default=12,
        help='Default drive to use for pins',
    )
    parser.add_argument(
        "--net",
        '-n',
        type=argparse.FileType('r'),
        required=True,
        help='top.net file'
    )
    args = parser.parse_args()

    io_place = vpr_io_place.IoPlace()
    io_place.read_io_list_from_eblif(args.blif)
    io_place.load_block_names_from_net_file(args.net)

    # Map of pad names to VPR locations.
    pad_map = {}

    for pin_map_entry in csv.DictReader(args.map):
        # Each CSV row: pad name -> ((x, y, z) grid location, is_output, iob name).
        pad_map[pin_map_entry['name']] = (
            (
                int(pin_map_entry['x']),
                int(pin_map_entry['y']),
                int(pin_map_entry['z']),
            ),
            pin_map_entry['is_output'],
            pin_map_entry['iob'],
        )

    iostandard_defs = {}

    # Load iostandard constraints. This is a temporary workaround that allows
    # to pass them into fasm2bels. As soon as there is support for XDC this
    # will not be needed anymore.
    # If there is a JSON file with the same name as the PCF file then it is
    # loaded and used as iostandard constraint source NOT for the design but
    # to be used in fasm2bels.
    iostandard_constraints = {}

    if args.pcf:
        fname = args.pcf.name.replace(".pcf", ".json")
        if os.path.isfile(fname):
            with open(fname, "r") as fp:
                iostandard_constraints = json.load(fp)
    net_to_pad = io_place.net_to_pad
    if args.pcf:
        pcf_constraints = parse_simple_pcf(args.pcf)
        # NOTE(review): `|=` mutates the set returned by io_place.net_to_pad in
        # place (if it is a plain set) — presumably intentional; confirm.
        net_to_pad |= set(
            (constr.net, constr.pad) for constr in pcf_constraints
        )
    # Check for conflicting pad constraints
    net_to_pad_map = dict()
    for (net, pad) in net_to_pad:
        if net not in net_to_pad_map:
            net_to_pad_map[net] = pad
        elif pad != net_to_pad_map[net]:
            # Same net constrained to two different pads -> hard error.
            print(
                """ERROR:
Conflicting pad constraints for net {}:\n{}\n{}""".format(
                    net, pad, net_to_pad_map[net]
                ),
                file=sys.stderr
            )
            sys.exit(1)

    # Constrain nets
    for net, pad in net_to_pad:
        if not io_place.is_net(net):
            print(
                """ERROR:
Constrained net {} is not in available netlist:\n{}""".format(
                    net, '\n'.join(io_place.get_nets())
                ),
                file=sys.stderr
            )
            sys.exit(1)

        if pad not in pad_map:
            print(
                """ERROR:
Constrained pad {} is not in available pad map:\n{}""".format(
                    pad, '\n'.join(sorted(pad_map.keys()))
                ),
                file=sys.stderr
            )
            sys.exit(1)

        loc, is_output, iob = pad_map[pad]

        io_place.constrain_net(
            net_name=net,
            loc=loc,
            comment="set_property LOC {} [get_ports {{{}}}]".format(pad, net)
        )
        # Explicit per-pad constraints win; otherwise use the CLI defaults
        # (outputs additionally get a DRIVE value).
        if pad in iostandard_constraints:
            iostandard_defs[iob] = iostandard_constraints[pad]
        else:
            if is_output:
                iostandard_defs[iob] = {
                    'DRIVE': args.drive,
                    'IOSTANDARD': args.iostandard,
                }
            else:
                iostandard_defs[iob] = {
                    'IOSTANDARD': args.iostandard,
                }

    io_place.output_io_place(args.output)

    # Write iostandard definitions
    if args.iostandard_defs:
        with open(args.iostandard_defs, 'w') as f:
            json.dump(iostandard_defs, f, indent=2)
# Script entry point.
if __name__ == '__main__':
    main()
|
python
|
'''
SOLO TEs are these transcripts that contain intact, or semi intact unspliced transcripts.
As we don't trust the short read data to assemble these, we only consider them from the pacbio data:
'''
import sys
from glbase3 import glload, genelist, config
config.draw_mode = 'pdf'
sys.path.append('../../')
import shared
# Merged TE-containing transcript table and Dfam TE annotations.
all_te_transcripts = glload('../te_transcripts/transcript_table_merged.mapped.glb')
dfam = genelist('../dfam/dfam_annotation.tsv', format={'force_tsv': True, 'name': 0, 'type': 3, 'subtype': 4})

solotes = []
type_subtype_counts = {}
# Tallies for the summary pie charts drawn at the bottom.
# (Key 'known_unkonwn' contains a typo but is kept: it names the output PNG.)
stats = {'known_unkonwn': {'known': 0, 'novel': 0, 'unknown': 0},
    'coding_noncoding': {'coding': 0, 'noncoding': 0}
    }

for trans in all_te_transcripts:
    # 'LR' presumably marks long-read (PacBio) transcripts — per the module
    # header, only those are trusted for unspliced TEs.  TODO confirm tag.
    if 'LR' not in trans['name']:
        continue
    # Keep single-exon (i.e. unspliced) transcripts only.
    if trans['exonCounts'] > 1:
        continue

    # add a typ_subtype key:
    ts = set([])
    full_names = set([])
    for d in trans['doms']:
        # Look up each TE domain in Dfam to recover its type/subtype.
        te = dfam.get(key='name', value=d['dom'])[0]
        type_subtyp = '%s:%s' % (te['type'], te['subtype'])
        ts.add(type_subtyp)
        full_names.add('{0}:{1}:{2}'.format(te['type'], te['subtype'], d['dom']))
        if type_subtyp not in type_subtype_counts:
            type_subtype_counts[type_subtyp] = 0
        type_subtype_counts[type_subtyp] += 1

    # ('te_fullanmes' typo kept: it becomes a column name in the saved tables.)
    trans['te_type'] = '; '.join(ts)
    trans['te_fullanmes'] = '; '.join(full_names)
    solotes.append(trans)

    # collect stats;
    # Name tags drive the counters: ';!' -> unknown, ';=' -> known, ';~' -> novel.
    if ';!' in trans['name']:
        stats['known_unkonwn']['unknown'] += 1
    elif ';=' in trans['name']:
        stats['known_unkonwn']['known'] += 1
    elif ';~' in trans['name']:
        stats['known_unkonwn']['novel'] += 1

    # ';C;' / ';NC;' tags feed the coding vs non-coding counters.
    if ';C;' in trans['name']:
        stats['coding_noncoding']['coding'] += 1
    elif ';NC;' in trans['name']:
        stats['coding_noncoding']['noncoding'] += 1

# Save the solo-TE subset in both glbase and TSV form.
newgl = genelist()
newgl.load_list(solotes)
newgl.save('solo_tes.glb')
newgl.saveTSV('solo_tes.tsv', key_order=['ensg', 'enst', 'te_type',])

# collect some stats and pies;
for k in stats:
    shared.pie('pie_%s.png' % k, stats[k].values(), stats[k].keys(), k)
|
python
|
from KeyHardwareInput import *
from time import *
from Enderecos import *
class keyController(object):
    """Synthesizes keyboard events through the Win32 ``SendInput`` API.

    The key codes (``S``, ``A``, ``F4``) and the ctypes input structures
    (``Input``, ``Input_I``, ``KeyBdInput``) come from ``KeyHardwareInput``,
    star-imported at module level.  Windows-only.
    """

    def __init__(self):
        pass

    def pressionar(self, tecla, tempo):
        """Hold the key selected by *tecla* (0=S, 1=A, 2=F4) for *tempo* seconds."""
        key = {0: S, 1: A, 2: F4}.get(tecla)
        if key is not None:
            self.pressKey(key)
            sleep(tempo)
            self.releaseKey(key)

    def pressKey(self, hexKeyCode):
        """Send a scan-code key-down event for *hexKeyCode*."""
        extra = ctypes.c_ulong(0)
        union = Input_I()
        # 0x0008: scan-code flag.
        union.ki = KeyBdInput(0, hexKeyCode, 0x0008, 0, ctypes.pointer(extra))
        event = Input(ctypes.c_ulong(1), union)
        ctypes.windll.user32.SendInput(1, ctypes.pointer(event), ctypes.sizeof(event))

    def releaseKey(self, hexKeyCode):
        """Send the matching key-up event for *hexKeyCode*."""
        extra = ctypes.c_ulong(0)
        union = Input_I()
        # 0x0008 (scan-code) | 0x0002 (key-up).
        union.ki = KeyBdInput(0, hexKeyCode, 0x0008 | 0x0002, 0,
                              ctypes.pointer(extra))
        event = Input(ctypes.c_ulong(1), union)
        ctypes.windll.user32.SendInput(1, ctypes.pointer(event), ctypes.sizeof(event))
|
python
|
def insertionsort(lista):
    """Sort *lista* in place in ascending order using insertion sort.

    Stable, O(n^2) worst case.  Returns None (mutates its argument), like
    ``list.sort()``.
    """
    for i in range(1, len(lista)):
        proximo = lista[i]
        atual = i - 1
        # Bug fix: test `atual >= 0` BEFORE indexing.  The original order
        # evaluated lista[atual] with atual == -1, silently reading the last
        # element via Python's negative indexing before the bounds check.
        while atual >= 0 and proximo < lista[atual]:
            lista[atual + 1] = lista[atual]
            atual -= 1
        lista[atual + 1] = proximo
# Quick manual check when the module is run directly.
if __name__ == "__main__":
    sample = [2, 1, 3, 4, 6, 5]
    insertionsort(sample)
    print(sample)
|
python
|
import time
import argparse
from helpers.connection import conn
import tabulate as tb
tb.WIDE_CHARS_MODE = True
def parsing_store(parser: argparse.ArgumentParser):
    """Register all store sub-commands on *parser*.

    Sub-commands: info, menu, add_menu, order, update_order, stat, search.
    The chosen sub-command name lands in ``args.function``.
    """
    subs = parser.add_subparsers(dest='function')

    info = subs.add_parser('info')
    info.add_argument('id', type=int)

    menu = subs.add_parser('menu')
    menu.add_argument('id', type=int)

    add_menu = subs.add_parser('add_menu')
    add_menu.add_argument('id', type=int)
    add_menu.add_argument('menu', type=str)

    order = subs.add_parser('order')
    order.add_argument('id', type=int)
    # Optional status filter: case-insensitive names or numeric shorthands 0/1/2.
    order.add_argument('status', type=str.lower, choices=['pending', 'delivering', 'delivered', '0', '1', '2'], nargs='?')

    update_order = subs.add_parser('update_order')
    update_order.add_argument('id', type=int)
    update_order.add_argument('order_idx', type=int)

    stat = subs.add_parser('stat')
    stat.add_argument('id', type=int)
    stat.add_argument('start_date', type=str)
    stat.add_argument('days', type=int)

    search = subs.add_parser('search')
    search.add_argument('id', type=int)
def int_check(text):
    """Return True when *text* parses as an int, False otherwise."""
    try:
        int(text)
    except ValueError:
        return False
    return True
def string_check(text):
    """Return True when *text* begins and ends with a single-quote character.

    Note: indexes text[0]/text[-1] directly, so an empty string raises
    IndexError (same contract as before).
    """
    return text[0] == "'" and text[-1] == "'"
def time_form(time_text):
    """Format an 'HHMM' string as 'HH:MM'."""
    return "{}:{}".format(time_text[:2], time_text[2:4])
def show_store_from_table(row):
    """Pretty-print one `store` table row.

    Expected column layout (by index): 1 address, 2 name, 3 lat, 4 lng,
    5 phone, 6 schedule list of dicts (day/open/closed/holiday), 7 seller id.
    """
    print("Name: {id}".format(id=row[2]))
    print("Location: lat {lat} | lng {lng}".format(lat=row[3], lng=row[4]))
    print("Address: {addr}".format(addr=row[1]))
    print("Phone Number: {phone}".format(phone=row[5]))
    print("Schedules: ")
    print("\t|day| |open| |closed|")
    for sched in row[6]:
        # `== False` kept deliberately: only an explicit False means "open day".
        if sched['holiday'] == False:
            print("\t%-8s %-9s %-9s" % (sched['day'], time_form(sched['open']), time_form(sched['closed'])))
        else:
            print("\t%-8s H O L I D A Y" % (sched['day']))
    print("Seller (id): {sid}".format(sid=row[7]))
def show_store_info(args):
    """Fetch and print full details of store ``args.id``; DB errors are printed."""
    try:
        cur = conn.cursor()
        cur.execute("SELECT * FROM store WHERE id=%(id)s;", {"id": args.id})
        for row in cur.fetchall():
            show_store_from_table(row)
    except Exception as err:
        print(err)
def show_menu_info_store(args):
    """Print a table of all menu items belonging to store ``args.id``."""
    try:
        cur = conn.cursor()
        cur.execute("SELECT id, menu FROM menu WHERE sid=%(id)s;", {"id": args.id})
        rows = cur.fetchall()
        print("Menu of Store {sid}".format(sid=args.id))
        print(tb.tabulate(rows, headers=['Menu ID', 'Name']))
    except Exception as err:
        print(err)
def add_menu_into_store(args):
    """Insert a new menu item (``args.menu``) for store ``args.id``.

    Commits on success, rolls back and prints the error otherwise.
    """
    try:
        cur = conn.cursor()
        # Security fix: args.menu is user input — pass it as a query parameter
        # instead of interpolating it into the SQL string (SQL injection risk).
        cur.execute(
            "INSERT INTO menu (menu, sid) VALUES (%(menu)s, %(sid)s)",
            {"menu": args.menu, "sid": args.id},
        )
        conn.commit()
    except Exception as err:
        print(err)
        conn.rollback()
    else:
        print("adding menu success!")
def show_order_info_store(args):
    """List orders of store ``args.id``, optionally filtered by status.

    ``args.status`` is None (all orders) or one of pending/delivering/delivered,
    possibly given as the numeric shorthand '0'/'1'/'2' (already lower-cased by
    argparse).  All queries are parameterized.
    """
    try:
        cur = conn.cursor()
        if args.status is None:
            print("ALL orders for Store {sid}".format(sid=args.id))
            cur.execute(
                "SELECT id, cid, otime, status FROM orders WHERE sid=%(sid)s;",
                {"sid": args.id},
            )
            print(tb.tabulate(cur.fetchall(), headers=['Order ID', 'Customer ID', 'OTime', 'Status']))
            return
        # Map numeric shorthands onto status names; names pass through unchanged.
        status = {'0': 'pending', '1': 'delivering', '2': 'delivered'}.get(args.status, args.status)
        print("{s} orders for Store {sid}".format(s=status.capitalize(), sid=args.id))
        cur.execute(
            "SELECT id, cid, otime FROM orders WHERE sid=%(sid)s and status=%(status)s;",
            {"sid": args.id, "status": status},
        )
        print(tb.tabulate(cur.fetchall(), headers=['Order ID', 'Customer ID', 'OTime']))
    except Exception as err:
        print(err)
def update_order_store(args):
    """Dispatch order ``args.order_idx`` of store ``args.id``: pick the nearest
    available deliverer and flip the order status pending -> delivering.
    """
    # TODO
    try:
        cur = conn.cursor()
        # check validity of order-store relationship
        sql0 = "SELECT sid FROM orders WHERE id={oid}".format(oid=args.order_idx)
        cur.execute(sql0); tmp_id = cur.fetchone()
        if tmp_id is None:
            print("Given order ID is invalid!")
            return
        if tmp_id[0] != args.id:
            print("Given order #{oid} is not accessible from Store #{sid}".format(oid=args.order_idx, sid=args.id))
            return
        # fetch customer's location info
        # NOTE(review): despite the comment above, this reads the STORE's
        # coordinates (store args.id), not the customer's — confirm intent.
        sql1 = "SELECT lat, lng " \
               "FROM store WHERE id = {sid};".format(sid=args.id)
        cur.execute(sql1)
        info_ = cur.fetchone()
        latitude, longitude = info_[0], info_[1]
        # get the closest deliver from the given store
        # Only deliverers with stock <= 4 qualify (presumably a capacity cap);
        # ranked by squared Euclidean distance in lat/lng space.
        sql2 = "SELECT d.id " \
               "FROM delivery d " \
               "WHERE d.stock <= 4 " \
               "ORDER BY power(({lat}-d.lat), 2) + power(({lng}-d.lng), 2) " \
               "LIMIT 1;".format(lat=latitude, lng=longitude)
        cur.execute(sql2)
        # NOTE(review): raises TypeError (caught below) if no deliverer matches.
        delivery_id = (cur.fetchone())[0] # delivery id
        print("Closest deliver is found: Deliver #{did}".format(did=delivery_id))
        # update order record, status: pending->delivering
        sql3 = "UPDATE orders SET did = {did}, status = \'delivering\' " \
               "WHERE id={order_id}".format(did=delivery_id, order_id=args.order_idx)
        cur.execute(sql3)
        conn.commit()
        # increment stock of corresponding deliver
        # NOTE(review): second, separate commit — if it fails, the order is
        # already committed as delivering without the stock bump (non-atomic).
        sql4 = "UPDATE delivery SET stock = stock + 1 WHERE id={did}".format(did=delivery_id)
        cur.execute(sql4)
        conn.commit()
    except Exception as err:
        print(err)
        conn.rollback()
    else:
        print("update_order_store")
def stat_info_store(args):
    """Print per-day order counts for store ``args.id`` over ``args.days`` days
    starting at ``args.start_date`` (expected format YYYY/MM/DD).
    """
    try:
        cur = conn.cursor()
        # Validate the expected Y/M/D shape up front (raises ValueError, caught
        # below, on malformed input).
        y, m, d = (args.start_date).split('/')
        start = "{}/{}/{}".format(y, m, d)
        # Security fix: the date string is user input — bind it (and the day
        # count) as parameters instead of formatting it into the SQL text.
        sql = (
            "SELECT otime::date as Date, COUNT(*) as Orders "
            "FROM orders "
            "WHERE sid=%(sid)s and otime::date >= %(start)s::date "
            "and otime::date < %(start)s::date + %(days)s * interval '1 day' "
            "GROUP BY otime::date;"
        )
        cur.execute(sql, {"sid": args.id, "start": start, "days": args.days})
        rows = cur.fetchall()
        print("STAT info of Store {sid}".format(sid=args.id))
        print(tb.tabulate(rows, headers=['Date', 'Orders']))
    except Exception as err:
        print(err)
def search_info_store(args):
    """Print the 'VIP' customers of store ``args.id``: customers whose cart
    contains EVERY menu item the store offers.

    Implemented as relational division: for each customer who carted at least
    one of the store's menus, the inner NOT EXISTS checks that (store's menus)
    EXCEPT (menus that customer carted) is empty.
    """
    # TODO
    try:
        cur = conn.cursor()
        sql = \
            "SELECT DISTINCT cid, name " \
            "FROM ( " \
            "SELECT * FROM " \
            "( " \
            "SELECT DISTINCT cid, menu_id FROM cart WHERE menu_id IN (SELECT id FROM menu WHERE sid={sid}) " \
            ")sx " \
            "WHERE NOT EXISTS ( " \
            "(SELECT p.id FROM (SELECT id FROM menu WHERE sid={sid})p) " \
            "EXCEPT " \
            "(SELECT sp.menu_id FROM ( " \
            "SELECT DISTINCT cid, menu_id FROM cart WHERE menu_id IN (SELECT id FROM menu WHERE sid={sid}) " \
            ")sp " \
            "WHERE sp.cid = sx.cid " \
            ") " \
            ") " \
            ")end_query, customer " \
            "WHERE customer.id = end_query.cid".format(sid=args.id)
        cur.execute(sql)
        vip_customer = cur.fetchall()
        print("VIP LIST of STORE {sid}".format(sid=args.id))
        print(tb.tabulate(vip_customer, headers=['Customer ID', 'Customer Name']))
    except Exception as err:
        print(err)
if __name__ == "__main__":
    start = time.time()

    parser = argparse.ArgumentParser()
    parsing_store(parser)
    args = parser.parse_args()

    # Dispatch table instead of an if/elif ladder; unknown commands get usage.
    handlers = {
        "info": show_store_info,
        "menu": show_menu_info_store,
        "add_menu": add_menu_into_store,
        "order": show_order_info_store,
        "update_order": update_order_store,
        "stat": stat_info_store,
        "search": search_info_store,
    }
    handler = handlers.get(args.function)
    if handler is not None:
        handler(args)
    else:
        parser.print_help()

    print("Running Time: ", end="")
    print(time.time() - start)
|
python
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import inspect
import weakref
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, Type, TYPE_CHECKING, Union
import torch
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.model_helpers import is_overridden
from torch.utils.data import DataLoader, IterableDataset
import flash
from flash.core.data.auto_dataset import IterableAutoDataset
from flash.core.data.batch import _DeserializeProcessor, _DeserializeProcessorV2
from flash.core.data.input_transform import _create_collate_input_transform_processors
from flash.core.data.input_transform import InputTransform as NewInputTransform
from flash.core.data.io.input import Input
from flash.core.data.io.input_base import InputBase
from flash.core.data.io.input_transform import _InputTransformProcessor, DefaultInputTransform, InputTransform
from flash.core.data.io.output import _OutputProcessor, Output
from flash.core.data.io.output_transform import _OutputTransformProcessor, OutputTransform
from flash.core.data.process import Deserializer
from flash.core.data.properties import ProcessState
from flash.core.data.utils import _INPUT_TRANSFORM_FUNCS, _OUTPUT_TRANSFORM_FUNCS, _STAGES_PREFIX
from flash.core.utilities.imports import _PL_GREATER_EQUAL_1_4_3, _PL_GREATER_EQUAL_1_5_0
from flash.core.utilities.stages import _RUNNING_STAGE_MAPPING, RunningStage
if not _PL_GREATER_EQUAL_1_5_0:
from pytorch_lightning.trainer.connectors.data_connector import _PatchDataLoader
if TYPE_CHECKING:
from flash.core.model import Task
class DataLoaderGetter:
    """Callable that always returns a fixed dataloader.

    Used when patching a LightningModule's ``{stage}_dataloader`` attribute:
    the dummy ``__code__`` attribute makes ``is_overridden`` (which inspects
    code objects) treat the patched attribute as a real override.
    """

    def __init__(self, dataloader):
        self.dataloader = dataloader
        # Dummy `__code__` attribute to trick is_overridden.
        self.__code__ = self.__call__.__code__

    def __call__(self):
        return self.dataloader
class DataPipelineState:
    """Registry that stores and shares all :class:`.ProcessState` objects of an
    initialized :class:`.DataPipeline`, keyed by their concrete type."""

    def __init__(self):
        # One stored instance per ProcessState subclass.
        self._state: Dict[Type[ProcessState], ProcessState] = {}

    def set_state(self, state: ProcessState):
        """Register *state*, replacing any previously stored state of the same type."""
        self._state[type(state)] = state

    def get_state(self, state_type: Type[ProcessState]) -> Optional[ProcessState]:
        """Return the stored :class:`.ProcessState` for *state_type*, or ``None``."""
        try:
            return self._state[state_type]
        except KeyError:
            return None

    def __str__(self) -> str:
        return "{}(state={})".format(self.__class__.__name__, self._state)
class DataPipeline:
"""
DataPipeline holds the engineering logic to connect
:class:`~flash.core.data.io.input_transform.InputTransform` and/or
:class:`~flash.core.data.io.output_transform.OutputTransform`
objects to the ``DataModule``, Flash ``Task`` and ``Trainer``.
"""
INPUT_TRANSFORM_FUNCS: Set[str] = _INPUT_TRANSFORM_FUNCS
OUTPUT_TRANSFORM_FUNCS: Set[str] = _OUTPUT_TRANSFORM_FUNCS
    def __init__(
        self,
        input: Optional[Union[Input, List[InputBase]]] = None,
        input_transform: Optional[InputTransform] = None,
        output_transform: Optional[OutputTransform] = None,
        deserializer: Optional[Deserializer] = None,
        output: Optional[Output] = None,
    ) -> None:
        """Wire up the pipeline components; every argument is optional and
        falls back to a default implementation so the pipeline is always
        fully populated."""
        self.input = input

        self._input_transform_pipeline = input_transform or DefaultInputTransform()
        self._output_transform = output_transform or OutputTransform()
        self._output = output or Output()
        self._deserializer = deserializer or Deserializer()
        self._running_stage = None
    def initialize(self, data_pipeline_state: Optional[DataPipelineState] = None) -> DataPipelineState:
        """Creates the :class:`.DataPipelineState` and gives the reference to the: :class:`.InputTransform`,
        :class:`.OutputTransform`, and :class:`.Output`. Once this has been called, any attempt to add new state will
        give a warning."""
        data_pipeline_state = data_pipeline_state or DataPipelineState()

        if self.input is not None:
            if isinstance(self.input, list):
                # Multiple inputs share the same state object.
                [input.attach_data_pipeline_state(data_pipeline_state) for input in self.input]
            else:
                self.input.attach_data_pipeline_state(data_pipeline_state)

        # Attach the shared state to every pipeline component.
        self._deserializer.attach_data_pipeline_state(data_pipeline_state)
        self._input_transform_pipeline.attach_data_pipeline_state(data_pipeline_state)
        self._output_transform.attach_data_pipeline_state(data_pipeline_state)
        self._output.attach_data_pipeline_state(data_pipeline_state)
        return data_pipeline_state
    @property
    def example_input(self) -> str:
        """Delegates to the deserializer, which defines the expected raw input format."""
        return self._deserializer.example_input
    @staticmethod
    def _is_overridden(method_name: str, process_obj, super_obj: Any, prefix: Optional[str] = None) -> bool:
        """Cropped Version of https://github.com/PyTorchLightning/pytorch-
        lightning/blob/master/pytorch_lightning/utilities/model_helpers.py.

        Returns True when ``process_obj`` defines ``{prefix}_{method_name}``
        (or ``method_name`` if *prefix* is None) with a code object that
        differs from the base class's — i.e. the user overrode the hook.
        """
        current_method_name = method_name if prefix is None else f"{prefix}_{method_name}"

        if not hasattr(process_obj, current_method_name):
            return False

        # TODO: With the new API, all hooks are implemented to improve discoverability.
        # New-API transforms always carry the prefixed name, so compare against it there.
        return (
            getattr(process_obj, current_method_name).__code__
            != getattr(super_obj, current_method_name if super_obj == NewInputTransform else method_name).__code__
        )
    @classmethod
    def _is_overridden_recursive(
        cls, method_name: str, process_obj, super_obj: Any, prefix: Optional[str] = None
    ) -> bool:
        """Cropped Version of https://github.com/PyTorchLightning/pytorch-
        lightning/blob/master/pytorch_lightning/utilities/model_helpers.py.

        Like :meth:`_is_overridden`, but a prefixed hook also counts as
        overridden when the bare (un-prefixed) hook is overridden.
        """
        assert isinstance(process_obj, super_obj), (process_obj, super_obj)
        if prefix is None and not hasattr(super_obj, method_name):
            raise MisconfigurationException(f"This function doesn't belong to the parent class {super_obj}")

        current_method_name = method_name if prefix is None else f"{prefix}_{method_name}"

        if not hasattr(process_obj, current_method_name):
            # Prefixed hook absent on the object: fall back to the bare hook name.
            return DataPipeline._is_overridden_recursive(method_name, process_obj, super_obj)

        # unwrap() strips decorators so we compare the user's actual code object.
        current_code = inspect.unwrap(getattr(process_obj, current_method_name)).__code__
        has_different_code = current_code != getattr(super_obj, method_name).__code__

        if not prefix:
            return has_different_code
        # Prefixed hook overridden itself, OR the bare hook is overridden.
        return has_different_code or cls._is_overridden_recursive(method_name, process_obj, super_obj)
    @staticmethod
    def _identity(samples: Sequence[Any]) -> Sequence[Any]:
        """No-op transform/collate used wherever a stage's work happens elsewhere."""
        return samples
    def deserialize_processor(self) -> _DeserializeProcessor:
        """Build the deserialize processor (new-API transforms get the V2 variant)."""
        if isinstance(self._input_transform_pipeline, NewInputTransform):
            return _DeserializeProcessorV2(
                self._deserializer,
                self._input_transform_pipeline,
                self._input_transform_pipeline._per_sample_transform,
                [],
            )
        # Legacy path: element [0] of the (deserialize, worker, device) triple.
        return self._create_collate_input_transform_processors(RunningStage.PREDICTING)[0]
    def worker_input_transform_processor(
        self, running_stage: RunningStage, collate_fn: Optional[Callable] = None, is_serving: bool = False
    ) -> _InputTransformProcessor:
        """Return the input-transform processor that runs inside DataLoader workers."""
        if isinstance(self._input_transform_pipeline, NewInputTransform):
            return _create_collate_input_transform_processors(self._input_transform_pipeline, [])[0]
        # Legacy path: element [1] of the (deserialize, worker, device) triple.
        return self._create_collate_input_transform_processors(
            running_stage, collate_fn=collate_fn, is_serving=is_serving
        )[1]
    def device_input_transform_processor(self, running_stage: RunningStage) -> _InputTransformProcessor:
        """Return the input-transform processor that runs after transfer to the device."""
        if isinstance(self._input_transform_pipeline, NewInputTransform):
            return _create_collate_input_transform_processors(self._input_transform_pipeline, [])[1]
        # Legacy path: element [2] of the (deserialize, worker, device) triple.
        return self._create_collate_input_transform_processors(running_stage)[2]
    def output_transform_processor(self, running_stage: RunningStage, is_serving=False) -> _OutputTransformProcessor:
        """Build the output-transform processor for *running_stage*."""
        return self._create_output_transform_processor(running_stage, is_serving=is_serving)
    def output_processor(self) -> _OutputProcessor:
        """Wrap the configured :class:`.Output` in an ``_OutputProcessor``."""
        return _OutputProcessor(self._output)
@classmethod
def _resolve_function_hierarchy(
cls, function_name, process_obj, stage: RunningStage, object_type: Optional[Type] = None
) -> str:
if object_type is None:
object_type = InputTransform
prefixes = []
if stage in (RunningStage.TRAINING, RunningStage.TUNING):
prefixes += ["train", "fit"]
elif stage == RunningStage.VALIDATING:
prefixes += ["val", "fit"]
elif stage == RunningStage.TESTING:
prefixes += ["test"]
elif stage == RunningStage.PREDICTING:
prefixes += ["predict"]
elif stage == RunningStage.SERVING:
prefixes += ["serve"]
prefixes += [None]
for prefix in prefixes:
if cls._is_overridden(function_name, process_obj, object_type, prefix=prefix):
return function_name if prefix is None else f"{prefix}_{function_name}"
return function_name
def _make_collates(self, on_device: bool, collate: Callable) -> Tuple[Callable, Callable]:
if on_device:
return self._identity, collate
return collate, self._identity
    def _create_collate_input_transform_processors(
        self,
        stage: RunningStage,
        collate_fn: Optional[Callable] = None,
        is_serving: bool = False,
    ) -> Tuple[_DeserializeProcessor, _InputTransformProcessor, _InputTransformProcessor]:
        """Build the (deserialize, worker, device) processor triple for *stage*.

        Resolves the stage-specific override of every input-transform hook and
        decides whether collation happens in the DataLoader worker or on the
        device, based on which per-sample/per-batch hooks were overridden.
        """
        original_collate_fn = collate_fn

        input_transform: InputTransform = self._input_transform_pipeline
        prefix: str = _STAGES_PREFIX[stage]

        if collate_fn is not None:
            # Replace the default collate of the transform with the loader's.
            input_transform._default_collate = collate_fn

        # Resolve each hook name to its most specific stage-prefixed override.
        func_names: Dict[str, str] = {
            k: self._resolve_function_hierarchy(k, input_transform, stage, InputTransform)
            for k in self.INPUT_TRANSFORM_FUNCS
        }

        collate_fn: Callable = getattr(input_transform, func_names["collate"])

        per_batch_transform_overridden: bool = self._is_overridden_recursive(
            "per_batch_transform", input_transform, InputTransform, prefix=prefix
        )

        per_sample_transform_on_device_overridden: bool = self._is_overridden_recursive(
            "per_sample_transform_on_device", input_transform, InputTransform, prefix=prefix
        )

        collate_in_worker_from_transform: Optional[bool] = getattr(
            input_transform, f"_{prefix}_collate_in_worker_from_transform", None
        )

        # Having both worker- and device-side transforms is ambiguous unless
        # the user explicitly chose where collation happens.
        is_per_overridden = per_batch_transform_overridden and per_sample_transform_on_device_overridden
        if collate_in_worker_from_transform is None and is_per_overridden:
            raise MisconfigurationException(
                f"{self.__class__.__name__}: `per_batch_transform` and `per_sample_transform_on_device` "
                f"are mutually exclusive for stage {stage}"
            )

        if isinstance(collate_in_worker_from_transform, bool):
            worker_collate_fn, device_collate_fn = self._make_collates(not collate_in_worker_from_transform, collate_fn)
        else:
            worker_collate_fn, device_collate_fn = self._make_collates(
                per_sample_transform_on_device_overridden, collate_fn
            )

        # Unwrap an already-wrapped worker collate to avoid double wrapping.
        worker_collate_fn = (
            worker_collate_fn.collate_fn
            if isinstance(worker_collate_fn, _InputTransformProcessor)
            else worker_collate_fn
        )

        per_sample_transform = getattr(input_transform, func_names["per_sample_transform"])

        deserialize_processor = _DeserializeProcessor(
            self._deserializer,
            input_transform,
            per_sample_transform,
            callbacks=input_transform.callbacks,
        )
        worker_input_transform_processor = _InputTransformProcessor(
            input_transform,
            worker_collate_fn,
            self._identity if is_serving else per_sample_transform,
            getattr(input_transform, func_names["per_batch_transform"]),
            stage,
            callbacks=input_transform.callbacks,
        )
        # Keep the loader's original collate so detach can restore it later.
        worker_input_transform_processor._original_collate_fn = original_collate_fn
        device_input_transform_processor = _InputTransformProcessor(
            input_transform,
            device_collate_fn,
            getattr(input_transform, func_names["per_sample_transform_on_device"]),
            getattr(input_transform, func_names["per_batch_transform_on_device"]),
            stage,
            apply_per_sample_transform=device_collate_fn != self._identity,
            on_device=True,
            callbacks=input_transform.callbacks,
        )
        return deserialize_processor, worker_input_transform_processor, device_input_transform_processor
    @staticmethod
    def _model_transfer_to_device_wrapper(
        func: Callable, input_transform: _InputTransformProcessor, model: "Task", stage: RunningStage
    ) -> Callable:
        """Wrap ``transfer_batch_to_device`` in a ``_StageOrchestrator`` (once)
        and register *input_transform* to run for *stage*."""
        if not isinstance(func, _StageOrchestrator):
            func = _StageOrchestrator(func, model)
        func.register_additional_stage(stage, input_transform)

        return func
    @staticmethod
    def _model_predict_step_wrapper(
        func: Callable, output_transform_processor: _OutputTransformProcessor, model: "Task"
    ) -> Callable:
        """Wrap ``predict_step`` so the output transform runs after prediction.

        The original function is kept on ``func._original`` so detach can
        restore it later.
        """
        if not isinstance(func, _StageOrchestrator):
            _original = func
            func = _StageOrchestrator(func, model)
            func._original = _original
        func.register_additional_stage(RunningStage.PREDICTING, output_transform_processor)

        return func
    @staticmethod
    def _get_dataloader(model: "Task", loader_name: str) -> Tuple[DataLoader, str]:
        """Locate the dataloader named *loader_name* on the model, its datamodule,
        or (PL >= 1.5) the trainer's data-connector source.

        Returns ``(dataloader, attr_name)`` where *attr_name* is the dotted
        attribute path it was found under; ``(None, None)`` if not found.
        """
        dataloader, attr_name = None, None
        if is_overridden(loader_name, model):
            # Defined directly on the LightningModule.
            dataloader = getattr(model, loader_name)
            attr_name = loader_name

        elif (
            model.trainer
            and hasattr(model.trainer, "datamodule")
            and model.trainer.datamodule
            and is_overridden(loader_name, model.trainer.datamodule, flash.DataModule)
        ):
            # Defined on the attached DataModule.
            dataloader = getattr(model.trainer.datamodule, loader_name, None)
            attr_name = f"trainer.datamodule.{loader_name}"

        elif _PL_GREATER_EQUAL_1_5_0 and model.trainer is not None:
            # PL >= 1.5 keeps dataloader sources on the data connector.
            source = getattr(model.trainer._data_connector, f"_{loader_name}_source")
            if not source.is_module():
                dataloader = source.dataloader()
                attr_name = loader_name

                if dataloader is not None:
                    # Update source as wrapped loader will be attached to model
                    source.instance = model
                    source.name = loader_name

        return dataloader, attr_name
    @staticmethod
    def _patch_dataloader(model: "Task", dataloader: Union[Callable, DataLoader], stage: RunningStage):
        """Wrap a raw ``DataLoader`` so it can be assigned as a ``{stage}_dataloader``
        attribute, using the mechanism required by the installed Lightning version.

        Callables are returned unchanged.
        """
        if isinstance(dataloader, DataLoader):
            if _PL_GREATER_EQUAL_1_5_0:
                dataloader = DataLoaderGetter(dataloader)
            elif _PL_GREATER_EQUAL_1_4_3:
                dataloader = _PatchDataLoader(dataloader, _STAGES_PREFIX[stage])
                dataloader.patch(model)
            else:
                dataloader = _PatchDataLoader(dataloader)
        return dataloader
    @staticmethod
    def _set_loader(model: "Task", loader_name: str, new_loader: DataLoader) -> None:
        """This function is used to set the loader to model and/or datamodule.

        *loader_name* may be a dotted path (e.g. ``trainer.datamodule.train_dataloader``);
        intermediate attributes are traversed before setting the final one.
        """
        *intermediates, final_name = loader_name.split(".")
        curr_attr = model

        # This relies on python calling all non-integral types by reference.
        # It may fail for integral types since those will be called by value.
        for intermediate in intermediates:
            curr_attr = getattr(curr_attr, intermediate)

        setattr(curr_attr, final_name, new_loader)
        # NOTE(review): the loader is ALSO set directly on the model under the
        # final name, even when the dotted path targets the datamodule —
        # presumably so the model always exposes the wrapped loader; confirm.
        setattr(model, final_name, new_loader)
    def _attach_input_transform_to_model(
        self,
        model: "Task",
        stage: Optional[RunningStage] = None,
        device_transform_only: bool = False,
        is_serving: bool = False,
    ) -> None:
        """Rebuild the model's dataloaders so their ``collate_fn`` runs the worker
        input transform, and wrap ``transfer_batch_to_device`` with the device
        transform.

        With ``device_transform_only=True`` only the device wrapping is redone
        (used during detach) and the loaders are not re-instantiated.
        """
        device_collate_fn = torch.nn.Identity()

        if not stage:
            stages = [RunningStage.TRAINING, RunningStage.VALIDATING, RunningStage.TESTING, RunningStage.PREDICTING]

        elif isinstance(stage, RunningStage):
            stages = [stage]
        # NOTE(review): any other truthy `stage` value leaves `stages` unbound
        # and raises below — presumably unreachable; confirm.

        for stage in stages:
            loader_name = f"{_STAGES_PREFIX[stage]}_dataloader"

            dataloader, whole_attr_name = self._get_dataloader(model, loader_name)

            if not dataloader:
                continue

            if callable(dataloader):
                dataloader = dataloader()

            if dataloader is None:
                continue

            # Normalize to a list so single loaders and sequences share one path.
            if isinstance(dataloader, Sequence):
                was_seq = True
            else:
                dataloader = [dataloader]
                was_seq = False

            for idx, loader in enumerate(dataloader):
                # TODO: See lightning for proper reinstantiation of loader
                if isinstance(loader, DataLoader):
                    dl_args = {k: v for k, v in vars(loader).items() if not k.startswith("_")}

                    _, dl_args["collate_fn"], device_collate_fn = self._create_collate_input_transform_processors(
                        stage=stage, collate_fn=dl_args["collate_fn"], is_serving=is_serving
                    )

                    if isinstance(dl_args["dataset"], IterableDataset):
                        # Iterable datasets cannot take a sampler.
                        del dl_args["sampler"]

                    # don't have to reinstantiate loader if just rewrapping devices (happens during detach)
                    if not device_transform_only:
                        del dl_args["batch_sampler"]
                        loader = type(loader)(**dl_args)

                dataloader[idx] = loader

            # don't have to set attribute if rewrapping device part (happens during detach)
            if not device_transform_only:
                if not was_seq:
                    dataloader = dataloader[0]

                dataloader = self._patch_dataloader(model, dataloader, stage)

                self._set_loader(model, whole_attr_name, dataloader)

        model.transfer_batch_to_device = self._model_transfer_to_device_wrapper(
            model.transfer_batch_to_device, device_collate_fn, model, stage
        )
    def _create_output_transform_processor(
        self,
        stage: RunningStage,
        is_serving: bool = False,
    ) -> _OutputTransformProcessor:
        """Build the output-transform processor for *stage*, resolving each
        hook to its stage-specific override.

        When serving, the final Output step is skipped (``output=None``).
        """
        output_transform: OutputTransform = self._output_transform

        func_names: Dict[str, str] = {
            k: self._resolve_function_hierarchy(k, output_transform, stage, object_type=OutputTransform)
            for k in self.OUTPUT_TRANSFORM_FUNCS
        }

        return _OutputTransformProcessor(
            getattr(output_transform, func_names["uncollate"]),
            getattr(output_transform, func_names["per_batch_transform"]),
            getattr(output_transform, func_names["per_sample_transform"]),
            output=None if is_serving else self._output,
            is_serving=is_serving,
        )
    def _attach_output_transform_to_model(
        self,
        model: "Task",
        stage: RunningStage,
        is_serving: bool = False,
    ) -> "Task":
        """Wrap ``model.predict_step`` so predictions flow through the output
        transform; returns the (mutated) model."""
        model.predict_step = self._model_predict_step_wrapper(
            model.predict_step, self._create_output_transform_processor(stage, is_serving=is_serving), model
        )
        return model
    def _attach_to_model(
        self,
        model: "Task",
        stage: RunningStage = None,
        is_serving: bool = False,
    ):
        """Attach both the input transform and (for predict) the output transform."""
        # not necessary to detach. preprocessing and postprocessing for stage will be overwritten.
        self._attach_input_transform_to_model(model, stage)

        if not stage or stage == RunningStage.PREDICTING:
            self._attach_output_transform_to_model(model, RunningStage.PREDICTING, is_serving=is_serving)
    def _detach_from_model(self, model: "Task", stage: Optional[RunningStage] = None):
        """Undo ``_attach_to_model`` for *stage* (or all stages when None)."""
        self._detach_input_transform_from_model(model, stage)

        if not stage or stage == RunningStage.PREDICTING:
            self._detach_output_transform_from_model(model)
def _detach_input_transform_from_model(self, model: "Task", stage: Optional[RunningStage] = None):
    """Remove input-transform wrapping from the model's dataloaders.

    For each requested stage this unregisters the device collate from the
    ``transfer_batch_to_device`` orchestrator, then rebuilds every wrapped
    ``DataLoader`` with its original collate function restored.
    """
    if not stage:
        stages = [RunningStage.TRAINING, RunningStage.VALIDATING, RunningStage.TESTING, RunningStage.PREDICTING]
    elif isinstance(stage, RunningStage):
        stages = [stage]
    for stage in stages:
        device_collate = None
        if isinstance(model.transfer_batch_to_device, _StageOrchestrator):
            device_collate = model.transfer_batch_to_device.unregister_stage(stage)
            # if no additional func available: remove wrapper
            if model.transfer_batch_to_device.is_empty():
                model.transfer_batch_to_device = model.transfer_batch_to_device.func
        if not device_collate:
            device_collate = self._identity
        loader_name = f"{_STAGES_PREFIX[stage]}_dataloader"
        dataloader, whole_attr_name = self._get_dataloader(model, loader_name)
        if not dataloader:
            continue
        if callable(dataloader):
            # the attribute may be a factory returning the actual loader(s)
            dataloader = dataloader()
        if isinstance(dataloader, Sequence):
            was_seq = True
        else:
            dataloader = [dataloader]
            was_seq = False
        for idx, loader in enumerate(dataloader):
            if isinstance(loader, DataLoader):
                # copy the loader's public constructor args so it can be rebuilt
                dl_args = {k: v for k, v in vars(loader).items() if not k.startswith("_")}
                # TODO: Remove the partial function once resolved on Lightning side.
                if isinstance(dl_args["collate_fn"], partial):
                    default_collate = dl_args["collate_fn"].keywords.get("default_collate", None)
                    if default_collate:
                        dl_args["collate_fn"] = default_collate
                if isinstance(dl_args["collate_fn"], _InputTransformProcessor):
                    # restore the collate function that was in place before attach
                    dl_args["collate_fn"] = dl_args["collate_fn"]._original_collate_fn
                if isinstance(dl_args["dataset"], (IterableAutoDataset, IterableDataset)):
                    # iterable datasets reject explicit samplers on reconstruction
                    del dl_args["sampler"]
                    del dl_args["batch_sampler"]
                loader = type(loader)(**dl_args)
            dataloader[idx] = loader
        if not was_seq:
            dataloader = dataloader[0]
        dataloader = self._patch_dataloader(model, dataloader, stage)
        self._set_loader(model, whole_attr_name, dataloader)
@staticmethod
def _detach_output_transform_from_model(model: "Task"):
    """Restore the model's original ``predict_step`` if it was wrapped."""
    if hasattr(model.predict_step, "_original"):
        # don't delete the predict_step here since we don't know
        # if any other pipeline is attached which may rely on this!
        model.predict_step = model.predict_step._original
def __str__(self) -> str:
    """Return a one-line summary of the pipeline's components."""
    # Local renamed from ``input`` so the builtin is not shadowed.
    data_input: Input = self.input
    input_transform: InputTransform = self._input_transform_pipeline
    output_transform: OutputTransform = self._output_transform
    output: Output = self._output
    deserializer: Deserializer = self._deserializer
    return (
        f"{self.__class__.__name__}("
        f"input={str(data_input)}, "
        f"deserializer={deserializer}, "
        f"input_transform={input_transform}, "
        f"output_transform={output_transform}, "
        f"output={output})"
    )
class _StageOrchestrator:
    """Wraps a callable and applies an optional per-stage post-processing step.

    Used to wrap ``model.transfer_batch_to_device`` so that a device collate
    function can be registered/unregistered per ``RunningStage``.
    """

    def __init__(self, func_to_wrap: Callable, model: "Task") -> None:
        self.func = func_to_wrap
        self._stage_mapping = {k: None for k in RunningStage}
        # weakref proxy avoids a reference cycle between the model and this wrapper
        self.model = weakref.proxy(model)
        functools.update_wrapper(self, self.func)

    def __call__(self, *args, **kwargs):
        """Call the wrapped function, then the current stage's extra func, if any."""
        outputs = self.func(*args, **kwargs)
        try:
            stage = self.model.trainer._running_stage
        except AttributeError:
            # newer Lightning versions expose the stage on trainer.state
            stage = self.model.trainer.state.stage
        internal_running_state = _RUNNING_STAGE_MAPPING[stage]
        additional_func = self._stage_mapping.get(internal_running_state, None)
        if additional_func:
            outputs = additional_func(outputs)
        return outputs

    def register_additional_stage(self, stage: RunningStage, stage_func: Optional[Callable] = None):
        """Register (or clear, when ``stage_func`` is None) a stage's extra func."""
        assert stage_func is None or callable(stage_func)
        # BUGFIX: previously ``stage_func.to(...)`` was called unconditionally,
        # raising AttributeError for the None case the assert explicitly allows.
        self._stage_mapping[stage] = (
            stage_func.to(self.model.device, self.model.dtype) if stage_func is not None else None
        )

    def unregister_stage(self, stage: RunningStage):
        """Remove and return the stage's extra func, moved back to the CPU."""
        ret_val = self._stage_mapping.pop(stage)
        self._stage_mapping[stage] = None
        if ret_val:
            ret_val = ret_val.cpu()
        return ret_val

    def is_empty(self):
        """True when no stage has an extra func registered."""
        return all(v is None for v in self._stage_mapping.values()) or not self._stage_mapping
|
python
|
# -*- coding: utf-8 -*-
"""
定时监控futnn api进程,如果进程crash, 自动重启,
1. 该脚本仅支持windows (目前api也只有windows版本)
2. 构造 FTApiDaemon 需指定ftnn.exe所在的目录 ,一般是'C:\Program Files (x86)\FTNN\\'
3. 对象实现的本地监控, 只能运行在ftnn api 进程所在的机器上
"""
import psutil
import time
import socket
import sys
import configparser
from threading import Thread
import os
class FTApiDaemon:
    """Windows-only watchdog that restarts FTNN.exe when its API stops responding.

    A background thread polls the local API socket; when the connection fails,
    any existing FTNN.exe / FTBugReport.exe processes are killed and FTNN.exe
    is relaunched.
    """

    def __init__(self, ftnn_root_path='D:\Program Files (x86)\FTNN\\'):
        self._root_path = ftnn_root_path
        self._exe_path = self._root_path + 'FTNN.exe'
        self._crash_report_path = self._root_path + 'FTBugReport.exe'
        self._plugin_path = self._root_path + 'plugin\config.ini'
        self._api_port = None
        self._started = False
        self._thread_daemon = None
        self._close = False
        if not os.path.isfile(self._exe_path) or not os.path.isfile(self._crash_report_path):
            # BUGFIX: message typo ("erro") corrected
            print("FTApiDaemon error file not exist !")
        else:
            # read the api port from the plugin ini file
            try:
                config = configparser.ConfigParser()
                # BUGFIX: the original leaked the ini file handle
                with open(self._plugin_path) as ini_file:
                    config.read_file(ini_file)
                self._api_port = int(config.get("pluginserver", "port"))
                print('FTApiDaemon find api_port={}'.format(self._api_port))
                del config
            except Exception:
                print('FTApiDaemon config read error!')

    def start(self):
        """Start the monitoring thread; no-op if already started or misconfigured."""
        if self._started:
            return
        if self._api_port is None:
            print("FTApiDaemon start fail!")
            return
        self._started = True
        self._close = False
        self._thread_daemon = Thread(target=self._fun_thread_daemon)
        # non-daemon thread: keeps the process alive while monitoring
        # (``setDaemon`` is deprecated; set the attribute directly)
        self._thread_daemon.daemon = False
        self._thread_daemon.start()

    def close(self):
        """Request the monitoring thread to stop and wait for it briefly."""
        if not self._started:
            return
        self._started = False
        if self._thread_daemon is not None:
            self._close = True
            # BUGFIX: keyword was misspelled 'tiimeout', raising TypeError
            self._thread_daemon.join(timeout=10)
            self._thread_daemon = None

    def _fun_thread_daemon(self):
        """Thread body: poll the API socket and restart FTNN.exe on failure."""
        time_sleep = 5
        if self._close:
            return
        while True:
            # check api socket is answering
            is_api_ok = self._is_api_socket_ok()
            if is_api_ok is True:
                time.sleep(time_sleep)
                continue
            # loop to close existing ftnn.exe && ftbugreport.exe processes
            while True:
                process_bugreport = self._get_process_by_path(self._crash_report_path)
                process_ftnn = self._get_process_by_path(self._exe_path)
                if process_bugreport is None and process_ftnn is None:
                    break
                if process_bugreport is not None:
                    process_bugreport.kill()
                if process_ftnn is not None:
                    process_ftnn.kill()
                time.sleep(1)
            # start a fresh ftnn.exe process
            process_new = psutil.Popen([self._exe_path, "type=python_auto"])
            if process_new is not None:
                print("FTApiDaemon new futnn process open ! pid={}".format(process_new.pid))
            else:
                print("FTApiDaemon open process fail ! ")
            time.sleep(time_sleep)

    def _is_api_socket_ok(self):
        """Return True when a TCP connection to the local API port succeeds."""
        api_ip = '127.0.0.1'
        s = socket.socket()
        s.settimeout(10)
        try:
            s.connect((api_ip, self._api_port))
        except Exception as e:
            print("socket connect err:{}".format(e))
            return False
        finally:
            # BUGFIX: the original leaked the socket on connection failure
            s.close()
        return True

    def _get_process_by_pid(self, pid):
        """Return the psutil.Process for *pid*, or None if it does not exist."""
        try:
            p = psutil.Process(pid)
        except Exception:
            return None
        return p

    def _get_process_by_path(self, path):
        """Return the first process whose executable path matches *path* (case-insensitive)."""
        lower_path = str(path).lower()
        for pid in psutil.pids():
            try:
                process = psutil.Process(pid)
                tmp = process.exe()
                # BUGFIX: compared against the non-lowered ``path``, so the
                # case-insensitive match never worked as intended
                if str(tmp).lower() == lower_path:
                    return process
            except Exception:
                continue
        return None
if __name__ == '__main__':
    # Example usage: monitor the FTNN installation in the default directory.
    root_path = 'D:\Program Files (x86)\FTNN\\'
    daemon = FTApiDaemon(root_path)
    daemon.start()
|
python
|
import os
from jinja2 import Environment, FileSystemLoader
# Directory of this file, used to locate the template directory.
PATH = os.path.dirname(os.path.abspath(__file__))
# Templates live in the 'resources' directory next to this file's parent.
# NOTE(review): splitting on "/" assumes POSIX paths — breaks on Windows; confirm.
TEMPLATE_PATH = os.path.join("/".join(PATH.split("/")[0:-1]),'resources')
# Shared Jinja environment; autoescape disabled on purpose (templates are not HTML-escaped).
TEMPLATE_ENVIRONMENT = Environment(
    autoescape=False,
    loader=FileSystemLoader(TEMPLATE_PATH),
    trim_blocks=False)
def renderTemplate(template_filename, data):
    """Render the named Jinja template with *data*.

    Before rendering, every ``set`` value found anywhere in the (possibly
    nested) dict is replaced in place by a comma-joined string, since Jinja
    output for sets is not deterministic.
    """

    def _flatten_sets(node):
        """Recursively replace set values in dicts with comma-joined strings."""
        if isinstance(node, dict):
            for key, value in node.items():
                if isinstance(value, dict):
                    _flatten_sets(value)
                elif isinstance(value, list):
                    for item in value:
                        _flatten_sets(item)
                elif isinstance(value, set):
                    node[key] = ",".join(value)

    _flatten_sets(data)
    return TEMPLATE_ENVIRONMENT.get_template(template_filename).render(data)
|
python
|
# Defines projections through a parallel slab.
import math
import numpy
from camera_model import central_projection
def polar_normal(elevation, azimuth):
    """Unit normal given in polar coordinates, oriented along +z by default.

    elevation -- elevation angle of the normal vector
    azimuth   -- azimuth angle of the normal vector
    Returns a numpy unit 3-vector.
    """
    cos_elevation = math.cos(elevation)
    return numpy.array([
        -cos_elevation * math.sin(azimuth),
        math.sin(elevation),
        cos_elevation * math.cos(azimuth),
    ])
class ExactSlabProjection:
    """Exact central projection of a space point through a planar refractive slab."""

    def __init__(self, elevation=0.0, azimuth=0.0, tau=0.01, nu=1.0):
        """Initializes the projection.

        elevation -- elevation of the slab normal vector
        azimuth   -- azimuth of the slab normal vector
        tau       -- thickness of the slab
        nu        -- refraction index
        """
        self.elevation = elevation
        self.azimuth = azimuth
        self.tau = tau
        self.nu = nu

    def __call__(self, point):
        """Projects a point through the slab by using the exact projection.

        point -- space point (3-vector)
        Returns the result of central projection of the refraction-shifted point.
        """
        n = polar_normal(self.elevation, self.azimuth)
        # decompose |point|^2 into components along (w) and orthogonal to (u) the normal
        w = numpy.dot(n, point)
        wsqr = w * w
        usqr = numpy.dot(point, point) - wsqr
        # usqr is per construction positive, so a violation to that can be only a numerical issue
        if usqr < 0.0:
            usqr = 0.0
        tsqr = self.tau * self.tau
        nusqr = self.nu * self.nu
        wwpuu = wsqr + usqr
        # coefficients of the quartic in the displacement sigma along the normal
        a4 = nusqr
        a3 = -2.0 * nusqr * (w + self.tau)
        a2 = (nusqr - 1.0) * (usqr + tsqr) + nusqr * w * (w + 4.0 * self.tau)
        a1 = -2.0 * self.tau * (nusqr * wwpuu + self.tau * w * (nusqr - 1.0) - usqr)
        a0 = (nusqr - 1.0) * tsqr * wwpuu
        # closed-form quartic root via the resolvent cubic (Ferrari-style formulas)
        delta0 = a2 * a2 - 3.0 * a3 * a1 + 12.0 * a4 * a0
        delta1 = 2.0 * a2 * a2 * a2 - 9.0 * a3 * a2 * a1 + 27.0 * a3 * a3 * a0 + 27.0 * a4 * a1 * a1 - 72.0 * a4 * a2 * a0
        omega = pow((delta1 + math.sqrt(delta1 * delta1 - 4.0 * delta0 * delta0 * delta0)) / 2.0, 1.0 / 3.0)
        p = (8.0 * a4 * a2 - 3.0 * a3 * a3) / (8.0 * a4 * a4)
        s = math.sqrt(-2.0 * p / 3.0 + 1.0 / (3.0 * a4) * (omega + delta0 / omega)) / 2.0
        q = (a3 * a3 * a3 - 4.0 * a4 * a3 * a2 + 8.0 * a4 * a4 * a1) / (8.0 * a4 * a4 * a4)
        sigma = -a3 / (4.0 * a4) - s - math.sqrt(-4.0 * s * s - 2.0 * p + q / s) / 2.0
        # shift the point by sigma along the normal, then project centrally
        return central_projection(point - sigma * n)

    def vectorize(self):
        """Returns a vector representation of the class: [elevation, azimuth, tau, nu]."""
        return [self.elevation, self.azimuth, self.tau, self.nu]

    def unvectorize(self, x):
        """Takes the first four entries of *x* as own parametrization; returns the rest.

        x -- vector containing the parameterization of the class in its first entries
        Returns the remainder ``x[4:]``.
        """
        self.elevation = x[0]
        self.azimuth = x[1]
        self.tau = x[2]
        self.nu = x[3]
        return x[4:]
class ApproximateSlabProjection:
    """Approximate central projection through a parallel refractive slab."""

    def __init__(self, elevation=0.0, azimuth=0.0, tau=0.01, nu=1.0):
        """Initializes the projection.

        elevation -- elevation of the slab normal vector
        azimuth   -- azimuth of the slab normal vector
        tau       -- thickness of the slab
        nu        -- refraction index
        """
        self.elevation = elevation
        self.azimuth = azimuth
        self.tau = tau
        self.nu = nu

    def __call__(self, point):
        """Projects a point through the slab by using the approximate projection.

        point -- space point (3-vector)
        Returns the result of central projection of the refraction-shifted point.
        """
        normal = polar_normal(self.elevation, self.azimuth)
        w = numpy.dot(normal, point)
        w_sq = w * w
        # the orthogonal part is non-negative by construction; clamp numerical noise
        u_sq = max(numpy.dot(point, point) - w_sq, 0.0)
        shift = self.tau * (1.0 - 1.0 / math.sqrt((self.nu * self.nu - 1.0) * (u_sq / w_sq + 1.0) + 1.0))
        return central_projection(point - shift * normal)

    def vectorize(self):
        """Returns a vector representation of the class: [elevation, azimuth, tau, nu]."""
        return [self.elevation, self.azimuth, self.tau, self.nu]

    def unvectorize(self, x):
        """Takes the first four entries of *x* as own parametrization; returns the rest."""
        self.elevation, self.azimuth, self.tau, self.nu = x[0], x[1], x[2], x[3]
        return x[4:]
|
python
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import UserDetails, HospitalDetails, BankDetails, Address, DoantionLogDetails
class UserRegisterForm(UserCreationForm):
    """Sign-up form: username, name and email plus the two password fields."""
    email = forms.EmailField()
    first_name = forms.CharField(label='Name')
    # NOTE(review): last_name is declared but missing from Meta.fields,
    # so it is neither rendered nor saved — confirm whether that is intended.
    last_name = forms.CharField()

    class Meta:
        model = User
        fields = ['username', 'first_name', 'email', 'password1', 'password2']
class User_Details(forms.ModelForm):
    """Donor profile form: contact number, Aadhaar id and blood type."""
    # (value, label) pairs for the eight blood groups; identical on purpose.
    CHOICES = [('A+', 'A+'),
               ('A-', 'A-'),
               ('B+', 'B+'),
               ('B-', 'B-'),
               ('O+', 'O+'),
               ('O-', 'O-'),
               ('AB+', 'AB+'),
               ('AB-', 'AB-')
               ]
    blood_type = forms.ChoiceField(label='Blood Type', choices=CHOICES, required=True)
    contact_no = forms.IntegerField()
    aadhar = forms.CharField(required=True)

    class Meta:
        model = UserDetails
        fields = ['contact_no', 'aadhar', 'blood_type']
class Hospital_Details(forms.ModelForm):
    """Hospital registration form: contact number and MCI registration id."""
    contact_no = forms.IntegerField()
    MICID = forms.CharField(required=True, label='MCI ID (Medical Council of India)')

    class Meta:
        model = HospitalDetails
        fields = ['contact_no', 'MICID']
class Bank_Details(forms.ModelForm):
    """Blood-bank registration form: stock count per blood group plus contact/MCI id."""
    a_positive = forms.IntegerField()
    a_negative = forms.IntegerField()
    b_positive = forms.IntegerField()
    b_negative = forms.IntegerField()
    o_positive = forms.IntegerField()
    o_negative = forms.IntegerField()
    ab_positive = forms.IntegerField()
    ab_negative = forms.IntegerField()
    contact_no = forms.IntegerField()
    MICID = forms.CharField(required=True, label='MCI ID (Medical Council of India)')

    class Meta:
        model = BankDetails
        fields = ['a_positive', 'a_negative', 'b_positive',
                  'b_negative', 'o_positive', 'o_negative', 'ab_positive', 'ab_negative', 'contact_no', 'MICID']
class Address_(forms.ModelForm):
    """Postal address form shared by users, hospitals and banks."""
    address = forms.CharField()
    city = forms.CharField()
    state = forms.CharField()
    zipcode = forms.CharField()

    class Meta:
        model = Address
        fields = ['address', 'city', 'state', 'zipcode']
class Donate(forms.ModelForm):
    """Donation form: amount donated and the donor's blood group."""
    amount = forms.IntegerField()
    # (value, label) pairs for the eight blood groups; identical on purpose.
    CHOICES = [('A+', 'A+'),
               ('A-', 'A-'),
               ('B+', 'B+'),
               ('B-', 'B-'),
               ('O+', 'O+'),
               ('O-', 'O-'),
               ('AB+', 'AB+'),
               ('AB-', 'AB-')
               ]
    blood_type = forms.ChoiceField(label='Blood Type', choices=CHOICES, required=True)

    class Meta:
        # spelling "DoantionLogDetails" follows the model class as declared in models.py
        model = DoantionLogDetails
        fields = ['amount', 'blood_type']
|
python
|
import re
from typing import List, TYPE_CHECKING, Union
from MFramework import (
Interaction,
Application_Command_Option_Type,
Interaction_Type,
Snowflake,
Message,
Interaction_Type,
ChannelID,
RoleID,
UserID,
User,
Guild_Member,
GuildID,
Groups,
)
from .command import Command, commands, aliasList
from .exceptions import MissingPermissions, WrongContext, CommandNotFound, SoftError
if TYPE_CHECKING:
from MFramework import Bot, Context
# Maps annotated parameter types to the attribute name on the incoming
# Message/Interaction payload that supplies the value automatically.
DEFAULTS = {
    ChannelID: "channel_id",
    # NOTE(review): RoleID mapping to "guild_id" looks odd — confirm intent.
    RoleID: "guild_id",
    UserID: "user_id",
    User: "user",
    Guild_Member: "member",
    GuildID: "guild_id",
}
# Fallback "alias" pattern used when there is no guild prefix (e.g. DMs):
# matches the start of any content.
_FIRST_CHAR = re.compile("^")
def get_name(data: Union[Message, Interaction]) -> str:
    """Retrieves the command name from a message or interaction.

    Raises CommandNotFound when a message yields no resolvable name.
    """
    if type(data) is Interaction:
        if data.type is not Interaction_Type.MODAL_SUBMIT:
            return data.data.name
        # modal custom_ids carry the command name before the first "-"
        return data.data.custom_id.split("-", 1)[0]
    # plain message: first token after the alias prefix, de-aliased
    name = get_arguments(data._Client, data)
    name = get_original_cmd(name[0])
    if not name:
        raise CommandNotFound
    return name
def get_arguments(client: "Bot", message: Message) -> List[str]:
    """Split the message content into a list of whitespace-separated arguments.

    The guild's alias prefix (or a match-anything fallback outside guilds) is
    stripped first. Raises SoftError when the prefix does not match.
    """
    if message.guild_id:
        alias = client.cache[message.guild_id].alias
    else:
        alias = _FIRST_CHAR
    if not alias.search(message.content):
        raise SoftError()
    # BUGFIX/cleanup: removed the unreachable kwargs-building prototype that
    # followed this return statement in the original.
    args = alias.split(message.content, 1)[-1].strip()
    return args.split(" ")
def retrieve_command(data: Union[Message, Interaction]) -> Command:
    """Resolve the ``Command`` object for an incoming message or interaction.

    Raises CommandNotFound when no command matches, and WrongContext when the
    command only accepts the other payload type.
    """
    name = get_name(data)
    cmd = commands.get(name)
    if type(data) is Interaction:
        cmd = unnest_interaction(data, None, cmd)
    else:
        # dotted names address nested sub-commands, e.g. "group.sub"
        if "." in name:
            for sub in name.split("."):
                if sub in commands:
                    cmd = commands.get(sub)
                if cmd:
                    cmd = is_nested(None, cmd, sub)
    # if not cmd and type is Interaction:
    #     cmd = components.get(name)
    if not cmd:
        raise CommandNotFound(name)
    if cmd.only_accept and cmd.only_accept is not type(data):
        raise WrongContext(type(data), cmd.only_accept)
    return cmd
def unnest_interaction(interaction: Interaction, group: Groups, cmd: Command):
    """Walk sub-command(-group) options recursively and return the nested command.

    Also advances ``interaction.data.options`` to the innermost option list.
    """
    if len(interaction.data.options) and interaction.data.options[0].type in {
        Application_Command_Option_Type.SUB_COMMAND_GROUP,
        Application_Command_Option_Type.SUB_COMMAND,
    }:
        cmd = is_nested(group, cmd, interaction.data.options[0].name)
        interaction.data.options = interaction.data.options[0].options
        return unnest_interaction(interaction, group, cmd)
    return cmd
def get_original_cmd(_name: str) -> str:
    """Resolve a (possibly aliased) command name to its canonical name."""
    lowered = _name.lower()
    return aliasList.get(lowered, _name)
def set_context(client: "Bot", cmd: Command, data: Union[Message, Interaction]) -> "Context":
    """Sets Context for the command invocation.

    Raises MissingPermissions when the caller's group is not allowed to use *cmd*.
    """
    ctx: "Context" = client._Context(client.cache, client, data, cmd=cmd)
    if not ctx.permission_group.can_use(cmd.group):
        raise MissingPermissions(ctx.permission_group, cmd.group)
    return ctx
def detect_group(Client: "Bot", user_id: Snowflake, guild_id: Snowflake, roles: Snowflake) -> Groups:
    """Determine the permission group of a user within a guild.

    The hard-coded id is the bot operator (SYSTEM); the guild owner maps to
    OWNER; everyone else is resolved from their cached roles.
    """
    if user_id == 273499695186444289:
        return Groups.SYSTEM
    if user_id == Client.cache[guild_id].guild.owner_id:
        return Groups.OWNER
    return Client.cache[guild_id].cachedRoles(roles)
def is_nested(group: Groups, command: Command, name: str) -> Command:
    """Search *command*'s sub-commands (recursively) for *name*.

    Returns the matching sub-command, or *command* itself when nothing matches.
    """
    target = name.lower()
    for candidate in command.sub_commands:
        if candidate.name.lower() == target:
            return candidate
        if candidate.sub_commands != []:
            deeper = is_nested(group, candidate, name)
            if deeper != candidate:
                return deeper
    return command
|
python
|
from scipy.optimize import linprog
import numpy as np
import pandas as pd
class OptimizationFailedError(Exception):
pass
def findTaxaAGSVec(proportions, sampleAGS, taxaBounds=True):
    """Estimate per-taxon average genome sizes by linear programming.

    proportions -- (nsamples, ntaxa) array of taxa proportions per sample
    sampleAGS   -- length-nsamples vector of sample average genome sizes
    taxaBounds  -- constrain each taxon AGS to [1e6, 1e7] when True

    Minimizes the total L1 deviation |proportions @ x - sampleAGS| using one
    slack variable per sample. Returns the length-ntaxa AGS vector.
    Raises OptimizationFailedError when the LP does not solve.
    """
    nsamples, ntaxa = proportions.shape
    b = np.concatenate([sampleAGS, -1 * sampleAGS])
    if taxaBounds:
        taxaMax = 10 * 1000 * 1000
        taxaMin = 1000 * 1000
        b = np.concatenate([b,
                            -1 * taxaMin * np.ones(ntaxa),
                            taxaMax * np.ones(ntaxa)])
    # objective: minimize the sum of the slack variables (last nsamples entries)
    c = np.concatenate([np.zeros(ntaxa), np.ones(nsamples)])
    # np.block replaces the deprecated np.bmat (which returns np.matrix)
    A = np.block([[proportions, -1 * np.identity(nsamples)],
                  [-1 * proportions, -1 * np.identity(nsamples)]])
    if taxaBounds:
        A = np.block([[A],
                      [-1 * np.identity(ntaxa), np.zeros((ntaxa, nsamples))],
                      [np.identity(ntaxa), np.zeros((ntaxa, nsamples))]])
    # BUGFIX: 'interior-point' was removed from SciPy; 'highs' is the supported solver.
    res = linprog(c, A_ub=A, b_ub=b, method='highs')
    if not res.success:
        # use the solver's own message: indexing a fixed list by res.status
        # breaks for the additional HiGHS status codes
        raise OptimizationFailedError(res.message)
    taxaVec = res.x[:ntaxa]
    return taxaVec
def normalizeTaxa(proportions, sampleAGS, taxaBounds=False):
    """Normalize a taxa-proportions DataFrame by the fitted per-taxon AGS.

    proportions -- DataFrame (samples x taxa)
    sampleAGS   -- per-sample average genome sizes
    taxaBounds  -- forwarded to findTaxaAGSVec

    Returns (normalized DataFrame, taxaVec Series indexed by taxon).
    """
    taxaVec = findTaxaAGSVec(proportions, sampleAGS, taxaBounds=taxaBounds)
    # column-wise division: each taxon's proportions are scaled by its AGS
    normed = proportions / taxaVec
    taxaVec = pd.Series(taxaVec, index=proportions.columns)
    normed = pd.DataFrame(normed, index=proportions.index, columns=proportions.columns)
    return normed, taxaVec
|
python
|
# coding: utf-8
# In[ ]:
from Bio.Blast.Applications import NcbiblastpCommandline
from Bio.Blast import NCBIXML
import pandas as pd
import os
# In[ ]:
# Input lists pairing mesophilic hits with thermophilic queries (same order).
meso_file = "best_hit_org/hit_meso.csv"
thermal_file = "best_hit_org/query_thermal.csv"
# Per-organism protein tables (tab-separated CSVs).
meso_fold = "meso_protein/"
thermal_fold = "thermal_protein/"
# Generated FASTA files and BLASTP outputs.
meso_fst_fold = "meso_fasta/"
thermal_fst_fold = "thermal_fasta/"
blast_dir = "blastp_result/"
# In[ ]:
# Create output directories on first run.
if not os.path.exists(thermal_fst_fold):
    os.mkdir(thermal_fst_fold)
if not os.path.exists(meso_fst_fold):
    os.mkdir(meso_fst_fold)
if not os.path.exists(blast_dir):
    os.mkdir(blast_dir)
# In[ ]:
def read_txt(fname):
    """Read a text file and return its lines with trailing whitespace stripped."""
    with open(fname, 'r') as handle:
        return [entry.rstrip() for entry in handle]
# In[ ]:
# Load the paired organism lists; meso[i] corresponds to thermal[i].
meso = read_txt(meso_file)
thermal = read_txt(thermal_file)
# In[ ]:
def csv_to_fasta(file, to_file):
    """Convert a tab-separated protein table into a FASTA file.

    file    -- source table; column 3 (0-based) is the record id and the last
               column is the sequence (its trailing newline is reused as-is)
    to_file -- destination FASTA path (overwritten)
    """
    # BUGFIX: the original leaked both file handles; use context managers and
    # stream line by line instead of loading the whole file into memory.
    with open(file, 'r') as src, open(to_file, 'w') as fst:
        for line in src:
            lines = line.split("\t")
            fst.write(f">{lines[3]}\n{lines[-1]}")
# In[ ]:
# Run a pairwise BLASTP for each meso/thermal organism pair and summarize hits.
for i in range(len(meso)):
    if i == 0:
        print("Start:")
    elif i % 10 == 0:
        # BUGFIX: the original added a float to a str (TypeError); format instead.
        print(f"{i / len(meso) * 100}% Completed!")
    meso_pro = meso_fold + meso[i] + ".csv"
    thermal_pro = thermal_fold + thermal[i] + ".csv"
    meso_fst = meso_fst_fold + meso[i] + ".fasta"
    thermal_fst = thermal_fst_fold + thermal[i] + ".fasta"
    # skip pairs with missing inputs or already-generated FASTA files
    if os.path.isfile(meso_pro) and os.path.isfile(thermal_pro) and not os.path.exists(meso_fst) and not os.path.exists(thermal_fst):
        csv_to_fasta(meso_pro, meso_fst)
        csv_to_fasta(thermal_pro, thermal_fst)
        # protein blast: thermal proteins as query, meso proteins as subject
        filename = meso[i]+"-"+thermal[i]
        NcbiblastpCommandline(query=thermal_fst, subject=meso_fst, outfmt=5, out=blast_dir+filename+".xml")()[0]
        blast_records = NCBIXML.parse(open(blast_dir+filename+".xml", "r"))
        with open(blast_dir+filename+".csv", 'w') as f:
            f.write("query_seq,hit_seq,hit_len,identity,score,evalue\n")
            for blast_record in blast_records:
                for alignment in blast_record.alignments:
                    for hsp in alignment.hsps:
                        # NOTE(review): only gapped HSPs (gaps != 0) are written —
                        # confirm this filter is intended (ungapped hits are skipped).
                        if hsp.gaps != 0:
                            f.write(f"{blast_record.query},{alignment.hit_def},{hsp.align_length},{hsp.identities},{hsp.score},{hsp.expect}\n")
|
python
|
# coding: utf-8
from .base import Base
from .config_auth import ConfigAuth
from .config import Config
from .user import User
from .song import Song
from .feedback import Feedback
|
python
|
# Standard
import sys
# Dependencies
import xdg.BaseDirectory as xdg
class Config_File_Parse:
    """Parse a simple 'key: value' configuration file into a dictionary."""

    def __init__(self, project_name, file_name):
        """Build a dictionary of configuration keys and values from a text file.

        Lines starting with '#' (comments) and blank lines are skipped. All
        other lines must be formatted as 'key: value', where the key is one
        word and the value may be a full string.
        """
        self.config_dict = {}
        # BUGFIX: the original read ``self.in_file`` without ever assigning it,
        # so every instantiation crashed with AttributeError.
        # NOTE(review): ``project_name`` and the xdg import suggest the file was
        # meant to be located under an XDG config directory — confirm intent;
        # for now ``file_name`` is used as given.
        self.in_file = file_name
        try:
            with open(self.in_file) as a_file:
                for line in a_file:
                    if line[0] != '#' and line != '\n':
                        key, value = line.split(maxsplit=1)
                    else:
                        continue
                    # strip the trailing ':' from the key and the newline from the value
                    self.config_dict[key[:-1]] = str(value.rstrip('\n'))
        except FileNotFoundError:
            print("Configuration file or directory could not be found")
            sys.exit(1)

    def get_info(self):
        """Return the parsed configuration dictionary."""
        return self.config_dict

    def just_print(self):
        """Almost self-explanatory, it just prints the dictionary itself. Not
        quite sure if it is useful, but may be a good debugging tool.
        """
        print(self.config_dict)

    def pretty_print(self):
        """Print a somewhat formatted and visually pleasing output of all
        variables found within the config file on a new line.
        """
        print("Configuration Dictionary")
        print("------------------------\n")
        for key in self.config_dict:
            print("Key: %s\tValue: %s" % (key, self.config_dict[key]))
|
python
|
import pytoml as toml
class TomlConfig(object):
    """Thin wrapper around pytoml's load/dump helpers."""

    def __init__(self, param={}):
        # ``param`` is accepted for interface compatibility but is unused.
        pass

    def load_file(self, fn):
        """Parse TOML from the file at path *fn*."""
        with open(fn, 'r') as f:
            return toml.load(f)

    def load_str(self, s):
        """Parse TOML from a string."""
        return toml.loads(s)

    def dump_file(self, data, fn):
        """Serialize *data* as TOML into the file at path *fn*.

        BUGFIX: pytoml's signature is ``toml.dump(obj, fout)``; the original
        passed the file object first, serializing the handle instead of the data.
        """
        with open(fn, 'w') as f:
            toml.dump(data, f)

    def dump_str(self, data):
        """Serialize *data* to a TOML string."""
        return toml.dumps(data)
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sonnet as snt
import tensorflow as tf
from ..direction import Direction
from ..naive_effect import ShortEffectLayer
from ..short_board.black_piece import select_black_gi
__author__ = 'Yasuhiro'
__date__ = '2018/2/19'
class BlackGiEffectLayer(snt.AbstractModule):
    """Computes the squares covered ("effects") by black silver generals (gi).

    One boolean effect map is produced per move direction of the silver
    general, masked by the squares that are actually available.
    """

    def __init__(self, data_format, use_cudnn=True, name='black_gi_effect'):
        # data_format -- tensor layout forwarded to the effect layers (e.g. NCHW/NHWC)
        # use_cudnn   -- forwarded to ShortEffectLayer
        super().__init__(name=name)
        self.data_format = data_format
        self.use_cudnn = use_cudnn

    def _build(self, pinned_board, available_square):
        """Return {direction: effect tensor} for each silver-general direction."""
        # the five legal silver-general directions (no sideways/backward step)
        gi_directions = (Direction.RIGHT_UP, Direction.RIGHT_DOWN,
                         Direction.UP, Direction.LEFT_UP, Direction.LEFT_DOWN)
        outputs = {
            direction: self._make_effect(
                pinned_board=pinned_board, available_square=available_square,
                direction=direction
            ) for direction in gi_directions
        }
        return outputs

    def _make_effect(self, pinned_board, available_square, direction):
        """Effect map for one direction: select the pieces, project the step, mask."""
        selected = select_black_gi(board=pinned_board, direction=direction)
        effect = ShortEffectLayer(
            direction=direction, data_format=self.data_format,
            use_cudnn=self.use_cudnn
        )(selected)
        # only squares that are available can actually be covered
        return tf.logical_and(effect, available_square)
|
python
|
# Read the three box dimensions from stdin, one integer per line.
length = int(input())
width = int(input())
height = int(input())

# Total length of all 12 edges: four edges per dimension.
lengths_edges = 4 * (length + width + height)
# Surface area: two faces for each of the three dimension pairs.
area = 2 * (length * width + width * height + length * height)
# Volume of the box.
volume = length * width * height

print(lengths_edges)
print(area)
print(volume)
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.