'''
See the images in this folder.
F: Forget gate -> which previous information is discarded
I: Input gate -> which new information is important
O: Output gate -> which information from the cell state is emitted as the output
C: Candidate state -> which new information is added to the internal cell state (c)
h: Hidden state -> output of the LSTM (Long Short-Term Memory) at the current time step
Below: the Keras implementation and a from-scratch implementation.
'''
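# For reference, the standard LSTM update equations that forward_step below implements
# (notation only; W/U/b are the input weights, recurrent weights and bias):
#   i_t = sigmoid(x_t @ W_i + h_{t-1} @ U_i + b_i)
#   f_t = sigmoid(x_t @ W_f + h_{t-1} @ U_f + b_f)
#   c~_t = tanh(x_t @ W_c + h_{t-1} @ U_c + b_c)
#   o_t = sigmoid(x_t @ W_o + h_{t-1} @ U_o + b_o)
#   c_t = f_t * c_{t-1} + i_t * c~_t
#   h_t = o_t * tanh(c_t)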
import random
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.activations import *
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
def tanh(x):
return np.tanh(x)
class LSTMInference:
def __init__(self, lstm_layer, return_sequences=False):
self.return_sequences = return_sequences
self.lstm_layer = lstm_layer
self.W, self.U, self.b = self.lstm_layer.get_weights()
self.units = self.b.shape[0] // 4
self.W_i = self.W[:, :self.units]
self.W_f = self.W[:, self.units: self.units * 2]
self.W_c = self.W[:, self.units * 2: self.units * 3]
self.W_o = self.W[:, self.units * 3:]
self.U_i = self.U[:, :self.units]
self.U_f = self.U[:, self.units: self.units * 2]
self.U_c = self.U[:, self.units * 2: self.units * 3]
self.U_o = self.U[:, self.units * 3:]
self.b_i = self.b[: self.units]
self.b_f = self.b[self.units: self.units * 2]
self.b_c = self.b[self.units * 2: self.units * 3]
self.b_o = self.b[self.units * 3:]
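# Note: Keras stores the four gates concatenated along the last axis of the kernel,
# recurrent kernel and bias in the order [input, forget, candidate, output],
# which is why the slices above are taken in that order.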
# override the __call__ magic method
def __call__(self, x):
# output shape (num_timesteps, units)
if self.return_sequences:
self.time_steps = x.shape[0]
self.h = np.zeros((self.time_steps, self.units))
# output shape (units)
else:
self.h = np.zeros((self.units))
h_t = np.zeros((1, self.units))
c_t = np.zeros((1, self.units))
for t, x_t in enumerate(x):
x_t = x_t.reshape(1, -1) # (2) => (1, 2)
c_t, h_t = self.forward_step(x_t, c_t, h_t)
if self.return_sequences:
self.h[t] = h_t
else:
self.h = h_t
return self.h
# computation of a single LSTM forward step
def forward_step(self, x_t, c_t, h_t):
i_t = sigmoid(np.matmul(x_t, self.W_i) + np.matmul(h_t, self.U_i) + self.b_i)
f_t = sigmoid(np.matmul(x_t, self.W_f) + np.matmul(h_t, self.U_f) + self.b_f)
c_tilde = tanh(np.matmul(x_t, self.W_c) + np.matmul(h_t, self.U_c) + self.b_c)
o_t = sigmoid(np.matmul(x_t, self.W_o) + np.matmul(h_t, self.U_o) + self.b_o)
c_t = f_t * c_t + i_t * c_tilde
h_t = o_t * tanh(c_t)
return c_t, h_t
# data set shape = (num_samples, num_timesteps, num_features)
# input shape = (num_timesteps, num_features)
# If return_sequences == True:
# output shape = (num_timesteps, units)
# Else:
# output shape = (1, units)
x = np.random.normal(size=(1, 3, 2))
units = 4
return_sequences = True
# num_features = 2
# units = 4
# h_t shape = (1, 4), (1, units)
# W shape = (2, 4), (num_features, units)
# U shape = (4, 4), (units, units)
# b shape = (4,), (units,)
#
# matmul(x_t, W): (1, 2) @ (2, 4) => (1, 4)
# matmul(h_t, U): (1, 4) @ (4, 4) => (1, 4)
# + b (broadcast): (1, 4) + (4,) => (1, 4)
# Keras Implementation
model = Sequential()
model.add(LSTM(units=units, return_sequences=return_sequences, input_shape=x.shape[1:]))
model.compile(loss="mse", optimizer="Adam")
#model.summary()
# Implementation without Keras
rnn = LSTMInference(lstm_layer=model.layers[0], return_sequences=return_sequences)
output_rnn_own = rnn(x[0]) # 10.5: invokes the __call__ method
print(output_rnn_own)
print("\n\n")
output_rnn_tf = model.predict(x[[0]])
print(output_rnn_tf) # 10.5
assert np.all(np.isclose(output_rnn_own - output_rnn_tf, 0.0, atol=1e-06))
|
python
|
# stdlib
import os
import zipfile
from typing import Type, Union
# 3rd party
import handy_archives
import pytest
import remotezip
from apeye import URL
from coincidence.params import param
from coincidence.regressions import AdvancedDataRegressionFixture, AdvancedFileRegressionFixture
from domdf_python_tools.paths import PathPlus
from packaging.version import Version
from shippinglabel.checksum import get_sha256_hash
# this package
from remote_wheel import RemoteWheelDistribution, RemoteZipFile
wheel_urls = PathPlus(__file__).parent.joinpath("wheel_urls.json").load_json()
wheels = pytest.mark.parametrize("url", [param(w[2], id=f"{w[0]}-{w[1]}") for w in wheel_urls])
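# Each entry in wheel_urls.json appears to be a [name, version, url] triple:
# w[0] and w[1] build the test id, w[2] is the wheel URL passed to the test.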
url_type = pytest.mark.parametrize(
"url_type",
[param(str, id="str"), param(URL, id="URL")],
)
class TestRemoteWheelDistribution:
@url_type
@wheels
def test_distribution(
self,
url: str,
url_type: Type[Union[str, URL]],
advanced_data_regression: AdvancedDataRegressionFixture,
):
wd = RemoteWheelDistribution.from_url(url_type(url))
advanced_data_regression.check({
"name": wd.name,
"url": wd.url,
"repr": repr(wd),
"version": str(wd.version),
"wheel": list(wd.get_wheel().items()),
"metadata": list(wd.get_metadata().items()),
"entry_points": wd.get_entry_points(),
"has_license": wd.has_file("LICENSE"),
})
assert isinstance(wd.wheel_zip, zipfile.ZipFile)
assert isinstance(wd.wheel_zip, handy_archives.ZipFile)
assert isinstance(wd.wheel_zip, RemoteZipFile)
assert isinstance(wd.wheel_zip, remotezip.RemoteZip)
@wheels
def test_get_record(self, url: str):
distro = RemoteWheelDistribution.from_url(url)
record = distro.get_record()
assert record is not None
assert len(record) # pylint: disable=len-as-condition
for file in record:
if file.hash is None:
assert file.name == "RECORD"
else:
with distro.wheel_zip.open(os.fspath(file)) as fp:
assert get_sha256_hash(fp).hexdigest() == file.hash.hexdigest()
if file.size is not None:
assert distro.wheel_zip.getinfo(os.fspath(file)).file_size == file.size
assert file.distro is None
with pytest.raises(ValueError, match="Cannot read files with 'self.distro = None'"):
file.read_bytes()
def test_remotezip(self, advanced_file_regression: AdvancedFileRegressionFixture):
wd = RemoteWheelDistribution.from_url(
"https://files.pythonhosted.org/packages/94/e2/"
"0a5630e43ca0b21ca891ec3a697bdb98a25663e27ebd1079ab55e8c68e72/"
"domdf_python_tools-2.9.1-py3-none-any.whl"
"#sha256=ad1058fa0769a68808c2ed44909222508edf6f26ec3a36f91f86b6d654c58474",
)
assert isinstance(wd.wheel_zip, zipfile.ZipFile)
assert isinstance(wd.wheel_zip, handy_archives.ZipFile)
assert isinstance(wd.wheel_zip, RemoteZipFile)
advanced_file_regression.check(
wd.wheel_zip.read("domdf_python_tools/__init__.py").decode("UTF-8"), extension="._py"
)
with wd:
advanced_file_regression.check(
wd.wheel_zip.read("domdf_python_tools/__init__.py").decode("UTF-8"), extension="._py"
)
assert wd.wheel_zip.fp is None
def test_remotezip_github_pages(self, advanced_file_regression: AdvancedFileRegressionFixture):
wd = RemoteWheelDistribution.from_url(
"https://repo-helper.uk/simple503/pydash/pydash-5.0.0-py3-none-any.whl"
"#sha256=0d87f879a3df4ad9389ab6d63c69eea078517d41541ddd5744cfcff3396e8543",
)
assert isinstance(wd.wheel_zip, zipfile.ZipFile)
assert isinstance(wd.wheel_zip, handy_archives.ZipFile)
assert isinstance(wd.wheel_zip, RemoteZipFile)
advanced_file_regression.check(wd.wheel_zip.read("pydash/__init__.py").decode("UTF-8"), extension="._py")
assert isinstance(wd, RemoteWheelDistribution)
with wd:
advanced_file_regression.check(
wd.wheel_zip.read("pydash/__init__.py").decode("UTF-8"), extension="._py"
)
assert wd.wheel_zip.fp is None
def test_remotezip_ionos(self):
with pytest.raises(
remotezip.RangeNotSupported,
match="The server at remote-wheel-test.repo-helper.uk doesn't support range requests",
):
wd = RemoteWheelDistribution.from_url(
"http://remote-wheel-test.repo-helper.uk/pydash-5.0.0-py3-none-any.whl",
)
def test_remotezip_auth(self, advanced_file_regression: AdvancedFileRegressionFixture):
url = "http://remote-wheel-test.repo-helper.uk/toml-0.10.2-py2.py3-none-any.whl"
wheel_zip = RemoteZipFile(url, initial_buffer_size=100, auth=("user", "password"))
wd = RemoteWheelDistribution("toml", Version("0.10.2"), url, wheel_zip)
assert isinstance(wd.wheel_zip, zipfile.ZipFile)
assert isinstance(wd.wheel_zip, handy_archives.ZipFile)
assert isinstance(wd.wheel_zip, RemoteZipFile)
advanced_file_regression.check(wd.wheel_zip.read("toml/__init__.py").decode("UTF-8"), extension="._py")
assert isinstance(wd, RemoteWheelDistribution)
with wd:
advanced_file_regression.check(wd.wheel_zip.read("toml/__init__.py").decode("UTF-8"), extension="._py")
assert wd.wheel_zip.fp is None
# Again to check the auth requirement works
with pytest.raises(remotezip.RemoteIOError, match=f"^401 Client Error: Unauthorized for url: {url}$"):
RemoteZipFile(url, initial_buffer_size=100)
|
python
|
class Postprocessor:
pass
|
python
|
from .test_task import TestEnv
# Robot Import
from .agents.stretch import Stretch
from .agents.pr2 import PR2
# Human Import
from .agents.human import Human
from .agents import human
# Robot Configuration
robot_arm = 'left'
# Human Configuration
human_controllable_joint_indices = human.right_arm_joints
class TestPR2Env(TestEnv):
def __init__(self):
super(TestPR2Env, self).__init__(robot=PR2(robot_arm), human=Human(human_controllable_joint_indices, controllable=False))
class TestStretchEnv(TestEnv):
def __init__(self):
super(TestStretchEnv, self).__init__(robot=Stretch('wheel_' + robot_arm), human=None)
|
python
|
from typing_extensions import Protocol
class HasStr(Protocol):
def __str__(self) -> str:
...
|
python
|
# This entry point is intended to be used to start the backend at a terminal for debugging purposes.
from backend import app
app.main()
|
python
|
"""
ASDF tags for geometry related models.
"""
from asdf_astropy.converters.transform.core import TransformConverterBase
__all__ = ['DirectionCosinesConverter', 'SphericalCartesianConverter']
class DirectionCosinesConverter(TransformConverterBase):
tags = ["tag:stsci.edu:gwcs/direction_cosines-*"]
types = ["gwcs.geometry.ToDirectionCosines",
"gwcs.geometry.FromDirectionCosines"]
def from_yaml_tree_transform(self, node, tag, ctx):
from ..geometry import ToDirectionCosines, FromDirectionCosines
transform_type = node['transform_type']
if transform_type == 'to_direction_cosines':
return ToDirectionCosines()
elif transform_type == 'from_direction_cosines':
return FromDirectionCosines()
else:
raise TypeError(f"Unknown model_type {transform_type}")
def to_yaml_tree_transform(self, model, tag, ctx):
from ..geometry import ToDirectionCosines, FromDirectionCosines
if isinstance(model, FromDirectionCosines):
transform_type = 'from_direction_cosines'
elif isinstance(model, ToDirectionCosines):
transform_type = 'to_direction_cosines'
else:
raise TypeError(f"Model of type {model.__class__} is not supported.")
node = {'transform_type': transform_type}
return node
class SphericalCartesianConverter(TransformConverterBase):
tags = ["tag:stsci.edu:gwcs/spherical_cartesian-*"]
types = ["gwcs.geometry.SphericalToCartesian",
"gwcs.geometry.CartesianToSpherical"]
def from_yaml_tree_transform(self, node, tag, ctx):
from ..geometry import SphericalToCartesian, CartesianToSpherical
transform_type = node['transform_type']
wrap_lon_at = node['wrap_lon_at']
if transform_type == 'spherical_to_cartesian':
return SphericalToCartesian(wrap_lon_at=wrap_lon_at)
elif transform_type == 'cartesian_to_spherical':
return CartesianToSpherical(wrap_lon_at=wrap_lon_at)
else:
raise TypeError(f"Unknown model_type {transform_type}")
def to_yaml_tree_transform(self, model, tag, ctx):
from ..geometry import SphericalToCartesian, CartesianToSpherical
if isinstance(model, SphericalToCartesian):
transform_type = 'spherical_to_cartesian'
elif isinstance(model, CartesianToSpherical):
transform_type = 'cartesian_to_spherical'
else:
raise TypeError(f"Model of type {model.__class__} is not supported.")
node = {
'transform_type': transform_type,
'wrap_lon_at': model.wrap_lon_at
}
return node
|
python
|
import asyncio
import base64
import json
import os
from dataclasses import dataclass
from datetime import datetime, timezone
from email.message import EmailMessage
from enum import Enum
from io import BytesIO
from typing import List, Optional
from uuid import uuid4
import pytest
from aiohttp import ClientSession, ClientTimeout
from aiohttp.test_utils import TestClient, teardown_test_loop
from aioredis import create_redis
from arq import ArqRedis, Worker
from arq.connections import RedisSettings
from atoolbox.db.helpers import DummyPgPool
from atoolbox.test_utils import DummyServer, create_dummy_server
from buildpg import Values
from cryptography.fernet import Fernet
from PIL import Image, ImageDraw
from yarl import URL
from em2.auth.utils import mk_password
from em2.background import push_multiple
from em2.core import Action, Connections, apply_actions, generate_conv_key
from em2.main import create_app
from em2.protocol.core import get_signing_key
from em2.protocol.smtp import LogSmtpHandler, SesSmtpHandler
from em2.settings import Settings
from em2.utils.web import MakeUrl
from em2.worker import worker_settings
from . import dummy_server
from .resolver import TestDNSResolver
commit_transactions = 'KEEP_DB' in os.environ
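# When KEEP_DB is set in the environment, the per-test transactions below are
# committed instead of rolled back, so the test database contents are kept for inspection.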
@pytest.fixture(scope='session', name='settings_session')
def _fix_settings_session():
pg_db = 'em2_test'
redis_db = 2
test_worker = os.getenv('PYTEST_XDIST_WORKER')
if test_worker:
worker_id = int(test_worker.replace('gw', ''))
redis_db = worker_id + 2
if worker_id:
pg_db = f'em2_test_{worker_id}'
return Settings(
testing=True,
pg_dsn=f'postgres://postgres@localhost:5432/{pg_db}',
redis_settings=f'redis://localhost:6379/{redis_db}',
bcrypt_work_factor=6,
max_request_size=1024 ** 2,
aws_access_key='testing_access_key',
aws_secret_key='testing_secret_key',
ses_url_token='testing',
aws_sns_signing_host='localhost',
aws_sns_signing_schema='http',
internal_auth_key='testing' * 6,
auth_key=Fernet.generate_key(),
s3_temp_bucket='s3_temp_bucket.example.com',
s3_file_bucket='s3_files_bucket.example.com',
s3_cache_bucket='s3_cache_bucket.example.com',
max_ref_image_size=666,
max_ref_image_count=10,
vapid_private_key=(
'MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgvGPhHfTSfxCod+wT'
'zLuyK8KWjPGGvKJKJjzBGSF47YuhRANCAAQJNQfHBSOe5nI5fmUcwTFw3ckqXXvR'
'F632vcMyB9RxPMaxicdqPiLg45GIk9oeEtm1kQjHQe7ikWxPFAm7uxkB'
),
vapid_sub_email='[email protected]',
signing_secret_key=b'4' * 64,
max_em2_file_size=500,
)
@pytest.fixture(name='dummy_server')
async def _fix_dummy_server(loop, aiohttp_server):
ctx = {'smtp': [], 's3_files': {}, 'webpush': [], 'em2push': [], 'em2_follower_push': []}
return await create_dummy_server(aiohttp_server, extra_routes=dummy_server.routes, extra_context=ctx)
replaced_url_fields = 'grecaptcha_url', 'ses_endpoint_url', 's3_endpoint_url'
@pytest.fixture(name='settings')
def _fix_settings(dummy_server: DummyServer, tmpdir, settings_session):
update = {f: f'{dummy_server.server_name}/{f}/' for f in replaced_url_fields}
return settings_session.copy(update=update)
@pytest.fixture(scope='session', name='main_db_create')
def _fix_main_db_create(settings_session):
# loop fixture has function scope so can't be used here.
from atoolbox.db import prepare_database
loop = asyncio.new_event_loop()
loop.run_until_complete(prepare_database(settings_session, True))
teardown_test_loop(loop)
@pytest.fixture(name='db_conn')
async def _fix_db_conn(loop, settings, main_db_create):
from buildpg import asyncpg
conn = await asyncpg.connect_b(dsn=settings.pg_dsn, loop=loop)
tr = conn.transaction()
await tr.start()
yield DummyPgPool(conn)
if commit_transactions:
await tr.commit()
else:
await tr.rollback()
await conn.close()
@pytest.fixture(name='conns')
def _fix_conns(db_conn, redis, settings):
return Connections(db_conn.as_dummy_conn(), redis, settings)
@pytest.yield_fixture(name='redis')
async def _fix_redis(loop, settings):
addr = settings.redis_settings.host, settings.redis_settings.port
redis = await create_redis(addr, db=settings.redis_settings.database, encoding='utf8', commands_factory=ArqRedis)
await redis.flushdb()
yield redis
redis.close()
await redis.wait_closed()
class UserTestClient(TestClient):
async def post_json(self, path, data=None, *, origin=None, status=200):
if not isinstance(data, (str, bytes)):
data = json.dumps(data)
r = await self.post(
path,
data=data,
headers={
'Content-Type': 'application/json',
'Referer': 'http://localhost:3000/dummy-referer/',
'Origin': origin or 'http://localhost:3000',
},
)
if status:
assert r.status == status, await r.text()
return r
async def get_json(self, path, *, status=200, **kwargs):
r = await self.get(path, **kwargs)
assert r.status == status, await r.text()
return await r.json()
async def get_ndjson(self, path, *, status=200, **kwargs):
r = await self.get(path, **kwargs)
assert r.status == status, await r.text()
assert r.content_type == 'text/plain'
text = await r.text()
return [json.loads(line) for line in text.split('\n') if line]
@pytest.fixture(name='resolver')
def _fix_resolver(dummy_server: DummyServer, loop):
return TestDNSResolver(dummy_server, loop=loop)
@pytest.fixture(name='cli')
async def _fix_cli(settings, db_conn, aiohttp_server, redis, resolver):
app = await create_app(settings=settings)
app['pg'] = db_conn
app['protocol_app']['resolver'] = resolver
server = await aiohttp_server(app)
settings.local_port = server.port
resolver.main_server = server
cli = UserTestClient(server)
yield cli
await cli.close()
def em2_json_default(v):
if isinstance(v, Enum):
return v.value
if isinstance(v, datetime):
return v.isoformat()
raise TypeError(f'unable to serialize {type(v)}: {v!r}')
class Em2TestClient(TestClient):
def __init__(self, *args, settings, dummy_server, factory, **kwargs):
super().__init__(*args, **kwargs)
self._settings: Settings = settings
self._dummy_server: DummyServer = dummy_server
self._factory: Factory = factory
self._url_func = MakeUrl(self.app).get_path
self.signing_key = get_signing_key(self._settings.signing_secret_key)
async def post_json(self, path, data, *, expected_status=200):
if not isinstance(data, (str, bytes)):
data = json.dumps(data, default=em2_json_default)
sign_ts = datetime.utcnow().isoformat()
to_sign = f'POST http://127.0.0.1:{self.server.port}{path} {sign_ts}\n{data}'.encode()
r = await self.post(
path,
data=data,
headers={
'Content-Type': 'application/json',
'Signature': sign_ts + ',' + self.signing_key.sign(to_sign).signature.hex(),
},
)
if expected_status:
assert r.status == expected_status, await r.text()
return r
async def push_actions(self, conv_key, actions, *, em2_node=None, expected_status=200):
em2_node = em2_node or f'localhost:{self._dummy_server.server.port}/em2'
path = self.url('protocol:em2-push', conv=conv_key, query={'node': em2_node})
return await self.post_json(path, data={'actions': actions}, expected_status=expected_status)
async def create_conv(
self,
*,
em2_node=None,
actor='[email protected]',
subject='Test Subject',
recipient='[email protected]',
msg='test message',
expected_status=200,
):
# use [email protected] here so recipient can be changed in test errors
if not await self._factory.conn.fetchval('select 1 from users where email=$1', '[email protected]'):
await self._factory.create_user(email='[email protected]')
ts = datetime(2032, 6, 6, 12, 0, tzinfo=timezone.utc)
conv_key = generate_conv_key(actor, ts, subject)
actions = [
{'id': 1, 'act': 'participant:add', 'ts': ts, 'actor': actor, 'participant': actor},
{'id': 2, 'act': 'participant:add', 'ts': ts, 'actor': actor, 'participant': recipient},
{'id': 3, 'act': 'message:add', 'ts': ts, 'actor': actor, 'body': msg},
{'id': 4, 'act': 'conv:publish', 'ts': ts, 'actor': actor, 'body': subject},
]
return await self.push_actions(conv_key, actions, em2_node=em2_node, expected_status=expected_status)
def url(self, name: str, *, query=None, **kwargs) -> URL:
return self._url_func(name, query=query, **kwargs)
@pytest.fixture(name='em2_cli')
async def _fix_em2_cli(settings, aiohttp_client, cli: UserTestClient, dummy_server, factory):
cli = Em2TestClient(cli.server, settings=settings, dummy_server=dummy_server, factory=factory)
yield cli
await cli.close()
@pytest.fixture(name='url')
def _fix_url(cli: UserTestClient):
return MakeUrl(cli.server.app).get_path
@dataclass
class User:
email: str
first_name: str
last_name: str
password: str
auth_user_id: int
id: Optional[int] = None
session_id: Optional[int] = None
@dataclass
class Conv:
key: str
id: int
class Factory:
def __init__(self, redis, cli, url):
self.redis: ArqRedis = redis
self.cli = cli
self.conn = self.cli.server.app['pg'].as_dummy_conn()
self.conns = Connections(self.conn, self.redis, cli.server.app['settings'])
self.email_index = 1
self.user: User = None
self.conv: Conv = None
self._url = url
async def create_user(self, *, login=True, email=None, first_name='Tes', last_name='Ting', pw='testing') -> User:
if email is None:
email = f'testing-{self.email_index}@example.com'
self.email_index += 1
password_hash = mk_password(pw, self.conns.settings)
auth_user_id = await self.conn.fetchval(
"""
insert into auth_users (email, first_name, last_name, password_hash, account_status)
values ($1, $2, $3, $4, 'active')
on conflict (email) do nothing returning id
""",
email,
first_name,
last_name,
password_hash,
)
if not auth_user_id:
raise RuntimeError(f'user with email {email} already exists')
user_id = None
session_id = None
if login:
r1, r2 = await self.login(email, pw)
obj = await r1.json()
session_id = obj['session']['session_id']
user_id = await self.conn.fetchval('select id from users where email=$1', email)
user = User(email, first_name, last_name, pw, auth_user_id, user_id, session_id)
self.user = self.user or user
return user
async def create_simple_user(
self,
email: str = None,
visibility: str = None,
profile_type: str = None,
main_name: str = 'John',
last_name: str = None,
strap_line: str = None,
image_storage: str = None,
profile_status: str = None,
profile_status_message: str = None,
profile_details: str = None,
):
if email is None:
email = f'testing-{self.email_index}@example.com'
self.email_index += 1
user_id = await self.conn.fetchval_b(
'insert into users (:values__names) values :values on conflict (email) do nothing returning id',
values=Values(
email=email,
visibility=visibility,
profile_type=profile_type,
main_name=main_name,
last_name=last_name,
strap_line=strap_line,
image_storage=image_storage,
profile_status=profile_status,
profile_status_message=profile_status_message,
profile_details=profile_details,
),
)
if not user_id:
raise RuntimeError(f'user with email {email} already exists')
await self.conn.execute(
"""
update users set
vector=setweight(to_tsvector(main_name || ' ' || coalesce(last_name, '')), 'A') ||
setweight(to_tsvector(coalesce(strap_line, '')), 'B') ||
to_tsvector(coalesce(profile_details, ''))
where id=$1
""",
user_id,
)
return user_id
def url(self, name, *, query=None, **kwargs):
if self.user and name.startswith('ui:'):
kwargs.setdefault('session_id', self.user.session_id)
return self._url(name, query=query, **kwargs)
async def login(self, email, password, *, captcha=False):
data = dict(email=email, password=password)
if captcha:
data['grecaptcha_token'] = '__ok__'
r1 = await self.cli.post(
self._url('auth:login'),
data=json.dumps(data),
headers={'Content-Type': 'application/json', 'Origin': 'null'},
)
assert r1.status == 200, await r1.text()
obj = await r1.json()
r2 = await self.cli.post_json(self._url('ui:auth-token'), data={'auth_token': obj['auth_token']})
assert r2.status == 200, await r2.text()
assert len(self.cli.session.cookie_jar) == 1
return r1, r2
async def create_conv(
self, subject='Test Subject', message='Test Message', session_id=None, participants=(), publish=False
) -> Conv:
data = {'subject': subject, 'message': message, 'publish': publish, 'participants': participants}
r = await self.cli.post_json(
self.url('ui:create', session_id=session_id or self.user.session_id), data, status=201
)
conv_key = (await r.json())['key']
conv_id = await self.conn.fetchval('select id from conversations where key=$1', conv_key)
conv = Conv(conv_key, conv_id)
self.conv = self.conv or conv
return conv
async def create_label(self, name='Test Label', *, user_id=None, ordering=None, color=None, description=None):
val = dict(name=name, user_id=user_id or self.user.id, ordering=ordering, color=color, description=description)
return await self.conn.fetchval_b(
'insert into labels (:values__names) values :values returning id',
values=Values(**{k: v for k, v in val.items() if v is not None}),
)
async def act(self, conv_id: int, action: Action) -> List[int]:
key, leader = await self.conns.main.fetchrow('select key, leader_node from conversations where id=$1', conv_id)
interaction_id = uuid4().hex
if leader:
await self.conns.redis.enqueue_job('follower_push_actions', key, leader, interaction_id, [action])
else:
action_ids = await apply_actions(self.conns, conv_id, [action])
if action_ids:
await push_multiple(self.conns, conv_id, action_ids)
return action_ids
async def create_contact(
self,
owner: int,
user_id: int,
*,
profile_type: str = None,
main_name: str = None,
last_name: str = None,
strap_line: str = None,
image_storage: str = None,
**kwargs,
):
val = dict(
owner=owner,
profile_user=user_id,
profile_type=profile_type,
main_name=main_name,
last_name=last_name,
strap_line=strap_line,
image_storage=image_storage,
**kwargs,
)
contact_id = await self.conn.fetchval_b(
'insert into contacts (:values__names) values :values returning id',
values=Values(**{k: v for k, v in val.items() if v is not None}),
)
# TODO update contact search vector
return contact_id
@pytest.fixture(name='factory')
def _fix_factory(redis, cli, url):
return Factory(redis, cli, url)
@pytest.yield_fixture(name='worker_ctx')
async def _fix_worker_ctx(redis, settings, db_conn, dummy_server, resolver):
session = ClientSession(timeout=ClientTimeout(total=10))
ctx = dict(
settings=settings,
pg=db_conn,
client_session=session,
resolver=resolver,
redis=redis,
signing_key=get_signing_key(settings.signing_secret_key),
)
ctx['smtp_handler'] = LogSmtpHandler(ctx)
yield ctx
await session.close()
@pytest.yield_fixture(name='worker')
async def _fix_worker(redis, worker_ctx):
worker = Worker(
functions=worker_settings['functions'], redis_pool=redis, burst=True, poll_delay=0.01, ctx=worker_ctx
)
yield worker
worker.pool = None
await worker.close()
@pytest.yield_fixture(name='ses_worker')
async def _fix_ses_worker(redis, settings, db_conn, resolver):
session = ClientSession(timeout=ClientTimeout(total=10))
ctx = dict(
settings=settings,
pg=db_conn,
client_session=session,
resolver=resolver,
signing_key=get_signing_key(settings.signing_secret_key),
)
ctx.update(smtp_handler=SesSmtpHandler(ctx), conns=Connections(ctx['pg'], redis, settings))
worker = Worker(functions=worker_settings['functions'], redis_pool=redis, burst=True, poll_delay=0.01, ctx=ctx)
yield worker
await ctx['smtp_handler'].shutdown()
worker.pool = None
await worker.close()
await session.close()
@pytest.fixture(name='send_to_remote')
async def _fix_send_to_remote(factory: Factory, worker: Worker, db_conn):
await factory.create_user()
await factory.create_conv(participants=[{'email': '[email protected]'}], publish=True)
assert 4 == await db_conn.fetchval('select count(*) from actions')
await worker.async_run()
assert (worker.jobs_complete, worker.jobs_failed, worker.jobs_retried) == (3, 0, 0)
assert 1 == await db_conn.fetchval('select count(*) from sends')
return await db_conn.fetchrow('select id, ref from sends')
@pytest.fixture(name='sns_data')
def _fix_sns_data(dummy_server: DummyServer, mocker):
def run(message_id, *, mock_verify=True, **message):
if mock_verify:
mocker.patch('em2.protocol.views.smtp_ses.x509.load_pem_x509_certificate')
return {
'Type': 'Notification',
'MessageId': message_id,
'Subject': 'Amazon SES Email Receipt Notification',
'Timestamp': '2032-03-11T18:00:00.000Z',
'TopicArn': 'arn:aws:sns:us-east-1:123:em2-webhook',
'Message': json.dumps(message),
'SigningCertURL': dummy_server.server_name + '/sns_signing_url.pem',
'Signature': base64.b64encode(b'the signature').decode(),
}
return run
@pytest.fixture(name='attachment')
def _fix_attachment():
def run(filename, mime_type, content, headers=None):
attachment = EmailMessage()
for k, v in (headers or {}).items():
attachment[k] = v
maintype, subtype = mime_type.split('/', 1)
kwargs = dict(subtype=subtype, filename=filename)
if maintype != 'text':
# not sure why this is
kwargs['maintype'] = maintype
attachment.set_content(content, **kwargs)
for k, v in (headers or {}).items():
if k in attachment:
attachment.replace_header(k, v)
else:
attachment.add_header(k, v)
return attachment
return run
@pytest.fixture(name='create_email')
def _fix_create_email():
def run(
subject='Test Subject',
e_from='[email protected]',
to=('[email protected]',),
text_body='this is a message.',
html_body='this is an html <b>message</b>.',
message_id='[email protected]',
attachments=(),
headers=None,
):
email_msg = EmailMessage()
if message_id is not None:
email_msg['Message-ID'] = message_id
email_msg['Subject'] = subject
email_msg['From'] = e_from
email_msg['To'] = ','.join(to)
# email.utils.format_datetime(datetime(2032, 1, 1, 12, 0))
email_msg['Date'] = 'Thu, 01 Jan 2032 12:00:00 -0000'
for k, v in (headers or {}).items():
email_msg[k] = v
text_body and email_msg.set_content(text_body)
html_body and email_msg.add_alternative(html_body, subtype='html')
for attachment in attachments:
if email_msg.get_content_type() != 'multipart/mixed':
email_msg.make_mixed()
email_msg.attach(attachment)
return email_msg
return run
@pytest.fixture(name='create_ses_email')
def _fix_create_ses_email(dummy_server, sns_data, create_email):
def run(
*args,
to=('[email protected]',),
key='foobar',
headers=None,
message_id='[email protected]',
receipt_extra=None,
**kwargs,
):
msg = create_email(*args, to=to, message_id=message_id, headers=headers, **kwargs)
dummy_server.app['s3_files'][key] = msg.as_string()
headers = headers or {}
h = [{'name': k, 'value': v} for k, v in headers.items()]
if message_id is not None:
h.append({'name': 'Message-ID', 'value': message_id})
mail = dict(headers=h, commonHeaders={'to': list(to)})
receipt = dict(
action={'type': 'S3', 'bucketName': 'em2-testing', 'objectKeyPrefix': '', 'objectKey': key},
spamVerdict={'status': 'PASS'},
virusVerdict={'status': 'PASS'},
spfVerdict={'status': 'PASS'},
dkimVerdict={'status': 'PASS'},
dmarcVerdict={'status': 'PASS'},
)
receipt.update(receipt_extra or {})
return sns_data(message_id, notificationType='Received', mail=mail, receipt=receipt)
return run
@pytest.fixture(name='create_image')
def _fix_create_image():
def create_image(image_format='JPEG'):
stream = BytesIO()
image = Image.new('RGB', (400, 300), (50, 100, 150))
ImageDraw.Draw(image).polygon([(0, 0), (image.width, 0), (image.width, 100), (0, 100)], fill=(128, 128, 128))
image.save(stream, format=image_format, optimize=True)
return stream.getvalue()
return create_image
@pytest.fixture(name='web_push_sub')
def _fix_web_push_sub(dummy_server):
return {
'endpoint': dummy_server.server_name.replace('localhost', '127.0.0.1') + '/vapid/',
'expirationTime': None,
'keys': {
# generated by code in dummy_server.py
'p256dh': 'BGsX0fLhLEJH-Lzm5WOkQPJ3A32BLeszoPShOUXYmMKWT-NC4v4af5uO5-tKfA-eFivOM1drMV7Oy7ZAaDe_UfU',
'auth': 'x' * 32,
},
}
@pytest.fixture(scope='session', name='alt_settings_session')
def _fix_alt_settings_session(settings_session):
pg_db = 'em2_test_alt'
redis_db = 3
test_worker = os.getenv('PYTEST_XDIST_WORKER')
if test_worker:
worker_id = int(test_worker.replace('gw', ''))
redis_db = worker_id + 8
if worker_id:
pg_db = f'em2_test_alt_{worker_id}'
return settings_session.copy(
update={
'pg_dsn': f'postgres://postgres@localhost:5432/{pg_db}',
'redis_settings': RedisSettings(database=redis_db),
}
)
@pytest.fixture(name='alt_settings')
def _fix_alt_settings(dummy_server: DummyServer, tmpdir, alt_settings_session):
update = {f: f'{dummy_server.server_name}/{f}/' for f in replaced_url_fields}
return alt_settings_session.copy(update=update)
@pytest.fixture(scope='session', name='alt_db_create')
def _fix_alt_db_create(alt_settings_session):
# loop fixture has function scope so can't be used here.
from atoolbox.db import prepare_database
loop = asyncio.new_event_loop()
loop.run_until_complete(prepare_database(alt_settings_session, True))
teardown_test_loop(loop)
@pytest.fixture(name='alt_db_conn')
async def _fix_alt_db_conn(loop, alt_settings, alt_db_create):
from buildpg import asyncpg
conn = await asyncpg.connect_b(dsn=alt_settings.pg_dsn, loop=loop)
tr = conn.transaction()
await tr.start()
yield DummyPgPool(conn)
if commit_transactions:
await tr.commit()
else:
await tr.rollback()
await conn.close()
@pytest.yield_fixture(name='alt_redis')
async def _fix_alt_redis(loop, alt_settings):
addr = alt_settings.redis_settings.host, alt_settings.redis_settings.port
redis = await create_redis(
addr, db=alt_settings.redis_settings.database, encoding='utf8', commands_factory=ArqRedis
)
await redis.flushdb()
yield redis
redis.close()
await redis.wait_closed()
@pytest.fixture(name='alt_conns')
def _fix_alt_conns(alt_db_conn, alt_redis, alt_settings):
return Connections(alt_db_conn, alt_redis, alt_settings)
@pytest.fixture(name='alt_cli')
async def _fix_alt_cli(alt_settings, alt_db_conn, aiohttp_server, alt_redis, resolver: TestDNSResolver):
app = await create_app(settings=alt_settings)
app['pg'] = alt_db_conn
app['protocol_app']['resolver'] = resolver
server = await aiohttp_server(app)
resolver.alt_server = server
alt_settings.local_port = server.port
cli = UserTestClient(server)
yield cli
await cli.close()
@pytest.fixture(name='alt_url')
def _fix_alt_url(alt_cli: UserTestClient):
return MakeUrl(alt_cli.server.app).get_path
@pytest.fixture(name='alt_factory')
async def _fix_alt_factory(alt_redis, alt_cli, alt_url):
return Factory(alt_redis, alt_cli, alt_url)
@pytest.yield_fixture(name='alt_worker_ctx')
async def _fix_alt_worker_ctx(alt_redis, alt_settings, alt_db_conn, resolver):
session = ClientSession(timeout=ClientTimeout(total=10))
ctx = dict(
settings=alt_settings,
pg=alt_db_conn,
client_session=session,
resolver=resolver,
redis=alt_redis,
signing_key=get_signing_key(alt_settings.signing_secret_key),
)
ctx['smtp_handler'] = LogSmtpHandler(ctx)
yield ctx
await session.close()
@pytest.yield_fixture(name='alt_worker')
async def _fix_alt_worker(alt_redis, alt_worker_ctx):
worker = Worker(
functions=worker_settings['functions'], redis_pool=alt_redis, burst=True, poll_delay=0.01, ctx=alt_worker_ctx
)
yield worker
worker.pool = None
await worker.close()
def create_raw_image(width: int = 600, height: int = 600, mode: str = 'RGB') -> Image.Image:
image = Image.new(mode, (width, height), (50, 100, 150))
ImageDraw.Draw(image).line((0, 0) + image.size, fill=128)
return image
def create_image(width: int = 600, height: int = 600, mode: str = 'RGB', format: str = 'JPEG') -> bytes:
image = create_raw_image(width, height, mode)
stream = BytesIO()
image.save(stream, format=format, optimize=True)
return stream.getvalue()
|
python
|
from .alias import *
from .bookmark import *
__all__ = [ 'ALIAS_KIND_FILE', 'ALIAS_KIND_FOLDER',
'ALIAS_HFS_VOLUME_SIGNATURE',
'ALIAS_FIXED_DISK', 'ALIAS_NETWORK_DISK', 'ALIAS_400KB_FLOPPY_DISK',
'ALIAS_800KB_FLOPPY_DISK', 'ALIAS_1_44MB_FLOPPY_DISK',
'ALIAS_EJECTABLE_DISK',
'ALIAS_NO_CNID',
'kBookmarkPath', 'kBookmarkCNIDPath', 'kBookmarkFileProperties',
'kBookmarkFileName', 'kBookmarkFileID', 'kBookmarkFileCreationDate',
'kBookmarkTOCPath', 'kBookmarkVolumePath',
'kBookmarkVolumeURL', 'kBookmarkVolumeName', 'kBookmarkVolumeUUID',
'kBookmarkVolumeSize', 'kBookmarkVolumeCreationDate',
'kBookmarkVolumeProperties', 'kBookmarkContainingFolder',
'kBookmarkUserName', 'kBookmarkUID', 'kBookmarkWasFileReference',
'kBookmarkCreationOptions', 'kBookmarkURLLengths',
'kBookmarkSecurityExtension',
'AppleShareInfo',
'VolumeInfo',
'TargetInfo',
'Alias',
'Bookmark',
'Data',
'URL' ]
|
python
|
from django.apps import apps
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core.management.base import BaseCommand
from django.utils import translation
User = get_user_model()
user_deletion_config = apps.get_app_config('user_deletion')
class Command(BaseCommand):
def handle(self, *args, **options):
translation.activate(settings.LANGUAGE_CODE)
users = User.objects.users_to_delete()
site = Site.objects.get_current()
user_deletion_config.deletion_notification_class(
user=None,
site=site,
users=users,
).notify()
users.delete()
|
python
|
# -*- coding: utf-8 -*-
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class OnTaskConfig(AppConfig):
name = 'ontask'
verbose_name = _('OnTask')
def ready(self):
# Needed so that the signal registration is done
from ontask import signals # noqa
|
python
|
from app import db
class Entity(db.Model):
__tablename__ = 'entities'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), index=True)
description = db.Column(db.Text, index=True)
def __repr__(self):
return "<Entity '{}'>".format(self.name)
class WikipediaSuggest(db.Model):
__tablename__ = 'wikipedia_suggest'
id = db.Column(db.Integer, primary_key=True)
entity_id = db.Column(db.Integer, index=True)
wikipedia_page_id = db.Column(db.BigInteger)
wikipedia_page_title = db.Column(db.String(128))
confirmed = db.Column(db.Boolean, default=False)
entity = db.relationship('Entity', foreign_keys=[entity_id], primaryjoin='Entity.id == WikipediaSuggest.entity_id', backref='wikipedia_suggest', uselist=False, lazy=True)
@property
def wikipedia_url(self):
return "https://en.wikipedia.org/wiki/{}".format(self.wikipedia_page_title)
class EntityMeta(db.Model):
__tablename__ = 'entities_meta'
id = db.Column(db.Integer, primary_key=True)
entity_id = db.Column(db.Integer)
type_ = db.Column(db.String(64), index=True)
description = db.Column(db.Text)
entity = db.relationship('Entity', foreign_keys=[entity_id], primaryjoin='Entity.id == EntityMeta.entity_id', backref='entity_meta', uselist=False, lazy=True)
|
python
|
import wx
from wx.lib.pubsub import Publisher
import select
import socket
import sys
import Queue
import os
from thread import *
from collections import defaultdict
uploadInfos = defaultdict(dict) # For seeders
downloadInfos = defaultdict(dict) # For leechers
pieceRequestQueue = defaultdict(dict) # For leechers
torrentInfo = defaultdict(dict) # For storing torrentInfo for every file running
sizeDownloaded = defaultdict(dict) # total size download corresponding to each file
Downloading = defaultdict(dict) # flag for each file.
pieceStatus = defaultdict(dict)
# inputs = defaultdict(dict)
# outputs = defaultdict(dict)
myGroupID = 0
myGroupList = []
lastPieceStatus = {}
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
PIECE_SIZE = 1024 * 512
BLOCK_SIZE = 4 * 1024
DELIMITER = '|/!@#$%^&*\|'
SEPAERATOR = '|/<>?\~|'
lastBuffered = ''
fd = None
running = True
badaFlag = False
# torrentInfo = {}
pieceBitVector = {}
seeder = True
count = 0
myHost = ""
myPort = 0
inputs = []
outputs = []
numPiecesDownloaded = 0
# Outgoing message queues (socket:Queue)
message_queues = {}
def multicast(index, currentFile, blockNumber, byteData):
blockNumber = 1
for s in myGroupList:
print "Inside multicast = " + str(len(myGroupList))
msg = "MULTICAST" + SEPAERATOR + currentFile + SEPAERATOR + str(index) + SEPAERATOR + str(
blockNumber) + SEPAERATOR + byteData + DELIMITER
message_queues[s].put(msg)
def broadcast(index, currentFile):
print "In broadcast"
try:
print currentFile
bitvector = returnBitVector(currentFile, int(torrentInfo[currentFile]["pieces"]))
bitvector[index - 1] = '1'
except:
print "error in bitvector"
file = "./bitvector/" + currentFile.split('.')[0] + ".vec"
f = open(file, "w")
try:
str1 = stringify(bitvector)
except:
print "error in stringify"
f.write(str1)
for s in downloadInfos[currentFile]:
msg = "BROADCAST" + SEPAERATOR + currentFile + SEPAERATOR + str(index) + DELIMITER
message_queues[s].put(msg)
def getSize(filename):
print "In getSize"
filename = "./" + filename
print "retrieving size of file: " + filename
if (os.path.exists(filename)):
size = os.path.getsize(filename)
else:
size = 0
print "Size is: " + str(size)
# if size < 0:
# import subprocess as s
# size = long( s.Popen("ls -l %s | cut -d ' ' -f5" % filename,
# shell=True, stdout=s.PIPE).communicate()[0] )
return size
def getKey(item):
return item[0]
def returnPeerList(torrentInfo, host, port, currentFile):
connected = False
for tracker in torrentInfo[currentFile]['trackers']:
print tracker
tracker = tracker.strip('\n')
hostTracker, portTracker = tracker.split(':')
server_address = (hostTracker, int(portTracker))
msg = "REQUEST_PEERS-" + host + ":" + str(port) + ",FILE:" + torrentInfo[currentFile]['name']
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print 'connecting to %s port %s' % server_address
try:
s.connect(server_address)
print '%s: sending "%s"' % (s.getsockname(), msg)
s.send(msg)
connected = True
break
except:
print "Unable to Connect"
pass
if connected:
data = s.recv(BLOCK_SIZE)
# print "Received data:" + data
data = data.split('-')
peerList = data[1].split(',')
print peerList
return peerList
else:
return []
def parseTorrentFile(inp):
global trackers, torrentName
currentFile = ""
with open(inp) as f:
for line in f:
info = line.strip(' ')
info = line.strip('\n')
info = line.split('-')
if info[0] == 'name':
currentFile = info[1].split('\n')[0]
with open(inp) as f:
for line in f:
info = line.strip(' ')
info = line.strip('\n')
info = line.split('-')
if info[0] == 'trackers':
torrentInfo[currentFile]['trackers'] = info[1].split(',')
elif info[0] == 'name':
torrentInfo[currentFile]['name'] = info[1]
elif info[0] == 'length':
torrentInfo[currentFile]['length'] = int(info[1])
elif info[0] == 'pieces':
torrentInfo[currentFile]['pieces'] = int(info[1])
else:
print "Torrent File Corrupted\n"
sys.exit(0)
for i in xrange(1, torrentInfo[currentFile]['pieces'] + 1):
pieceStatus[currentFile][i] = 0
return currentFile
def availablePieces():
return 10
def processMsg(data):
global seeder, torrentInfo, uploadInfos, downloadInfos, pieceRequestQueue
response = ''
data = data.split(SEPAERATOR)
Q = Queue.Queue()
header = data[0]
if header == "REQUEST_FILE":
currentPiece = 1
count = 0
pieceFlag = True
f = open(data[1].strip('\n'), "rb")
fileInfo = os.stat(data[1].strip('\n'))
fileSize = fileInfo.st_size
pieces = fileSize / 2
offset = 1 # for 1st seeder and this for 2nd seeder
# offset = (pieces/PIECE_SIZE)*PIECE_SIZE + 1
f.seek(offset)
msg = "OFFSET" + SEPAERATOR + str(offset)
# Q.put(msg)
l = f.read(BLOCK_SIZE)
while (l and pieceFlag):
Q.put(l)
l = f.read(BLOCK_SIZE)
count = count + 1
if (count / 10 == currentPiece):
print "Piece " + str(currentPiece) + " put in queue for senting to leecher"
currentPiece = currentPiece + 1
if (currentPiece == pieces / PIECE_SIZE and offset == 0):
pieceFlag = False
f.close()
response = "Queue"
elif header == "HAVE":
pass
# build the return value after the if/elif chain so ret is defined for every header
ret = (response, Q)
return ret
def handleRecvIO(s, file, length):
global seeder, torrentInfo, uploadInfos, downloadInfos, pieceRequestQueue
count = 1
percent = 100
offset = 0
f = open(file, "wb")
f.seek(offset)
print "Ready to Recieve : " + file
while (count <= length):
part = s.recv(BLOCK_SIZE)
f.write(part)
count = count + 1
if count == length / percent:
print "" + str(percent) + " Percent Remaining"
if percent != 1:
percent = percent - 1
f.close()
print file + " Downloaded Successfully"
# generates the queue
def pieceRequestOrdering(filename, currentFile):
filename = filename.strip("\n")
global seeder, torrentInfo, uploadInfos, downloadInfos, pieceRequestQueue, inputs, outputs
for x in xrange(1, int(torrentInfo[currentFile]["pieces"]) + 1):
pieceRequestQueue[filename][x] = Queue.Queue()
print inputs
tempList = inputs[1:]
for s in tempList:
# print "Hello"
if s in downloadInfos[filename]:
bitvector = downloadInfos[filename][s]
print bitvector
index = 1
pieces = int(torrentInfo[currentFile]["pieces"])
print "length of bitvector is: " + str(len(bitvector))
for i in xrange(0, pieces):
if (bitvector[i] == '1'):
pieceRequestQueue[filename][i + 1].put(s)
else:
print i
# for i in bitvector:
# if i =='1':
# if(index==1):
# print "why????????????????????????"
# pieceRequestQueue[filename][index].put(s)
# index = index + 1
# read from file and send blocks to the requesting peer
def retrieveBytesFromFile(s, filename, index):
global PIECE_SIZE, BLOCK_SIZE
filename = filename.strip('\n')
print filename
offset = 0
try:
fo = open(filename, "r+b")
print "Reading File at index : " + str(index)
fo.seek((index - 1) * PIECE_SIZE, 0)
print "current file position is : " + str(fo.tell())
# print "Name of the file: ", fo.name
# fo.seek((index-1)*PIECE_SIZE, 0)
for blockNumber in xrange(0, PIECE_SIZE / BLOCK_SIZE):
print "In Loop"
byteData = fo.read(BLOCK_SIZE)
if (byteData == ''):
print "byteData is NULL"
break
if (byteData):
data = "HAVE_PIECE" + SEPAERATOR + filename + SEPAERATOR + str(index) + SEPAERATOR + str(
blockNumber) + SEPAERATOR + byteData + DELIMITER
message_queues[s].put(data)
fo.close()
except:
print "Error Handling File "
pass
# handles different messages
def processRecvdMsg(data, s):
global seeder, torrentInfo, uploadInfos, downloadInfos, pieceRequestQueue, badaFlag, fd, pieceStatus, sizeDownloaded, inputs, outputs
# print 'received "%s" from %s' % (data, s.getpeername())
# print "data is: " + data
data = data.strip('\n')
temp = data.split(SEPAERATOR)
# print "temp is: " + ('')temp
try:
currentFile = temp[1]
except:
print "temp[1] out of index"
return
print "In processRecvdMsg"
print currentFile
header = temp[0]
data = ""
if header == "RECEIVE_FILE":
file = temp[1]
length = int(temp[2])
handleRecvIO(s, file, length)
# running = False
pass
elif header == "REQUEST_FILE":
response, Q = processMsg(data)
if response == "Queue":
length = Q.qsize()
msg = "RECEIVE_FILE" + SEPAERATOR + temp[1] + SEPAERATOR + str(length) + SEPAERATOR
message_queues[s].put(msg)
while (not Q.empty()):
message_queues[s].put(Q.get_nowait())
elif header == "HANDSHAKE":
filename = temp[1]
grpID = int(temp[2])
bitvector = temp[3]
if (grpID == myGroupID):
myGroupList.append(s)
pieces = len(bitvector)
# uploadInfos[filename][s.getpeername()] = bitvector
uploadInfos[filename][s] = bitvector
print "In Handshake"
print filename
print "after Handshake"
bitvector = returnBitVector(filename, pieces)
data = "REPLY_HANDSHAKE" + SEPAERATOR + filename + SEPAERATOR + str(myGroupID) + SEPAERATOR + stringify(
bitvector) + DELIMITER
message_queues[s].put(data)
elif header == "REPLY_HANDSHAKE":
filename = temp[1]
grpID = int(temp[2])
bitvector = temp[3]
if (grpID == myGroupID):
myGroupList.append(s)
pieces = len(bitvector)
downloadInfos[filename][s] = bitvector
if (not os.path.exists(filename)):
fd = open(filename, "w+b", 0)
fd.close()
fd = open(filename, "r+b", 0)  # "rw+b" is not a valid mode; "r+b" opens for read/write without truncating
# uploadInfos[filename][s] = bitvector
elif header == "REQUEST_PIECE":
# print temp
filename = temp[1]
index = int(temp[2])
actualPieceData = retrieveBytesFromFile(s, filename, index)
elif header == "MULTICAST":
filename = temp[1]
index = int(temp[2])
blockNumber = int(temp[3])
byteData = temp[4]
print "Multicasting Message recieved for block = " + str(blockNumber) + "and piece = " + str(
index) + "and file = " + filename
try:
position = PIECE_SIZE * (index - 1) + blockNumber * BLOCK_SIZE
if (not os.path.exists(filename)):
fs = open(filename, "wb+")
fs = open(filename, "r+b")  # "rwb+" is not a valid mode; reopen for read/write without truncating
fs.seek(position, 0)
fs.write(byteData)
fs.close()
except:
print "Error while Multicasting"
elif header == "BROADCAST":
filename = temp[1]
index = int(temp[2])
print "Broadcast message received for piece " + str(index) + "of file " + filename
if s in downloadInfos[filename]:
pieceRequestQueue[filename][index].put(s)
elif header == "HAVE_PIECE":
filename = temp[1]
index = int(temp[2])
blockNumber = int(temp[3])
byteData = temp[4]
# print "piece status list: " + (' ').join(pieceStatus)
# print pieceStatus
# print len(temp)
# print len(byteData)
sizeDownloaded[filename] += len(byteData)
try:
# if(not os.path.exists(filename)):
# fd = open(filename,"wb+")
# fd.close()
# fd = open(filename,"rwb+")
position = PIECE_SIZE * (index - 1) + blockNumber * BLOCK_SIZE
fd.seek(position, 0)
writtenAmount = fd.write(byteData)
# time.sleep(0.001)
fd.flush()
try:
pieceStatus[filename][index] = pieceStatus[filename][index] + 1
except:
print "error in pieceStatus"
try:
if (pieceStatus[filename][index] == 128):
print "Piece no. =" + str(index) + " of file " + filename + " Downloaded"
broadcast(index, filename)
multicast(index, filename, blockNumber, byteData)
except:
print "Error in casting"
print "Downloaded index = " + str(index) + " blockNumber = " + str(
blockNumber) + " for filename = " + filename + " at position: " + str(position) + " till: " + str(
fd.tell()) + " Written = " + str(writtenAmount)
except:
print "Error handling while Flushing data"
else:
# print "data count: " + data
pass
if s not in outputs:
outputs.append(s)
# send handshake message to the peers and recv handshake_reply
# transfers bitvector to each other
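# Wire format (as built below): "HANDSHAKE" SEPAERATOR <filename> SEPAERATOR <groupID>
# SEPAERATOR <bitvector string> DELIMITER, answered by a "REPLY_HANDSHAKE" message
# carrying the peer's own bitvector.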
def handShaking(peerList, currentFile):
global seeder, torrentInfo, uploadInfos, downloadInfos, pieceRequestQueue, pieceStatus, inputs, outputs
print "Seeder in handshaking"
for peers in peerList:
print peers
host, port = peers.split(':')
port = int(port)
peerServer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
peerServer.connect((host, port))
# peerServer.setblocking(0)
outputs.append(peerServer)
print "Connection to " + peers + " succeeded"
print "Creating output queue for " + peers
message_queues[peerServer] = Queue.Queue()
data = "HANDSHAKE" + SEPAERATOR + torrentInfo[currentFile]['name'] + SEPAERATOR + str(
myGroupID) + SEPAERATOR + stringify(pieceBitVector[torrentInfo[currentFile]["name"]]) + DELIMITER
print data
peerServer.send(data)
print peerServer.getpeername()
# print "Hello"
try:
data = peerServer.recv(BLOCK_SIZE)
print 'received "%s" from %s' % (data, peerServer.getpeername())
# print data
processRecvdMsg(data, peerServer)
except:
print "Error while recieveing "
inputs.append(peerServer)
except:
print "Some error"
pass
peerServer.setblocking(0)
# request messages are put in the message queue corresponding to the peers
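# pieceRequestQueue[filename][i] holds the peers that advertise piece i, so sorting by
# queue size requests the pieces held by the fewest peers first (rarest-first ordering).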
def rarestPieceFirstAlgo(filename, pieces):
print "Executing Rarest Piece first algorithm"
filename = filename.strip('\n')
global seeder, torrentInfo, uploadInfos, downloadInfos, pieceRequestQueue
# Creating list of tuples for rarest first order (pieceCount,pieceIndex)
countPiece = []
for i in xrange(1, pieces + 1):
countPiece.append((pieceRequestQueue[filename][i].qsize(), i))
countPiece = sorted(countPiece, key=getKey)
print countPiece
for tuples in countPiece:
pieceQsize = tuples[0]
pieceIndex = tuples[1]
# FORMAT of Sending message
if pieceQsize != 0:
data = "REQUEST_PIECE" + SEPAERATOR + filename + SEPAERATOR + str(pieceIndex) + DELIMITER
s = pieceRequestQueue[filename][pieceIndex].get_nowait()
message_queues[s].put(data)
# print data
# called by recvMessage
def reactor(server, currentFile):
global seeder, torrentInfo, uploadInfos, downloadInfos, pieceRequestQueue, inputs, outputs, message_queues, DELIMITER, lastBuffered, badaFlag, running, numPiecesDownloaded, Downloading, sizeDownloaded
Downloading[currentFile] = True
sizeDownloaded[currentFile] = 0
running = True
while running:
# In case of a leecher, an initial check to whether the file has been downloaded
# if not seeder:
# # print "Size Downloaded : " + str(sizeDownloaded[currentFile])
# if(getSize(currentFile)==int(torrentInfo[currentFile]['length'])):
# Downloading[currentFile] = False
# print currentFile + " downloaded Successfully"
# #TO DO here:
# #call some dialog box saying "Download completed for the currentFile"
# #Close sockets gracefully
# fd.close()
# break
# # try:
# # for s in inputs:
# # if s is not server:
# # s.close()
# # for s in outputs:
# # if s is not server:
# # s.close()
# # except:
# # print "Error while closing sockets"
# Wait for at least one of the sockets to be ready for processing
print '\nwaiting for the next event using select'
readable, writable, exceptional = select.select(inputs, outputs, inputs)
# Handle inputs
for s in readable:
print "In Readable"
if s is server:
# A "readable" server socket is ready to accept a connection
connection, client_address = s.accept()
print 'new connection from', client_address
connection.setblocking(0)
inputs.append(connection)
outputs.append(connection)
# Give the connection a queue for data we want to send
message_queues[connection] = Queue.Queue()
else:
bufferMsg = s.recv(BLOCK_SIZE)
# print bufferMsg
if bufferMsg:
if lastBuffered != "":
bufferMsg = lastBuffered + bufferMsg
lastBuffered = ""
# if badaFlag :
# print "Stray data is bufferMsg = "+bufferMsg
bufferMsg = bufferMsg.split(DELIMITER)
# if badaFlag :
# print " bufferMsgafter splitting DELIMITER= " + ('').join(bufferMsg)
if (bufferMsg[-1]):
lastBuffered = bufferMsg[-1]
for data in bufferMsg[:-1]:
processRecvdMsg(data, s)
# A readable client socket has data
else:
# Interpret empty result as closed connection
print 'closing', client_address, 'after reading no data'
# Stop listening for input on the connection
if s in outputs:
outputs.remove(s)
inputs.remove(s)
s.close()
# Remove message queue
del message_queues[s]
# Handle outputs
for s in writable:
print "In writable"
try:
next_msg = message_queues[s].get_nowait()
# print "nextmessage: " + next_msg
except:
# No messages waiting so stop checking for writability.
print 'output queue for', s.getpeername(), 'is empty'
outputs.remove(s)
else:
temp = next_msg.split(SEPAERATOR)
if (temp[0] == "HAVE_PIECE"):
print "Sending data for file = " + temp[1] + " PieceIndex = " + temp[2] + " blockNumber = " + temp[
3]
# print 'sending "%s" to %s' % (next_msg, s.getpeername())
s.send(next_msg)
# time.sleep(0.075)
# Handle "exceptional conditions"
for s in exceptional:
print "In Exceptional"
print 'handling exceptional condition for', s.getpeername()
# Stop listening for input on the connection
inputs.remove(s)
if s in outputs:
outputs.remove(s)
s.close()
# Remove message queue
del message_queues[s] # reactor is called by recvMessage
# thread runs here
def recvMessage(host, port, peerList, currentFile):
# global seeder,torrentInfo,uploadInfos,downloadInfos,pieceRequestQueue, inputs, outputs
# print "Entering recvMessage"
# global count
# server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# server.setblocking(0)
# # Bind the socket to the port
# server_address = (host, port)
# print 'starting up on %s port %s' % server_address
# server.bind(server_address)
# count = count+1
# # Listen for incoming connections
# server.listen(5)
# # if(not seeder)
# inputs = []
# outputs = []
# print "currentFile is : " + currentFile
# print "inputs is : " + str(type(inputs))
# print "outputs is : " + str(type(outputs))
# inputs.append(server)
# # Sockets from which we expect to read
handShaking(peerList, currentFile)
if not seeder:
pieceRequestOrdering(torrentInfo[currentFile]["name"], currentFile)
rarestPieceFirstAlgo(torrentInfo[currentFile]["name"], int(torrentInfo[currentFile]['pieces']))
reactor(server, currentFile) # Thread first calls this function
print "Closing Main Socket"
server.close()
# bitvector showing which pieces I have. '0' means piece missing and '1' means I have the piece.
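# The bitvector is persisted as a plain string of '0'/'1' characters in
# ./bitvector/<name>.vec (written by broadcast via stringify, read back here).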
def returnBitVector(filename, pieces):
file = "./bitvector/" + filename.split(".")[0] + ".vec"
print file
try:
f = open(file, "r")
stringBitVector = f.read()
stringBitVector = stringBitVector.strip('\n')
bitvector = []
for i in xrange(0, pieces):
bitvector.append(int(stringBitVector[i]))
return bitvector
except:
print "printing 0 vector"
return [0] * pieces
def stringify(bitvector):
str = ""
for i in bitvector:
if (i == 0):
str = str + '0'
else:
str = str + '1'
return str
def initialize(torrentFile):
global myHost, myPort, seeder, torrentInfo, uploadInfos, downloadInfos, pieceRequestQueue
# if(len(sys.argv) < 3):
# print "To run please follow following Format: python %s hostname port (optional:Torrent File)",sys.argv[0]
# sys.exit("Bye")
seeder = True
# myHost = sys.argv[1]
# myPort = int(sys.argv[2])
currentFile = ""
peerList = []
if len(sys.argv) == 1:
# filename = sys.argv[3]
currentFile = parseTorrentFile(torrentFile)
torrentInfo[currentFile]["name"] = torrentInfo[currentFile]["name"].strip('\n')
print "calling returnBitVector"
bitvector = returnBitVector(torrentInfo[currentFile]["name"], int(torrentInfo[currentFile]["pieces"]))
print "call to returnBitVector ended"
pieceBitVector[torrentInfo[currentFile]["name"]] = bitvector
seeder = False
if (not seeder):
peerList = returnPeerList(torrentInfo, myHost, myPort, currentFile)
print "Peer List Received"
return (currentFile, peerList)
# if __name__ == '__main__':
# #Tracker connection
# initialize()
# try:
# start_new_thread(recvMessage,(myHost,myPort,peerList))
# # start_new_thread(sendMessage,(host,port,peerList))
# except:
# print "Error: unable to start thread"
# while 1:
# pass
#######################################################################################
class MyProgressDialog(wx.Dialog):
""""""
# ----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
wx.Dialog.__init__(self, None, title="Progress")
self.count = 0
self.progress = wx.Gauge(self, range=20)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.progress, 0, wx.EXPAND)
self.SetSizer(sizer)
        # create a pubsub listener
        wx.CallAfter(Publisher().sendMessage, "update", "")
Publisher().subscribe(self.updateProgress, "update")
# ----------------------------------------------------------------------
def updateProgress(self, msg):
"""
Update the progress bar
"""
self.count += 1
if self.count >= 20:
self.Destroy()
self.progress.SetValue(self.count)
class Example(wx.Frame):
def __init__(self, parent, title):
super(Example, self).__init__(parent, title=title,
size=(460, 300))
# self.InitUI()
self.panel = wx.Panel(self)
sizer = wx.GridBagSizer(5, 5)
self.text1 = wx.StaticText(self.panel, label="BITTORRENT v1.0")
sizer.Add(self.text1, pos=(0, 0), flag=wx.TOP | wx.LEFT | wx.BOTTOM,
border=15)
self.icon = wx.StaticBitmap(self.panel, bitmap=wx.Bitmap('exec.png'))
sizer.Add(self.icon, pos=(0, 4), flag=wx.TOP | wx.RIGHT | wx.ALIGN_RIGHT,
border=5)
self.line = wx.StaticLine(self.panel)
sizer.Add(self.line, pos=(1, 0), span=(1, 5),
flag=wx.EXPAND | wx.BOTTOM, border=10)
self.text2 = wx.StaticText(self.panel, label="Port")
sizer.Add(self.text2, pos=(3, 0), flag=wx.LEFT, border=10)
self.portText = wx.TextCtrl(self.panel)
sizer.Add(self.portText, pos=(3, 1), span=(1, 3), flag=wx.TOP | wx.EXPAND)
self.text3 = wx.StaticText(self.panel, label="Torrent File")
sizer.Add(self.text3, pos=(4, 0), flag=wx.LEFT | wx.TOP, border=10)
self.torrentFileText = wx.TextCtrl(self.panel)
sizer.Add(self.torrentFileText, pos=(4, 1), span=(1, 3), flag=wx.TOP | wx.EXPAND,
border=5)
self.text4 = wx.StaticText(self.panel, label="IP")
sizer.Add(self.text4, pos=(2, 0), flag=wx.LEFT | wx.TOP, border=10)
self.IPText = wx.TextCtrl(self.panel)
sizer.Add(self.IPText, pos=(2, 1), span=(1, 3), flag=wx.TOP | wx.EXPAND,
border=5)
self.text4 = wx.StaticText(self.panel, label="Group ID")
sizer.Add(self.text4, pos=(5, 0), flag=wx.LEFT | wx.TOP, border=10)
self.GroupText = wx.TextCtrl(self.panel)
sizer.Add(self.GroupText, pos=(5, 1), span=(1, 1), flag=wx.TOP | wx.EXPAND,
border=5)
self.IPText.SetValue("127.0.0.1")
self.portText.SetValue("10001")
self.button1 = wx.Button(self.panel, label="Browse...")
sizer.Add(self.button1, pos=(4, 4), flag=wx.TOP | wx.RIGHT, border=5)
self.Bind(wx.EVT_BUTTON, self.OnButton_FrameHandler, self.button1)
self.button3 = wx.Button(self.panel, label='Help')
sizer.Add(self.button3, pos=(7, 0), flag=wx.LEFT, border=10)
self.button4 = wx.Button(self.panel, label="Start")
sizer.Add(self.button4, pos=(7, 3))
self.Bind(wx.EVT_BUTTON, self.OnClickStart, self.button4)
self.button4 = wx.Button(self.panel, label="Connect")
sizer.Add(self.button4, pos=(3, 4))
self.Bind(wx.EVT_BUTTON, self.OnConnect, self.button4)
self.button5 = wx.Button(self.panel, label="Exit")
sizer.Add(self.button5, pos=(7, 4), span=(1, 1),
flag=wx.BOTTOM | wx.RIGHT, border=5)
self.Bind(wx.EVT_BUTTON, self.OnClickExit, self.button5)
sizer.AddGrowableCol(2)
self.panel.SetSizer(sizer)
self.Centre()
self.Show()
def OnButton_FrameHandler(self, event):
# print "Hello"
openFileDialog = wx.FileDialog(self, "Open Torrent file", "", "",
"Torrent files (*.torrent)|*.torrent", wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
if openFileDialog.ShowModal() == wx.ID_CANCEL:
return
else:
paths = openFileDialog.GetPaths()
self.torrentFileText.SetValue(paths[0])
def OnClickExit(self, event):
print "Thank you for sharing"
running = False
self.Destroy()
def OnConnect(self, event):
myHost = self.IPText.GetValue()
myPort = int(self.portText.GetValue())
global seeder, torrentInfo, uploadInfos, downloadInfos, pieceRequestQueue, inputs, outputs, server
print "Entering recvMessage"
global count
print "Socket connected"
# server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setblocking(0)
# Bind the socket to the port
server_address = (myHost, myPort)
print 'starting up on %s port %s' % server_address
server.bind(server_address)
count = count + 1
# Listen for incoming connections
server.listen(5)
# if(not seeder)
inputs = []
outputs = []
# print "currentFile is : " + currentFile
print "inputs is : " + str(type(inputs))
print "outputs is : " + str(type(outputs))
inputs.append(server)
# Sockets from which we expect to read
def OnClickStart(self, event):
print "Ready to share torrents"
torrentFilename = self.torrentFileText.GetValue()
myGroupID = self.GroupText.GetValue()
(currentFile, peerList) = initialize(torrentFilename)
# recvMessage(myHost,myPort,peerList, currentFile)
try:
start_new_thread(recvMessage, (myHost, myPort, peerList, currentFile))
except:
print "Error: unable to start thread"
def createProgressBar(self):
self.count = 0
self.progress = wx.Gauge(self, range=20)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.progress, 2,
flag=wx.EXPAND, border=5)
self.SetSizer(sizer)
Publisher().subscribe(self.updateProgress, "update")
def updateProgress(self, msg):
"""
Update the progress bar
"""
self.count += 1
if self.count >= 20:
self.Destroy()
self.progress.SetValue(self.count)
def InitUI(self):
pass
def GUI():
app = wx.App()
    Example(None, title="Distributed Bittorrent")
app.MainLoop()
if __name__ == '__main__':
GUI()
|
python
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2021 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Community access system field."""
from invenio_rdm_records.records.systemfields.access import Owner, Owners, \
RecordAccessField
class _Owner(Owner):
@property
def user(self):
if self.owner_type == 'user':
return self.owner_id
class _Owners(Owners):
owner_cls = _Owner
def __init__(self, owners=None, owner_cls=None):
"""Create a new list of owners."""
self.owner_cls = owner_cls or self.owner_cls
for owner in owners or []:
self.add(owner)
class CommunityAccess:
"""Access management per community."""
VISIBILITY_LEVELS = ('public', 'restricted')
MEMBER_POLICY_LEVELS = ('open', 'closed')
RECORD_POLICY_LEVELS = ('open', 'closed', 'restricted')
owners_cls = _Owners
def __init__(
self,
visibility=None,
member_policy=None,
record_policy=None,
owned_by=None,
owners_cls=None,
):
"""Create a new CommunityAccess object.
If ``owned_by`` is not specified, a new instance of ``owners_cls``
will be used.
:param visibility: The visibility level.
:param owned_by: The set of community owners
"""
self.visibility = visibility or 'public'
self.member_policy = member_policy or 'open'
self.record_policy = record_policy or 'open'
owners_cls = owners_cls or self.owners_cls
self.owned_by = owned_by if owned_by else owners_cls()
self.errors = []
def _validate_visibility_level(self, level):
return level in self.VISIBILITY_LEVELS
def _validate_member_policy_level(self, level):
return level in self.MEMBER_POLICY_LEVELS
def _validate_record_policy_level(self, level):
return level in self.RECORD_POLICY_LEVELS
@property
def visibility(self):
"""Get the visibility level."""
return self._visibility
@visibility.setter
def visibility(self, value):
"""Set the visibility level."""
if not self._validate_visibility_level(value):
raise ValueError(f"Unknown visibility level: {value}")
self._visibility = value
@property
def member_policy(self):
"""Get the member policy level."""
return self._member_policy
@member_policy.setter
def member_policy(self, value):
"""Set the member policy level."""
if not self._validate_member_policy_level(value):
raise ValueError(f"Unknown member policy level: {value}")
self._member_policy = value
@property
def record_policy(self):
"""Get the record policy level."""
return self._record_policy
@record_policy.setter
def record_policy(self, value):
"""Set the record policy level."""
if not self._validate_record_policy_level(value):
raise ValueError(f"Unknown record policy level: {value}")
self._record_policy = value
def dump(self):
"""Dump the field values as dictionary."""
return {
"visibility": self.visibility,
"member_policy": self.member_policy,
"record_policy": self.record_policy,
"owned_by": self.owned_by.dump(),
}
def refresh_from_dict(self, access_dict):
"""Re-initialize the Access object with the data in the access_dict."""
new_access = self.from_dict(access_dict)
self.visibility = new_access.visibility
self.member_policy = new_access.member_policy
self.record_policy = new_access.record_policy
self.owned_by = new_access.owned_by
@classmethod
def from_dict(
cls,
access_dict,
owners_cls=None,
):
"""Create a new Access object from the specified 'access' property.
The new ``CommunityAccess`` object will be populated with new instances
from the configured classes.
        If ``access_dict`` is empty, the ``Access`` object will be populated
        with a new instance of ``owners_cls``.
"""
owners_cls = owners_cls or cls.owners_cls
errors = []
# provide defaults in case there is no 'access' property
owned_by = owners_cls()
if access_dict:
for owner_dict in access_dict.get("owned_by", []):
try:
owned_by.add(owned_by.owner_cls(owner_dict))
except Exception as e:
errors.append(e)
access = cls(
visibility=access_dict.get("visibility"),
member_policy=access_dict.get("member_policy"),
record_policy=access_dict.get("record_policy"),
owned_by=owned_by
)
access.errors = errors
return access
def __repr__(self):
"""Return repr(self)."""
return (
"<{} (visibility: {}, "
"member_policy: {}, "
"record_policy: {}, "
"owners: {})>"
).format(
type(self).__name__,
self.visibility,
self.member_policy,
self.record_policy,
self.owned_by,
)
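# Usage sketch (illustrative values only): round-tripping the access metadata
# through the class above; assumes an empty owners list is acceptable here.
# access = CommunityAccess.from_dict({"visibility": "restricted", "member_policy": "closed",
#                                     "record_policy": "open", "owned_by": []})
# access.dump()  # -> {"visibility": "restricted", ..., "owned_by": []}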
class CommunityAccessField(RecordAccessField):
"""System field for managing community access."""
def __init__(self, *args, access_obj_class=CommunityAccess, **kwargs):
"""Create a new CommunityAccessField instance."""
super().__init__(*args, access_obj_class=access_obj_class, **kwargs)
def obj(self, instance):
"""Get the access object."""
obj = self._get_cache(instance)
if obj is not None:
return obj
data = self.get_dictkey(instance)
if data:
obj = self._access_obj_class.from_dict(data)
else:
obj = self._access_obj_class()
self._set_cache(instance, obj)
return obj
# NOTE: The original RecordAccessField dumps some non-existing fields
def post_dump(self, *args, **kwargs):
"""Called before a record is dumped."""
pass
def pre_load(self, *args, **kwargs):
"""Called before a record is dumped."""
pass
|
python
|
from datetime import datetime
from typing import List
from fastapi import APIRouter
from utils.database import database
from .models import replies
from .schema import ReplyIn, Reply, LikeIn, Like
replies_router = APIRouter()
@replies_router.get("/list-for-post/{post_id}/", response_model=List[Reply])
async def list_replies(post_id: int):
"""
List Reply(ies) for a given Post
:param post_id: int Post.id
:return: List[Reply]
"""
query = replies.select().where(
replies.c.post_id == post_id
)
return await database.fetch_all(query=query)
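# Usage sketch (assumes this router is mounted under a prefix such as "/replies"):
# GET /replies/list-for-post/42/ would return the JSON list of Reply objects
# whose post_id equals 42.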
@replies_router.post("/add-to-post/", response_model=Reply)
async def create_reply(reply: ReplyIn):
created_at = datetime.utcnow()
query = replies.insert().values(
post_id=reply.post_id,
reply_type=reply.reply_type,
content=reply.content,
created_at=created_at
)
last_record_id = await database.execute(query=query)
return {
**reply.dict(),
"id": last_record_id,
"created_at": created_at
}
@replies_router.post("/add-like/", response_model=Like)
async def add_like(like: LikeIn):
query = replies.select().where(
replies.c.id == like.reply_id
)
    existing = await database.fetch_one(query=query)
    likes_count = 1 if existing.likes_count is None else existing.likes_count + 1
query = replies.update().values(
likes_count=likes_count
).where(
replies.c.id == like.reply_id
)
await database.execute(query=query)
return {
**like.dict(),
"likes_count": likes_count
}
|
python
|
# Generated by Django 3.0.6 on 2020-05-20 10:05
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("resources", "0003_auto_20200520_0825"),
]
operations = [
migrations.RemoveField(model_name="land", name="images",),
]
|
python
|
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <[email protected]>
# please also see AUTHORS file
# :copyright: (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-clause BSD, see included LICENSE for information
from ._version import get_versions
from ._langs import get_langs
#__version__ = get_versions()['version']
#__version_full__ = get_versions()['full']
__version__ = get_versions()['version']
__langs__ = get_langs()
del get_versions
del get_langs
|
python
|
#-*- coding: utf-8 -*-
'''
Copyright (c) 2016 NSR (National Security Research Institute)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from .lsh256 import LSH256
from .lsh512 import LSH512
## Hash function wrapper class
class LSHDigest:
    ## Creates an LSH algorithm object matching the given parameters
    # @param [in] wordlenbits word length in bits; only 256 and 512 are supported
    # @param [in] outlenbits output length in bits; 1 to 256 (LSH-256) or 1 to 512 (LSH-512)
    # @return LSH object
@staticmethod
def getInstance(wordlenbits, outlenbits = None):
if outlenbits is None:
outlenbits = wordlenbits
if wordlenbits == 256:
return LSH256(outlenbits)
elif wordlenbits == 512:
return LSH512(outlenbits)
else:
            raise ValueError("Unsupported algorithm parameter")
    ## digest function - computes and returns the final hash value.
    # @param [in] wordlenbits word length; must be either 256 or 512
    # @param [in] outlenbits output hash length; must be between 1 and wordlenbits
    # @param [in] data input data
    # @param [in] offset data start offset (bytes)
    # @param [in] length data length (bits)
    # @return the computed hash value
@staticmethod
def digest(wordlenbits, outlenbits = None, data = None, offset = 0, length = -1):
if outlenbits is None:
outlenbits = wordlenbits
lsh = LSHDigest.getInstance(wordlenbits, outlenbits)
return lsh.final(data, offset, length)
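# Usage sketch (assumes LSH256/LSH512 accept a bytes-like message via final()):
# LSHDigest.digest(256, 256, b"abc") would return the LSH-256-256 hash of b"abc",
# and LSHDigest.getInstance(512, 384) builds an LSH-512 object truncated to 384 bits.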
|
python
|
# Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Ranking losses."""
import tensorflow as tf
from delf.python.training.losses import ranking_losses
class RankingLossesTest(tf.test.TestCase):
def testContrastiveLoss(self):
# Testing the correct numeric value.
queries = tf.math.l2_normalize(tf.constant([[1.0, 2.0, -2.0]]))
positives = tf.math.l2_normalize(tf.constant([[-1.0, 2.0, 0.0]]))
negatives = tf.math.l2_normalize(tf.constant([[[-5.0, 0.0, 3.0]]]))
result = ranking_losses.contrastive_loss(queries, positives, negatives,
margin=0.7, eps=1e-6)
exp_output = 0.55278635
self.assertAllClose(exp_output, result)
def testTripletLossZeroLoss(self):
# Testing the correct numeric value in case if query-positive distance is
# smaller than the query-negative distance.
queries = tf.math.l2_normalize(tf.constant([[1.0, 2.0, -2.0]]))
positives = tf.math.l2_normalize(tf.constant([[-1.0, 2.0, 0.0]]))
negatives = tf.math.l2_normalize(tf.constant([[[-5.0, 0.0, 3.0]]]))
result = ranking_losses.triplet_loss(queries, positives, negatives,
margin=0.1)
exp_output = 0.0
self.assertAllClose(exp_output, result)
def testTripletLossNonZeroLoss(self):
# Testing the correct numeric value in case if query-positive distance is
# bigger than the query-negative distance.
queries = tf.math.l2_normalize(tf.constant([[1.0, 2.0, -2.0]]))
positives = tf.math.l2_normalize(tf.constant([[-5.0, 0.0, 3.0]]))
negatives = tf.math.l2_normalize(tf.constant([[[-1.0, 2.0, 0.0]]]))
result = ranking_losses.triplet_loss(queries, positives, negatives,
margin=0.1)
exp_output = 2.2520838
self.assertAllClose(exp_output, result)
if __name__ == '__main__':
tf.test.main()
|
python
|
import curve25519
import time
# from urandom import randint
d = b'\x70\x1f\xb4\x30\x86\x55\xb4\x76\xb6\x78\x9b\x73\x25\xf9\xea\x8c\xdd\xd1\x6a\x58\x53\x3f\xf6\xd9\xe6\x00\x09\x46\x4a\x5f\x9d\x54\x00\x00\x00\x00'
u = b'\x09' + bytes(31)
v = b'\xd9\xd3\xce~\xa2\xc5\xe9)\xb2a|m~M=\x92L\xd1Hw,\xdd\x1e\xe0\xb4\x86\xa0\xb8\xa1\x19\xae \x00\x00\x00\x00'
print('Test vectors from https://tools.ietf.org/html/rfc8031#appendix-A')
print('Test 1: X25519: q = d*u')
start = time.ticks_ms() # get millisecond counter
b = curve25519.x25519(d, u)
delta = time.ticks_diff(time.ticks_ms(), start) # compute time difference
print('Computation time: %d ms' % delta)
q = int.from_bytes(b, 'little')
print('q [hex/dec] = %x %d' % (q, q))
if q != 0x66c7fb0d9f7090f777fa8493081ce8a4f174dbbbf9a36f16ba571206d4ddd548:
print('Test 1 failed.')
else:
print('Test 1 passed.')
print()
print('Test 2: X25519 + y-coordinate recovery + transform to Edwards-curve')
print('(x, y) = Edward(q, r), (q, r) = d*(u, v)')
start = time.ticks_ms() # get millisecond counter
b = curve25519.x25519_ed(d, u, v)
delta = time.ticks_diff(time.ticks_ms(), start) # compute time difference
print('Computation time: %d ms' % delta)
x = int.from_bytes(b[0], 'little')
y = int.from_bytes(b[1], 'little')
print('x [hex/dec] = %x %d' % (x, x))
print('y [hex/dec] = %x %d' % (y, y))
if x != 0x1ce7e6e3a747a25352df2d3155f06427ba389769e37755731dead2b54c5cef03 or y != 0x4dd1c7c2001c147333ceedf77ebd48b1100e2a95f88cf1f40d1b74ec7279e657:
print('Test 2 failed.')
else:
print('Test 2 passed.')
print()
|
python
|
import torch
import pytest
def test_nll(device):
from speechbrain.nnet.losses import nll_loss
predictions = torch.zeros(4, 10, 8, device=device)
targets = torch.zeros(4, 10, device=device)
lengths = torch.ones(4, device=device)
out_cost = nll_loss(predictions, targets, lengths)
assert torch.all(torch.eq(out_cost, 0))
def test_mse(device):
from speechbrain.nnet.losses import mse_loss
predictions = torch.ones(4, 10, 8, device=device)
targets = torch.ones(4, 10, 8, device=device)
lengths = torch.ones(4, device=device)
out_cost = mse_loss(predictions, targets, lengths)
assert torch.all(torch.eq(out_cost, 0))
predictions = torch.zeros(4, 10, 8, device=device)
out_cost = mse_loss(predictions, targets, lengths)
assert torch.all(torch.eq(out_cost, 1))
def test_l1(device):
from speechbrain.nnet.losses import l1_loss
predictions = torch.ones(4, 10, 8, device=device)
targets = torch.ones(4, 10, 8, device=device)
lengths = torch.ones(4, device=device)
out_cost = l1_loss(predictions, targets, lengths)
assert torch.all(torch.eq(out_cost, 0))
def test_bce_loss(device):
from speechbrain.nnet.losses import bce_loss
# Ensure this works both with and without singleton dimension
predictions_singleton = torch.zeros(4, 10, 1, device=device)
predictions_match = torch.zeros(4, 10, device=device)
targets = torch.ones(4, 10, device=device)
lengths = torch.ones(4, device=device)
out_cost_singleton = bce_loss(predictions_singleton, targets, lengths)
out_cost_match = bce_loss(predictions_match, targets, lengths)
assert torch.allclose(
torch.exp(out_cost_singleton), torch.tensor(2.0, device=device)
)
assert torch.allclose(
torch.exp(out_cost_match), torch.tensor(2.0, device=device)
)
# How about one dimensional inputs
predictions = torch.zeros(5, 1, device=device)
targets = torch.ones(5, device=device)
out_cost = bce_loss(predictions, targets)
assert torch.allclose(torch.exp(out_cost), torch.tensor(2.0, device=device))
# Can't pass lengths in 1D case
with pytest.raises(ValueError):
bce_loss(predictions, targets, length=torch.ones(5, device=device))
def test_classification_error(device):
from speechbrain.nnet.losses import classification_error
predictions = torch.zeros(4, 10, 8, device=device)
predictions[:, :, 0] += 1.0
targets = torch.zeros(4, 10, device=device)
lengths = torch.ones(4, device=device)
out_cost = classification_error(predictions, targets, lengths)
assert torch.all(torch.eq(out_cost, 0))
def test_pitwrapper(device):
from speechbrain.nnet.losses import PitWrapper
import torch
from torch import nn
base_loss = nn.MSELoss(reduction="none")
pit = PitWrapper(base_loss)
predictions = torch.rand(
(2, 32, 4), device=device
) # batch, frames, sources
p = (3, 0, 2, 1)
# same but we invert the ordering to check if permutation invariant
targets = predictions[..., p]
loss, opt_p = pit(predictions, targets)
assert [x == p for x in opt_p] == [True for i in range(len(opt_p))]
predictions = pit.reorder_tensor(predictions, opt_p)
assert torch.all(torch.eq(base_loss(predictions, targets), 0))
predictions = torch.rand(
(3, 32, 32, 32, 5), device=device
) # batch, ..., sources
p = (3, 0, 2, 1, 4)
targets = predictions[
..., p
] # same but we invert the ordering to check if permutation invariant
loss, opt_p = pit(predictions, targets)
assert [x == p for x in opt_p] == [True for i in range(len(opt_p))]
predictions = pit.reorder_tensor(predictions, opt_p)
assert torch.all(torch.eq(base_loss(predictions, targets), 0))
def test_transducer_loss(device):
# Make this its own test since it can only be run
# if numba is installed and a GPU is available
pytest.importorskip("numba")
if torch.cuda.device_count() == 0:
pytest.skip("This test can only be run if a GPU is available")
from speechbrain.nnet.losses import transducer_loss
device = torch.device("cuda")
log_probs = (
torch.Tensor(
[
[
[
[0.1, 0.6, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.6, 0.1, 0.1],
[0.1, 0.1, 0.2, 0.8, 0.1],
],
[
[0.1, 0.6, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.2, 0.1, 0.1],
[0.7, 0.1, 0.2, 0.1, 0.1],
],
]
]
)
.to(device)
.requires_grad_()
.log_softmax(dim=-1)
)
targets = torch.Tensor([[1, 2]]).to(device).int()
probs_length = torch.Tensor([1.0]).to(device)
target_length = torch.Tensor([1.0]).to(device)
out_cost = transducer_loss(
log_probs,
targets,
probs_length,
target_length,
blank_index=0,
use_torchaudio=False,
)
out_cost.backward()
assert out_cost.item() == 2.247833251953125
def test_guided_attention_loss_mask(device):
from speechbrain.nnet.loss.guidedattn_loss import GuidedAttentionLoss
loss = GuidedAttentionLoss().to(device)
input_lengths = torch.tensor([3, 2, 6], device=device)
output_lengths = torch.tensor([4, 3, 5], device=device)
soft_mask = loss.guided_attentions(input_lengths, output_lengths)
ref_soft_mask = torch.tensor(
[
[
[0.0, 0.54216665, 0.9560631, 0.9991162, 0.0],
[0.7506478, 0.08314464, 0.2933517, 0.8858382, 0.0],
[0.9961341, 0.8858382, 0.2933517, 0.08314464, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
[
[0.0, 0.7506478, 0.9961341, 0.0, 0.0],
[0.9560631, 0.2933517, 0.2933517, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
[
[0.0, 0.39346933, 0.86466473, 0.988891, 0.99966455],
[0.2933517, 0.01379288, 0.49366438, 0.90436554, 0.993355],
[0.7506478, 0.1992626, 0.05404053, 0.5888877, 0.93427145],
[0.9560631, 0.6753475, 0.1175031, 0.1175031, 0.6753475],
[0.9961341, 0.93427145, 0.5888877, 0.05404053, 0.1992626],
[0.9998301, 0.993355, 0.90436554, 0.49366438, 0.01379288],
],
],
device=device,
)
assert torch.allclose(soft_mask, ref_soft_mask)
def test_guided_attention_loss_value(device):
from speechbrain.nnet.loss.guidedattn_loss import GuidedAttentionLoss
loss = GuidedAttentionLoss().to(device)
input_lengths = torch.tensor([2, 3], device=device)
target_lengths = torch.tensor([3, 4], device=device)
alignments = torch.tensor(
[
[
[0.8, 0.2, 0.0],
[0.4, 0.6, 0.0],
[0.2, 0.8, 0.0],
[0.0, 0.0, 0.0],
],
[
[0.6, 0.2, 0.2],
[0.1, 0.7, 0.2],
[0.3, 0.4, 0.3],
[0.2, 0.3, 0.5],
],
],
device=device,
)
loss_value = loss(alignments, input_lengths, target_lengths)
ref_loss_value = torch.tensor(0.1142)
assert torch.isclose(loss_value, ref_loss_value, 0.0001, 0.0001).item()
def test_guided_attention_loss_shapes(device):
from speechbrain.nnet.loss.guidedattn_loss import GuidedAttentionLoss
loss = GuidedAttentionLoss().to(device)
input_lengths = torch.tensor([3, 2, 6], device=device)
output_lengths = torch.tensor([4, 3, 5], device=device)
soft_mask = loss.guided_attentions(input_lengths, output_lengths)
assert soft_mask.shape == (3, 6, 5)
soft_mask = loss.guided_attentions(
input_lengths, output_lengths, max_input_len=10
)
assert soft_mask.shape == (3, 10, 5)
soft_mask = loss.guided_attentions(
input_lengths, output_lengths, max_target_len=12
)
assert soft_mask.shape == (3, 6, 12)
soft_mask = loss.guided_attentions(
input_lengths, output_lengths, max_input_len=10, max_target_len=12
)
assert soft_mask.shape == (3, 10, 12)
|
python
|
amount = int(input("Enter the taxable income: "))
married = input("Are you married? [y/N]: ") == "y"
if married:
if amount > 64000:
tax = 8800 + (amount - 64000) * .25
elif amount > 16000:
tax = 1600 + (amount - 16000) * .15
else:
tax = amount * .10
else:
if amount > 32000:
tax = 4400 + (amount - 32000) * .25
elif amount > 8000:
tax = 800 + (amount - 8000) * .15
else:
tax = amount * .10
print(f"Le tasse sono {tax:.2f}$")
|
python
|
# Source Generated with Decompyle++
# File: device_parameter_component.pyc (Python 2.5)
from __future__ import absolute_import
from ableton.v2.control_surface.control import ControlList
from pushbase.device_parameter_component import DeviceParameterComponentBase
from mapped_control import MappedControl
class DeviceParameterComponent(DeviceParameterComponentBase):
controls = ControlList(MappedControl, 8)
def set_parameter_controls(self, encoders):
self.controls.set_control_element(encoders)
self._connect_parameters()
def _connect_parameters(self):
parameters = self._parameter_provider.parameters[:self.controls.control_count]
for (control, parameter_info) in map(None, self.controls, parameters):
            # Assumes parameter_info exposes a `parameter` attribute alongside the
            # sensitivity fields used below (the decompiled body was garbled here).
            parameter = parameter_info.parameter if parameter_info else None
            control.mapped_parameter = parameter
            if parameter:
                control.update_sensitivities(parameter_info.default_encoder_sensitivity, parameter_info.fine_grain_encoder_sensitivity)
|
python
|
import codecs
from luadata.serializer.unserialize import unserialize
def read(path, encoding="utf-8", multival=False):
"""Read luadata from file
Args:
path (str): file path
        encoding (str, optional): file encoding. Defaults to "utf-8".
        multival (bool, optional): whether to unserialize multiple return values. Defaults to False.
Returns:
tuple([*]): unserialized data from luadata file
"""
with codecs.open(path, "r", encoding) as file:
text = file.read().strip()
if text[0:6] == "return":
ch = text[6:7]
if not (
(ch >= "a" and ch <= "z")
or (ch >= "A" and ch <= "Z")
or (ch >= "0" and ch <= "9")
or ch == "_"
):
text = text[6:]
    return unserialize(text, encoding=encoding, multival=multival)
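# Usage sketch (hypothetical file): a "config.lua" containing `return {1, 2, 3}`
# should come back from read("config.lua") as the Python list [1, 2, 3].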
|
python
|
# This is to test the conditions in python
# Demo for if and elsif
number = int(input("Please enter a number to check\n"))
if number < 100:
    print("the number is less than 100")
elif number == 100:
print("the number is equal to 100")
else:
print("number is more than 100\n")
# this part
city = ['Tokyo', 'New York', 'Toronto', 'Hong Kong']
for name in city:
print('City: ' + name)
print('\n') # newline
num = [1,2,3,4,5,6,7,8,9]
print('x^2 loop:')
for x in num:
y = x * x
print(str(x) + '*' + str(x) + '=' + str(y))
|
python
|
#!/usr/bin/env python
# Han Xiao <[email protected]> <https://hanxiao.github.io>
import multiprocessing
import os
import random
import sys
import threading
import time
from collections import defaultdict
from datetime import datetime
from itertools import chain
from multiprocessing import Process
from multiprocessing.pool import Pool
import numpy as np
import zmq
import zmq.decorators as zmqd
from termcolor import colored
from zmq.utils import jsonapi
from .helper import *
from .protocol import *
from .http import BertHTTPProxy
from .zmq_decor import multi_socket
from .postsink import WKRSink
from .hard_worker import WKRHardWorker
from .statistic import ServerStatistic
__all__ = ['__version__', 'WKRServer', 'WKRHardWorker']
__version__ = '1.0.0-a'
class WKRServer(threading.Thread):
def __init__(self, args, hardprocesser=WKRHardWorker):
super().__init__()
self.hardprocessor_skeleton = hardprocesser
if not issubclass(self.hardprocessor_skeleton, WKRHardWorker):
raise AssertionError('hardprocesser must inherit from class WKRHardWorker')
self.model_dir = args.model_dir
self.num_worker = args.num_worker
self.device_map = args.device_map
self.gpu_memory_fraction = args.gpu_memory_fraction
self.all_cpu = args.cpu
self.num_concurrent_postsocket = max(8, args.num_worker * 2)
self.batch_size = args.batch_size
self.total_concurrent_socket = self.num_concurrent_postsocket
self.port = args.port
self.args = args
self.transfer_protocol = args.protocol
self.status_args = {k: v for k, v in sorted(vars(args).items())}
self.status_static = {
'python_version': sys.version,
'server_version': __version__,
'pyzmq_version': zmq.pyzmq_version(),
'zmq_version': zmq.zmq_version(),
'server_start_time': str(datetime.now()),
}
self.processes = []
self.logdir = args.log_dir
self.logger = set_logger(colored('NAVIGATOR', 'red'), logger_dir=self.logdir, verbose=args.verbose)
self.logger.info('freeze, optimize and export graph, could take a while...')
self.is_ready = threading.Event()
def __enter__(self):
self.start()
self.is_ready.wait()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
self.logger.info('shutting down...')
self._send_close_signal()
self.is_ready.clear()
self.join()
@zmqd.context()
@zmqd.socket(zmq.PUSH)
def _send_close_signal(self, _, frontend):
frontend.connect('tcp://localhost:%d' % self.port)
frontend.send_multipart([b'', ServerCmd.terminate, b'', b''])
@staticmethod
def shutdown(args):
with zmq.Context() as ctx:
ctx.setsockopt(zmq.LINGER, args.timeout)
with ctx.socket(zmq.PUSH) as frontend:
try:
frontend.connect('tcp://%s:%d' % (args.ip, args.port))
frontend.send_multipart([b'', ServerCmd.terminate, b'', b''])
print('shutdown signal sent to %d' % args.port)
except zmq.error.Again:
raise TimeoutError(
                        'no response from the server (with "timeout"=%d ms), please check the following: '
                        'is the server still online? is the network broken? is the "port" correct? ' % args.timeout)
def run(self):
self._run()
@zmqd.context()
@zmqd.socket(zmq.PULL)
@zmqd.socket(zmq.PAIR)
@multi_socket(zmq.PUSH, num_socket='total_concurrent_socket')
def _run(self, _, frontend, sink, *backend_socks):
def push_new_job(client, req_id, msg_raw, msg_info_raw):
_sock = rand_backend_socket
send_to_next_raw(client, req_id, msg_raw, msg_info_raw, _sock)
# bind all sockets
self.logger.info('bind all sockets')
frontend.bind('tcp://*:%d' % self.port)
addr_front2sink = auto_bind(sink)
addr_backend_post_list = [auto_bind(b) for b in backend_socks]
self.logger.info('open %d worker sockets' % len(addr_backend_post_list))
# start the sink process
self.logger.info('start the sink')
proc_postsink = WKRSink(self.args, addr_front2sink, addr_backend_post_list)
self.processes.append(proc_postsink)
proc_postsink.start()
addr_sink = sink.recv().decode('ascii')
# start the post-backend processes
# WaveWorker: self, id, args, worker_address_list, sink_address, device_id
self.logger.info('start main-workers')
device_map_main_worker = self._get_device_map(self.num_worker, self.device_map, self.gpu_memory_fraction, run_all_cpu=self.all_cpu)
for idx, device_id in enumerate(device_map_main_worker):
process = self.hardprocessor_skeleton(idx, self.args, addr_backend_post_list, addr_sink, device_id)
self.processes.append(process)
process.start()
# process.is_ready.wait() # start model sequencely
# start the http-service process
if self.args.http_port:
self.logger.info('start http proxy')
proc_proxy = BertHTTPProxy(self.args)
self.processes.append(proc_proxy)
proc_proxy.start()
rand_backend_socket = None
server_status = ServerStatistic()
for p in self.processes:
p.is_ready.wait()
self.is_ready.set()
self.logger.info('all set, ready to serve request!')
while True:
try:
request = frontend.recv_multipart()
client, req_id, msg, msg_info = request
# client, req_id, msg, msg_info = recv_from_prev(self.transfer_protocol, frontend)
# request = [client, msg, req_id, msg_info]
except (ValueError, AssertionError):
self.logger.error('received a wrongly-formatted request (expected 4 frames, got %d)' % len(request))
self.logger.error('\n'.join('field %d: %s' % (idx, k) for idx, k in enumerate(request)), exc_info=True)
else:
server_status.update(request)
if msg == ServerCmd.terminate:
break
elif msg == ServerCmd.show_config:
self.logger.info('new config request\treq id: %d\tclient: %s' % (int(req_id), client))
status_runtime = {'client': client.decode('ascii'),
'num_process': len(self.processes),
'navigator -> worker': addr_backend_post_list,
'worker -> sink': addr_sink,
'server_current_time': str(datetime.now()),
'statistic': server_status.value,
'main_device_map': device_map_main_worker,
'main_batch_size': self.batch_size,
'protocol': self.transfer_protocol,
'num_concurrent_socket': self.total_concurrent_socket}
sink.send_multipart([client, msg, jsonapi.dumps({**status_runtime,
**self.status_args,
**self.status_static}), req_id])
else:
self.logger.info('new encode request\treq id: %s\tclient: %s' %
(str(req_id), client))
                    # register job
sink.send_multipart([client, ServerCmd.new_job, jsonapi.dumps({'job_parts': '1', 'split_info': {}}), to_bytes(req_id)])
# pick random socket
rand_backend_socket = random.choice([b for b in backend_socks if b != rand_backend_socket])
# info = jsonapi.loads(msg_info)
# if self.transfer_protocol == 'obj':
# msg = decode_object(msg, info)
# else:
# msg = decode_ndarray(msg, info)
# push job
push_new_job(client, req_id, msg, msg_info)
for p in self.processes:
p.close()
self.logger.info('terminated!')
def _get_device_map(self, num_worker, device_map_raw, per_process_gpu_fragment, run_all_cpu=False):
self.logger.info('get devices map')
run_on_gpu = False
device_map = [-1] * num_worker
if not run_all_cpu:
try:
import GPUtil
num_all_gpu = len(GPUtil.getGPUs())
avail_gpu = GPUtil.getAvailable(order='memory', limit=min(num_all_gpu, num_worker),
maxMemory=0.9, maxLoad=0.9)
num_avail_gpu = len(avail_gpu)
if num_avail_gpu >= num_worker:
run_on_gpu = True
elif 0 < num_avail_gpu < num_worker:
self.logger.warning('only %d out of %d GPU(s) is available/free, but "num_worker=%d"' %
(num_avail_gpu, num_all_gpu, num_worker))
if not device_map_raw:
self.logger.warning('multiple workers will be allocated to one GPU, '
'may not scale well and may raise out-of-memory')
else:
self.logger.warning('workers will be allocated based on "-device_map=%s", '
'may not scale well and may raise out-of-memory' % device_map_raw)
run_on_gpu = True
else:
self.logger.warning('no GPU available, fall back to CPU')
if run_on_gpu:
device_map = ((device_map_raw or avail_gpu) * num_worker)[: num_worker]
except FileNotFoundError:
self.logger.warning('nvidia-smi is missing, often means no gpu on this machine. '
'fall back to cpu!')
self.logger.info('device map: \n\t\t%s' % '\n\t\t'.join(
'worker %2d -> %s' % (w_id, ('gpu %2d' % g_id) if g_id >= 0 else 'cpu') for w_id, g_id in
enumerate(device_map)))
return device_map
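# Sketch of the mapping produced above (assumed scenario): with num_worker=3, no
# -device_map given and GPUs 0, 1 and 2 free, device_map becomes [0, 1, 2]; with
# run_all_cpu=True the default [-1, -1, -1] is kept and every worker runs on CPU.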
|
python
|
#!/usr/bin/env python
"""Application controller for FastTree
designed for FastTree v1.1.0. Also functions with v2.0.1, v2.1.0, and v2.1.3,
though only with basic functionality"""
from cogent.app.parameters import ValuedParameter, FlagParameter, \
MixedParameter
from cogent.app.util import CommandLineApplication, FilePath, system, \
CommandLineAppResult, ResultPath, remove, ApplicationError
from cogent.core.tree import PhyloNode
from cogent.parse.tree import DndParser
from cogent.core.moltype import DNA, RNA, PROTEIN
from cogent.core.alignment import SequenceCollection
__author__ = "Daniel McDonald"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Daniel McDonald", "Justin Kuczynski"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Daniel McDonald"
__email__ = "[email protected]"
__status__ = "Development"
class FastTree(CommandLineApplication):
"""FastTree application Controller"""
_command = 'FastTree'
_input_handler = '_input_as_multiline_string'
_parameters = {
'-quiet':FlagParameter('-',Name='quiet'),
'-boot':ValuedParameter('-',Delimiter=' ',Name='boot'),
'-seed':ValuedParameter('-',Delimiter=' ',Name='seed'),
'-nni':ValuedParameter('-',Delimiter=' ',Name='nni'),
'-slow':FlagParameter('-',Name='slow'),
'-fastest':FlagParameter('-',Name='fastest'),
'-top':FlagParameter('-',Name='top'),
'-notop':FlagParameter('-',Name='notop'),
'-topm':ValuedParameter('-',Delimiter=' ',Name='topm'),
'-close':ValuedParameter('-',Delimiter=' ',Name='close'),
'-refresh':ValuedParameter('-',Delimiter=' ',Name='refresh'),
'-matrix':ValuedParameter('-',Delimiter=' ',Name='matrix'),
'-nomatrix':FlagParameter('-',Name='nomatrix'),
'-nj':FlagParameter('-',Name='nj'),
'-bionj':FlagParameter('-',Name='bionj'),
'-nt':FlagParameter('-',Name='nt'),
'-n':ValuedParameter('-',Delimiter=' ',Name='n'),
'-pseudo':MixedParameter('-',Delimiter=' ', Name='pseudo'),
'-intree':ValuedParameter('-',Delimiter=' ',Name='intree'),
'-spr':ValuedParameter('-',Delimiter=' ',Name='spr'),
'-constraints':ValuedParameter('-',Delimiter=' ',\
Name='constraints'),
'-constraintWeight':ValuedParameter('-',Delimiter=' ',\
Name='constraintWeight'),\
'-makematrix':ValuedParameter('-',Delimiter=' ',Name='makematrix')}
def __call__(self,data=None, remove_tmp=True):
"""Run the application with the specified kwargs on data
data: anything that can be cast into a string or written out to
a file. Usually either a list of things or a single string or
number. input_handler will be called on this data before it
is passed as part of the command-line argument, so by creating
your own input handlers you can customize what kind of data
you want your application to accept
remove_tmp: if True, removes tmp files
NOTE: Override of the base class to handle redirected output
"""
input_handler = self.InputHandler
suppress_stderr = self.SuppressStderr
outfile = self.getTmpFilename(self.TmpDir)
self._outfile = outfile
if suppress_stderr:
errfile = FilePath('/dev/null')
else:
errfile = FilePath(self.getTmpFilename(self.TmpDir))
if data is None:
input_arg = ''
else:
input_arg = getattr(self,input_handler)(data)
# Build up the command, consisting of a BaseCommand followed by
# input and output (file) specifications
command = self._command_delimiter.join(filter(None,\
[self.BaseCommand,str(input_arg),'>',str(outfile),'2>',\
str(errfile)]))
if self.HaltExec:
raise AssertionError, "Halted exec with command:\n" + command
# The return value of system is a 16-bit number containing the signal
# number that killed the process, and then the exit status.
# We only want to keep the exit status so do a right bitwise shift to
# get rid of the signal number byte
exit_status = system(command) >> 8
# Determine if error should be raised due to exit status of
        # application
if not self._accept_exit_status(exit_status):
raise ApplicationError, \
'Unacceptable application exit status: %s, command: %s'\
% (str(exit_status),command)
out = open(outfile,"r")
err = None
if not suppress_stderr:
err = open(errfile,"r")
result = CommandLineAppResult(out,err,exit_status,\
result_paths=self._get_result_paths(data))
# Clean up the input file if one was created
if remove_tmp:
if self._input_filename:
remove(self._input_filename)
self._input_filename = None
return result
def _get_result_paths(self, data):
result = {}
result['Tree'] = ResultPath(Path=self._outfile)
return result
def build_tree_from_alignment(aln, moltype, best_tree=False, params=None):
"""Returns a tree from alignment
Will check MolType of aln object
"""
if params is None:
params = {}
if moltype == DNA or moltype == RNA:
params['-nt'] = True
elif moltype == PROTEIN:
params['-nt'] = False
else:
raise ValueError, \
"FastTree does not support moltype: %s" % moltype.label
if best_tree:
params['-slow'] = True
#Create mapping between abbreviated IDs and full IDs
int_map, int_keys = aln.getIntMap()
#Create SequenceCollection from int_map.
int_map = SequenceCollection(int_map,MolType=moltype)
app = FastTree(params=params)
result = app(int_map.toFasta())
tree = DndParser(result['Tree'].read(), constructor=PhyloNode)
#remap tip names
for tip in tree.tips():
tip.Name = int_keys[tip.Name]
return tree
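# Usage sketch (assumes `aln` is a PyCogent alignment of DNA sequences loaded elsewhere):
# tree = build_tree_from_alignment(aln, DNA, best_tree=False)
# would shell out to FastTree with -nt and return a PhyloNode tree with the original tip names.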
|
python
|
from service_runner.service_runner.answeb.ansible_api import AnsibleApi
from service_runner.service_runner.asset.models import Host
from django.conf import settings
DEFAULT_PLAYBOOKS_PATH = settings.BASE_DIR + '/service_runner/answeb/playbooks/'
def format_uptime_result(host, result):
callback = {
'message': '',
'data': {}
}
if result.get('success'):
host.status = 1
data = result.get('success').get(host.ip)
callback['message'] = 'success'
callback['data']['uptime'] = data.get('stdout')
callback['data']['status'] = host.get_status_display()
elif result.get('failed'):
host.status = 2
callback['message'] = 'failed'
callback['data']['uptime'] = result.get('failed').get(host.ip)
callback['data']['status'] = host.get_status_display()
elif result.get('unreachable'):
host.status = 2
callback['message'] = 'unreachable'
callback['data']['uptime'] = result.get('unreachable').get(host.ip)
callback['data']['status'] = host.get_status_display()
host.save()
return callback
def format_result(host, result):
callback = {}
if result.get('success'):
data = result.get('success').get(host.ip)
callback['message'] = 'success'
callback['data'] = data.get('out.stdout_lines')
elif result.get('failed'):
callback['message'] = 'failed'
callback['data'] = result.get('failed').get(host.ip)
elif result.get('unreachable'):
callback['message'] = 'unreachable'
callback['data'] = result.get('unreachable').get(host.ip)
return callback
def get_host_uptime(host_id):
callback = {}
host = Host.objects.get(id=host_id)
api = AnsibleApi(host.ip + ',')
if host.ssh_key:
api.options = api.create_options(remote_user=host.ssh_user,
private_key_file=host.ssh_key.ssh_key.path)
else:
api.options = api.create_options(remote_user=host.ssh_user)
api.passwords = dict(sshpass=host.ssh_passwd)
api.initializeData()
api.run(host.ip, 'shell', 'uptime')
result = api.get_result()
callback = format_uptime_result(host, result)
return callback
def get_host_info(host_id):
host = Host.objects.get(id=host_id)
api = AnsibleApi(host.ip + ',')
if host.ssh_key:
api.options = api.create_options(remote_user=host.ssh_user,
private_key_file=host.ssh_key.ssh_key.path)
else:
api.options = api.create_options(remote_user=host.ssh_user)
api.passwords = dict(sshpass=host.ssh_passwd)
api.initializeData()
# api.run(host.ip, 'shell', 'uptime')
api.run_playbook(host.ip,
[DEFAULT_PLAYBOOKS_PATH + 'memory_cpu_diskspace_uptime.yml'])
callback = format_result(host, api.get_result())
return callback
|
python
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Definition of a basic seq2seq model
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from pydoc import locate
import tensorflow as tf
from seq2seq.contrib.seq2seq import helper as tf_decode_helper
from seq2seq import graph_utils
from seq2seq import decoders
from seq2seq.data import vocab
from seq2seq.models.basic_seq2seq import BasicSeq2Seq
from seq2seq.graph_utils import templatemethod
from seq2seq.models import bridges
from seq2seq.encoders.encoder import Encoder, EncoderOutput
class BasicBiSeq2Seq(BasicSeq2Seq):
"""Basic Sequence2Sequence model with a unidirectional encoder and decoder.
The last encoder state is used to initialize the decoder and thus both
must share the same type of RNN cell.
Args:
source_vocab_info: An instance of `VocabInfo`
for the source vocabulary
target_vocab_info: An instance of `VocabInfo`
for the target vocabulary
params: A dictionary of hyperparameters
"""
def __init__(self, params, mode, name="basic_biseq2seq"):
super(BasicBiSeq2Seq, self).__init__(params, mode, name)
# add candidate answer part
self.source_candidate_vocab_info = None
if "vocab_source_candidate" in self.params and self.params["vocab_source_candidate"]:
self.source_candidate_vocab_info = vocab.get_vocab_info(self.params["vocab_source_candidate"])
self.encoder_class = locate(self.params["encoder.class"])
self.decoder_class = locate(self.params["decoder.class"])
@staticmethod
def default_params():
params = BasicSeq2Seq.default_params().copy()
params.update({
"bridge.class": "seq2seq.models.bridges.InitialStateBridge",
"bridge.params": {},
"encoder.class": "seq2seq.encoders.UnidirectionalRNNEncoder",
"encoder.params": {}, # Arbitrary parameters for the encoder
"decoder.class": "seq2seq.decoders.BasicDecoder",
"decoder.params": {}, # Arbitrary parameters for the decoder
"source_candidate.max_seq_len": 20,
"source_candidate.reverse": True,
"vocab_source_candidate": None
})
return params
def _create_bridge(self, encoder_outputs, decoder_state_size):
"""Creates the bridge to be used between encoder and decoder"""
bridge_class = locate(self.params["bridge.class"]) or \
getattr(bridges, self.params["bridge.class"])
return bridge_class(
encoder_outputs=encoder_outputs,
decoder_state_size=decoder_state_size,
params=self.params["bridge.params"],
mode=self.mode)
def _create_decoder(self, _encoder_output, _features, _labels):
"""Creates a decoder instance based on the passed parameters."""
return self.decoder_class(
params=self.params["decoder.params"],
mode=self.mode,
vocab_size=self.target_vocab_info.total_size)
def _decode_train(self, decoder, bridge, _encoder_output, _features, labels):
"""Runs decoding in training mode"""
target_embedded = tf.nn.embedding_lookup(self.target_embedding,
labels["target_ids"])
helper_train = tf_decode_helper.TrainingHelper(
inputs=target_embedded[:, :-1],
sequence_length=labels["target_len"] - 1)
decoder_initial_state = bridge()
print("basic_seq2seq decoder_initial_state:{}".format(decoder_initial_state))
return decoder(decoder_initial_state, helper_train)
def _decode_infer(self, decoder, bridge, _encoder_output, features, labels):
"""Runs decoding in inference mode"""
batch_size = self.batch_size(features, labels)
if self.use_beam_search:
batch_size = self.params["inference.beam_search.beam_width"]
target_start_id = self.target_vocab_info.special_vocab.SEQUENCE_START
helper_infer = tf_decode_helper.GreedyEmbeddingHelper(
embedding=self.target_embedding,
start_tokens=tf.fill([batch_size], target_start_id),
end_token=self.target_vocab_info.special_vocab.SEQUENCE_END)
decoder_initial_state = bridge()
return decoder(decoder_initial_state, helper_infer)
@templatemethod("encode")
def encode(self, features, labels):
# source_embedded = tf.nn.embedding_lookup(self.source_embedding,
# features["source_ids"])
# encoder_fn = self.encoder_class(self.params["encoder.params"], self.mode)
# return encoder_fn(source_embedded, features["source_len"])
# 1. query source encoder sequence output
query_embedded = tf.nn.embedding_lookup(self.source_embedding,
features["source_ids"])
query_encoder_fn = self.encoder_class(self.params["encoder.params"], self.mode)
query_output = query_encoder_fn(query_embedded, features["source_len"])
# return query_output
# 2. candidate source encoder sequence output
candidate_embedded = tf.nn.embedding_lookup(self.source_candidate_embedding,
features["source_candidate_ids"])
candidate_encoder_fn = self.encoder_class(self.params["encoder.params"], self.mode)
candidate_output = candidate_encoder_fn(candidate_embedded, features["source_candidate_len"])
print("query_output:{}".format(query_output))
print("candidate_output:{}".format(candidate_output))
# 3. merge two encoder generated output
# outputs = tf.concat([query_output.outputs, candidate_output.outputs], 0)
# #final_state = tf.reshape(tf.concat([query_output.final_state, candidate_output.final_state], 0), [-1, 128])
# final_state = tf.concat([query_output.final_state, candidate_output.final_state], 0)
# attention_values = tf.concat([query_output.attention_values, candidate_output.attention_values], 0)
# att_v_len = tf.concat([query_output.attention_values_length, candidate_output.attention_values_length], 0)
outputs = query_output.outputs + candidate_output.outputs
final_state = (query_output.final_state[0] + candidate_output.final_state[0],
query_output.final_state[1] + candidate_output.final_state[1])
attention_values = query_output.attention_values + candidate_output.attention_values
att_v_len = query_output.attention_values_length + candidate_output.attention_values_length
encoderOutput = EncoderOutput(outputs=outputs,
final_state=final_state,
attention_values=attention_values,
attention_values_length=att_v_len)
print("encoderOutput:{}".format(encoderOutput))
return encoderOutput
@templatemethod("decode")
def decode(self, encoder_output, features, labels):
decoder = self._create_decoder(encoder_output, features, labels)
if self.use_beam_search:
decoder = self._get_beam_search_decoder(decoder)
bridge = self._create_bridge(
encoder_outputs=encoder_output,
decoder_state_size=decoder.cell.state_size)
if self.mode == tf.contrib.learn.ModeKeys.INFER:
return self._decode_infer(decoder, bridge, encoder_output, features,
labels)
else:
return self._decode_train(decoder, bridge, encoder_output, features,
labels)
@property
@templatemethod("source_candidate_embedding")
def source_candidate_embedding(self):
"""Returns the embedding used for the source sequence.
"""
return tf.get_variable(
name="W_candidate",
shape=[self.source_candidate_vocab_info.total_size, self.params["embedding.dim"]],
initializer=tf.random_uniform_initializer(
-self.params["embedding.init_scale"],
self.params["embedding.init_scale"]))
def _preprocess(self, features, labels):
"""Model-specific preprocessing for features and labels:
- Creates vocabulary lookup tables for source and target vocab
- Converts tokens into vocabulary ids
"""
# Create vocabulary lookup for source
source_vocab_to_id, source_id_to_vocab, source_word_to_count, _ = \
vocab.create_vocabulary_lookup_table(self.source_vocab_info.path)
source_candidate_vocab_to_id, source_candidate_id_to_vocab, source_candidate_word_to_count, _ = \
vocab.create_vocabulary_lookup_table(self.source_candidate_vocab_info.path)
    # Create vocabulary lookup for target
target_vocab_to_id, target_id_to_vocab, target_word_to_count, _ = \
vocab.create_vocabulary_lookup_table(self.target_vocab_info.path)
    # Add vocab tables to graph collection so that we can access them in
# other places.
graph_utils.add_dict_to_collection({
"source_vocab_to_id": source_vocab_to_id,
"source_id_to_vocab": source_id_to_vocab,
"source_word_to_count": source_word_to_count,
"source_candidate_vocab_to_id": source_candidate_vocab_to_id,
"source_candidate_id_to_vocab": source_candidate_id_to_vocab,
"source_candidate_word_to_count": source_candidate_word_to_count,
"target_vocab_to_id": target_vocab_to_id,
"target_id_to_vocab": target_id_to_vocab,
"target_word_to_count": target_word_to_count
}, "vocab_tables")
# Slice source to max_len
if self.params["source.max_seq_len"] is not None:
features["source_tokens"] = features["source_tokens"][:, :self.params[
"source.max_seq_len"]]
features["source_len"] = tf.minimum(features["source_len"],
self.params["source.max_seq_len"])
# Slice source_candidate to max_len
if self.params["source_candidate.max_seq_len"] is not None:
features["source_candidate_tokens"] = features["source_candidate_tokens"][:, :self.params[
"source_candidate.max_seq_len"]]
features["source_candidate_len"] = tf.minimum(features["source_candidate_len"],
self.params["source_candidate.max_seq_len"])
# Look up the source ids in the vocabulary
features["source_ids"] = source_vocab_to_id.lookup(features[
"source_tokens"])
features["source_candidate_ids"] = source_candidate_vocab_to_id.lookup(features[
"source_candidate_tokens"])
# Maybe reverse the source
if self.params["source.reverse"] is True:
features["source_ids"] = tf.reverse_sequence(
input=features["source_ids"],
seq_lengths=features["source_len"],
seq_dim=1,
batch_dim=0,
name=None)
features["source_candidate_ids"] = tf.reverse_sequence(
input=features["source_candidate_ids"],
seq_lengths=features["source_candidate_len"],
seq_dim=1,
batch_dim=0,
name=None)
features["source_len"] = tf.to_int32(features["source_len"])
tf.summary.histogram("source_len", tf.to_float(features["source_len"]))
features["source_candidate_len"] = tf.to_int32(features["source_candidate_len"])
tf.summary.histogram("source_candidate_len", tf.to_float(features["source_candidate_len"]))
if labels is None:
return features, None
labels = labels.copy()
# Slices targets to max length
if self.params["target.max_seq_len"] is not None:
labels["target_tokens"] = labels["target_tokens"][:, :self.params[
"target.max_seq_len"]]
labels["target_len"] = tf.minimum(labels["target_len"],
self.params["target.max_seq_len"])
# Look up the target ids in the vocabulary
labels["target_ids"] = target_vocab_to_id.lookup(labels["target_tokens"])
labels["target_len"] = tf.to_int32(labels["target_len"])
tf.summary.histogram("target_len", tf.to_float(labels["target_len"]))
# Keep track of the number of processed tokens
num_tokens = tf.reduce_sum(labels["target_len"])
num_tokens += tf.reduce_sum(features["source_len"])
num_tokens += tf.reduce_sum(features["source_candidate_len"])
token_counter_var = tf.Variable(0, "tokens_counter")
total_tokens = tf.assign_add(token_counter_var, num_tokens)
tf.summary.scalar("num_tokens", total_tokens)
with tf.control_dependencies([total_tokens]):
features["source_tokens"] = tf.identity(features["source_tokens"])
features["source_candidate_tokens"] = tf.identity(features["source_candidate_tokens"])
# Add to graph collection for later use
graph_utils.add_dict_to_collection(features, "features")
if labels:
graph_utils.add_dict_to_collection(labels, "labels")
print("attention_biseqseq features:{} labels:{}".format(features, labels))
return features, labels
|
python
|
import random
import sys
def main():
amount = 1000000
min = 0
max = sys.maxsize #To get some big integer
#Fixed Length from 2 to 6
for i in range(2,7):
result = 0
#Generate N amount of array with fixed length above
for a in range(amount):
array = []
for l in range(i):
array.append(random.randint(min, max))
resultGCD = gcd(array)
            #Calculate the percentage of set-wise coprime vectors
if resultGCD == 1:
result += 1
percentage = round(result/amount*100, 2)
print(f"The percentage of set-wise coprime vectors among {amount} vectors with fixed length = {i} and range = [{min},{max}) is equal to {percentage}%")
#Function to calculate gcd of arrays with 2 and more elements
def gcd(array):
num1 = array[0]
num2 = array[1]
gcd = find_gcd(num1, num2)
for i in range(2, len(array)):
gcd = find_gcd(gcd, array[i])
return gcd
def find_gcd(a,b):
if(b==0):
return a
else:
return find_gcd(b,a%b)
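# Quick check of the helpers above: find_gcd(12, 18) == 6, and
# gcd([12, 18, 24]) folds that result with 24, so it also returns 6.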
if __name__ == '__main__':
main()
|
python
|
# some changes
|
python
|
from scapy.all import *
interface = 'mon0'
ap_list = []
def info(fm):
if fm.haslayer(Dot11):
if ((fm.type == 0) & (fm.subtype==8)):
if fm.addr2 not in ap_list:
ap_list.append(fm.addr2)
print "SSID--> ",fm.info,"-- BSSID --> ",fm.addr2
sniff(iface=interface,prn=info)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/4/17 13:48
# @File : db.py
# @Role : ORM
from datetime import datetime
from sqlalchemy import Column, String, Integer, DateTime, UniqueConstraint, DECIMAL
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import class_mapper
from libs.common import get_factor_num_with_xlarge
Base = declarative_base()
def model_to_dict(model, time_strf=None):
model_dict = {}
time_strf = time_strf or "%Y-%m-%d %H:%M:%S"
for key, column in class_mapper(model.__class__).c.items():
value = getattr(model, key, None)
if isinstance(value, datetime):
value = value.strftime(time_strf)
model_dict[column.name] = value
return model_dict
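# Minimal usage sketch (illustrative only; assumes a configured SQLAlchemy session):
#   row = session.query(ResourceUsage).first()
#   print(model_to_dict(row, time_strf="%Y-%m-%d"))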
class ResourceUsage(Base):
__tablename__ = 'resource_usage'
id = Column('id', Integer, primary_key=True, autoincrement=True)
    ec2_id = Column('ec2_id', String(128), nullable=False)  # Instance ID
    cpu_usage = Column('cpu_usage', Integer(), nullable=True)  # CPU usage (%)
    mem_usage = Column('mem_usage', Integer(), nullable=True)  # Memory usage (%)
    disk_usage = Column('disk_usage', Integer(), nullable=True)  # Disk usage (%)
    date = Column('date', DateTime(), nullable=False)  # Month
__table_args__ = (
UniqueConstraint('ec2_id', 'date', name='uix_ec2_date'),
)
class UsageReport(Base):
__tablename__ = 'usage_report'
id = Column('id', Integer, primary_key=True, autoincrement=True)
    ec2_id = Column('ec2_id', String(128), nullable=False)  # Instance ID
    host_name = Column('host_name', String(128), nullable=False)  # Instance name
    project_name = Column('project_name', String(128), nullable=False)  # Project name
    cpu_avg_usage = Column('cpu_avg_usage', Integer(), nullable=False)  # Average CPU usage for the month
    mem_avg_usage = Column('mem_avg_usage', Integer(), nullable=False)  # Average memory usage for the month
    disk_avg_usage = Column('disk_avg_usage', Integer(), nullable=False)  # Average disk usage for the month
    curr_inst_type = Column('curr_inst_type', String(128), nullable=False)  # Current instance type
    suggest_inst_type = Column('suggest_inst_type', String(128), nullable=False)  # Suggested instance type
    cost_gap = Column('cost_gap', DECIMAL(10, 5), nullable=True)  # Cost difference
    month = Column('month', DateTime(), nullable=False)  # Month
__table_args__ = (
UniqueConstraint('ec2_id', 'month', name='uix_ec2_month'),
)
class AWSRiUsageReport(Base):
__tablename__ = 'aws_ri_usage_report'
id = Column('id', Integer, primary_key=True, autoincrement=True)
    family = Column('family', String(128), nullable=True)  # Instance family
    size = Column('size', String(128), nullable=True)  # Instance size; defaults to xlarge on the Linux platform
    platform = Column('platform', String(128), nullable=True)  # Platform
    # available_zone = Column('available_zone', String(128), nullable=True)  # Availability zone
    total_running = Column('total_running', DECIMAL(10, 5), nullable=True)  # Current running count
    total_ri = Column('total_ri', DECIMAL(10, 5), nullable=True)  # Number of RIs purchased
    coverage_rate = Column('coverage_rate', DECIMAL(10, 5), nullable=True)  # RI coverage rate
    date = Column('date', DateTime(), nullable=True)  # Month
__table_args__ = (
UniqueConstraint('family', 'size', 'platform', 'date', name='uix_date'),
)
def merge(self, AWSRiUsageReport):
if self.family == AWSRiUsageReport.family \
and self.platform == AWSRiUsageReport.platform:
if self.platform == "UNIX/Linux":
total_running = get_factor_num_with_xlarge(AWSRiUsageReport.size) * AWSRiUsageReport.total_running
                total_ri = get_factor_num_with_xlarge(AWSRiUsageReport.size) * AWSRiUsageReport.total_ri
elif self.size == AWSRiUsageReport.size:
total_running = AWSRiUsageReport.total_running
total_ri = AWSRiUsageReport.total_ri
else:
return False
self.total_running += total_running
self.total_ri += total_ri
return True
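    # Example of the Linux normalization above (illustrative; assumes get_factor_num_with_xlarge
    # returns the size factor relative to xlarge, e.g. 2 for "2xlarge"): a merged row reporting
    # two running 2xlarge instances contributes 2 * 2 = 4 xlarge-equivalents to total_running.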
class AwsTaskQueue(Base):
__tablename__ = 'aws_task_queue'
id = Column('id', Integer, primary_key=True, autoincrement=True)
    date = Column('date', DateTime(), nullable=True)  # Task time
    task_name = Column('task_name', String(128), nullable=True)  # Task type
    status = Column('status', Integer, nullable=True)  # Status: 0 = pending, 1 = failed, 2 = completed.
class AwsProjectBillReport(Base):
__tablename__ = 'aws_project_bill_report'
id = Column('id', Integer, primary_key=True, autoincrement=True)
    userproject = Column('userproject', String(32), nullable=True)  # Project
    ec2_cost = Column('ec2_cost', DECIMAL(10, 5), nullable=True)  # ec2
    ebs_cost = Column('ebs_cost', DECIMAL(10, 5), nullable=True)  # ebs
    snapshot_cost = Column('snapshot_cost', DECIMAL(10, 5), nullable=True)  # snapshot
    s3_cost = Column('s3_cost', DECIMAL(10, 5), nullable=True)  # s3
    rds_cost = Column('rds_cost', DECIMAL(10, 5), nullable=True)  # rds
    elasticache_cost = Column('elasticache_cost', DECIMAL(10, 5), nullable=True)  # ElastiCache
    credit = Column('credit', DECIMAL(10, 5), nullable=True)  # Credit
    no_reserved_ri_cost = Column('no_reserved_ri_cost', DECIMAL(10, 5), nullable=True)  # no_reserved_ri_cost
    support_cost = Column('support_cost', DECIMAL(10, 5), nullable=True)  # Support cost
    t_a_x = Column('t_a_x', DECIMAL(10, 5), nullable=True)  # Tax
    aws_total_cost = Column('aws_total_cost', DECIMAL(10, 5), nullable=True)  # Total cost
    bill_date = Column('bill_date', DateTime(), nullable=True)  # Billing date
__table_args__ = (
UniqueConstraint('userproject', 'bill_date', name='unix'),
)
class AwsServiceBillReport(Base):
__tablename__ = 'aws_service_bill_report'
id = Column('id', Integer, primary_key=True, autoincrement=True)
    resource_id = Column('resource_id', String(64), nullable=True)  # Resource ID
    Tag_AppName = Column('Tag_AppName', String(64), nullable=True)  # Application name
    Tag_Center = Column('Tag_Center', String(64), nullable=True)  # Data center name
    Tag_Team = Column('Tag_Team', String(64), nullable=True)  # Team / org unit name
    Traffic_charges = Column('Traffic_charges', DECIMAL(10, 5), nullable=True)  # Traffic cost
    service_name = Column('service_name', String(64), nullable=True)  # Service name
    userproject = Column('userproject', String(32), nullable=True)  # Project name
    total_cost = Column('total_cost', DECIMAL(10, 5), nullable=True)  # Cost
    bill_date = Column('bill_date', DateTime(), nullable=True)  # Billing date
__table_args__ = (
UniqueConstraint('userproject', 'service_name', 'resource_id', 'bill_date', name='unix'),
)
class AWSRiDateDB(Base):
__tablename__ = 'aws_ri_date_num'
id = Column('id', Integer, primary_key=True, autoincrement=True)
    family = Column('family', String(128), nullable=True)  # Instance family
    size = Column('size', String(128), nullable=True)  # Instance size; defaults to xlarge on the Linux platform
    platform = Column('platform', String(128), nullable=True)  # Platform
    total_ri = Column('total_ri', DECIMAL(10, 5), nullable=True)  # Number of RIs purchased
    end = Column('end', String(128), nullable=True)  # Expiration time
class AwsProjectBudgetControl(Base):
__tablename__ = 'aws_project_budget'
id = Column('id', Integer, primary_key=True, autoincrement=True)
    userproject = Column('userproject', String(32), nullable=True)  # Project
    aws_total_cost = Column('aws_total_cost', DECIMAL(10, 5), nullable=True)  # Total cost
    aws_budget_cost = Column('aws_budget_cost', DECIMAL(10, 5), nullable=True)  # Budget cost
    aws_alert_percentage = Column('aws_alert_percentage', DECIMAL(10, 5), default=1.2)  # Alert threshold percentage
    aws_percentage = Column('aws_percentage', DECIMAL(10, 5), nullable=True)  # Cost percentage
    bill_date = Column('bill_date', DateTime(), nullable=True)  # Billing date
|
python
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['GlobalClientArgs', 'GlobalClient']
@pulumi.input_type
class GlobalClientArgs:
def __init__(__self__, *,
addons: Optional[pulumi.Input['GlobalClientAddonsArgs']] = None,
allowed_logout_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
allowed_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
app_type: Optional[pulumi.Input[str]] = None,
callbacks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
client_secret_rotation_trigger: Optional[pulumi.Input[Mapping[str, Any]]] = None,
cross_origin_auth: Optional[pulumi.Input[bool]] = None,
cross_origin_loc: Optional[pulumi.Input[str]] = None,
custom_login_page: Optional[pulumi.Input[str]] = None,
custom_login_page_on: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
encryption_key: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
form_template: Optional[pulumi.Input[str]] = None,
grant_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
initiate_login_uri: Optional[pulumi.Input[str]] = None,
is_first_party: Optional[pulumi.Input[bool]] = None,
is_token_endpoint_ip_header_trusted: Optional[pulumi.Input[bool]] = None,
jwt_configuration: Optional[pulumi.Input['GlobalClientJwtConfigurationArgs']] = None,
logo_uri: Optional[pulumi.Input[str]] = None,
mobile: Optional[pulumi.Input['GlobalClientMobileArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
oidc_conformant: Optional[pulumi.Input[bool]] = None,
refresh_token: Optional[pulumi.Input['GlobalClientRefreshTokenArgs']] = None,
sso: Optional[pulumi.Input[bool]] = None,
sso_disabled: Optional[pulumi.Input[bool]] = None,
token_endpoint_auth_method: Optional[pulumi.Input[str]] = None,
web_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a GlobalClient resource.
"""
if addons is not None:
pulumi.set(__self__, "addons", addons)
if allowed_logout_urls is not None:
pulumi.set(__self__, "allowed_logout_urls", allowed_logout_urls)
if allowed_origins is not None:
pulumi.set(__self__, "allowed_origins", allowed_origins)
if app_type is not None:
pulumi.set(__self__, "app_type", app_type)
if callbacks is not None:
pulumi.set(__self__, "callbacks", callbacks)
if client_id is not None:
pulumi.set(__self__, "client_id", client_id)
if client_metadata is not None:
pulumi.set(__self__, "client_metadata", client_metadata)
if client_secret is not None:
pulumi.set(__self__, "client_secret", client_secret)
if client_secret_rotation_trigger is not None:
pulumi.set(__self__, "client_secret_rotation_trigger", client_secret_rotation_trigger)
if cross_origin_auth is not None:
pulumi.set(__self__, "cross_origin_auth", cross_origin_auth)
if cross_origin_loc is not None:
pulumi.set(__self__, "cross_origin_loc", cross_origin_loc)
if custom_login_page is not None:
pulumi.set(__self__, "custom_login_page", custom_login_page)
if custom_login_page_on is not None:
pulumi.set(__self__, "custom_login_page_on", custom_login_page_on)
if description is not None:
pulumi.set(__self__, "description", description)
if encryption_key is not None:
pulumi.set(__self__, "encryption_key", encryption_key)
if form_template is not None:
pulumi.set(__self__, "form_template", form_template)
if grant_types is not None:
pulumi.set(__self__, "grant_types", grant_types)
if initiate_login_uri is not None:
pulumi.set(__self__, "initiate_login_uri", initiate_login_uri)
if is_first_party is not None:
pulumi.set(__self__, "is_first_party", is_first_party)
if is_token_endpoint_ip_header_trusted is not None:
pulumi.set(__self__, "is_token_endpoint_ip_header_trusted", is_token_endpoint_ip_header_trusted)
if jwt_configuration is not None:
pulumi.set(__self__, "jwt_configuration", jwt_configuration)
if logo_uri is not None:
pulumi.set(__self__, "logo_uri", logo_uri)
if mobile is not None:
pulumi.set(__self__, "mobile", mobile)
if name is not None:
pulumi.set(__self__, "name", name)
if oidc_conformant is not None:
pulumi.set(__self__, "oidc_conformant", oidc_conformant)
if refresh_token is not None:
pulumi.set(__self__, "refresh_token", refresh_token)
if sso is not None:
pulumi.set(__self__, "sso", sso)
if sso_disabled is not None:
pulumi.set(__self__, "sso_disabled", sso_disabled)
if token_endpoint_auth_method is not None:
pulumi.set(__self__, "token_endpoint_auth_method", token_endpoint_auth_method)
if web_origins is not None:
pulumi.set(__self__, "web_origins", web_origins)
@property
@pulumi.getter
def addons(self) -> Optional[pulumi.Input['GlobalClientAddonsArgs']]:
return pulumi.get(self, "addons")
@addons.setter
def addons(self, value: Optional[pulumi.Input['GlobalClientAddonsArgs']]):
pulumi.set(self, "addons", value)
@property
@pulumi.getter(name="allowedLogoutUrls")
def allowed_logout_urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "allowed_logout_urls")
@allowed_logout_urls.setter
def allowed_logout_urls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_logout_urls", value)
@property
@pulumi.getter(name="allowedOrigins")
def allowed_origins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "allowed_origins")
@allowed_origins.setter
def allowed_origins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_origins", value)
@property
@pulumi.getter(name="appType")
def app_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "app_type")
@app_type.setter
def app_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_type", value)
@property
@pulumi.getter
def callbacks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "callbacks")
@callbacks.setter
def callbacks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "callbacks", value)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "client_id")
@client_id.setter
def client_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_id", value)
@property
@pulumi.getter(name="clientMetadata")
def client_metadata(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
return pulumi.get(self, "client_metadata")
@client_metadata.setter
def client_metadata(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "client_metadata", value)
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "client_secret")
@client_secret.setter
def client_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_secret", value)
@property
@pulumi.getter(name="clientSecretRotationTrigger")
def client_secret_rotation_trigger(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
return pulumi.get(self, "client_secret_rotation_trigger")
@client_secret_rotation_trigger.setter
def client_secret_rotation_trigger(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "client_secret_rotation_trigger", value)
@property
@pulumi.getter(name="crossOriginAuth")
def cross_origin_auth(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "cross_origin_auth")
@cross_origin_auth.setter
def cross_origin_auth(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "cross_origin_auth", value)
@property
@pulumi.getter(name="crossOriginLoc")
def cross_origin_loc(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "cross_origin_loc")
@cross_origin_loc.setter
def cross_origin_loc(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cross_origin_loc", value)
@property
@pulumi.getter(name="customLoginPage")
def custom_login_page(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "custom_login_page")
@custom_login_page.setter
def custom_login_page(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_login_page", value)
@property
@pulumi.getter(name="customLoginPageOn")
def custom_login_page_on(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "custom_login_page_on")
@custom_login_page_on.setter
def custom_login_page_on(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "custom_login_page_on", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="encryptionKey")
def encryption_key(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "encryption_key")
@encryption_key.setter
def encryption_key(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "encryption_key", value)
@property
@pulumi.getter(name="formTemplate")
def form_template(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "form_template")
@form_template.setter
def form_template(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "form_template", value)
@property
@pulumi.getter(name="grantTypes")
def grant_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "grant_types")
@grant_types.setter
def grant_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "grant_types", value)
@property
@pulumi.getter(name="initiateLoginUri")
def initiate_login_uri(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "initiate_login_uri")
@initiate_login_uri.setter
def initiate_login_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "initiate_login_uri", value)
@property
@pulumi.getter(name="isFirstParty")
def is_first_party(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "is_first_party")
@is_first_party.setter
def is_first_party(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_first_party", value)
@property
@pulumi.getter(name="isTokenEndpointIpHeaderTrusted")
def is_token_endpoint_ip_header_trusted(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "is_token_endpoint_ip_header_trusted")
@is_token_endpoint_ip_header_trusted.setter
def is_token_endpoint_ip_header_trusted(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_token_endpoint_ip_header_trusted", value)
@property
@pulumi.getter(name="jwtConfiguration")
def jwt_configuration(self) -> Optional[pulumi.Input['GlobalClientJwtConfigurationArgs']]:
return pulumi.get(self, "jwt_configuration")
@jwt_configuration.setter
def jwt_configuration(self, value: Optional[pulumi.Input['GlobalClientJwtConfigurationArgs']]):
pulumi.set(self, "jwt_configuration", value)
@property
@pulumi.getter(name="logoUri")
def logo_uri(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "logo_uri")
@logo_uri.setter
def logo_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "logo_uri", value)
@property
@pulumi.getter
def mobile(self) -> Optional[pulumi.Input['GlobalClientMobileArgs']]:
return pulumi.get(self, "mobile")
@mobile.setter
def mobile(self, value: Optional[pulumi.Input['GlobalClientMobileArgs']]):
pulumi.set(self, "mobile", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="oidcConformant")
def oidc_conformant(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "oidc_conformant")
@oidc_conformant.setter
def oidc_conformant(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "oidc_conformant", value)
@property
@pulumi.getter(name="refreshToken")
def refresh_token(self) -> Optional[pulumi.Input['GlobalClientRefreshTokenArgs']]:
return pulumi.get(self, "refresh_token")
@refresh_token.setter
def refresh_token(self, value: Optional[pulumi.Input['GlobalClientRefreshTokenArgs']]):
pulumi.set(self, "refresh_token", value)
@property
@pulumi.getter
def sso(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "sso")
@sso.setter
def sso(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "sso", value)
@property
@pulumi.getter(name="ssoDisabled")
def sso_disabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "sso_disabled")
@sso_disabled.setter
def sso_disabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "sso_disabled", value)
@property
@pulumi.getter(name="tokenEndpointAuthMethod")
def token_endpoint_auth_method(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "token_endpoint_auth_method")
@token_endpoint_auth_method.setter
def token_endpoint_auth_method(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "token_endpoint_auth_method", value)
@property
@pulumi.getter(name="webOrigins")
def web_origins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "web_origins")
@web_origins.setter
def web_origins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "web_origins", value)
@pulumi.input_type
class _GlobalClientState:
def __init__(__self__, *,
addons: Optional[pulumi.Input['GlobalClientAddonsArgs']] = None,
allowed_logout_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
allowed_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
app_type: Optional[pulumi.Input[str]] = None,
callbacks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
client_secret_rotation_trigger: Optional[pulumi.Input[Mapping[str, Any]]] = None,
cross_origin_auth: Optional[pulumi.Input[bool]] = None,
cross_origin_loc: Optional[pulumi.Input[str]] = None,
custom_login_page: Optional[pulumi.Input[str]] = None,
custom_login_page_on: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
encryption_key: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
form_template: Optional[pulumi.Input[str]] = None,
grant_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
initiate_login_uri: Optional[pulumi.Input[str]] = None,
is_first_party: Optional[pulumi.Input[bool]] = None,
is_token_endpoint_ip_header_trusted: Optional[pulumi.Input[bool]] = None,
jwt_configuration: Optional[pulumi.Input['GlobalClientJwtConfigurationArgs']] = None,
logo_uri: Optional[pulumi.Input[str]] = None,
mobile: Optional[pulumi.Input['GlobalClientMobileArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
oidc_conformant: Optional[pulumi.Input[bool]] = None,
refresh_token: Optional[pulumi.Input['GlobalClientRefreshTokenArgs']] = None,
sso: Optional[pulumi.Input[bool]] = None,
sso_disabled: Optional[pulumi.Input[bool]] = None,
token_endpoint_auth_method: Optional[pulumi.Input[str]] = None,
web_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering GlobalClient resources.
"""
if addons is not None:
pulumi.set(__self__, "addons", addons)
if allowed_logout_urls is not None:
pulumi.set(__self__, "allowed_logout_urls", allowed_logout_urls)
if allowed_origins is not None:
pulumi.set(__self__, "allowed_origins", allowed_origins)
if app_type is not None:
pulumi.set(__self__, "app_type", app_type)
if callbacks is not None:
pulumi.set(__self__, "callbacks", callbacks)
if client_id is not None:
pulumi.set(__self__, "client_id", client_id)
if client_metadata is not None:
pulumi.set(__self__, "client_metadata", client_metadata)
if client_secret is not None:
pulumi.set(__self__, "client_secret", client_secret)
if client_secret_rotation_trigger is not None:
pulumi.set(__self__, "client_secret_rotation_trigger", client_secret_rotation_trigger)
if cross_origin_auth is not None:
pulumi.set(__self__, "cross_origin_auth", cross_origin_auth)
if cross_origin_loc is not None:
pulumi.set(__self__, "cross_origin_loc", cross_origin_loc)
if custom_login_page is not None:
pulumi.set(__self__, "custom_login_page", custom_login_page)
if custom_login_page_on is not None:
pulumi.set(__self__, "custom_login_page_on", custom_login_page_on)
if description is not None:
pulumi.set(__self__, "description", description)
if encryption_key is not None:
pulumi.set(__self__, "encryption_key", encryption_key)
if form_template is not None:
pulumi.set(__self__, "form_template", form_template)
if grant_types is not None:
pulumi.set(__self__, "grant_types", grant_types)
if initiate_login_uri is not None:
pulumi.set(__self__, "initiate_login_uri", initiate_login_uri)
if is_first_party is not None:
pulumi.set(__self__, "is_first_party", is_first_party)
if is_token_endpoint_ip_header_trusted is not None:
pulumi.set(__self__, "is_token_endpoint_ip_header_trusted", is_token_endpoint_ip_header_trusted)
if jwt_configuration is not None:
pulumi.set(__self__, "jwt_configuration", jwt_configuration)
if logo_uri is not None:
pulumi.set(__self__, "logo_uri", logo_uri)
if mobile is not None:
pulumi.set(__self__, "mobile", mobile)
if name is not None:
pulumi.set(__self__, "name", name)
if oidc_conformant is not None:
pulumi.set(__self__, "oidc_conformant", oidc_conformant)
if refresh_token is not None:
pulumi.set(__self__, "refresh_token", refresh_token)
if sso is not None:
pulumi.set(__self__, "sso", sso)
if sso_disabled is not None:
pulumi.set(__self__, "sso_disabled", sso_disabled)
if token_endpoint_auth_method is not None:
pulumi.set(__self__, "token_endpoint_auth_method", token_endpoint_auth_method)
if web_origins is not None:
pulumi.set(__self__, "web_origins", web_origins)
@property
@pulumi.getter
def addons(self) -> Optional[pulumi.Input['GlobalClientAddonsArgs']]:
return pulumi.get(self, "addons")
@addons.setter
def addons(self, value: Optional[pulumi.Input['GlobalClientAddonsArgs']]):
pulumi.set(self, "addons", value)
@property
@pulumi.getter(name="allowedLogoutUrls")
def allowed_logout_urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "allowed_logout_urls")
@allowed_logout_urls.setter
def allowed_logout_urls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_logout_urls", value)
@property
@pulumi.getter(name="allowedOrigins")
def allowed_origins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "allowed_origins")
@allowed_origins.setter
def allowed_origins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_origins", value)
@property
@pulumi.getter(name="appType")
def app_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "app_type")
@app_type.setter
def app_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_type", value)
@property
@pulumi.getter
def callbacks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "callbacks")
@callbacks.setter
def callbacks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "callbacks", value)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "client_id")
@client_id.setter
def client_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_id", value)
@property
@pulumi.getter(name="clientMetadata")
def client_metadata(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
return pulumi.get(self, "client_metadata")
@client_metadata.setter
def client_metadata(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "client_metadata", value)
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "client_secret")
@client_secret.setter
def client_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_secret", value)
@property
@pulumi.getter(name="clientSecretRotationTrigger")
def client_secret_rotation_trigger(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
return pulumi.get(self, "client_secret_rotation_trigger")
@client_secret_rotation_trigger.setter
def client_secret_rotation_trigger(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "client_secret_rotation_trigger", value)
@property
@pulumi.getter(name="crossOriginAuth")
def cross_origin_auth(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "cross_origin_auth")
@cross_origin_auth.setter
def cross_origin_auth(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "cross_origin_auth", value)
@property
@pulumi.getter(name="crossOriginLoc")
def cross_origin_loc(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "cross_origin_loc")
@cross_origin_loc.setter
def cross_origin_loc(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cross_origin_loc", value)
@property
@pulumi.getter(name="customLoginPage")
def custom_login_page(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "custom_login_page")
@custom_login_page.setter
def custom_login_page(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_login_page", value)
@property
@pulumi.getter(name="customLoginPageOn")
def custom_login_page_on(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "custom_login_page_on")
@custom_login_page_on.setter
def custom_login_page_on(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "custom_login_page_on", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="encryptionKey")
def encryption_key(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "encryption_key")
@encryption_key.setter
def encryption_key(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "encryption_key", value)
@property
@pulumi.getter(name="formTemplate")
def form_template(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "form_template")
@form_template.setter
def form_template(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "form_template", value)
@property
@pulumi.getter(name="grantTypes")
def grant_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "grant_types")
@grant_types.setter
def grant_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "grant_types", value)
@property
@pulumi.getter(name="initiateLoginUri")
def initiate_login_uri(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "initiate_login_uri")
@initiate_login_uri.setter
def initiate_login_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "initiate_login_uri", value)
@property
@pulumi.getter(name="isFirstParty")
def is_first_party(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "is_first_party")
@is_first_party.setter
def is_first_party(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_first_party", value)
@property
@pulumi.getter(name="isTokenEndpointIpHeaderTrusted")
def is_token_endpoint_ip_header_trusted(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "is_token_endpoint_ip_header_trusted")
@is_token_endpoint_ip_header_trusted.setter
def is_token_endpoint_ip_header_trusted(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_token_endpoint_ip_header_trusted", value)
@property
@pulumi.getter(name="jwtConfiguration")
def jwt_configuration(self) -> Optional[pulumi.Input['GlobalClientJwtConfigurationArgs']]:
return pulumi.get(self, "jwt_configuration")
@jwt_configuration.setter
def jwt_configuration(self, value: Optional[pulumi.Input['GlobalClientJwtConfigurationArgs']]):
pulumi.set(self, "jwt_configuration", value)
@property
@pulumi.getter(name="logoUri")
def logo_uri(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "logo_uri")
@logo_uri.setter
def logo_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "logo_uri", value)
@property
@pulumi.getter
def mobile(self) -> Optional[pulumi.Input['GlobalClientMobileArgs']]:
return pulumi.get(self, "mobile")
@mobile.setter
def mobile(self, value: Optional[pulumi.Input['GlobalClientMobileArgs']]):
pulumi.set(self, "mobile", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="oidcConformant")
def oidc_conformant(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "oidc_conformant")
@oidc_conformant.setter
def oidc_conformant(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "oidc_conformant", value)
@property
@pulumi.getter(name="refreshToken")
def refresh_token(self) -> Optional[pulumi.Input['GlobalClientRefreshTokenArgs']]:
return pulumi.get(self, "refresh_token")
@refresh_token.setter
def refresh_token(self, value: Optional[pulumi.Input['GlobalClientRefreshTokenArgs']]):
pulumi.set(self, "refresh_token", value)
@property
@pulumi.getter
def sso(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "sso")
@sso.setter
def sso(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "sso", value)
@property
@pulumi.getter(name="ssoDisabled")
def sso_disabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "sso_disabled")
@sso_disabled.setter
def sso_disabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "sso_disabled", value)
@property
@pulumi.getter(name="tokenEndpointAuthMethod")
def token_endpoint_auth_method(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "token_endpoint_auth_method")
@token_endpoint_auth_method.setter
def token_endpoint_auth_method(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "token_endpoint_auth_method", value)
@property
@pulumi.getter(name="webOrigins")
def web_origins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "web_origins")
@web_origins.setter
def web_origins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "web_origins", value)
class GlobalClient(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
addons: Optional[pulumi.Input[pulumi.InputType['GlobalClientAddonsArgs']]] = None,
allowed_logout_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
allowed_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
app_type: Optional[pulumi.Input[str]] = None,
callbacks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
client_secret_rotation_trigger: Optional[pulumi.Input[Mapping[str, Any]]] = None,
cross_origin_auth: Optional[pulumi.Input[bool]] = None,
cross_origin_loc: Optional[pulumi.Input[str]] = None,
custom_login_page: Optional[pulumi.Input[str]] = None,
custom_login_page_on: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
encryption_key: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
form_template: Optional[pulumi.Input[str]] = None,
grant_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
initiate_login_uri: Optional[pulumi.Input[str]] = None,
is_first_party: Optional[pulumi.Input[bool]] = None,
is_token_endpoint_ip_header_trusted: Optional[pulumi.Input[bool]] = None,
jwt_configuration: Optional[pulumi.Input[pulumi.InputType['GlobalClientJwtConfigurationArgs']]] = None,
logo_uri: Optional[pulumi.Input[str]] = None,
mobile: Optional[pulumi.Input[pulumi.InputType['GlobalClientMobileArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
oidc_conformant: Optional[pulumi.Input[bool]] = None,
refresh_token: Optional[pulumi.Input[pulumi.InputType['GlobalClientRefreshTokenArgs']]] = None,
sso: Optional[pulumi.Input[bool]] = None,
sso_disabled: Optional[pulumi.Input[bool]] = None,
token_endpoint_auth_method: Optional[pulumi.Input[str]] = None,
web_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
"""
Create a GlobalClient resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[GlobalClientArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a GlobalClient resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param GlobalClientArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(GlobalClientArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
addons: Optional[pulumi.Input[pulumi.InputType['GlobalClientAddonsArgs']]] = None,
allowed_logout_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
allowed_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
app_type: Optional[pulumi.Input[str]] = None,
callbacks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
client_secret_rotation_trigger: Optional[pulumi.Input[Mapping[str, Any]]] = None,
cross_origin_auth: Optional[pulumi.Input[bool]] = None,
cross_origin_loc: Optional[pulumi.Input[str]] = None,
custom_login_page: Optional[pulumi.Input[str]] = None,
custom_login_page_on: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
encryption_key: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
form_template: Optional[pulumi.Input[str]] = None,
grant_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
initiate_login_uri: Optional[pulumi.Input[str]] = None,
is_first_party: Optional[pulumi.Input[bool]] = None,
is_token_endpoint_ip_header_trusted: Optional[pulumi.Input[bool]] = None,
jwt_configuration: Optional[pulumi.Input[pulumi.InputType['GlobalClientJwtConfigurationArgs']]] = None,
logo_uri: Optional[pulumi.Input[str]] = None,
mobile: Optional[pulumi.Input[pulumi.InputType['GlobalClientMobileArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
oidc_conformant: Optional[pulumi.Input[bool]] = None,
refresh_token: Optional[pulumi.Input[pulumi.InputType['GlobalClientRefreshTokenArgs']]] = None,
sso: Optional[pulumi.Input[bool]] = None,
sso_disabled: Optional[pulumi.Input[bool]] = None,
token_endpoint_auth_method: Optional[pulumi.Input[str]] = None,
web_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = GlobalClientArgs.__new__(GlobalClientArgs)
__props__.__dict__["addons"] = addons
__props__.__dict__["allowed_logout_urls"] = allowed_logout_urls
__props__.__dict__["allowed_origins"] = allowed_origins
__props__.__dict__["app_type"] = app_type
__props__.__dict__["callbacks"] = callbacks
__props__.__dict__["client_id"] = client_id
__props__.__dict__["client_metadata"] = client_metadata
__props__.__dict__["client_secret"] = client_secret
__props__.__dict__["client_secret_rotation_trigger"] = client_secret_rotation_trigger
__props__.__dict__["cross_origin_auth"] = cross_origin_auth
__props__.__dict__["cross_origin_loc"] = cross_origin_loc
__props__.__dict__["custom_login_page"] = custom_login_page
__props__.__dict__["custom_login_page_on"] = custom_login_page_on
__props__.__dict__["description"] = description
__props__.__dict__["encryption_key"] = encryption_key
__props__.__dict__["form_template"] = form_template
__props__.__dict__["grant_types"] = grant_types
__props__.__dict__["initiate_login_uri"] = initiate_login_uri
__props__.__dict__["is_first_party"] = is_first_party
__props__.__dict__["is_token_endpoint_ip_header_trusted"] = is_token_endpoint_ip_header_trusted
__props__.__dict__["jwt_configuration"] = jwt_configuration
__props__.__dict__["logo_uri"] = logo_uri
__props__.__dict__["mobile"] = mobile
__props__.__dict__["name"] = name
__props__.__dict__["oidc_conformant"] = oidc_conformant
__props__.__dict__["refresh_token"] = refresh_token
__props__.__dict__["sso"] = sso
__props__.__dict__["sso_disabled"] = sso_disabled
__props__.__dict__["token_endpoint_auth_method"] = token_endpoint_auth_method
__props__.__dict__["web_origins"] = web_origins
super(GlobalClient, __self__).__init__(
'auth0:index/globalClient:GlobalClient',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
addons: Optional[pulumi.Input[pulumi.InputType['GlobalClientAddonsArgs']]] = None,
allowed_logout_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
allowed_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
app_type: Optional[pulumi.Input[str]] = None,
callbacks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
client_secret_rotation_trigger: Optional[pulumi.Input[Mapping[str, Any]]] = None,
cross_origin_auth: Optional[pulumi.Input[bool]] = None,
cross_origin_loc: Optional[pulumi.Input[str]] = None,
custom_login_page: Optional[pulumi.Input[str]] = None,
custom_login_page_on: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
encryption_key: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
form_template: Optional[pulumi.Input[str]] = None,
grant_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
initiate_login_uri: Optional[pulumi.Input[str]] = None,
is_first_party: Optional[pulumi.Input[bool]] = None,
is_token_endpoint_ip_header_trusted: Optional[pulumi.Input[bool]] = None,
jwt_configuration: Optional[pulumi.Input[pulumi.InputType['GlobalClientJwtConfigurationArgs']]] = None,
logo_uri: Optional[pulumi.Input[str]] = None,
mobile: Optional[pulumi.Input[pulumi.InputType['GlobalClientMobileArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
oidc_conformant: Optional[pulumi.Input[bool]] = None,
refresh_token: Optional[pulumi.Input[pulumi.InputType['GlobalClientRefreshTokenArgs']]] = None,
sso: Optional[pulumi.Input[bool]] = None,
sso_disabled: Optional[pulumi.Input[bool]] = None,
token_endpoint_auth_method: Optional[pulumi.Input[str]] = None,
web_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'GlobalClient':
"""
Get an existing GlobalClient resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _GlobalClientState.__new__(_GlobalClientState)
__props__.__dict__["addons"] = addons
__props__.__dict__["allowed_logout_urls"] = allowed_logout_urls
__props__.__dict__["allowed_origins"] = allowed_origins
__props__.__dict__["app_type"] = app_type
__props__.__dict__["callbacks"] = callbacks
__props__.__dict__["client_id"] = client_id
__props__.__dict__["client_metadata"] = client_metadata
__props__.__dict__["client_secret"] = client_secret
__props__.__dict__["client_secret_rotation_trigger"] = client_secret_rotation_trigger
__props__.__dict__["cross_origin_auth"] = cross_origin_auth
__props__.__dict__["cross_origin_loc"] = cross_origin_loc
__props__.__dict__["custom_login_page"] = custom_login_page
__props__.__dict__["custom_login_page_on"] = custom_login_page_on
__props__.__dict__["description"] = description
__props__.__dict__["encryption_key"] = encryption_key
__props__.__dict__["form_template"] = form_template
__props__.__dict__["grant_types"] = grant_types
__props__.__dict__["initiate_login_uri"] = initiate_login_uri
__props__.__dict__["is_first_party"] = is_first_party
__props__.__dict__["is_token_endpoint_ip_header_trusted"] = is_token_endpoint_ip_header_trusted
__props__.__dict__["jwt_configuration"] = jwt_configuration
__props__.__dict__["logo_uri"] = logo_uri
__props__.__dict__["mobile"] = mobile
__props__.__dict__["name"] = name
__props__.__dict__["oidc_conformant"] = oidc_conformant
__props__.__dict__["refresh_token"] = refresh_token
__props__.__dict__["sso"] = sso
__props__.__dict__["sso_disabled"] = sso_disabled
__props__.__dict__["token_endpoint_auth_method"] = token_endpoint_auth_method
__props__.__dict__["web_origins"] = web_origins
return GlobalClient(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def addons(self) -> pulumi.Output['outputs.GlobalClientAddons']:
return pulumi.get(self, "addons")
@property
@pulumi.getter(name="allowedLogoutUrls")
def allowed_logout_urls(self) -> pulumi.Output[Sequence[str]]:
return pulumi.get(self, "allowed_logout_urls")
@property
@pulumi.getter(name="allowedOrigins")
def allowed_origins(self) -> pulumi.Output[Sequence[str]]:
return pulumi.get(self, "allowed_origins")
@property
@pulumi.getter(name="appType")
def app_type(self) -> pulumi.Output[str]:
return pulumi.get(self, "app_type")
@property
@pulumi.getter
def callbacks(self) -> pulumi.Output[Sequence[str]]:
return pulumi.get(self, "callbacks")
@property
@pulumi.getter(name="clientId")
def client_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "client_id")
@property
@pulumi.getter(name="clientMetadata")
def client_metadata(self) -> pulumi.Output[Mapping[str, Any]]:
return pulumi.get(self, "client_metadata")
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> pulumi.Output[str]:
return pulumi.get(self, "client_secret")
@property
@pulumi.getter(name="clientSecretRotationTrigger")
def client_secret_rotation_trigger(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
return pulumi.get(self, "client_secret_rotation_trigger")
@property
@pulumi.getter(name="crossOriginAuth")
def cross_origin_auth(self) -> pulumi.Output[bool]:
return pulumi.get(self, "cross_origin_auth")
@property
@pulumi.getter(name="crossOriginLoc")
def cross_origin_loc(self) -> pulumi.Output[str]:
return pulumi.get(self, "cross_origin_loc")
@property
@pulumi.getter(name="customLoginPage")
def custom_login_page(self) -> pulumi.Output[str]:
return pulumi.get(self, "custom_login_page")
@property
@pulumi.getter(name="customLoginPageOn")
def custom_login_page_on(self) -> pulumi.Output[bool]:
return pulumi.get(self, "custom_login_page_on")
@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
return pulumi.get(self, "description")
@property
@pulumi.getter(name="encryptionKey")
def encryption_key(self) -> pulumi.Output[Mapping[str, str]]:
return pulumi.get(self, "encryption_key")
@property
@pulumi.getter(name="formTemplate")
def form_template(self) -> pulumi.Output[str]:
return pulumi.get(self, "form_template")
@property
@pulumi.getter(name="grantTypes")
def grant_types(self) -> pulumi.Output[Sequence[str]]:
return pulumi.get(self, "grant_types")
@property
@pulumi.getter(name="initiateLoginUri")
def initiate_login_uri(self) -> pulumi.Output[str]:
return pulumi.get(self, "initiate_login_uri")
@property
@pulumi.getter(name="isFirstParty")
def is_first_party(self) -> pulumi.Output[bool]:
return pulumi.get(self, "is_first_party")
@property
@pulumi.getter(name="isTokenEndpointIpHeaderTrusted")
def is_token_endpoint_ip_header_trusted(self) -> pulumi.Output[bool]:
return pulumi.get(self, "is_token_endpoint_ip_header_trusted")
@property
@pulumi.getter(name="jwtConfiguration")
def jwt_configuration(self) -> pulumi.Output['outputs.GlobalClientJwtConfiguration']:
return pulumi.get(self, "jwt_configuration")
@property
@pulumi.getter(name="logoUri")
def logo_uri(self) -> pulumi.Output[str]:
return pulumi.get(self, "logo_uri")
@property
@pulumi.getter
def mobile(self) -> pulumi.Output['outputs.GlobalClientMobile']:
return pulumi.get(self, "mobile")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="oidcConformant")
def oidc_conformant(self) -> pulumi.Output[bool]:
return pulumi.get(self, "oidc_conformant")
@property
@pulumi.getter(name="refreshToken")
def refresh_token(self) -> pulumi.Output['outputs.GlobalClientRefreshToken']:
return pulumi.get(self, "refresh_token")
@property
@pulumi.getter
def sso(self) -> pulumi.Output[bool]:
return pulumi.get(self, "sso")
@property
@pulumi.getter(name="ssoDisabled")
def sso_disabled(self) -> pulumi.Output[bool]:
return pulumi.get(self, "sso_disabled")
@property
@pulumi.getter(name="tokenEndpointAuthMethod")
def token_endpoint_auth_method(self) -> pulumi.Output[str]:
return pulumi.get(self, "token_endpoint_auth_method")
@property
@pulumi.getter(name="webOrigins")
def web_origins(self) -> pulumi.Output[Sequence[str]]:
return pulumi.get(self, "web_origins")
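    # Minimal usage sketch (illustrative only; assumes this provider package is imported as
    # `pulumi_auth0`):
    #   import pulumi_auth0 as auth0
    #   global_client = auth0.GlobalClient("global",
    #       custom_login_page_on=True,
    #       custom_login_page="<html>...</html>")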
|
python
|
import os
import unittest
from django.test import TestCase
from utils import description_for_objective, ellipsis, objectives_for_course
valid_course = 'MG4'
valid_objective = 'MG4-FACTMULT'
class BasicTests(TestCase):
def test_ellipsis(self):
long_str = 'yadayadayada'
        self.assertEqual(ellipsis(long_str, 5), 'ya...')
        self.assertEqual(ellipsis(long_str, len(long_str) - 1), long_str[:-4] + '...')
        self.assertEqual(ellipsis(long_str, len(long_str) + 1), long_str)
        self.assertEqual(ellipsis(long_str, 100), long_str)
    @unittest.skipIf('TEST_PROVIDER' not in os.environ,
"Test case can't work without TEST_PROVIDER pointing to API provider")
def test_objective_lookup(self):
desc = description_for_objective(valid_objective, os.environ['TEST_PROVIDER'])
self.assertTrue('factors and multiples' in desc)
    @unittest.skipIf('TEST_PROVIDER' not in os.environ,
"Test case can't work without TEST_PROVIDER pointing to API provider")
def test_course_lookup(self):
res = objectives_for_course(valid_course, os.environ['TEST_PROVIDER'])
self.assertTrue(valid_objective in [y for (y, z) in res])
|
python
|
# (c) 2016 James Turner <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
lookup: aws_service_ip_ranges
author:
- James Turner <[email protected]>
requirements:
- must have public internet connectivity
short_description: Look up the IP ranges for services provided in AWS such as EC2 and S3.
description:
- AWS publishes IP ranges used on the public internet by EC2, S3, CloudFront, CodeBuild, Route53, and Route53 Health Checking.
- This module produces a list of all the ranges (by default) or can narrow down the list to the specified region or service.
options:
service:
    description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEBUILD, ROUTE53, ROUTE53_HEALTHCHECKS'
region:
description: 'The AWS region to narrow the ranges to. Examples: us-east-1, eu-west-2, ap-southeast-1'
'''
EXAMPLES = """
vars:
ec2_ranges: "{{ lookup('aws_service_ip_ranges', region='ap-southeast-2', service='EC2', wantlist=True) }}"
tasks:
- name: "use list return option and iterate as a loop"
debug: msg="{% for cidr in ec2_ranges %}{{ cidr }} {% endfor %}"
# "52.62.0.0/15 52.64.0.0/17 52.64.128.0/17 52.65.0.0/16 52.95.241.0/24 52.95.255.16/28 54.66.0.0/16 "
- name: "Pull S3 IP ranges, and print the default return style"
debug: msg="{{ lookup('aws_service_ip_ranges', region='us-east-1', service='S3') }}"
# "52.92.16.0/20,52.216.0.0/15,54.231.0.0/17"
"""
RETURN = """
_raw:
description: comma-separated list of CIDR ranges
"""
import json
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.module_utils._text import to_native
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
try:
resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json')
amazon_response = json.load(resp)['prefixes']
except getattr(json.decoder, 'JSONDecodeError', ValueError) as e:
# on Python 3+, json.decoder.JSONDecodeError is raised for bad
# JSON. On 2.x it's a ValueError
raise AnsibleError("Could not decode AWS IP ranges: %s" % to_native(e))
except HTTPError as e:
raise AnsibleError("Received HTTP error while pulling IP ranges: %s" % to_native(e))
except SSLValidationError as e:
raise AnsibleError("Error validating the server's certificate for: %s" % to_native(e))
except URLError as e:
raise AnsibleError("Failed look up IP range service: %s" % to_native(e))
except ConnectionError as e:
raise AnsibleError("Error connecting to IP range service: %s" % to_native(e))
if 'region' in kwargs:
region = kwargs['region']
amazon_response = (item for item in amazon_response if item['region'] == region)
if 'service' in kwargs:
service = str.upper(kwargs['service'])
amazon_response = (item for item in amazon_response if item['service'] == service)
return [item['ip_prefix'] for item in amazon_response]
|
python
|
from deque import Deque
def isPalindrome(string: str)->bool:
d = Deque()
for character in string:
d.addRear(character)
isPalindromeFlag: bool = True
while d.size() > 1 and isPalindromeFlag:
if d.removeFront() != d.removeRear():
isPalindromeFlag = False
return isPalindromeFlag
print(isPalindrome("radar"))
print(isPalindrome("radr"))
|
python
|
from django.contrib.auth import views as auth_views
from django.urls import path
from accounts import views
from accounts.views import (
dealerSignupView, adminSignupView,
customerSignupView)
app_name = "accounts"
urlpatterns = [
path('login/', auth_views.LoginView.as_view(
template_name="accounts/login.html"), name="login"),
path('logout/', auth_views.LogoutView.as_view(
template_name="accounts/logout.html"), name="logout"),
path("signup/", dealerSignupView.as_view(), name="dealerSignup"),
path("customer/signup/", customerSignupView.as_view(),
name="customerSignup"),
path("administrator/signup/", adminSignupView.as_view(),
name="adminSignup"),
# Password Reset
path('password_reset/',
auth_views.PasswordResetView.as_view(
template_name="accounts/password_reset.html",
success_url='/accounts/password_reset/done/',
email_template_name="accounts/password_reset_email.html",
subject_template_name="accounts/password_reset_subject.txt"),
name="password_reset"),
path('password_reset/done/', auth_views.PasswordResetDoneView.as_view(
template_name="accounts/password_reset_done.html",),
name="password_reset_done"),
path('reset/<uidb64>/<token>/',
auth_views.PasswordResetConfirmView.as_view(
template_name="accounts/password_reset_confirm.html",
success_url='/reset/done/'
),
name="password_reset_confirm"
),
path('reset/done/', auth_views.PasswordResetCompleteView.as_view(
template_name="accounts/password_reset_complete.html"),
name="password_reset_complete"),
# Profile
path("farmer/profile/", views.Farmer_Profile_View,
name="farmerProfile"),
path('customer/profile/', views.Customer_Profile_View,
name="customerProfile"),
path('administrator/profile/', views.administrator_Profile_View,
name="administratorProfile"),
]
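# Reversal note (illustrative only): because app_name = "accounts", these routes are typically
# resolved through the namespace, e.g.
#   reverse("accounts:login")
#   {% url 'accounts:password_reset' %}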
|
python
|
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from knox.auth import TokenAuthentication
class RootView(APIView):
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get(self, request):
return Response("api root")
|
python
|
import sys
from collections import deque
import numpy as np
from gym_snake.envs import *
from gym_snake.base.pos import Pos
from gym_snake.base.direc import Direc
class _HamiltonTableCell:
def __init__(self):
self.idx = None
self.direc = Direc.NONE
self.reset()
def __str__(self):
return "{ idx: %d direc: %s }" % \
(self.idx, self.direc)
__repr__ = __str__
def reset(self):
self.idx = None
self.direc = Direc.NONE
class _BFSTableCell:
def __init__(self):
self.parent = None
self.dist = sys.maxsize
self.reset()
def __str__(self):
return "{ dist: %d parent: %s }" % \
(self.dist, str(self.parent))
__repr__ = __str__
def reset(self):
self.parent = None
self.dist = sys.maxsize
class HamiltonianAgent(object):
def __init__(self, screen_height, screen_width):
if screen_height % 2 == 1 and screen_width % 2 == 1:
raise ValueError("Either height or width of screen must be an even number for Hamiltonian cycle to exist")
self.width = screen_width
self.height = screen_height
self.snake = []
self.snake_head = None
self.food = None
self.map_capacity = self.height * self.width
self._hamilton_table = [[_HamiltonTableCell() for _ in range(self.height)]
for _ in range(self.width)]
self._bfs_table = [[_BFSTableCell() for _ in range(self.height)]
for _ in range(self.width)]
self._generate_route()
def __call__(self, obs):
acts = []
for i in range(obs.shape[0]):
acts.append(self.predict(obs[i]))
return acts
def predict(self, obs):
# If flat observations are being used, transform into grid observations
if obs.ndim == 1:
obs = np.reshape(obs, (1, self.width, self.height))
elif obs.ndim != 3:
raise ValueError("Invalid observation shape")
self._parse_obs(obs)
head = self.snake_head
action = self._hamilton_table[head.x][head.y].direc
path = self._shortest_path_to_food()
if len(self.snake) < 0.75 * self.map_capacity:
if path:
for nxt in path:
head_idx = self._hamilton_table[head.x][head.y].idx
food_idx = self._hamilton_table[self.food.x][self.food.y].idx
nxt_idx = self._hamilton_table[nxt.x][nxt.y].idx
# Default to BFS path if it is physically impossible to collide with body
if len(self.snake) <= 2:
action = head.direc_to(nxt)
else:
# Since we don't know which block is the tail, check all snake body blocks
choose_shortest = True
for body in self.snake:
body_idx = self._hamilton_table[body.x][body.y].idx
head_idx_rel = self._relative_dist(body_idx, head_idx)
nxt_idx_rel = self._relative_dist(body_idx, nxt_idx)
food_idx_rel = self._relative_dist(body_idx, food_idx)
if not (head_idx_rel < nxt_idx_rel <= food_idx_rel):
choose_shortest = False
break
if choose_shortest:
action = head.direc_to(nxt)
# If we ended up in a situation where we are about to take a bad action, attempt to find a safe space
if self._is_valid(head.adj(action)) is False:
if path:
action = head.direc_to(path[0])
else:
# If BFS does not yield a safe route, look for any adjacent safe space
adjs = head.all_adj()
for pos in adjs:
if self._is_valid(pos):
action = head.direc_to(pos)
return action
def _parse_obs(self, obs):
self.snake = []
for x in range(self.width):
for y in range(self.height):
if obs[0][x][y] == SnakeEnv.HEAD_BLOCK:
self.snake_head = Pos(x, y)
self.snake.append(self.snake_head)
elif obs[0][x][y] == SnakeEnv.SNAKE_BLOCK:
self.snake.append(Pos(x, y))
elif obs[0][x][y] == SnakeEnv.FOOD_BLOCK:
self.food = Pos(x, y)
def _generate_route(self):
# Generate a predetermined hamiltonian cycle so that it will be the same
# no matter what observation is received
cnt = 0
if self.height % 2 == 0:
for y in range(self.height):
self._hamilton_table[0][y].idx = cnt
self._hamilton_table[0][y].direc = Direc.UP
if y == self.height - 1:
self._hamilton_table[0][y].direc = Direc.RIGHT
cnt += 1
for y in range(self.height-1, -1, -1):
if y % 2 == 1:
path = range(1, self.width)
direction = Direc.RIGHT
else:
path = range(self.width-1, 0, -1)
direction = Direc.LEFT
for idx, x in enumerate(path):
self._hamilton_table[x][y].idx = cnt
self._hamilton_table[x][y].direc = direction
if idx == self.width-2 and y != 0:
self._hamilton_table[x][y].direc = Direc.DOWN
cnt += 1
else:
for x in range(self.width):
self._hamilton_table[x][0].idx = cnt
self._hamilton_table[x][0].direc = Direc.RIGHT
if x == self.width - 1:
self._hamilton_table[x][0].direc = Direc.UP
cnt += 1
for x in range(self.width-1, -1, -1):
if x % 2 == 1:
path = range(1, self.height)
direction = Direc.UP
else:
path = range(self.height-1, 0, -1)
direction = Direc.DOWN
for idx, y in enumerate(path):
self._hamilton_table[x][y].idx = cnt
self._hamilton_table[x][y].direc = direction
if idx == self.height-2 and x != 0:
self._hamilton_table[x][y].direc = Direc.LEFT
cnt += 1
def _shortest_path_to_food(self):
self._reset_bfs_table()
food = self.food
head = self.snake_head
# Run BFS from food to head so that we can check which nodes adjacent to the head are
# closest to the food if multiple shortest paths exist
start = food
dest = head
self._bfs_table[start.x][start.y].dist = 0
queue = deque()
queue.append(start)
path_found = False
while len(queue) > 0:
cur = queue.popleft()
if cur == dest:
path_found = True
adjs = cur.all_adj()
# Traverse adjacent positions
for pos in adjs:
if self._is_valid(pos):
adj_cell = self._bfs_table[pos.x][pos.y]
if adj_cell.dist == sys.maxsize:
adj_cell.parent = cur
adj_cell.dist = self._bfs_table[cur.x][cur.y].dist + 1
queue.append(pos)
# Return all possible next steps which could lead to shortest route to food source
next_steps = []
min_path_len = sys.maxsize
if path_found:
adjs = head.all_adj()
for pos in adjs:
if self._is_valid(pos):
adj_cell = self._bfs_table[pos.x][pos.y]
if adj_cell.dist < min_path_len:
next_steps = []
next_steps.append(pos)
min_path_len = adj_cell.dist
elif adj_cell.dist == min_path_len:
next_steps.append(pos)
return next_steps
def _is_valid(self, pos):
if (pos in self.snake and pos != self.snake_head) or self._out_of_bounds(pos):
return False
else:
return True
def _out_of_bounds(self, pos):
if pos.x < 0 or pos.y < 0 or pos.x >= self.width or pos.y >= self.height:
return True
else:
return False
def _relative_dist(self, ori, x):
size = self.map_capacity
if ori > x:
x += size
return x - ori
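# Illustrative example (not part of the original source): with map_capacity == 64,
# _relative_dist(60, 3) == 7, i.e. the number of steps travelled along the Hamiltonian
# cycle when wrapping around from index 60 to index 3.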
def _reset_bfs_table(self):
for row in self._bfs_table:
for col in row:
col.reset()
if __name__ == "__main__":
env = SnakeEnvFlatObsSparseReward(screen_width=8, screen_height=8)
agent = HamiltonianAgent(screen_width=8, screen_height=8)
n_timesteps = 100000
observation = env.reset()
for _ in range(n_timesteps):
action = agent.predict(observation)
observation, reward, done, infos = env.step(action)
env.render("human")
if done:
observation = env.reset()
if reward == -100.0:
assert False, "Snake died unexpectedly"
|
python
|
#==================================================================================
# PROGRAM: "boat_sat.py"
# LOCATION: beluga>examples>Mansell
# Author: Justin Mansell (2016)
#
# Description: simple path optimization for a boat with bounded control used
# to demonstrate graph search continuation. Uses saturation fcns.
#==================================================================================
#Import Necessary Modules
import numpy as np
import beluga.bvpsol as bvpsol
import beluga.bvpsol.algorithms as algorithms
import beluga.optim.Problem
from beluga.optim.problem import *
from beluga.continuation import *
from math import *
import functools
def sat_func(u):
cu = 1 #Upper asymptote
cl = -1 #Lower asymptote
s0 = 1 #smoothing factor
return cu - (cu-cl)/(1+np.exp(4*s0/(cu-cl)*u))
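# Quick sanity check (illustrative, assuming the default asymptotes above):
# sat_func(0.0) evaluates to 0, sat_func(10.0) is close to cu = 1 and sat_func(-10.0) is
# close to cl = -1, so the smoothed control stays bounded between the asymptotes.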
def get_problem():
"""A simple example of graph search continuation"""
# Rename this and/or move to optim package?
problem = beluga.optim.Problem('boat_sat')
#Define independent variables
problem.independent('t', 's')
# Define equations of motion
problem.state('x','V*cos(hdg)','m') \
.state('y','V*sin(hdg)','m') \
.state('hdg','k*(1-2/(1+exp(2*u)))','rad')
# Define controls
problem.control('u',1)
# problem.control('hdgdot','rad/s')
problem.mode = 'dae'
# Define Cost Functional
problem.cost['path'] = Expression('1+eps*u^2', 's')
#Define constraints
problem.constraints().initial('x-x_0','m') \
.initial('y-y_0','m') \
.initial('hdg-hdg_0','rad') \
.terminal('x-x_f','m') \
.terminal('y-y_f','m')
#Define constants
problem.constant('cmax',1.0,'m/s^2') #Maximum allowed centripetal acceleration
problem.constant('V',1,'m/s') #Velocity
problem.constant('k',1,'rad/s')
problem.constant('eps',1,1) #Error constant
#Problem scaling
problem.scale.unit('m',1) \
.unit('s',1) \
.unit('rad',1)
#Configure solver
problem.bvp_solver = algorithms.MultipleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=1000, verbose = True, cached = False,number_arcs=16)
#problem.bvp_solver = algorithms.SingleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=100, verbose = True, cached = False)
#Initial Guess....................x0..y0..hdg0
problem.guess.setup('auto',start=[0.0,0.0,0.0])
#Add Continuation Steps
#problem.steps.add_step().num_cases(2) \
# .terminal('x', 3) \
# .terminal('y', 0.1)
problem.steps.add_step().num_cases(30) \
.terminal('x', 3.0) \
.terminal('y', 0)
problem.steps.add_step().num_cases(30) \
.terminal('x', 3.0) \
.terminal('y', 3.0)
return problem
if __name__ == '__main__':
import beluga.Beluga as Beluga
problem = get_problem()
sol = Beluga.run(problem)
|
python
|
import namespace_override as override
def _(methods, address, class_name):
ret = []
post = []
if 'using.cs' in methods:
add_to = [ret]
for line in methods['using.cs']:
if '---;' not in line:
add_to[0] += [line]
else:
add_to = [post]
ret += ['']
del(methods['using.cs'])
address = list(map(lambda s: override._(s), address))
ret += ['namespace ' + '.'.join(address)]
ret += ['{']
if 'test' in class_name:
ret += [" using NUnit.Framework;"]
ret += [""]
ret += [" [TestFixture]"]
if '_.cs' in methods:
start = list(methods['_.cs'])
if 'interface' not in start[0]:
ret += [" [System.Serializable]"]
for line in start:
ret += [' ' + line]
del(methods['_.cs'])
else:
ret += [" [System.Serializable]"]
ret += [" public class " + class_name]
if '}' not in ret[-1]:
ret += [" {"]
ret += [" }"]
ret = '\n' + '\n'.join(ret[:-1]) + '\n'
return ret, '\n'.join(post)
|
python
|
from abc import abstractmethod
from typing import Union, Optional, Any, List, Coroutine
import inspect
from .typings import TProtocol
from .interact import InteractiveObject, IOManager
from .component import MetadataComponent
from .behavior import BaseBehavior
from .utilles import IOStatus
class MonoMetaComponent(MetadataComponent):
io: "Monomer"
name: str
alias: str
__limit__ = ["name", "alias"]
def __getitem__(self, item: str) -> Union[Any, Coroutine[Any, Any, Any]]:
res = self.additions.get(item, None) or self.__dict__.get(item, None)
if res is None:
return self.protocol.put_metadata(item, self.io)
return res
class BaseMonoBehavior(BaseBehavior):
io: "Monomer"
@abstractmethod
def activate(self):
...
@abstractmethod
async def change_metadata(
self,
meta: str,
value: Any,
target: Optional["Monomer"] = None,
**addition
):
await self.io.protocol.set_metadata(meta, value, target or self.io, **addition)
raise NotImplementedError
async def update(self):
pass
raise NotImplementedError
class Monomer(InteractiveObject):
prefab_metadata = MonoMetaComponent
prefab_behavior = BaseMonoBehavior
metadata: MonoMetaComponent
behavior: BaseMonoBehavior
def __init__(
self,
protocol: TProtocol,
name: str,
identifier: Union[int, str],
alias: Optional[str] = None,
):
data = self.prefab_metadata(self)
data.protocol = protocol
data.identifier = f"{identifier}@{protocol.identifier}"
data.name = name
data.alias = alias or ""
super(Monomer, self).__init__(data)
self.metadata.state = IOStatus.ESTABLISHED
def __getitem__(self, item: str):
parts = item.split(".")
if len(parts) == 1:
return self.metadata.__getitem__(item)
tag, attr = parts[0], parts[1]
if self.compare(tag):
return self.metadata.__getitem__(item)
def __setstate__(self, state):
f = inspect.currentframe()
lcs = f.f_back.f_back.f_locals
self.__init__(
lcs['self'].protocol,
state['metadata']['name'],
state['metadata']['identifier'].split("@")[0],
state['metadata']['alias']
)
self.add_tags(*state['metadata']['tags'])
class _EntitySelect:
def __getitem__(self, item) -> List["Monomer"]:
monomers: List["Monomer"] = IOManager.filter(Monomer)
conditions = []
slices = list(item) if not isinstance(item, slice) else [item]
for sl in slices:
key, value = sl.start, str(sl.stop)
if key in ("id", "uid", "identifier"):
def _(monomer: "Monomer", _value=value):
return monomer.metadata.pure_id == _value
elif key == "tag":
def _(monomer: "Monomer", _value=value):
return monomer.prime_tag == _value
elif key == "type":
def _(monomer: "Monomer", _value=value):
return monomer.__class__.__name__ == _value
else:
def _(monomer: "Monomer", _key=key, _value=value):
return getattr(monomer.metadata, _key, None) == _value
conditions.append(_)
return list(filter(lambda x: all([condition(x) for condition in conditions]), monomers))
at_mono = _EntitySelect()
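# Illustrative selection syntax (the tag and type values here are hypothetical): slices act
# as key/value pairs, e.g. at_mono["type": "Monomer", "tag": "friend"] returns every
# registered Monomer whose class name is "Monomer" and whose prime_tag equals "friend".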
|
python
|
try:
from . import generic as g
except BaseException:
import generic as g
class SectionTest(g.unittest.TestCase):
def test_section(self):
mesh = g.get_mesh('featuretype.STL')
# this hits many edge cases
step = .125
z_levels = g.np.arange(start=mesh.bounds[0][2],
stop=mesh.bounds[1][2] + 2 * step,
step=step)
sections = [None] * len(z_levels)
for index, z in enumerate(z_levels):
plane_origin = [0, 0, z]
plane_normal = [0, 0, 1]
section = mesh.section(plane_origin=plane_origin,
plane_normal=plane_normal)
if section is None:
# section will return None if the plane doesn't
# intersect the mesh
assert z > (mesh.bounds[1][2] -
g.trimesh.constants.tol.merge)
continue
planar, to_3D = section.to_planar()
assert planar.is_closed
assert (len(planar.polygons_full) > 0)
sections[index] = planar
# try getting the sections as Path2D through
# the multiplane method
paths = mesh.section_multiplane(plane_origin=[0, 0, 0],
plane_normal=[0, 0, 1],
heights=z_levels)
# call the multiplane method directly
lines, faces, T = g.trimesh.intersections.mesh_multiplane(
mesh=mesh,
plane_origin=[0, 0, 0],
plane_normal=[0, 0, 1],
heights=z_levels)
# make sure various methods return the same results
for index in range(len(z_levels)):
if sections[index] is None:
assert len(lines[index]) == 0
continue
rc = g.trimesh.load_path(lines[index])
assert g.np.isclose(rc.area, sections[index].area)
assert g.np.isclose(rc.area, paths[index].area)
class PlaneLine(g.unittest.TestCase):
def test_planes(self):
count = 10
z = g.np.linspace(-1, 1, count)
plane_origins = g.np.column_stack((
g.np.random.random((count, 2)), z))
plane_normals = g.np.tile([0, 0, -1], (count, 1))
line_origins = g.np.tile([0, 0, 0], (count, 1))
line_directions = g.np.random.random((count, 3))
i, valid = g.trimesh.intersections.planes_lines(
plane_origins=plane_origins,
plane_normals=plane_normals,
line_origins=line_origins,
line_directions=line_directions)
assert valid.all()
assert (g.np.abs(i[:, 2] - z) < g.tol.merge).all()
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-03-27 10:58
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('aggregator', '0039_merge_20190316_2108'),
]
operations = [
migrations.AddField(
model_name='dimension',
name='description',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='variable',
name='description',
field=models.TextField(null=True),
),
migrations.AlterField(
model_name='datasetaccessrequest',
name='creation_date',
field=models.DateTimeField(default=datetime.datetime(2019, 3, 27, 12, 58, 43, 390000)),
),
]
|
python
|
from django.test import TestCase
from rodan.models.job import Job
# from model_mommy import mommy
from rodan.test.helpers import RodanTestTearDownMixin, RodanTestSetUpMixin
class JobTestCase(RodanTestTearDownMixin, TestCase, RodanTestSetUpMixin):
def setUp(self):
self.setUp_rodan()
def test_save(self):
job = Job(name="test job")
job.save()
retr_job = Job.objects.get(name="test job")
self.assertEqual(retr_job.name, job.name)
def test_delete(self):
job = Job(name="test job")
job.save()
retr_job = Job.objects.get(name="test job")
retr_job.delete()
retr_job2 = Job.objects.filter(name="test job")
self.assertFalse(retr_job2.exists())
|
python
|
import sys
import base64
import logging
import marshal
import importlib.util
from os import sep as path_sep
from paker.exceptions import PakerImportError
# use _memimporter if it is available
_MEMIMPORTER = False
try:
import _memimporter
_MEMIMPORTER = True
except ImportError:
from paker.importers import _tempimporter as _memimporter
_module_type = type(sys)
class jsonimporter:
def __init__(self, jsonmod):
super(jsonimporter, self).__init__()
self.jsonmod: dict = jsonmod
self.module_cache = {}
self.logger = logging.getLogger(self.__class__.__name__)
sys.meta_path.append(self)
# Check whether we can satisfy the import of the module named by
# 'fullname', or whether it could be a portion of a namespace
# package. Return self if we can load it, a string containing the
# full path if it's a possible namespace portion, None if we
# can't load it.
def find_loader(self, fullname: str, path=None):
"""find_loader(fullname, path=None) -> self, str or None.
Search for a module specified by 'fullname'. 'fullname' must be the
fully qualified (dotted) module name. It returns the jsonimporter
instance itself if the module was found, a string containing the
full path name if it's possibly a portion of a namespace package,
or None otherwise. The optional 'path' argument is ignored -- it's
there for compatibility with the importer protocol.
"""
path = fullname.split(".")
try:
jsonmod = self.jsonmod[path[0]]
for submod in path[1:]:
jsonmod = jsonmod["modules"][submod]
return self, []
except KeyError:
return None, []
def find_module(self, fullname, path=None):
"""find_module(fullname, path=None) -> self or None.
Search for a module specified by 'fullname'. 'fullname' must be the
fully qualified (dotted) module name. It returns the jsonimporter
instance itself if the module was found, or None if it wasn't.
The optional 'path' argument is ignored -- it's there for compatibility
with the importer protocol.
"""
self.logger.debug("searching for {}".format(fullname))
return self.find_loader(fullname, path)[0]
def get_data(self, fullname):
"""Get module data by name in following format:
- package\\module.extension
This method is called by _memimporter to get source code of
.pyd and .dll modules.
"""
path = fullname.split(".")[0].split("\\")
try:
jsonmod = self.jsonmod[path[0]]
for submod in path[1:]:
jsonmod = jsonmod["modules"][submod]
return base64.b64decode(jsonmod["code"])
except Exception as e:
return None
# Load and return the module named by 'fullname'.
def load_module(self, fullname):
"""load_module(fullname) -> module.
Load the module specified by 'fullname'. 'fullname' must be the
fully qualified (dotted) module name. It returns the imported
module, or raises PakerImportError if it wasn't found.
"""
mod = sys.modules.get(fullname)
if isinstance(mod, _module_type):
return mod
if fullname in self.module_cache:
self.logger.info("loading previously imported module {}".format(fullname))
return self.module_cache[fullname]
try:
path = fullname.split(".")
jsonmod = self.jsonmod[path[0]]
for submod in path[1:]:
jsonmod = jsonmod["modules"][submod]
except KeyError:
raise PakerImportError("could not find {} module".format(fullname))
extension = jsonmod["extension"]
if extension == "py":
mod = _module_type(fullname)
mod.__loader__ = self
if jsonmod["type"] == "package":
mod.__path__ = ["paker://" + fullname.replace(".", path_sep)]
if not hasattr(mod, '__builtins__'):
mod.__builtins__ = __builtins__
sys.modules[fullname] = mod
exec(jsonmod["code"], mod.__dict__)
elif extension == "pyc":
mod = _module_type(fullname)
mod.__loader__ = self
if jsonmod["type"] == "package":
mod.__path__ = ["paker://" + fullname.replace(".", path_sep)]
if not hasattr(mod, '__builtins__'):
mod.__builtins__ = __builtins__
sys.modules[fullname] = mod
exec(marshal.loads(base64.b64decode(jsonmod["code"])), mod.__dict__)
elif extension in ("dll", "pyd", "so"):
# initname = "init" + fullname.rsplit(".", 1)[-1]
initname = "PyInit_" + fullname.split(".")[-1]
path = fullname.replace(".", "\\") + "." + extension
spec = importlib.util.find_spec(fullname, path)
self.logger.info("using {} to load '.{}' file".format("_memimporter" if _MEMIMPORTER else "_tempimporter",
extension))
mod = _memimporter.import_module(fullname, path, initname, self.get_data, spec)
mod.__name__ = fullname
sys.modules[fullname] = mod
else:
raise PakerImportError("module extension must be .py, .pyc, .dll, .pyd or .so (got {})".format(extension))
try:
mod = sys.modules[fullname]
except KeyError:
raise PakerImportError("module {} not found in sys.modules".format(fullname))
self.logger.info("{} has been imported successfully".format(mod.__name__))
self.module_cache[fullname] = mod
return mod
def add_module(self, module_name: str, module: dict):
"""Add new module to jsonimporter object."""
if not isinstance(module, dict):
raise PakerImportError("module must be a dict (got {})".format(type(module)))
self.jsonmod[module_name] = module
self.logger.info("{} has been added successfully".format(module_name))
def unload_module(self, module):
"""Unload single module from sys.modules and remove its serialized source code from memory."""
if isinstance(module, _module_type):
module = module.__name__
if module in self.jsonmod:
del self.jsonmod[module]
if module in self.module_cache:
del self.module_cache[module]
if module in sys.modules:
del sys.modules[module]
self.logger.info("{} has been unloaded successfully".format(module))
def unload(self):
"""Unload all imported modules and remove jsonimporter from meta path."""
for module_name in list(self.jsonmod.keys()):
del self.jsonmod[module_name]
if self in sys.meta_path:
sys.meta_path.remove(self)
for module_name in list(self.module_cache.keys()):
del self.module_cache[module_name]
if module_name in sys.modules:
del sys.modules[module_name]
self.logger.info("unloaded all modules")
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.unload()
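# Usage sketch (illustrative; the nested dict layout mirrors what find_loader and load_module
# expect above, and the module name/code are made up for demonstration):
# modules = {"hello": {"type": "module", "extension": "py",
#                      "code": "def greet():\n    return 'hi'", "modules": {}}}
# with jsonimporter(modules) as importer:
#     import hello
#     print(hello.greet())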
|
python
|
from dataclasses import dataclass
from typing import Optional, List
@dataclass
class Attack:
name: str
cost: List[str]
convertedEnergyCost: int
damage: Optional[str]
text: Optional[str]
|
python
|
from django import template
from django.utils import timezone
from schedules.services import get_times_from_day
from schedules.models import TimeOfDay
register = template.Library()
@register.inclusion_tag('templatetags/calendar_month.html')
def calendar_month():
variable = None
print(">>>>>>")
return {'variable': variable}
|
python
|
from mock import patch
from nerve_tools.envoy import get_envoy_ingress_listeners
def test_get_envoy_ingress_listeners_success():
expected_envoy_listeners = {
('test_service.main', 1234): 54321,
}
mock_envoy_admin_listeners_return_value = {
'listener_statuses': [
{
'name': 'test_service.main.1234.ingress_listener',
'local_address': {
'socket_address': {
'address': '0.0.0.0',
'port_value': 54321,
},
},
},
],
}
with patch(
'nerve_tools.envoy._get_envoy_listeners_from_admin',
return_value=mock_envoy_admin_listeners_return_value,
):
assert get_envoy_ingress_listeners(123) == expected_envoy_listeners
def test_get_envoy_ingress_listeners_failure():
with patch(
'nerve_tools.envoy.requests.get',
side_effect=Exception,
):
assert get_envoy_ingress_listeners(123) == {}
|
python
|
import datetime
def convert_timestamp(ts: float) -> str:
"""Helper function that converts timestamp to %m-%d-%Y format"""
datetime_obj = datetime.datetime.fromtimestamp(ts)
date = datetime.datetime.strftime(datetime_obj,"%m-%d-%Y")
return date
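# Illustrative usage (hypothetical value; the exact date depends on the local timezone
# because datetime.fromtimestamp() converts to local time):
# convert_timestamp(1609459200.0)  # -> "01-01-2021" when the local timezone is UTC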
def extract_attributes_from_subreddit(subreddit) -> dict:
'''Helper function that extracts and stores attributes from Subreddit object'''
return {
"active_user_count": subreddit.active_user_count,
"url": subreddit.url,
"title": subreddit.title,
"subscribers": subreddit.subscribers,
"subreddit_type": subreddit.subreddit_type,
"spoilers_enabled": subreddit.spoilers_enabled,
"public_description": subreddit.public_description,
"over18": subreddit.over18,
"created": subreddit.created,
"created_utc": subreddit.created_utc,
"lang": subreddit.lang,
"videos_allowed": subreddit.allow_videos,
"images_allowed": subreddit.allow_images
}
|
python
|
# Exercise 76 - Date and Time Generator
from datetime import datetime
print(datetime.now().strftime('Today is %A, %B %d, %Y'))
|
python
|
from django.apps import AppConfig
class EvaluationConfig(AppConfig):
name = "grandchallenge.evaluation"
def ready(self):
# noinspection PyUnresolvedReferences
import grandchallenge.evaluation.signals
|
python
|
# -*- coding: utf-8 -*-
from Startup import db
from sqlalchemy import Column, Integer, DateTime, Text
class History_Data(db.Model):
__tablename__ = 'history_data'
ID = Column(Integer, primary_key=True, autoincrement=True, comment='編號')
msg = Column(Text(), nullable=False, comment='歷史訊息')
InertDate = Column(DateTime(), nullable=False, comment='發送日期')
|
python
|
# Copyright 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
from typing import Any
from zipfile import BadZipFile, ZipFile
from monai.deploy.utils.importutil import optional_import
from .model import Model
torch, _ = optional_import("torch")
class TorchScriptModel(Model):
"""Represents TorchScript model.
TorchScript serialization format (TorchScript model file) is created by torch.jit.save() method and
the serialized model (which usually has .pt or .pth extension) is a ZIP archive containing many files.
(https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/docs/serialization.md)
We consider that the model is a torchscript model if its unzipped archive contains files named 'data.pkl' and
'constants.pkl', and folders named 'code' and 'data'.
When predictor property is accessed or the object is called (__call__), the model is loaded in `evaluation mode`
from the serialized model file (if it is not loaded yet) and the model is ready to be used.
"""
model_type: str = "torch_script"
@property
def predictor(self) -> "torch.nn.Module": # type: ignore
"""Get the model's predictor (torch.nn.Module)
If the predictor is not loaded, load it from the model file in evaluation mode.
(https://pytorch.org/docs/stable/generated/torch.jit.ScriptModule.html?highlight=eval#torch.jit.ScriptModule.eval)
Returns:
torch.nn.Module: the model's predictor
"""
if self._predictor is None:
# Use a device to dynamically remap, depending on the GPU availability.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self._predictor = torch.jit.load(self.path, map_location=device).eval()
return self._predictor
@predictor.setter
def predictor(self, predictor: Any):
self._predictor = predictor
def eval(self) -> "TorchScriptModel":
"""Set the model in evaluation model.
This is a proxy method for torch.jit.ScriptModule.eval().
See https://pytorch.org/docs/stable/generated/torch.jit.ScriptModule.html?highlight=eval#torch.jit.ScriptModule.eval
Returns:
self
"""
self.predictor.eval()
return self
def train(self, mode: bool = True) -> "TorchScriptModel":
"""Set the model in training mode.
This is a proxy method for torch.jit.ScriptModule.train().
See https://pytorch.org/docs/stable/generated/torch.jit.ScriptModule.html?highlight=train#torch.jit.ScriptModule.train
Args:
mode (bool): whether the model is in training mode
Returns:
self
"""
self.predictor.train(mode)
return self
@classmethod
def accept(cls, path: str):
prefix_code = False
prefix_data = False
prefix_constants_pkl = False
prefix_data_pkl = False
if not os.path.isfile(path):
return False, None
try:
zip_file = ZipFile(path)
for data in zip_file.filelist:
file_name = data.filename
pivot = file_name.find("/")
if pivot != -1 and not prefix_code and file_name[pivot:].startswith("/code/"):
prefix_code = True
if pivot != -1 and not prefix_data and file_name[pivot:].startswith("/data/"):
prefix_data = True
if pivot != -1 and not prefix_constants_pkl and file_name[pivot:] == "/constants.pkl":
prefix_constants_pkl = True
if pivot != -1 and not prefix_data_pkl and file_name[pivot:] == "/data.pkl":
prefix_data_pkl = True
except BadZipFile:
return False, None
if prefix_code and prefix_data and prefix_constants_pkl and prefix_data_pkl:
return True, cls.model_type
return False, None
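# Usage sketch (illustrative; "model.pt" is a hypothetical TorchScript file written by
# torch.jit.save, and constructing the model from a path is assumed from the base Model class):
# accepted, model_type = TorchScriptModel.accept("model.pt")
# if accepted:
#     ts_model = TorchScriptModel("model.pt")
#     output = ts_model.predictor(torch.rand(1, 3, 224, 224))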
|
python
|
#!/usr/bin/env python
import os
import argparse
import logging
from log_collectors.training_data_service_client import match_log_file
from log_collectors.training_data_service_client import push_log_line
from log_collectors.training_data_service_client import scan_log_dirs
def main():
logging.basicConfig(format='%(filename)s %(funcName)s %(lineno)d: %(message)s', level=logging.INFO)
log_directory = os.environ["LOG_DIR"]
# log_file = log_directory + "/latest-log"
parser = argparse.ArgumentParser()
parser.add_argument('--log_dir', type=str, default=log_directory,
help='Log directory')
FLAGS, unparsed = parser.parse_known_args()
scan_log_dirs.LogScanner(should_connect=True).scan(
log_dir=FLAGS.log_dir,
is_log=match_log_file.is_log_file,
push_function=push_log_line.push)
if __name__ == '__main__':
main()
|
python
|
import os
import pickle
import multiprocessing as mp
from collections import defaultdict
from nltk import pos_tag, sent_tokenize, wordpunct_tokenize
class Preprocessor(object):
def __init__(self, corpus, target=None, **kwargs):
self.corpus = corpus
self.target = target
self.results = []
def fileids(self, fileids=None, categories=None):
fileids = self.corpus.resolve(fileids, categories)
if fileids:
return fileids
return self.corpus.fileids()
def on_error(self,error_msg):
print(error_msg)
def on_result(self, result):
self.results.append(result)
def abspath(self, fileid):
# Find the directory, relative to the corpus root.
parent = os.path.relpath(
os.path.dirname(self.corpus.abspath(fileid)), self.corpus.root
)
# Compute the name parts to reconstruct
basename = os.path.basename(fileid)
name, ext = os.path.splitext(basename)
# Create the pickle file extension
basename = name + '.pickle'
# Return the path to the file relative to the target.
return os.path.normpath(os.path.join(self.target, parent, basename))
def process(self, fileid):
"""For a single file, checks the location on disk to ensure no errors,
uses +tokenize()+ to perform the preprocessing, and writes transformed
document as a pickle to target location.
"""
# Compute the outpath to write the file to.
target = self.abspath(fileid)
parent = os.path.dirname(target)
# Make sure the directory exists
if not os.path.exists(parent):
os.makedirs(parent)
# Make sure that the parent is a directory and not a file
if not os.path.isdir(parent):
raise ValueError(
"Please supply a directory to write preprocessed data to."
)
# Create a data structure for the pickle
document = list(self.tokenize(fileid))
# Open and serialize the pickle to disk
with open(target, 'wb') as f:
pickle.dump(document, f, pickle.HIGHEST_PROTOCOL)
# Clean up the document
del document
return target
def transform(self, fileids=None, categories=None,tasks=None):
# Reset the results
self.results = []
# Make the target directory if it doesn't already exist
if not os.path.exists(self.target):
os.makedirs(self.target)
# Create a multiprocessing pool
tasks = tasks or mp.cpu_count()
pool = mp.Pool(processes=tasks)
# Enqueue tasks on the multiprocessing pool and join
for fileid in self.fileids():
pool.apply_async(self.process, (fileid,), callback=self.on_result,error_callback=self.on_error)
# Close the pool and join
pool.close()
pool.join()
return self.results
def tokenize(self, fileid):
for paragraph in self.corpus.paras(fileids=fileid):
yield [
pos_tag(sent)
for sent in paragraph
]
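# Usage sketch (illustrative; assumes a corpus reader object that exposes resolve(),
# fileids(), abspath(), root and paras(), as required by the methods above):
# preprocessor = Preprocessor(corpus, target="processed/")
# pickle_paths = preprocessor.transform(tasks=4)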
|
python
|
import doctest
import io
from contextlib import redirect_stderr, redirect_stdout
from textwrap import dedent
from scoraptor.result import TestResult
class SingleDocTest:
"""
A single DocTest based test.
Instances of this class are callable. When called, it takes
a global_environment dict, and returns a TestResult object.
We only take a global_environment, *not* a local_environment.
This makes tests not useful inside functions, methods or
other scopes with local variables. This is a limitation of
doctest, so we roll with it.
"""
def __init__(self, name, doctest_string):
self.name = name
self.doctest_string = doctest_string
self.examples = doctest.DocTestParser().parse(
doctest_string,
name
)
PLAIN_TEXT_FAILURE_SUMMARY_TEMPLATE = dedent(r"""
Test {name} failed!
Test code:
{doctest_string}
Test result:
{runresults}
""").strip()
def __call__(self, global_environment):
"""
Run test with given global_environment.
"""
test = doctest.DocTest(
[e for e in self.examples if type(e) is doctest.Example],
global_environment,
self.name,
None,
None,
self.doctest_string
)
doctestrunner = doctest.DocTestRunner(verbose=True)
runresults = io.StringIO()
with redirect_stdout(runresults), redirect_stderr(runresults):
doctestrunner.run(test, clear_globs=False)
with open('/dev/null', 'w') as f, redirect_stderr(f), redirect_stdout(f):
result = doctestrunner.summarize(verbose=True)
score = 1.0 - (result.failed / result.attempted)
if score == 1.0:
summary = 'Test {} passed!'.format(self.name)
else:
summary = self.PLAIN_TEXT_FAILURE_SUMMARY_TEMPLATE.format(
name=self.name,
doctest_string=dedent(self.doctest_string),
runresults=runresults.getvalue()
)
return TestResult(score, summary)
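# Usage sketch (illustrative doctest string; TestResult is constructed from (score, summary)
# as shown above):
# test = SingleDocTest("addition", ">>> 1 + 1\n2\n")
# result = test({})  # run the doctest against an empty global environment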
|
python
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 9 11:06:16 2018
Test for the function of the chi2 script of the omnitool package.
@author: misiak
"""
import sys
from os import path
import numpy as np
import matplotlib.pyplot as plt
import mcmc_red as mcr
plt.close('all')
fs = 1e3
t_range = np.arange(0, 1, fs**-1)
# FUNCTION
funk = lambda t,a: np.heaviside(t_range-t, 1.) * a * (np.exp(t_range-t)-1)
# DATA
sig = 0.02
data = funk(.5, 1.) + np.random.normal(0, sig, t_range.shape)
# MODEL
tmod = (.4,.5,.6)
xmod = (0.5, 1., 1.5)
labmod = ('mod1', 'mod2', 'mod3')
darray = {l: funk(t,a) for l,t,a in zip(labmod, tmod, xmod)}
# TEMPORAL Chi2
d_sx2 = {l: mcr.chi2_simple(data, darray[l], err=sig) for l in labmod}
# FREQ Chi2 with fft, psd, etc...
dfft = {l: np.fft.fft(darray[l]) for l in labmod}
fftdata = np.fft.fft(data)
freq, dpsd = mcr.psd(fftdata, fs)
noise_list = list()
for k in range(100):
freq, noi = mcr.psd(np.fft.fft(np.random.normal(0, sig, t_range.shape)), fs)
noise_list.append(noi)
npsd = np.mean(noise_list, axis=0)
d_fx2 = {l: mcr.chi2_freq(fftdata, dfft[l], npsd, fs) for l in labmod}
# OPT Chi2 with free parameter
opt_funk = lambda t: funk(t, 1.3)
bounds = (0., 1)
opt_x2, opt_t = mcr.opt_chi2_freq(fftdata, opt_funk, npsd, fs, bounds, debug=True)
opt_mod = opt_funk(opt_t)
########## PLOT #############
### TEMPORAL PLOT
plt.figure()
plt.title('1000 pts _ Temporal Chi2')
plt.plot(
t_range, data, lw=1.,
label='data, $\chi^2=${:.2f}'.format(mcr.chi2_simple(data, data, err=sig))
)
for l in labmod:
plt.plot(t_range, darray[l], label=l + ' $\chi^2=${:.2f}'.format(d_sx2[l]))
plt.plot(t_range, opt_mod, ls='--', color='red', label='OPT')
plt.grid(b=True)
plt.legend()
# FREQUENCY PLOT
plt.figure()
plt.title('500 freqs _ Frequency Chi2')
plt.grid(b=True)
plt.loglog(
freq, dpsd,
label='data $\chi^2=${:.2f}'.format(mcr.chi2_freq(fftdata, fftdata, npsd, fs))
)
for l in labmod:
freq, PSD = mcr.psd(dfft[l], fs)
plt.loglog(freq, PSD, label=l+' $\chi^2=${:.2f}'.format(d_fx2[l]))
plt.loglog(freq, npsd, label='noise')
plt.legend()
|
python
|
from crudbuilder.abstract import BaseCrudBuilder
from crudbuilder.formset import BaseInlineFormset
from .models import Person
class PersonCrud(BaseCrudBuilder):
model = Person
search_fields = ['name']
tables2_fields = ('name', 'email')
tables2_css_class = "table table-bordered table-condensed"
tables2_pagination = 20 # default is 10
modelform_excludes = ['created_by', 'updated_by']
login_required = True
permission_required = True
# permissions = {
# 'list': 'example.person_list',
# 'create': 'example.person_create'
# }
|
python
|
class Pos:
def __init__(self, x = 0, y = 0, z = 0):
self.x = x
self.y = y
self.z = z
self.dx = 0
self.dy = 0
self.dz = 0
def move(self):
self.x += self.dx
self.y += self.dy
self.z += self.dz
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import authentication.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Mail',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('subject', models.CharField(max_length=250, null=True, blank=True)),
('body', models.TextField(null=True, blank=True)),
('mail_draft', models.BooleanField(default=False)),
('mail_read', models.BooleanField(default=False)),
('mail_read_date', models.DateTimeField(null=True, blank=True)),
('mail_created', models.DateTimeField(auto_now=True)),
('mail_created_by', models.ForeignKey(related_name='mail_created_by_user', to=settings.AUTH_USER_MODEL)),
('mail_read_by', models.ManyToManyField(related_name='mail_read_by', null=True, to=settings.AUTH_USER_MODEL, blank=True)),
('mail_to', models.ManyToManyField(related_name='mail_sent_to', null=True, to=settings.AUTH_USER_MODEL, blank=True)),
('trash', models.ManyToManyField(related_name='mail_trash_by', null=True, to=settings.AUTH_USER_MODEL, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MailFile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('file_created', models.DateTimeField(auto_now=True)),
('mail_file', models.FileField(null=True, upload_to=authentication.models.get_upload_file_name, blank=True)),
('base_mail', models.ForeignKey(related_name='base_files', blank=True, to='messaging.Mail', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MailReply',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('subject', models.CharField(max_length=250, null=True, blank=True)),
('body', models.TextField(null=True, blank=True)),
('reply_draft', models.BooleanField(default=False)),
('reply_read_date', models.DateTimeField(null=True, blank=True)),
('reply_created', models.DateTimeField(auto_now=True)),
('mail_to', models.ManyToManyField(related_name='reply_sent_to', null=True, to=settings.AUTH_USER_MODEL, blank=True)),
('orig_mail', models.ForeignKey(related_name='reply_mail', to='messaging.Mail')),
('reply_created_by', models.ForeignKey(related_name='reply_created_by_user', to=settings.AUTH_USER_MODEL)),
('reply_read_by', models.ManyToManyField(related_name='reply_read_by', null=True, to=settings.AUTH_USER_MODEL, blank=True)),
('trash', models.ManyToManyField(related_name='reply_trash_by', null=True, to=settings.AUTH_USER_MODEL, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='mailfile',
name='reply_mail',
field=models.ForeignKey(related_name='reply_files', blank=True, to='messaging.MailReply', null=True),
preserve_default=True,
),
]
|
python
|
from tracardi.domain.import_config import ImportConfig, ImportConfigRecord
from tracardi.domain.storage_result import StorageResult
from tracardi.service.storage.factory import storage_manager
from typing import Optional
async def load(id: str) -> Optional[ImportConfig]:
import_configuration = await storage_manager("import").load(id)
if import_configuration is None:
return None
import_configuration = ImportConfigRecord(**import_configuration)
return ImportConfig.decode(import_configuration)
async def save(batch: ImportConfig):
batch = batch.encode()
return await storage_manager("import").upsert(batch.dict())
async def delete(id: str):
return await storage_manager("import").delete(id)
async def load_all(limit: int = 100, query: str = None):
if query is None:
result = await storage_manager("import").load_all(limit=limit)
else:
result = StorageResult(await storage_manager("import").query({
"query": {
"wildcard": {
"name": f"*{query}*"
}
},
"size": limit
}))
return list(result)
async def refresh():
return await storage_manager("import").refresh()
|
python
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ApiVersionResp:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'version_id': 'str',
'version_no': 'str',
'api_id': 'str',
'env_id': 'str',
'env_name': 'str',
'remark': 'str',
'publish_time': 'datetime',
'status': 'int'
}
attribute_map = {
'version_id': 'version_id',
'version_no': 'version_no',
'api_id': 'api_id',
'env_id': 'env_id',
'env_name': 'env_name',
'remark': 'remark',
'publish_time': 'publish_time',
'status': 'status'
}
def __init__(self, version_id=None, version_no=None, api_id=None, env_id=None, env_name=None, remark=None, publish_time=None, status=None):
"""ApiVersionResp - a model defined in huaweicloud sdk"""
self._version_id = None
self._version_no = None
self._api_id = None
self._env_id = None
self._env_name = None
self._remark = None
self._publish_time = None
self._status = None
self.discriminator = None
if version_id is not None:
self.version_id = version_id
if version_no is not None:
self.version_no = version_no
if api_id is not None:
self.api_id = api_id
if env_id is not None:
self.env_id = env_id
if env_name is not None:
self.env_name = env_name
if remark is not None:
self.remark = remark
if publish_time is not None:
self.publish_time = publish_time
if status is not None:
self.status = status
@property
def version_id(self):
"""Gets the version_id of this ApiVersionResp.
ID of the historical API version
:return: The version_id of this ApiVersionResp.
:rtype: str
"""
return self._version_id
@version_id.setter
def version_id(self, version_id):
"""Sets the version_id of this ApiVersionResp.
ID of the historical API version
:param version_id: The version_id of this ApiVersionResp.
:type: str
"""
self._version_id = version_id
@property
def version_no(self):
"""Gets the version_no of this ApiVersionResp.
Version number of the API
:return: The version_no of this ApiVersionResp.
:rtype: str
"""
return self._version_no
@version_no.setter
def version_no(self, version_no):
"""Sets the version_no of this ApiVersionResp.
Version number of the API
:param version_no: The version_no of this ApiVersionResp.
:type: str
"""
self._version_no = version_no
@property
def api_id(self):
"""Gets the api_id of this ApiVersionResp.
API ID
:return: The api_id of this ApiVersionResp.
:rtype: str
"""
return self._api_id
@api_id.setter
def api_id(self, api_id):
"""Sets the api_id of this ApiVersionResp.
API ID
:param api_id: The api_id of this ApiVersionResp.
:type: str
"""
self._api_id = api_id
@property
def env_id(self):
"""Gets the env_id of this ApiVersionResp.
ID of the environment where the API is published
:return: The env_id of this ApiVersionResp.
:rtype: str
"""
return self._env_id
@env_id.setter
def env_id(self, env_id):
"""Sets the env_id of this ApiVersionResp.
ID of the environment where the API is published
:param env_id: The env_id of this ApiVersionResp.
:type: str
"""
self._env_id = env_id
@property
def env_name(self):
"""Gets the env_name of this ApiVersionResp.
Name of the environment where the API is published
:return: The env_name of this ApiVersionResp.
:rtype: str
"""
return self._env_name
@env_name.setter
def env_name(self, env_name):
"""Sets the env_name of this ApiVersionResp.
Name of the environment where the API is published
:param env_name: The env_name of this ApiVersionResp.
:type: str
"""
self._env_name = env_name
@property
def remark(self):
"""Gets the remark of this ApiVersionResp.
Publish description
:return: The remark of this ApiVersionResp.
:rtype: str
"""
return self._remark
@remark.setter
def remark(self, remark):
"""Sets the remark of this ApiVersionResp.
Publish description
:param remark: The remark of this ApiVersionResp.
:type: str
"""
self._remark = remark
@property
def publish_time(self):
"""Gets the publish_time of this ApiVersionResp.
Publish time
:return: The publish_time of this ApiVersionResp.
:rtype: datetime
"""
return self._publish_time
@publish_time.setter
def publish_time(self, publish_time):
"""Sets the publish_time of this ApiVersionResp.
Publish time
:param publish_time: The publish_time of this ApiVersionResp.
:type: datetime
"""
self._publish_time = publish_time
@property
def status(self):
"""Gets the status of this ApiVersionResp.
Version status - 1: the currently effective version - 2: a version not in effect
:return: The status of this ApiVersionResp.
:rtype: int
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ApiVersionResp.
Version status - 1: the currently effective version - 2: a version not in effect
:param status: The status of this ApiVersionResp.
:type: int
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApiVersionResp):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
python
|
import time
import re
from collections import Counter
import operator
start_time = time.time()
def getManhattanDistance(src, dest):
return abs(src[0] - dest[0]) + abs(src[1] - dest[1])
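# e.g. getManhattanDistance((1, 1), (4, 5)) == 7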
with open("input") as f:
coords = f.readlines()
coords = [list(map(int,re.findall(r"\d+",x.strip()))) for x in coords]
# === Part One and Two ===
ignoredAreas = [-1]
maxCoords = max(coords,key=lambda item:item[1])
coordMap = {}
maxDistance = 10000
inMaxDistanceCount = 0
for i in range(maxCoords[0]+1):
for j in range(0,maxCoords[1]+1):
distances = {}
coordMap[str(i)+":"+str(j)] = -1
totalDistance = 0
for key, coord in enumerate(coords):
distance = getManhattanDistance((i,j), coord)
distances[key] = distance
totalDistance += distance
if(i == coord[0] and j == coord[1]):
coordMap[str(i)+":"+str(j)] = key
counts = Counter(distances.values())
best = min(distances.items(), key=operator.itemgetter(1))
if(counts[best[1]] > 1):
coordMap[str(i)+":"+str(j)] = -1
else:
coordMap[str(i)+":"+str(j)] = best[0]
if( best[0] not in ignoredAreas and (i==0 or j==0 or i==maxCoords[0] or j==maxCoords[1])):
ignoredAreas.append(best[0])
if(totalDistance < maxDistance) : inMaxDistanceCount+=1
finiteAreasCount = Counter([x for x in coordMap.values() if x not in ignoredAreas])
largestFiniteArea = max(finiteAreasCount.items(), key=operator.itemgetter(1))
print("Largest finite area is", largestFiniteArea[0],"with a total area of",largestFiniteArea[1])
print("Largest area with total distance to all points below",maxDistance,":",inMaxDistanceCount)
print("Time elapsed: ", time.time() - start_time)
|
python
|
#!/usr/bin/env python3
import calendar
from datetime import *
import fileinput
import html
import importlib
import math
import os
from os import path
import pkgutil
import re
import subprocess
import sys
import textwrap
from jinja2 import Environment, FileSystemLoader, select_autoescape
import diags
# START configuration
# Special statistics are only shown for vips
# (insert your and your friends names here)
# E. g. vips = ["MyName", "friend42"]
vips = []
# Merge or rename users by name or database id
# E.g. merges = [["MyName", "MyNameLaptop", 20], ...]
merges = []
# Maximum users to show in diagrams
maxUsers = 50
# Statistics about the TS3AudioBot
botStats = False
# Input folder for server logs
inputFolder = "Logs"
# Input folder for TS3AudioBot logs
inputFolderBot = "BotLogs"
# Output folder
outputFolder = "Result"
# Folder for temporary files
tempFolder = "temp"
# Length of a slot in seconds
slotLength = timedelta(minutes = 10)
# END configuration
# Load configuration from Settings.py
if path.isfile("Settings.py"):
exec(compile(open("Settings.py").read(), "Settings.py", "exec"))
# The length of a part in seconds
slotsPerDay = int(math.ceil(timedelta(days = 1) / slotLength))
# Enum for connections
CONNECTION_START = 1
CONNECTION_END = 2
def openTempfile(name):
return open(path.join(tempFolder, name + ".tmp"), "w")
def parseName(name):
return html.unescape(name)
def timeToString(t):
secs = t.total_seconds()
seconds = int(secs % 60)
minutes = int(secs / 60) % 60
total_hours = int(secs / 3600)
hours = total_hours % 24
days = int(total_hours / 24) % 365
years = int(total_hours / (24 * 365))
res = ""
if years > 0:
res += "{0} years ".format(years)
if years > 0 or days > 0:
res += "{0} days ".format(days)
if years > 0 or days > 0 or hours > 0:
res += "{0:02}:".format(hours)
return res + "{0:02}:{1:02}".format(minutes, seconds)
def timestampToString(timestamp):
date = datetime.fromtimestamp(timestamp)
return "{0:%Y-%m-%d}".format(date)
def to_slot_index(t, slotLength = slotLength):
return timedelta(hours = t.hour, minutes = t.minute, seconds = t.second) // slotLength
gnuplotEscapes = ['\\', '^', '_', '@', '&', '~', '{']
def gnuplotEscape(text):
for c in gnuplotEscapes:
text = text.replace(c, '\\' + c)
# Escape twice...
text = text.replace('\\', '\\\\')
return text
class DiagramCreator:
def __init__(self):
self.env = Environment(loader = FileSystemLoader("."),
autoescape = select_autoescape(["html", "xml"],
default_for_string = True),
trim_blocks = True,
lstrip_blocks = True)
self.diagramTemplate = self.env.get_template("template.gp")
self.htmlTemplate = self.env.get_template("template.html")
def load_meta(self):
os.makedirs(outputFolder, mode = 0o755, exist_ok = True)
os.makedirs(tempFolder, mode = 0o755, exist_ok = True)
self.args2diags()
def args2diags(self):
"""Parse the arguments and fill the diags list"""
# Get all diagrams
self.diags = []
package = diags
prefix = package.__name__ + "."
for importer, modname, ispkg in pkgutil.iter_modules(package.__path__, prefix):
module = importlib.import_module(modname)
self.diags.append(module)
def load_data(self):
self.users = {}
self.fakeTimeouts = 0
self.parse_server()
self.merge()
if botStats:
self.parse_bot()
self.dayCount = (self.endDay - self.startDay).days + 1
if botStats:
self.dayCountBot = (self.endDayBot - self.startDayBot).days + 1
self.generalTab = Tab("General")
self.vipTab = Tab("VIPs")
self.tabs = [self.generalTab, self.vipTab]
if botStats:
self.botTab = Tab("Bot")
self.tabs.append(self.botTab)
def parse_server(self):
"""Open the log files and store the users and connections"""
self.startDay = date.today()
self.endDay = date.today()
inputFiles = sorted([path.join(inputFolder, f) for f in os.listdir(inputFolder)
if path.isfile(path.join(inputFolder, f))])
linePattern = re.compile(r"^(?P<Date>\d{4}-\d{2}-\d{2})\s+(?P<Time>\d{2}:\d{2}:\d{2}.\d{6})\|\s*(?P<LogLevel>\w+)\s*\|\s*(?P<Initiator>\w+)\s*\|\s*(?P<VServ>\w+)\s*\|\s*client (?P<Action>(?:dis)?connected) '(?P<Name>.*)'\(id:(?P<DbId>\d+)\) (?:reason 'reasonmsg=?(?P<Reason>.*)')?.*\n?$")
# Read connections from log
for file in inputFiles:
with open(file) as f:
# Previous line
prevline = ""
for line in f:
# Remove the bom
if line.startswith("\ufeff"):
line = line[1:]
match = linePattern.fullmatch(line)
if match:
connected = match.group("Action") == "connected"
# Get time
t = datetime.strptime(line[:19], "%Y-%m-%d %H:%M:%S").replace(tzinfo = timezone.utc).astimezone()
if t.date() < self.startDay:
self.startDay = t.date()
userId = int(match.group("DbId"))
# Find or create the user
if userId not in self.users:
u = User(parseName(match.group("Name")))
self.users[userId] = u
else:
u = self.users[userId]
# True if the line is a connect
if connected:
u.lastConnected.append(t)
elif u.lastConnected:
# Ignore clients that didn't connect
timeout = False
if match.group("Reason") == "connection lost":
# Check if it is a timeout
if "ping timeout" in prevline or "resend timeout" in prevline or t < datetime(2017, 3, 1, tzinfo = timezone.utc):
timeout = True
else:
self.fakeTimeouts += 1
con = Connection(u.lastConnected[0], t, timeout)
u.connections.append(con)
del u.lastConnected[0]
prevline = line
# Disconnect all connected clients because the log is finished.
# Use the last known timestamp for the end
for u in self.users.values():
for start in u.lastConnected:
con = Connection(start, t)
u.connections.append(con)
u.lastConnected = []
def parse_bot(self):
"""Open the bot log files and store the users and plays"""
self.startDayBot = date.today()
self.endDayBot = date.today()
inputFiles = sorted([path.join(inputFolderBot, f) for f in os.listdir(inputFolderBot)
if path.isfile(path.join(inputFolderBot, f))])
linePattern = re.compile(r"^\[(?P<Time>\d{2}:\d{2}:\d{2})\]\s*Debug: MB Got message from (?P<Name>[^:]*): !(?P<Command>.*)\n?$")
datePattern = re.compile(r"^\[(?P<Time>\d{2}:\d{2}:\d{2})\]\s*Info: \[=== (?:Date:.*,\s*(?P<mday0>\d+) (?P<month0>\w+) (?P<year0>\d+).*|Date:.*,\s*(?P<month1>\w+) (?P<mday1>\d+), (?P<year1>\d+).*|Date/Time:.*,\s*(?P<month2>\w+) (?P<mday2>\d+), (?P<year2>\d+).*)\n?$")
timePattern = re.compile(r"^\[(?P<Time>\d{2}:\d{2}:\d{2})\].*\n?$")
newLinePattern = re.compile(r"^(?P<Time>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})\.\d{4}\|.*Got message from (?P<Name>[^:]*): !(?P<Command>.*)$")
newLinePattern2 = re.compile(r"^(?P<Time>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})\.\d{4}\|.*User (?P<Name>.*) requested: !(?P<Command>.*)$")
# Line formats:
# [04:44:57] Info: [=== Date: Monday, 27 March 2017 ===]
# [19:31:50] Info: [=== Date/Time: Friday, August 18, 2017 7:31:50 PM
# [17:13:54] Debug: MB Got message from Hengo: !pl [URL]https://www.youtube.com/watch?v=d_RjwMZItZo[/URL]
#
# 2018-01-11 00:30:09.2136|DEBUG|Bot.TextCallback Got message from Splamy: !ver
# 2018-06-27 15:59:49.9569| INFO|Bot.TextCallback User Splamy requested: !sett get connect.name
# Read connections from log
for file in inputFiles:
with open(file) as f:
curDate = None
for line in f:
# Remove the bom
if line.startswith("\ufeff"):
line = line[1:]
match = datePattern.fullmatch(line)
if match:
for i in range(3):
if match.group("year" + str(i)) != None:
curDate = datetime.strptime("{}-{}-{} {}".format(match.group("year" + str(i)), match.group("month" + str(i)), match.group("mday" + str(i)), match.group("Time")), "%Y-%B-%d %H:%M:%S")
break
match = timePattern.fullmatch(line)
if match:
curTime = datetime.strptime(match.group("Time"), "%H:%M:%S").time()
if type(curDate) is datetime:
if curTime < curDate.time():
curDate += timedelta(days = 1)
curDate = datetime.combine(curDate.date(), curTime)
else:
curDate = curTime
match = linePattern.fullmatch(line)
if match:
if type(curDate) is datetime and curDate.date() < self.startDayBot:
self.startDayBot = curDate.date()
self.parseAddEvent(curDate, match.group("Name").strip(), match.group("Command"))
match = newLinePattern.fullmatch(line.strip())
if not match:
match = newLinePattern2.fullmatch(line.strip())
if match:
curDate = datetime.strptime(match.group("Time"), "%Y-%m-%d %H:%M:%S")
if curDate.date() < self.startDayBot:
self.startDayBot = curDate.date()
self.parseAddEvent(curDate, match.group("Name").strip(), match.group("Command"))
def parseAddEvent(self, curDate, name, command):
playCmd = command.startswith("pl") or command.startswith("py") or command.startswith("ad")
# Find or create the user
for m in merges:
if name in m:
name = m[0]
break
user = None
for u in self.users:
if u.name == name:
user = u
break
if user == None:
user = User(name)
self.users.append(user)
if playCmd:
user.botPlays.append((curDate, command))
else:
user.botCommands.append((curDate, command))
def merge(self):
# Merge users
for id, u in self.users.items():
for m in merges:
# print(f"Id {id} {type(id)} searched in {m}")
# if id in m:
# print(f"Id {id} merged in {m[0]}")
if u.name in m or id in m:
u.name = m[0]
break
# Aggregate users with the same name
self.users = list(self.users.values())
i = 0
while i < len(self.users):
j = i + 1
while j < len(self.users):
if self.users[i].name == self.users[j].name:
# Merge users
self.users[i].connections += self.users[j].connections
del self.users[j]
j -= 1
j += 1
i += 1
print("User count: {}".format(len(self.users)), file = sys.stderr)
# Select vip users
self.vip = [u for u in self.users if u.name in vips]
def create_diagrams(self):
for diag in self.diags:
diag.create_diag(self)
# Render the html
with open(path.join(outputFolder, "index.html"), "w") as f:
f.write(self.htmlTemplate.render(tabs = self.tabs,
date = datetime.now().strftime("%d.%m.%Y %H:%M")))
# Link the static data
if not path.exists(path.join(outputFolder, "static")):
os.symlink("../static", path.join(outputFolder, "static"))
def fun_per_connected_slot(self, users, slotFun, slotLength = timedelta(days = 1), floorFun = None):
"""Calls f for each day a certain connection lasts.
userStart/End are called before and after the connections of a user are worked on.
slotType is a bit field of CONNECTION_START/END
start and end are relative to the slotStart
f(user, connection, slotStart, slotType, start, end)"""
if floorFun == None:
# Round down to the nearest multiple of the slotLength time
floorFun = lambda t: t - timedelta(hours = t.hour, minutes = t.minute, seconds = t.second) % slotLength
for u in users:
for c in u.connections:
# The start of the first slot
slotStart = floorFun(c.start)
slotEnd = slotStart + slotLength
# First slot
relStart = c.start - slotStart
if c.end > slotEnd:
slotFun(u, c, slotStart, CONNECTION_START, relStart, slotLength)
else:
# Only one slot
slotFun(u, c, slotStart, CONNECTION_START | CONNECTION_END, relStart, c.end - slotStart)
continue
slotStart = slotEnd
slotEnd += slotLength
# For each slot
while c.end > slotEnd:
slotFun(u, c, slotStart, 0, timedelta(), slotLength)
slotStart = slotEnd
slotEnd += slotLength
# Last slot
slotFun(u, c, slotStart, CONNECTION_END, timedelta(), c.end - slotStart)
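    # For example, with a slotLength of one day, a connection from 23:30 to 01:10 the
    # next day produces two calls: (CONNECTION_START, start=23:30, end=24:00) on the
    # first day and (CONNECTION_END, start=00:00, end=01:10) on the second day.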
def write_slots_per_day(self, f, slots, name = None):
if name != None:
f.write("# {0}\n".format(name))
for i, data in enumerate(slots):
minutes = int((i * slotLength).total_seconds()) // 60
f.write("{0:02}:{1:02}\t{2}\n".format(minutes // 60, minutes % 60, data))
f.write("24:00\t{0}\n\n\n".format(slots[0]))
def write_days(self, f, days, name = None, cumulative = False, start = None):
if name != None:
f.write("# {0}\n".format(name))
if start == None:
start = self.startDay
day = start
if cumulative:
sum = 0
for data in days:
if cumulative:
sum += data
f.write("{0:%d.%m.%Y}\t{1}\n".format(day, sum))
else:
f.write("{0:%d.%m.%Y}\t{1}\n".format(day, data))
day += timedelta(days = 1)
f.write("\n\n")
class Connection:
def __init__(self, start, end, timeout = False):
# Unix timestamp
self.start = start
self.end = end
self.timeout = timeout
def duration(self):
return self.end - self.start
class User:
def __init__(self, name):
self.name = name
# Didn't connect so far
self.lastConnected = []
# List of connections
self.connections = []
self.botPlays = []
self.botCommands = []
class Diagram:
diagrams = []
def __init__(self, filename, title = "Title", width = 1920, height = 600, shortname = None):
self.filename = filename
if shortname == None:
shortname = filename
self.shortname = shortname
self.title = title
self.width = width
self.height = height
self.xlabel = "x"
self.ylabel = "y"
self.appendText = ""
self.legend = "left"
# plots can be set to none to disable them
self.plots = []
self.subtitle = None
def __iter__(self):
yield "color", "#bbbbbb"
yield "filename", self.filename
yield "outputfile", path.join(outputFolder, self.filename)
yield "shortname", self.shortname
yield "title", self.title
if self.subtitle != None:
yield "subtitle", self.subtitle
yield "width", self.width
yield "height", self.height
yield "xlabel", self.xlabel
yield "ylabel", self.ylabel
yield "legend", self.legend
yield "appendText", textwrap.dedent(self.appendText)
if self.plots:
dataFile = "'{0}.tmp' ".format(path.join(tempFolder, self.filename))
yield "plots", "plot " + ", \\\n\t".join([dataFile + p for p in self.plots])
def render(self, diagramTemplate):
# Read the template
with open("template.gp") as f:
template = f.read()
# Create the gnuplot script
tempName = path.join(tempFolder, self.filename + ".gp.tmp")
with open(tempName, "w") as f:
f.write(diagramTemplate.render(dict(self)))
subprocess.Popen(["gnuplot", tempName])
if self.subtitle:
print(self.subtitle, file = sys.stderr)
Diagram.diagrams.append(self)
class Tab:
def __init__(self, name, shortname = None):
self.name = name
if shortname == None:
shortname = name
self.shortname = shortname
self.diagrams = []
def addDiagram(self, diag):
self.diagrams.append(diag)
def __iter__(self):
yield "name", self.name
yield "shortname", self.shortname
yield "diagrams", [dict(d) for d in self.diagrams]
def main():
dc = DiagramCreator()
dc.load_meta()
dc.load_data()
dc.create_diagrams()
if __name__ == '__main__':
main()
|
python
|
# -*- coding: utf-8 -*-
# @Author: Konstantin Schuckmann
# @Date: 2021-10-28 14:36:06
# @Last Modified by: Konstantin Schuckmann
# @Last Modified time: 2021-10-29 09:30:48
import pandas as pd
import numpy as np
# Needed for generating data from an existing dataset
from sklearn.neighbors import KernelDensity
from sklearn.model_selection import GridSearchCV
from sklearn.impute import SimpleImputer
def create_dummy_variables_from_original():
data = pd.read_csv('./data/input/input.csv', sep=';')
column_names = ['Distance','Rate 0 - 49 kg', 'Rate 50 - 99 kg', 'Rate 99 - 149 kg',
'Rate 149 - 199 kg', 'Rate 199 - 299 kg', 'Rate 299 - 399 kg',
'Rate 399 - 499 kg', 'Rate 499 - 599 kg', 'Rate 599 - 699 kg',
'Rate 699 - 799 kg', 'Rate 799 - 899 kg', 'Rate 899 - 999 kg',
'Rate 999 - 1099 kg', 'Rate 1099 - 1199 kg', 'Rate 1199 - 1299 kg',
'Rate 1299 - 1399 kg', 'Rate 1399 - 1499 kg', 'Rate 1499 - 1749 kg',
'Rate 1749 - 1999 kg', 'Rate 1999 - 2249 kg', 'Rate 2249 - 2499 kg',
'Rate 2499 - 2999 kg', 'Rate 2999 - 3499 kg', 'Rate 3499 - 3999 kg',
'Rate 3999 - 4499 kg', 'Rate 4499 - 4999 kg', 'Rate 4999 - 5999 kg',
'Rate 5999 - 6999 kg', 'Rate 6999 - 7999 kg', 'Rate 7999 - 8999 kg',
'Rate 8999 - 9999 kg', 'Rate 9999 - 10999 kg', 'Rate 10999 - 11999 kg',
'Rate 11999 - 12999 kg', 'Rate 12999 - 13999 kg',
'Rate 13999 - 14999 kg', 'Rate 14999 - 15999 kg',
'Rate 15999 - 16999 kg', 'Rate 16999 - 17999 kg',
'Rate 17999 - 18999 kg', 'Rate 18999 - 19999 kg', 'Rate 20000-21499 kg',
'Rate 21500-22999 kg', 'Rate 23000-24499 kg']
imp = SimpleImputer(missing_values=np.nan, strategy='mean')
imp.fit(data[['GDP WB origin', 'GDP WB Destination',
'Lat Origin', 'Lon Origin', 'Lat Destination', 'Lon Destination']])
data[['GDP WB origin', 'GDP WB Destination',
'Lat Origin', 'Lon Origin', 'Lat Destination', 'Lon Destination']] = imp.transform(data[['GDP WB origin', 'GDP WB Destination',
'Lat Origin', 'Lon Origin', 'Lat Destination', 'Lon Destination']])
sorted_data = data.sort_values(by=['Distance'])
rand_state = 11
# Fetch the dataset and store in X
X = data[column_names]
# Fit a kernel density model using GridSearchCV to determine the best parameter for bandwidth
bandwidth_params = {'bandwidth': np.arange(0.01,1,0.05)}
grid_search = GridSearchCV(KernelDensity(), bandwidth_params)
grid_search.fit(X)
kde = grid_search.best_estimator_
# Generate/sample
new_data = kde.sample(sorted_data.shape[0], random_state=rand_state)
new_data = pd.DataFrame(new_data, columns = column_names)
final_df = pd.concat([data[['Country Relation', 'Country Relation Vice Versa', 'Origin Country',
'Destination Country', 'GDP WB origin', 'GDP WB Destination',
'Lat Origin', 'Lon Origin', 'Lat Destination', 'Lon Destination']], new_data], axis = 1)
final_df.to_csv('./data/input/dummy_input.csv', sep=';', index=False)
def main():
create_dummy_variables_from_original()
if __name__ == '__main__':
main()
|
python
|
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'quartiles' function below.
#
# The function is expected to return an INTEGER_ARRAY.
# The function accepts INTEGER_ARRAY arr as parameter.
#
def quartiles(arr):
# Write your code here
arr = sorted(arr)
length_Of_Array = len(arr)
first_Number = length_Of_Array // 2
second_Number = first_Number // 2
third_Number = first_Number + second_Number
if(length_Of_Array % 2 != 0):
if(first_Number % second_Number != 0):
q1 = arr[second_Number]
q2 = arr[first_Number]
q3 = arr[third_Number + 1]
return q1, q2, q3
else:
q1 = (arr[second_Number - 1] + arr[second_Number]) // 2
q2 = arr[first_Number]
q3 = (arr[third_Number] + arr[third_Number + 1]) // 2
return q1, q2, q3
elif(length_Of_Array % 2 == 0):
if(first_Number % second_Number != 0):
q1 = arr[second_Number]
q2 = (arr[first_Number] + arr[first_Number - 1]) // 2
q3 = arr[third_Number]
return q1, q2, q3
else:
q1 = (arr[second_Number - 1] + arr[second_Number]) // 2
q2 = (arr[first_Number] + arr[first_Number - 1]) // 2
q3 = (arr[third_Number] + arr[third_Number - 1]) // 2
return q1, q2, q3
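# Worked example (HackerRank sample input, traced through the code above):
# quartiles([3, 7, 8, 5, 12, 14, 21, 13, 18]) sorts the values to
# [3, 5, 7, 8, 12, 13, 14, 18, 21] and returns (6, 12, 16).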
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input().strip())
data = list(map(int, input().rstrip().split()))
res = quartiles(data)
fptr.write('\n'.join(map(str, res)))
fptr.write('\n')
fptr.close()
|
python
|
from model.client import Client
import string
import random
import os.path
import jsonpickle
import getopt
import sys
try:
opts, args = getopt.getopt(sys.argv[1:],"n:f:", ["number of clients", "file"])
except getopt.GetoptError as err:
    print(err)  # the getopt module has no usage(); report the parse error before exiting
sys.exit(2)
n=5
f= "data/clients.json"
for o, a in opts:
if o =="-n":
n=int(a)
elif o == "-f":
f=a
def random_string(prefix, maxlen):
symbols=string.ascii_letters
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
def random_only_digital(prefix, maxlen):
symbols=string.digits
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
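# For example, random_string("name", 5) yields "name" followed by up to 4 random
# letters, and random_only_digital("home", 11) yields "home" followed by up to
# 10 random digits (random.randrange(maxlen) picks a length in [0, maxlen)).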
testdata = [Client (name="", middlename="", lastname="", nickname="", address="", home="", mobile="", work="",
email="", email2="", email3="", phone2="") ] + [
Client(name=random_string("name",5), middlename=random_string("middlename", 10),
lastname=random_string("lastname",20), nickname=random_string("nickname",20),
address=random_string("address",20), home=random_only_digital("home", 11),
mobile=random_only_digital("mobile",11), work=random_only_digital("work",11),
email="[email protected]", email2="[email protected]", email3="[email protected]",
phone2=random_only_digital("phone2",11)
)
for i in range(n)
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(testdata))
|
python
|
"""
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""This module contains the slice-compare intent.
The slice-compare intent returns the result in a form that lets the user
easily compare the data in the way they want.
It also supports operations such as cropping based on a date range,
slicing (removing rows that do not satisfy the conditions), and group by.
Some of the operations are optional.
"""
from util import aspects
def slice_compare(table, metric, dimensions, slices, slice_compare_column,
summary_operator, **kwargs):
"""This function will implement the slice-compare intent
Also removes the tuples that do not lie in the given date range.
    The arguments 'table, metric, dimensions, slices, slice_compare_column,
    summary_operator' are not optional, so they are passed as they are;
    'date_range' is passed in kwargs.
    If some of the optional args are None (not passed),
    it is assumed that they should not be applied.
Args:
table: Type-pandas.dataframe
It has the contents of the csv file
metric: Type-string
            It is the name of the column on which the grouping is done;
            the summary operator is applied to the metric. The metric can be a
            column containing strings if the count operator is applied to it.
dimensions: Type-list of str
            It is the list of column names we want to keep.
            In the query 'top 5 batsmen according to runs', the dimension is 'batsman'.
When summary_operator is not None, we group by dimensions.
date_range: Type-tuple
Tuple of start_date and end_date
date_column_name: Type-str
            It is the name of the column which contains the date
date_format: Type-str
            It is required by datetime.strptime to parse the date in the given format
Format Codes
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
slices: Type-dictionary (will be changed)
            contains the column name as the key and the instance we want
            to slice by as the value
slice_compare_column: Type-list of string
            The first element denotes the column name by which we will do the comparison.
            The remaining elements are the values of that column by which we
            will compare the slices.
summary_operator: Type-summary_operators enum members
It denotes the summary operator, after grouping by dimensions.
ex. SummaryOperators.MAX, SummaryOperators.SUM
Note-summary_operator is always applied on metric column passed,
and only when grouping is done
Returns:
        The function returns the `table` (a pandas DataFrame object)
        after applying the intent on the
        given `table` (a pandas DataFrame object).
"""
date_column_name = kwargs.get('date_column_name', 'date')
date_range = kwargs.get('date_range', None)
date_format = kwargs.get('date_format', 'yyyy-mm-dd')
table = aspects.apply_date_range(table, date_range,
date_column_name, date_format)
table = aspects.slice_table(table, slices)
    # collecting the columns not to be removed
required_columns = []
if dimensions is not None:
required_columns = dimensions.copy()
required_columns.append(metric)
table = aspects.crop_other_columns(table, required_columns)
# slice_compare_column should be the last element of the group
# so that groupby will show them together for every grouping
dimensions.remove(slice_compare_column[0])
dimensions.append(slice_compare_column[0])
table = aspects.group_by(table, dimensions, summary_operator)
return table
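# Rough usage sketch (column names, slice values and the SummaryOperators import
# are illustrative assumptions, not part of this module). Note that `dimensions`
# must contain slice_compare_column[0]:
#
#   result = slice_compare(table, 'runs', ['batsman', 'team'], {'season': 2019},
#                          ['team', 'CSK', 'MI'], SummaryOperators.SUM)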
|
python
|
import uuid
from django.db import models
from django.urls import reverse
class Masternode(models.Model):
    # every masternode is bound to one transaction that shows the
    # spend of 1 500 001 BBP
txid = models.CharField(max_length=100, primary_key=True, editable=False)
# the address related to this masternode. The masternode reward is paid to this address
address = models.CharField(max_length=64)
# time when the masternode was first seen
inserted_at = models.DateTimeField(auto_now_add=True)
# the last time the masternode was seen (with any status)
last_seen_at = models.DateTimeField(auto_now_add=True)
# the status of the masternode known in the blockchain
# One of: ENABLED, NEW_START_REQUIRED, WATCHDOG_EXPIRED, PRE_ENABLED, UPDATE_REQUIRED, EXPIRED
status = models.CharField(max_length=30)
# version of the watchdog (?)
version = models.IntegerField()
def get_absolute_url(self):
return reverse('masternodes_masternodes')
def __str__(self):
return self.txid
def save(self, *args, **kwargs):
# we check if the current database entry is different from this entry
# If yes, we create a history entry
create_history = False
try:
db_mn = Masternode.objects.get(txid=self.txid)
if db_mn.status != self.status or str(db_mn.version) != str(self.version):
create_history = True
except Masternode.DoesNotExist:
create_history = True
pass
super(Masternode, self).save(*args, **kwargs)
if create_history:
history = MasternodeHistory()
history.masternode = self
history.status = self.status
history.version = self.version
history.save()
class MasternodeHistory(models.Model):
masternode = models.ForeignKey(Masternode, on_delete=models.CASCADE,)
    # time when this history entry was added
inserted_at = models.DateTimeField(auto_now_add=True)
status = models.CharField(max_length=30)
version = models.IntegerField()
|
python
|
'''
FastAPI Demo
SQLAlchemy ORM Models
'''
# Standard Imports
# PyPi Imports
from sqlalchemy import (
Boolean,
Column,
Integer,
String
)
# Local Imports
from database.setup import Base
###############################################################################
class User(Base):
'''ORM Models - users'''
__tablename__ = "users"
user_id = Column(Integer, primary_key=True, index=True)
username = Column(String, unique=True)
salted_password_hash = Column(String)
first_name = Column(String)
last_name = Column(String)
email = Column(String, unique=True)
active_boolean = Column(Boolean, nullable=False, default=True)
admin_boolean = Column(Boolean, nullable=False, default=False)
|
python
|
"""
Class for reading/writing SpikeTrains in a text file.
It is the simple case where different spiketrains are written line by line.
Supported : Read/Write
Author: sgarcia
"""
import os
import numpy as np
import quantities as pq
from neo.io.baseio import BaseIO
from neo.core import Segment, SpikeTrain
class AsciiSpikeTrainIO(BaseIO):
"""
Class for reading/writing SpikeTrains in a text file.
Each Spiketrain is a line.
Usage:
>>> from neo import io
>>> r = io.AsciiSpikeTrainIO( filename = 'File_ascii_spiketrain_1.txt')
>>> seg = r.read_segment()
        >>> print(seg.spiketrains) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[<SpikeTrain(array([ 3.89981604, 4.73258781, 0.608428 , 4.60246277, 1.23805797,
...
"""
is_readable = True
is_writable = True
supported_objects = [Segment, SpikeTrain]
readable_objects = [Segment]
writeable_objects = [Segment]
has_header = False
is_streameable = False
read_params = {
Segment: [
('delimiter', {'value': '\t', 'possible': ['\t', ' ', ',', ';']}),
('t_start', {'value': 0., }),
]
}
write_params = {
Segment: [
('delimiter', {'value': '\t', 'possible': ['\t', ' ', ',', ';']}),
]
}
name = None
extensions = ['txt']
mode = 'file'
def __init__(self, filename=None):
"""
        This class reads/writes SpikeTrains in a text file.
Each row is a spiketrain.
**Arguments**
filename : the filename to read/write
"""
BaseIO.__init__(self)
self.filename = filename
def read_segment(self,
lazy=False,
delimiter='\t',
t_start=0. * pq.s,
unit=pq.s,
):
"""
Arguments:
            delimiter : columns delimiter in file, '\t' or one space or two spaces or ',' or ';'
            t_start : start time of all spiketrains, 0 by default
            unit : unit of spike times, can be a str or directly a Quantities
"""
assert not lazy, 'Do not support lazy'
unit = pq.Quantity(1, unit)
seg = Segment(file_origin=os.path.basename(self.filename))
        f = open(self.filename, 'r')
for i, line in enumerate(f):
alldata = line[:-1].split(delimiter)
if alldata[-1] == '':
alldata = alldata[:-1]
if alldata[0] == '':
alldata = alldata[1:]
spike_times = np.array(alldata).astype('f')
t_stop = spike_times.max() * unit
sptr = SpikeTrain(spike_times * unit, t_start=t_start, t_stop=t_stop)
sptr.annotate(channel_index=i)
seg.spiketrains.append(sptr)
f.close()
seg.create_many_to_one_relationship()
return seg
def write_segment(self, segment,
delimiter='\t',
):
"""
Write SpikeTrain of a Segment in a txt file.
Each row is a spiketrain.
Arguments:
            segment : the segment to write. Only spiketrains will be written.
            delimiter : columns delimiter in file, '\t' or one space or two spaces or ',' or ';'
information of t_start is lost
"""
f = open(self.filename, 'w')
for s, sptr in enumerate(segment.spiketrains):
for ts in sptr:
f.write('{:f}{}'.format(ts, delimiter))
f.write('\n')
f.close()
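# Writing is symmetric (sketch): given a Segment `seg` that holds spiketrains,
#   w = AsciiSpikeTrainIO(filename='File_ascii_spiketrain_out.txt')
#   w.write_segment(seg, delimiter='\t')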
|
python
|
import unittest
from main import Min_Heap
class MinHeapTestCase(unittest.TestCase):
def test_min_heap_returns_None_if_peek_is_called_with_no_items(self):
heap = Min_Heap()
self.assertEqual(heap.peek_min(), None)
def test_min_heap_returns_None_if_extract_is_called_with_no_items(self):
heap = Min_Heap()
self.assertEqual(heap.extract_min(), None)
def test_min_heap_has_the_correct_root_when_an_item_is_added(self):
heap = Min_Heap()
heap.insert(20)
self.assertEqual(heap.peek_min(), 20)
def test_min_heap_has_the_correct_items_as_they_are_added(self):
heap = Min_Heap()
heap.insert(5)
heap.insert(20)
self.assertEqual(heap._items[0], 5)
self.assertEqual(heap._items[1], 20)
def test_min_heap_changes_the_root_if_smaller_item_is_added(self):
heap = Min_Heap()
heap.insert(20)
heap.insert(5)
self.assertEqual(heap.peek_min(), 5)
    def test_min_heap_does_not_rebalance_when_level_has_room_for_smaller_item(self):
heap = Min_Heap()
heap.insert(20)
heap.insert(5)
heap.insert(15)
self.assertEqual(heap._items[0], 5)
self.assertEqual(heap._items[1], 20)
self.assertEqual(heap._items[2], 15)
def test_min_heap_works_as_expected_as_more_levels_are_added(self):
heap = Min_Heap()
heap.insert(20)
heap.insert(5)
heap.insert(15)
heap.insert(22)
heap.insert(40)
self.assertEqual(heap._items[0], 5)
self.assertEqual(heap._items[1], 20)
self.assertEqual(heap._items[2], 15)
self.assertEqual(heap._items[3], 22)
self.assertEqual(heap._items[4], 40)
def test_min_heap_rebalances_and_bubbles_up_when_smaller_item_is_added(self):
heap = Min_Heap()
heap.insert(5)
heap.insert(20)
heap.insert(15)
heap.insert(22)
heap.insert(40)
heap.insert(6)
self.assertEqual(heap._items[0], 5)
self.assertEqual(heap._items[1], 20)
self.assertEqual(heap._items[2], 6)
self.assertEqual(heap._items[3], 22)
self.assertEqual(heap._items[4], 40)
self.assertEqual(heap._items[5], 15)
def test_min_heap_extracts_all_items_in_the_correct_order(self):
mins = []
heap = Min_Heap()
heap.insert(5)
heap.insert(20)
heap.insert(15)
heap.insert(3)
heap.insert(22)
heap.insert(40)
heap.insert(6)
while heap:
mins.append(heap.extract_min())
self.assertEqual(mins, [3, 5, 6, 15, 20, 22, 40])
def test_min_heap_returns_the_correct_length(self):
heap = Min_Heap()
self.assertEqual(len(heap), 0)
heap.insert(5)
self.assertEqual(len(heap), 1)
heap.insert(6)
self.assertEqual(len(heap), 2)
heap.extract_min()
self.assertEqual(len(heap), 1)
heap.extract_min()
self.assertEqual(len(heap), 0)
heap.extract_min()
self.assertEqual(len(heap), 0)
if __name__ == "__main__":
unittest.main()
|
python
|
from typing import Tuple, Callable
from thinc.api import Model, to_numpy
from thinc.types import Ragged, Ints1d
from ..util import registry
@registry.layers("spacy.extract_spans.v1")
def extract_spans() -> Model[Tuple[Ragged, Ragged], Ragged]:
"""Extract spans from a sequence of source arrays, as specified by an array
of (start, end) indices. The output is a ragged array of the
extracted spans.
"""
return Model(
"extract_spans", forward, layers=[], refs={}, attrs={}, dims={}, init=init
)
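# Rough usage sketch (shapes and values are illustrative):
#   X = Ragged(data, lengths)               # e.g. 10 vectors for one sequence
#   spans = Ragged(starts_ends, counts)     # e.g. [[2, 5], [7, 9]] selects rows 2,3,4,7,8
#   Y, backprop = extract_spans()((X, spans), is_train=False)
#   # Y.dataXd stacks the selected rows and Y.lengths is [3, 2]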
def init(model, X=None, Y=None):
pass
def forward(
model: Model, source_spans: Tuple[Ragged, Ragged], is_train: bool
) -> Tuple[Ragged, Callable]:
"""Get subsequences from source vectors."""
ops = model.ops
X, spans = source_spans
assert spans.dataXd.ndim == 2
indices = _get_span_indices(ops, spans, X.lengths)
if len(indices) > 0:
Y = Ragged(X.dataXd[indices], spans.dataXd[:, 1] - spans.dataXd[:, 0]) # type: ignore[arg-type, index]
else:
Y = Ragged(
ops.xp.zeros(X.dataXd.shape, dtype=X.dataXd.dtype),
ops.xp.zeros((len(X.lengths),), dtype="i"),
)
x_shape = X.dataXd.shape
x_lengths = X.lengths
def backprop_windows(dY: Ragged) -> Tuple[Ragged, Ragged]:
dX = Ragged(ops.alloc2f(*x_shape), x_lengths)
ops.scatter_add(dX.dataXd, indices, dY.dataXd) # type: ignore[arg-type]
return (dX, spans)
return Y, backprop_windows
def _get_span_indices(ops, spans: Ragged, lengths: Ints1d) -> Ints1d:
"""Construct a flat array that has the indices we want to extract from the
source data. For instance, if we want the spans (5, 9), (8, 10) the
indices will be [5, 6, 7, 8, 8, 9].
"""
spans, lengths = _ensure_cpu(spans, lengths)
indices = []
offset = 0
for i, length in enumerate(lengths):
spans_i = spans[i].dataXd + offset
for j in range(spans_i.shape[0]):
indices.append(ops.xp.arange(spans_i[j, 0], spans_i[j, 1])) # type: ignore[call-overload, index]
offset += length
return ops.flatten(indices, dtype="i", ndim_if_empty=1)
def _ensure_cpu(spans: Ragged, lengths: Ints1d) -> Tuple[Ragged, Ints1d]:
return Ragged(to_numpy(spans.dataXd), to_numpy(spans.lengths)), to_numpy(lengths)
|
python
|
"""Initial revision
Revision ID: df419851a830
Revises:
Create Date: 2020-11-11 18:00:45.523670
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'df419851a830'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False),
sa.Column('created', sa.TIMESTAMP(), server_default=sa.text('now()'), nullable=False),
sa.Column('username', sa.UnicodeText(), nullable=False),
sa.Column('email', sa.UnicodeText(), nullable=False),
sa.Column('password', sa.UnicodeText(), nullable=False),
sa.Column('salt', sa.UnicodeText(), nullable=False),
sa.Column('active', sa.Boolean(), nullable=False),
sa.Column('verificationCode', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email')
)
op.create_table('subscription',
sa.Column('user_id', postgresql.UUID(), nullable=False),
sa.Column('mutation', sa.UnicodeText(), nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], onupdate='CASCADE', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('user_id', 'mutation')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('subscription')
op.drop_table('user')
# ### end Alembic commands ###
|
python
|
# Copyright (C) 2021-2022 Modin authors
#
# SPDX-License-Identifier: Apache-2.0
"""``DaskRunner`` class functionality."""
import os
import warnings
from unidist.cli.base.runner import BackendRunner
from unidist.cli.base.utils import Defaults, validate_num_cpus
from unidist.core.base.common import BackendName
class DaskRunner(BackendRunner):
"""
An implementation of unidist ``BackendRunner`` for Dask backend.
Parameters
----------
**cli_kwargs : dict
Keyword arguments supported by unidist CLI.
"""
def __init__(self, **cli_kwargs):
self.backend = BackendName.DASK
self.hosts = cli_kwargs.get("hosts", Defaults.HOSTS)
super().__init__(**cli_kwargs)
def check_kwargs_support(self, **kwargs):
"""Check support for `kwargs` combination for Dask backend."""
hosts = kwargs.get("hosts", self.hosts)
num_cpus = kwargs.get("num_cpus", self.num_cpus)
if hosts == Defaults.HOSTS:
self.hosts = None
if (
num_cpus == Defaults.NUM_CPUS
or isinstance(num_cpus, list)
and len(num_cpus) == 1
):
self.num_cpus = validate_num_cpus(num_cpus)[0]
else:
raise RuntimeError(
f"`num_cpus` must have a single value for {self.backend} backend."
)
elif isinstance(hosts, list) and len(hosts) == 1:
self.hosts = hosts[0]
if isinstance(num_cpus, list):
warnings.warn(
f"`num_cpus` isn't supported for existing {self.backend} cluster.",
RuntimeWarning,
)
self.num_cpus = None
else:
raise RuntimeError(
f"`hosts` must have a single value with existing cluster address for {self.backend} backend."
)
if (
kwargs.get("redis_password", Defaults.REDIS_PASSWORD)
!= Defaults.REDIS_PASSWORD
):
warnings.warn(
f"`redis_password` isn't supported for {self.backend} backend.",
RuntimeWarning,
)
def prepare_env(self):
"""Setup unidist environment variables for Dask backend."""
super().prepare_env()
if self.hosts is not None:
os.environ["UNIDIST_DASK_CLUSTER"] = "True"
os.environ["UNIDIST_DASK_SCHEDULER_ADDRESS"] = self.hosts
else:
os.environ["UNIDIST_CPUS"] = self.num_cpus
|
python
|
import argparse
import codecs
from typing import Dict, Union
import os
import yaml
import optuna
from .model.constant import *
from .train import main as train
DB_URL = 'mysql+pymysql://pmod:pmod@{host}:13306/optuna_pmod?charset=utf8'
RESULT_DIR = os.path.join(DIR_OPTUNA, DIR_RESULTS)
def parse_args() -> Dict[str, str]:
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
parser_optuna = parser.add_argument_group('Optuna')
parser_optuna.add_argument(
f'--{arg_hyphen(ARG_SEED)}',
type=int, default=1,
help='Seed for random number generator.'
)
parser_optuna.add_argument(
'-n', f'--{arg_hyphen(ARG_N_TRIALS)}',
type=int, default=200,
help='Number of trials.'
)
parser_optuna.add_argument(
'-t', f'--{arg_hyphen(ARG_TAG)}',
type=str, required=True,
help='Optuna training tag.'
)
parser_optuna.add_argument(
'-H', f'--{arg_hyphen(ARG_HOST)}',
type=str, default=None,
help='When using a MySQL server, specify the hostname.'
)
parser_optuna.add_argument(
'-s', f'--{arg_hyphen(ARG_SAMPLER)}',
choices=SINGLE_SAMPLERS, default=SINGLE_SAMPLER_TPE,
help='Optuna sampler.'
)
parser.set_defaults(func=single_main)
parser_train = parser.add_argument_group('Training')
parser_train.add_argument(
'-tdc', f'--{arg_hyphen(ARG_TRAIN_DL_CONFIG)}',
type=str, metavar='PATH', required=True,
help='PATH of JSON file of dataloader config for training.'
)
parser_train.add_argument(
'-vdc', f'--{arg_hyphen(ARG_VAL_DL_CONFIG)}',
type=str, metavar='PATH', default=None,
help=f'PATH of JSON file of dataloader config for validation. If not specified, the same file as "--{arg_hyphen(ARG_TRAIN_DL_CONFIG)}" will be used.'
)
parser_train.add_argument(
'-bs', f'--{arg_hyphen(ARG_BLOCK_SIZE)}',
type=int, default=0,
help='Block size of dataset.'
)
parser_train.add_argument(
'-td', f'--{arg_hyphen(ARG_TRAIN_DATA)}',
type=str, metavar='PATH', nargs='+', required=True,
help='PATH of training HDF5 datasets.'
)
parser_train.add_argument(
'-vd', f'--{arg_hyphen(ARG_VAL_DATA)}',
type=str, metavar='PATH', nargs='*', default=[],
help=f'PATH of validation HDF5 datasets. If not specified, the same files as "--{arg_hyphen(ARG_TRAIN_DATA)}" will be used.'
)
parser_train.add_argument(
f'--{arg_hyphen(ARG_EPOCHS)}',
type=int, default=50,
help='Epochs'
)
parser_train.add_argument(
f'--{arg_hyphen(ARG_EPOCH_START_COUNT)}',
type=int, default=1,
help='The starting epoch count'
)
parser_train.add_argument(
f'--{arg_hyphen(ARG_STEPS_PER_EPOCH)}',
type=int, default=10000,
help='Number of steps per epoch. If it is greater than the total number of datasets, then the total number of datasets is used.'
)
parser_train.add_argument(
'-ppa', f'--{arg_hyphen(ARG_PROJECTED_POSITION_AUGMENTATION)}',
        action='store_false', help='Disable Projected Position Augmentation'
)
parser_train.add_argument(
f'--{arg_hyphen(ARG_TR_ERROR_RANGE)}',
type=float, default=[0.6, 1.3, 0.7],
help='Translation Error Range [m].'
)
parser_train.add_argument(
f'--{arg_hyphen(ARG_ROT_ERROR_RANGE)}',
type=float, default=3.0,
help='Rotation Error Range [deg].'
)
parser_net = parser.add_argument_group('Network')
parser_net.add_argument(
'-b', f'--{arg_hyphen(ARG_BATCH_SIZE)}',
type=int, default=2,
help='Batch Size'
)
parser_net.add_argument(
f'--{arg_hyphen(ARG_RESUME)}',
type=str, metavar='PATH', default=None,
help='PATH of checkpoint(.pth).'
)
parser_net.add_argument(
f'-amp', f'--{arg_hyphen(ARG_AMP)}',
action='store_true',
help='Use AMP.'
)
parser_optim = parser.add_argument_group('Optimizer')
parser_optim.add_argument(
f'--{arg_hyphen(ARG_CLIP_MAX_NORM)}',
type=float, default=1.0,
help='max_norm for clip_grad_norm.'
)
parser_optim.add_argument(
'-op', f'--{arg_hyphen(ARG_OPTIM_PARAMS)}',
type=str, metavar='PATH', default='./config/optim-params-default.yaml', help='PATH of YAML file of optimizer params.'
)
parser_optim.add_argument(
'-o', f'--{arg_hyphen(ARG_OPTIMIZER)}',
type=str, default=None, choices=OPTIM_TYPES,
help='Optimizer'
)
parser_optim.add_argument(
'-lp', f'--{arg_hyphen(ARG_LR_POLICY)}',
type=str, default=LR_POLICY_PLATEAU, choices=LR_POLICIES,
help='Learning rate policy.'
)
parser_loss = parser.add_argument_group('Loss')
parser_loss.add_argument(
f'--{arg_hyphen(ARG_L1)}',
type=float, default=None,
help='Weight of L1 loss.'
)
parser_loss.add_argument(
f'--{arg_hyphen(ARG_SEG_CE)}',
type=float, default=None,
help='Weight of Segmentation CrossEntropy Loss.'
)
parser_loss.add_argument(
f'--{arg_hyphen(ARG_SEG_CE_AUX1)}',
type=float, default=None,
        help='Weight of Segmentation Aux1 CrossEntropy Loss.'
)
parser_loss.add_argument(
f'--{arg_hyphen(ARG_SEG_CE_AUX2)}',
type=float, default=None,
        help='Weight of Segmentation Aux2 CrossEntropy Loss.'
)
parser_multi = subparsers.add_parser(
'multi', help='Multi Objective Trial'
)
parser_multi.add_argument(
'-s', f'--{arg_hyphen(ARG_SAMPLER)}',
choices=MULTI_SAMPLERS, default=MULTI_SAMPLER_MOTPE,
help='Optuna sampler.'
)
parser_multi.set_defaults(func=multi_main)
args = vars(parser.parse_args())
if os.path.isfile(args[ARG_OPTIM_PARAMS]) is False:
raise FileNotFoundError(f'"{args[ARG_OPTIM_PARAMS]}"')
with open(args[ARG_OPTIM_PARAMS]) as f:
optim_params:dict = yaml.safe_load(f)
args[ARG_OPTIM_PARAMS] = optim_params
args[ARG_EVAL_DATA] = []
args[ARG_DETECT_ANOMALY] = False
return args
def objective_with_args(args: Dict[str, str], workdir: str):
def objective(trial: optuna.Trial):
train_args: Dict[str, str] = args.copy()
train_args.pop('func', None)
trial.set_user_attr('hostname', os.environ.get('HOST_NAME', os.environ['HOSTNAME']))
train_args[ARG_TAG] = f'optuna-trial{trial.number:06d}-{args[ARG_TAG]}'
# Optimizer
train_args[ARG_OPTIMIZER] = trial.suggest_categorical(ARG_OPTIMIZER, OPTIM_TYPES) if args[ARG_OPTIMIZER] is None else args[ARG_OPTIMIZER]
optim_params: Dict[str, Dict[str, Dict[str, Dict[str, Union[int, float]]]]] = train_args[ARG_OPTIM_PARAMS]
for module_key, module_dict in optim_params.items():
optim_dict: Dict[str, Union[float, Dict[str, float]]] = module_dict[CONFIG_OPTIMIZER]
if optim_dict.get(CONFIG_LR) is None:
optim_dict[CONFIG_LR] = trial.suggest_loguniform(CONFIG_LR, 1e-9, 1e-1)
if train_args[ARG_OPTIMIZER] in [OPTIM_TYPE_ADAM, OPTIM_TYPE_ADABELIEF]:
optim_param_dict: Dict[str, float] = optim_dict[train_args[ARG_OPTIMIZER]]
if optim_param_dict.get(CONFIG_BETA1) is None:
optim_param_dict[CONFIG_BETA1] = trial.suggest_uniform(CONFIG_BETA1, 0.0, 1.0)
if optim_param_dict.get(CONFIG_BETA2) is None:
optim_param_dict[CONFIG_BETA2] = trial.suggest_uniform(CONFIG_BETA2, 0.0, 1.0)
elif train_args[ARG_OPTIMIZER] in [OPTIM_TYPE_SGD]:
optim_param_dict: Dict[str, float] = optim_dict[train_args[ARG_OPTIMIZER]]
if optim_param_dict.get(CONFIG_MOMENTUM) is None:
optim_param_dict[CONFIG_MOMENTUM] = trial.suggest_float(CONFIG_MOMENTUM, 0.0, 1.0)
else:
raise NotImplementedError(train_args[ARG_OPTIMIZER])
scheduler_dict: Dict[str, Dict[str, Union[int, float]]] = module_dict[CONFIG_SCHEDULER]
if train_args[ARG_LR_POLICY] in [LR_POLICY_LAMBDA]:
scheduler_param_dict: Dict[str, Union[int, float]] = scheduler_dict[train_args[ARG_LR_POLICY]]
if scheduler_param_dict.get(CONFIG_EPOCH_COUNT) is None:
scheduler_param_dict[CONFIG_EPOCH_COUNT] = args[ARG_EPOCH_START_COUNT]
if scheduler_param_dict.get(CONFIG_NITER) is None:
scheduler_param_dict[CONFIG_NITER] = trial.suggest_int(CONFIG_NITER, 1, args[ARG_STEPS_PER_EPOCH])
if scheduler_param_dict.get(CONFIG_NITER_DECAY) is None:
scheduler_param_dict[CONFIG_NITER_DECAY] = trial.suggest_int(CONFIG_NITER_DECAY, 1, args[ARG_STEPS_PER_EPOCH])
elif train_args[ARG_LR_POLICY] in [LR_POLICY_STEP]:
scheduler_param_dict: Dict[str, Union[int, float]] = scheduler_dict[train_args[ARG_LR_POLICY]]
if scheduler_param_dict.get(CONFIG_NITER_DECAY) is None:
scheduler_param_dict[CONFIG_NITER_DECAY] = trial.suggest_int(CONFIG_NITER_DECAY, low=1)
if scheduler_param_dict.get(CONFIG_GAMMA) is None:
scheduler_param_dict[CONFIG_GAMMA] = trial.suggest_uniform(CONFIG_GAMMA, 1e-3, 9e-1)
elif train_args[ARG_LR_POLICY] in [LR_POLICY_PLATEAU]:
scheduler_param_dict: Dict[str, Union[int, float]] = scheduler_dict[train_args[ARG_LR_POLICY]]
if scheduler_param_dict.get(CONFIG_PATIENCE) is None:
scheduler_param_dict[CONFIG_PATIENCE] = trial.suggest_int(CONFIG_PATIENCE, 1, 100)
if scheduler_param_dict.get(CONFIG_FACTOR) is None:
scheduler_param_dict[CONFIG_FACTOR] = trial.suggest_uniform(CONFIG_FACTOR, 1e-3, 9e-1)
if scheduler_param_dict.get(CONFIG_THRESHOLD) is None:
scheduler_param_dict[CONFIG_THRESHOLD] = trial.suggest_uniform(CONFIG_THRESHOLD, 1e-9, 1e-1)
elif train_args[ARG_LR_POLICY] in [LR_POLICY_COS]:
scheduler_param_dict: Dict[str, Union[int, float]] = scheduler_dict[train_args[ARG_LR_POLICY]]
if scheduler_param_dict.get(CONFIG_NITER) is None:
scheduler_param_dict[CONFIG_NITER] = trial.suggest_int(CONFIG_NITER, 1, args[ARG_STEPS_PER_EPOCH])
if scheduler_param_dict.get(CONFIG_ETA_MIN) is None:
scheduler_param_dict[CONFIG_ETA_MIN] = trial.suggest_uniform(CONFIG_ETA_MIN, 1e-9, 1e-1)
elif train_args[ARG_LR_POLICY] in [LR_POLICY_CLR]:
scheduler_param_dict: Dict[str, Union[int, float]] = scheduler_dict[train_args[ARG_LR_POLICY]]
if scheduler_param_dict.get(CONFIG_BASE_LR) is None:
scheduler_param_dict[CONFIG_BASE_LR] = trial.suggest_uniform(CONFIG_BASE_LR, 1e-9, 1e-1)
if scheduler_param_dict.get(CONFIG_MAX_LR) is None:
scheduler_param_dict[CONFIG_MAX_LR] = trial.suggest_uniform(CONFIG_MAX_LR, 1e-9, 1e-1)
else:
raise NotImplementedError(train_args[ARG_LR_POLICY])
# Loss
train_args[ARG_L1] = trial.suggest_uniform(ARG_L1, 0.0, 1.0) if args[ARG_L1] is None else args[ARG_L1]
train_args[ARG_SEG_CE] = trial.suggest_uniform(ARG_SEG_CE, 0.0, 1.0) if args[ARG_SEG_CE] is None else args[ARG_SEG_CE]
train_args[ARG_SEG_CE_AUX1] = trial.suggest_uniform(ARG_SEG_CE_AUX1, 0.0, 1.0) if args[ARG_SEG_CE_AUX1] is None else args[ARG_SEG_CE_AUX1]
train_args[ARG_SEG_CE_AUX2] = trial.suggest_uniform(ARG_SEG_CE_AUX2, 0.0, 1.0) if args[ARG_SEG_CE_AUX2] is None else args[ARG_SEG_CE_AUX2]
print(f'{"Trial":11s}: {trial.number:14d}')
result: Dict[str, float] = train(train_args, workdir, trial)
if len(trial.study.directions) > 1:
return result[METRIC_IOU], result[METRIC_MAPE]
else:
return 1.0 - result[METRIC_IOU] + result[METRIC_MAPE]
return objective
def optimize(study: optuna.Study, args: Dict[str, Union[int, str]], workdir: str, storage: str):
print(f'{"Optuna":11s}: {"Tag":14s}: {args[ARG_TAG]}')
print(f'{"":11s}: {"Study":14s}: {study.study_name}')
print(f'{"":11s}: {"Num Trials":14s}: {args[ARG_N_TRIALS]}')
print(f'{"":11s}: {"Storage":14s}: {storage}')
try:
study.optimize(objective_with_args(args, workdir), n_trials=args[ARG_N_TRIALS])
except:
pass
finally:
save_path: str = os.path.join(
workdir,
RESULT_DIR
)
os.makedirs(save_path, exist_ok=True)
for best_trial in study.best_trials:
            # use a separate per-trial path so save_path is not turned into a file path
            trial_path = os.path.join(save_path, f'{best_trial.datetime_start.strftime("%Y%m%dT%H%M%S")}-{args[ARG_TAG]}.yaml')
            with codecs.open(trial_path, mode='w', encoding='utf-8') as f:
                yaml.dump(best_trial.params, f, encoding='utf-8', allow_unicode=True)
def single_main(args: Dict[str, str], workdir: str):
if args[ARG_SAMPLER] == SINGLE_SAMPLER_GRID:
sampler = optuna.samplers.GridSampler()
elif args[ARG_SAMPLER] == SINGLE_SAMPLER_RANDOM:
sampler = optuna.samplers.RandomSampler(seed=args[ARG_SEED])
elif args[ARG_SAMPLER] == SINGLE_SAMPLER_CMAES:
sampler = optuna.samplers.CmaEsSampler(seed=args[ARG_SEED])
else:
sampler = optuna.samplers.TPESampler(seed=args[ARG_SEED])
storage: str = DB_URL.replace('{host}', args[ARG_HOST]) if isinstance(args[ARG_HOST], str) else None
study_name: str = f'pmod-{args[ARG_TAG]}'
study: optuna.Study = optuna.create_study(
storage=storage,
sampler=sampler,
direction='minimize',
study_name=study_name,
load_if_exists=True,
)
if 'Objective' not in study.user_attrs.keys():
study.set_user_attr('Objective', '(1 - mIoU) + MAPE')
optimize(study, args, workdir, storage)
def multi_main(args: Dict[str, str], workdir: str):
if args[ARG_SAMPLER] == MULTI_SAMPLER_NSGA2:
sampler = optuna.samplers.NSGAIISampler(seed=args[ARG_SEED])
else:
sampler = optuna.samplers.MOTPESampler(seed=args[ARG_SEED])
storage: str = DB_URL.replace('{host}', args[ARG_HOST]) if isinstance(args[ARG_HOST], str) else None
study_name: str = f'pmod-{args[ARG_TAG]}'
study: optuna.Study = optuna.create_study(
storage=storage,
directions=['maximize', 'minimize'],
sampler=sampler,
study_name=study_name,
load_if_exists=True,
)
if 'Objectives' not in study.user_attrs.keys():
study.set_user_attr('Objectives', ['Best mIoU', 'Best MAPE'])
optimize(study, args, workdir, storage)
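# Example invocation (module path, file names and the tag are illustrative):
#   python -m <package>.optuna -t baseline -n 100 \
#       -tdc ./config/train-dl.json -td ./data/train.hdf5 -H optuna-db-host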
|
python
|
'''
Prints data about general statistics by region
'''
def runStat(dashes):
regions = {}
for dash in dashes.dashes:
if dash.region in regions:
regions[dash.region]["pay"] += dash.total
regions[dash.region]["num"] += 1
delta = dash.end - dash.start
regions[dash.region]["hours"] += (delta.seconds / 60.) / 60.
regions[dash.region]["deliveries"] += len(dash.deliveries)
else:
delta = dash.end - dash.start
regions[dash.region] = {"pay": dash.total, "num": 1, "hours": ((delta.seconds / 60.) / 60.), "deliveries": len(dash.deliveries)}
for region in regions:
print("Region: " + region)
print("Dashes done: " + str(regions[region]["num"]))
print("Deliveries done: " + str(regions[region]["deliveries"]))
print("Money made: $" + str(regions[region]["pay"] / 100.))
print("Hours worked: " + str(int(regions[region]["hours"])))
print("Average hourly rate: $" + str(int(regions[region]["pay"] / regions[region]["hours"]) / 100.))
print("Average deliveries per hour: " + ("%.2f" % (regions[region]["deliveries"] / regions[region]["hours"])))
print()
def getName():
return "Stats per region"
|
python
|
#!/usr/bin/env python3
import funct
import sql
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader('templates/'), autoescape=True)
template = env.get_template('metrics.html')
print('Content-type: text/html\n')
funct.check_login()
try:
user, user_id, role, token, servers = funct.get_users_params()
cmd = "rpm --query haproxy-wi-metrics-* |awk -F\"metrics\" '{print $2}' |awk -F\".noa\" '{print $1}' |sed 's/-//1' |sed 's/-/./'"
service_ver, stderr = funct.subprocess_execute(cmd)
if service_ver == '* is not installed':
servers = ''
else:
servers = sql.select_servers_metrics(user_id.value)
except Exception:
pass
template = template.render(h2=1, title="Metrics",
autorefresh=1,
role=role,
user=user,
servers=servers,
versions=funct.versions(),
services=service_ver[0],
token=token)
print(template)
|
python
|
import sys
lista = [
('chave1', 'valor1'),
('chave2', 'valor2'),
('chave1', 'valor1'),
('chave2', 'valor2'),
('chave1', 'valor1'),
]
#d1 = {x.upper(): y.upper() for x, y in lista} # converts everything to uppercase
#d1 = {x for x in range(5)}
d1 = {f'chave_{x}': 'a' for x in range(5)}
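# d1 == {'chave_0': 'a', 'chave_1': 'a', 'chave_2': 'a', 'chave_3': 'a', 'chave_4': 'a'}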
print(d1)
print(sys.getsizeof(d1))
print(sys.getsizeof(lista))
|
python
|
from githubpy import *
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--token")
parser.add_argument("-o", "--owner")
parser.add_argument("-r", "--repo")
parser.add_argument("-w", "--workflow", action='append', default=[])
parser.add_argument("-b", "--branch", help="Branch or tag")
options = parser.parse_args()
ghc = GitHubClient(token=options.token)
result = ghc.ActionsCreateWorkflowDispatch(options.owner,
options.repo,
options.workflow[0],
options.branch, inputs={})
if not isinstance(result, HttpResponse) and result.status_code != 204:
print("ERROR: {result.message}")
return
if __name__ == '__main__':
main()
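# Example invocation (script name and values are illustrative):
#   python <this script>.py -t <token> -o octocat -r hello-world -w ci.yml -b main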
|
python
|
#!/usr/bin/env python3
from PyQt5.QtWidgets import *
import sys
class Window(QWidget):
def __init__(self):
QWidget.__init__(self)
layout = QGridLayout()
self.setLayout(layout)
toolbox = QToolBox()
layout.addWidget(toolbox, 0, 0)
label = QLabel()
toolbox.addItem(label, "Honda")
label = QLabel()
toolbox.addItem(label, "Toyota")
label = QLabel()
toolbox.addItem(label, "Mercedes")
app = QApplication(sys.argv)
screen = Window()
screen.show()
sys.exit(app.exec_())
|
python
|
# -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------------
# mkv.py - Matroska Streaming Video Files
# -----------------------------------------------------------------------------
# $Id$
#
# -----------------------------------------------------------------------------
# kaa-Metadata - Media Metadata for Python
# Copyright (C) 2003-2006 Thomas Schueppel, Dirk Meyer, Jason Tackaberry
#
# Maintainer: Jason Tackaberry <[email protected]>
#
# Please see the file AUTHORS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------------
__all__ = ['Parser']
# python imports
from struct import unpack
import logging
import re
from datetime import datetime
# import kaa.metadata.video core
from . import core
# get logging object
log = logging.getLogger('metadata')
# Main IDs for the Matroska streams
MATROSKA_VIDEO_TRACK = 0x01
MATROSKA_AUDIO_TRACK = 0x02
MATROSKA_SUBTITLES_TRACK = 0x11
MATROSKA_HEADER_ID = 0x1A45DFA3
MATROSKA_TRACKS_ID = 0x1654AE6B
MATROSKA_CUES_ID = 0x1C53BB6B
MATROSKA_SEGMENT_ID = 0x18538067
MATROSKA_SEGMENT_INFO_ID = 0x1549A966
MATROSKA_CLUSTER_ID = 0x1F43B675
MATROSKA_VOID_ID = 0xEC
MATROSKA_CRC_ID = 0xBF
MATROSKA_TIMECODESCALE_ID = 0x2AD7B1
MATROSKA_DURATION_ID = 0x4489
MATROSKA_CRC32_ID = 0xBF
MATROSKA_TIMECODESCALE_ID = 0x2AD7B1
MATROSKA_MUXING_APP_ID = 0x4D80
MATROSKA_WRITING_APP_ID = 0x5741
MATROSKA_CODEC_ID = 0x86
MATROSKA_CODEC_PRIVATE_ID = 0x63A2
MATROSKA_FRAME_DURATION_ID = 0x23E383
MATROSKA_VIDEO_SETTINGS_ID = 0xE0
MATROSKA_VIDEO_WIDTH_ID = 0xB0
MATROSKA_VIDEO_HEIGHT_ID = 0xBA
MATROSKA_VIDEO_INTERLACED_ID = 0x9A
MATROSKA_VIDEO_DISPLAY_WIDTH_ID = 0x54B0
MATROSKA_VIDEO_DISPLAY_HEIGHT_ID = 0x54BA
MATROSKA_VIDEO_STEREO = 0x53B8
MATROSKA_AUDIO_SETTINGS_ID = 0xE1
MATROSKA_AUDIO_SAMPLERATE_ID = 0xB5
MATROSKA_AUDIO_CHANNELS_ID = 0x9F
MATROSKA_TRACK_UID_ID = 0x73C5
MATROSKA_TRACK_NUMBER_ID = 0xD7
MATROSKA_TRACK_TYPE_ID = 0x83
MATROSKA_TRACK_LANGUAGE_ID = 0x22B59C
MATROSKA_TRACK_OFFSET = 0x537F
MATROSKA_TRACK_FLAG_DEFAULT_ID = 0x88
MATROSKA_TRACK_FLAG_ENABLED_ID = 0xB9
MATROSKA_TITLE_ID = 0x7BA9
MATROSKA_DATE_UTC_ID = 0x4461
MATROSKA_NAME_ID = 0x536E
MATROSKA_CHAPTERS_ID = 0x1043A770
MATROSKA_CHAPTER_UID_ID = 0x73C4
MATROSKA_EDITION_ENTRY_ID = 0x45B9
MATROSKA_CHAPTER_ATOM_ID = 0xB6
MATROSKA_CHAPTER_TIME_START_ID = 0x91
MATROSKA_CHAPTER_TIME_END_ID = 0x92
MATROSKA_CHAPTER_FLAG_ENABLED_ID = 0x4598
MATROSKA_CHAPTER_DISPLAY_ID = 0x80
MATROSKA_CHAPTER_LANGUAGE_ID = 0x437C
MATROSKA_CHAPTER_STRING_ID = 0x85
MATROSKA_ATTACHMENTS_ID = 0x1941A469
MATROSKA_ATTACHED_FILE_ID = 0x61A7
MATROSKA_FILE_DESC_ID = 0x467E
MATROSKA_FILE_NAME_ID = 0x466E
MATROSKA_FILE_MIME_TYPE_ID = 0x4660
MATROSKA_FILE_DATA_ID = 0x465C
MATROSKA_SEEKHEAD_ID = 0x114D9B74
MATROSKA_SEEK_ID = 0x4DBB
MATROSKA_SEEKID_ID = 0x53AB
MATROSKA_SEEK_POSITION_ID = 0x53AC
MATROSKA_TAGS_ID = 0x1254C367
MATROSKA_TAG_ID = 0x7373
MATROSKA_TARGETS_ID = 0x63C0
MATROSKA_TARGET_TYPE_VALUE_ID = 0x68CA
MATROSKA_TARGET_TYPE_ID = 0x63CA
MATRSOKA_TAGS_TRACK_UID_ID = 0x63C5
MATRSOKA_TAGS_EDITION_UID_ID = 0x63C9
MATRSOKA_TAGS_CHAPTER_UID_ID = 0x63C4
MATRSOKA_TAGS_ATTACHMENT_UID_ID = 0x63C6
MATROSKA_SIMPLE_TAG_ID = 0x67C8
MATROSKA_TAG_NAME_ID = 0x45A3
MATROSKA_TAG_LANGUAGE_ID = 0x447A
MATROSKA_TAG_STRING_ID = 0x4487
MATROSKA_TAG_BINARY_ID = 0x4485
# See mkv spec for details:
# http://www.matroska.org/technical/specs/index.html
# Map to convert to well known codes
# http://haali.cs.msu.ru/mkv/codecs.pdf
FOURCCMap = {
'V_THEORA': 'THEO',
'V_SNOW': 'SNOW',
'V_MPEG4/ISO/ASP': 'MP4V',
'V_MPEG4/ISO/AVC': 'AVC1',
'A_AC3': 0x2000,
'A_MPEG/L3': 0x0055,
'A_MPEG/L2': 0x0050,
'A_MPEG/L1': 0x0050,
'A_DTS': 0x2001,
'A_PCM/INT/LIT': 0x0001,
'A_PCM/FLOAT/IEEE': 0x003,
'A_TTA1': 0x77a1,
'A_WAVPACK4': 0x5756,
'A_VORBIS': 0x6750,
'A_FLAC': 0xF1AC,
'A_AAC': 0x00ff,
'A_AAC/': 0x00ff
}
stereo_map = {
1: 'side by side (left eye is first)',
2: 'top-bottom (right eye is first)',
3: 'top-bottom (left eye is first)',
4: 'checkboard (right is first)',
5: 'checkboard (left is first)',
6: 'row interleaved (right is first)',
7: 'row interleaved (left is first)',
8: 'column interleaved (right is first)',
9: 'column interleaved (left is first)',
10: 'anaglyph (cyan/red)',
11: 'side by side (right eye is first)',
12: 'anaglyph (green/magenta)',
13: 'both eyes laced in one Block (left eye is first)',
14: 'both eyes laced in one Block (right eye is first)'
}
def matroska_date_to_datetime(date):
"""
Converts a date in Matroska's date format to a python datetime object.
Returns the given date string if it could not be converted.
"""
# From the specs:
# The fields with dates should have the following format: YYYY-MM-DD
# HH:MM:SS.MSS [...] To store less accuracy, you remove items starting
# from the right. To store only the year, you would use, "2004". To store
# a specific day such as May 1st, 2003, you would use "2003-05-01".
format = re.split(r'([-:. ])', '%Y-%m-%d %H:%M:%S.%f')
while format:
try:
return datetime.strptime(date, ''.join(format))
except ValueError:
format = format[:-2]
return date
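# For example, '2003-05-01' parses to datetime(2003, 5, 1) and a bare '2004' to
# datetime(2004, 1, 1); a string matching no prefix of the format is returned unchanged.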
def matroska_bps_to_bitrate(bps):
"""
Tries to convert a free-form bps string into a bitrate (bits per second).
"""
m = re.search(r'([\d.]+)\s*(\D.*)', bps)
if m:
bps, suffix = m.groups()
if 'kbit' in suffix:
return float(bps) * 1024
elif 'kbyte' in suffix:
return float(bps) * 1024 * 8
elif 'byte' in suffix:
return float(bps) * 8
elif 'bps' in suffix or 'bit' in suffix:
return float(bps)
if bps.replace('.', '').isdigit():
if float(bps) < 30000:
# Assume kilobits and convert to bps
return float(bps) * 1024
return float(bps)
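# For example, '128 kbit/s' and a bare '128' both yield 131072.0 (128 * 1024),
# while '16000 bps' yields 16000.0.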
# Used to convert the official matroska tag names (and some unofficial ones) to core
# attributes. tag name -> attr, filter
TAGS_MAP = {
# From Media core
'album': ('album', None),
'title': ('title', None),
'subtitle': ('caption', None),
'comment': ('comment', None),
'comments': ('comment', None),
'url': ('url', None),
'artist': ('artist', None),
'keywords': ('keywords', lambda s: [word.strip() for word in s.split(',')]),
'composer_nationality': ('country', None),
'date_released': ('datetime', None),
'date_recorded': ('datetime', None),
'date_written': ('datetime', None),
# From Video core
'encoder': ('encoder', None),
'bps': ('bitrate', matroska_bps_to_bitrate),
'part_number': ('trackno', int),
'total_parts': ('trackof', int),
'copyright': ('copyright', None),
'genre': ('genre', None),
'actor': ('actors', None),
'written_by': ('writer', None),
'producer': ('producer', None),
'production_studio': ('studio', None),
'law_rating': ('rating', None),
'summary': ('summary', None),
'synopsis': ('synopsis', None),
}
class EbmlEntity:
"""
This is class that is responsible to handle one Ebml entity as described in
the Matroska/Ebml spec
"""
def __init__(self, inbuf):
# Compute the EBML id
# Set the CRC len to zero
self.crc_len = 0
# Now loop until we find an entity without CRC
try:
self.build_entity(inbuf)
except IndexError:
raise core.ParseError()
while self.get_id() == MATROSKA_CRC32_ID:
self.crc_len += self.get_total_len()
inbuf = inbuf[self.get_total_len():]
self.build_entity(inbuf)
def build_entity(self, inbuf):
self.compute_id(inbuf)
if self.id_len == 0:
log.debug("EBML entity not found, bad file format")
raise core.ParseError()
self.entity_len, self.len_size = self.compute_len(inbuf[self.id_len:])
self.entity_data = inbuf[self.get_header_len() : self.get_total_len()]
self.ebml_length = self.entity_len
self.entity_len = min(len(self.entity_data), self.entity_len)
# if the data size is 8 or less, it could be a numeric value
self.value = 0
if self.entity_len <= 8:
for pos, shift in zip(list(range(self.entity_len)), list(range((self.entity_len-1)*8, -1, -8))):
self.value |= self.entity_data[pos] << shift
def add_data(self, data):
maxlen = self.ebml_length - len(self.entity_data)
if maxlen <= 0:
return
self.entity_data += data[:maxlen]
self.entity_len = len(self.entity_data)
def compute_id(self, inbuf):
self.id_len = 0
if len(inbuf) < 1:
return 0
first = inbuf[0]
if first & 0x80:
self.id_len = 1
self.entity_id = first
elif first & 0x40:
if len(inbuf) < 2:
return 0
self.id_len = 2
self.entity_id = inbuf[0]<<8 | inbuf[1]
elif first & 0x20:
if len(inbuf) < 3:
return 0
self.id_len = 3
self.entity_id = (inbuf[0]<<16) | (inbuf[1]<<8) | inbuf[2]
elif first & 0x10:
if len(inbuf) < 4:
return 0
self.id_len = 4
self.entity_id = (inbuf[0]<<24) | (inbuf[1]<<16) | (inbuf[2]<<8) | inbuf[3]
self.entity_str = inbuf[0:self.id_len]
def compute_len(self, inbuf):
if not inbuf:
return 0, 0
i = num_ffs = 0
len_mask = 0x80
len = inbuf[0]
while not len & len_mask:
i += 1
len_mask >>= 1
if i >= 8:
return 0, 0
len &= len_mask - 1
if len == len_mask - 1:
num_ffs += 1
for p in range(i):
len = (len << 8) | inbuf[p + 1]
if len & 0xff == 0xff:
num_ffs += 1
if num_ffs == i + 1:
len = 0
return len, i + 1
def get_crc_len(self):
return self.crc_len
def get_value(self):
return self.value
def get_float_value(self):
if len(self.entity_data) == 4:
return unpack('!f', self.entity_data)[0]
elif len(self.entity_data) == 8:
return unpack('!d', self.entity_data)[0]
return 0.0
def get_data(self):
return self.entity_data
def get_utf8(self):
# EBML RFC says "A string MAY be zero padded at the end."
return str(self.entity_data.rstrip(b'\x00'), 'utf-8', 'replace')
def get_str(self):
return str(self.entity_data.rstrip(b'\x00'), 'ascii', 'replace')
def get_id(self):
return self.entity_id
def get_str_id(self):
return self.entity_str
def get_len(self):
return self.entity_len
def get_total_len(self):
return self.entity_len + self.id_len + self.len_size
def get_header_len(self):
return self.id_len + self.len_size
class Matroska(core.AVContainer):
"""
Matroska video and audio parser. If at least one video stream is
detected it will set the type to MEDIA_AV.
"""
media = core.MEDIA_AUDIO
def __init__(self, file):
core.AVContainer.__init__(self)
self.samplerate = 1
self.file = file
# Read enough that we're likely to get the full seekhead and elements after
# the seekhead (but before clusters) in case the file is truncated.
# (FIXME: kludge.)
buffer = file.read(100000)
if not buffer:
# Regular File end
raise core.ParseError()
# Check the Matroska header
header = EbmlEntity(buffer)
if header.get_id() != MATROSKA_HEADER_ID:
raise core.ParseError()
log.debug("HEADER ID found %08X" % header.get_id() )
self.mime = 'application/mkv'
self.type = 'Matroska'
self.has_idx = False
self.objects_by_uid = {}
self._in_seekhead = False
# Now get the segment
self.segment = segment = EbmlEntity(buffer[header.get_total_len():])
# Record file offset of segment data for seekheads
self.segment.offset = header.get_total_len() + segment.get_header_len()
if segment.get_id() != MATROSKA_SEGMENT_ID:
log.debug("SEGMENT ID not found %08X" % segment.get_id())
return
log.debug("SEGMENT ID found %08X" % segment.get_id())
# The parsing strategy for mkv is to first process the seekhead (which is
# at the top of the file), which points to all top-level elements we're
# interested in parsing. Seekhead parsing is more robust as it seeks
# across the file as needed and reads all data. If this succeeds, then
# we stop processing everything else in the segment as we're done.
#
# If the seekhead parsing fails, this is usually because the file is
# incomplete/corrupt. In this case, we clear out anything that might
# have been processed from the seekhead and continue on with the
# other elements in the segment that might be in our pre-read buffer.
try:
for elem in self.process_one_level(segment):
log.debug("Segment level id: %x", elem.get_id())
try:
self.process_elem(elem)
if elem.get_id() == MATROSKA_SEEKHEAD_ID:
# Seekhead was successfully processed so we're done.
break
except core.ParseError:
if elem.get_id() == MATROSKA_SEEKHEAD_ID:
# We couldn't finish processing the seekhead. Clear
# out all metadata and keep processing the segment.
log.debug("Failed to process seekhead, continuing with segment")
del self.audio[:]
del self.video[:]
del self.subtitles[:]
del self.chapters[:]
self.objects_by_uid.clear()
continue
else:
# Some other error, stop processing.
break
except core.ParseError:
pass
if not self.has_idx:
log.debug('WARNING: file has no index')
self._set('corrupt', True)
def process_elem(self, elem):
elem_id = elem.get_id()
log.debug('BEGIN: process element %x size %d', elem_id, elem.entity_len)
if elem_id == MATROSKA_SEGMENT_INFO_ID:
duration = 0
scalecode = 1000000.0
for ielem in self.process_one_level(elem):
ielem_id = ielem.get_id()
if ielem_id == MATROSKA_TIMECODESCALE_ID:
scalecode = ielem.get_value()
elif ielem_id == MATROSKA_DURATION_ID:
duration = ielem.get_float_value()
elif ielem_id == MATROSKA_TITLE_ID:
self.title = ielem.get_utf8()
elif ielem_id == MATROSKA_DATE_UTC_ID:
timestamp = unpack('!q', ielem.get_data())[0] / 10.0**9
# Date is offset 2001-01-01 00:00:00 (timestamp 978307200.0)
self.timestamp = int(timestamp + 978307200)
self.length = duration * scalecode / 1000000000.0
elif elem_id == MATROSKA_TRACKS_ID:
self.process_tracks(elem)
elif elem_id == MATROSKA_CHAPTERS_ID:
self.process_chapters(elem)
elif elem_id == MATROSKA_ATTACHMENTS_ID:
self.process_attachments(elem)
elif elem_id == MATROSKA_SEEKHEAD_ID:
self.process_seekhead(elem)
elif elem_id == MATROSKA_TAGS_ID:
self.process_tags(elem)
elif elem_id == MATROSKA_CUES_ID:
self.has_idx = True
log.debug('END: process element %x', elem_id)
return True
def process_seekhead(self, elem):
if self._in_seekhead:
return log.debug('skipping recursive seekhead processing')
self._in_seekhead = True
for seek_elem in self.process_one_level(elem):
if seek_elem.get_id() != MATROSKA_SEEK_ID:
continue
for sub_elem in self.process_one_level(seek_elem):
if sub_elem.get_id() == MATROSKA_SEEK_POSITION_ID:
self.file.seek(self.segment.offset + sub_elem.get_value())
buffer = self.file.read(100)
elem = EbmlEntity(buffer)
# Fetch all data necessary for this element.
if elem.ebml_length > 100:
elem.add_data(self.file.read(elem.ebml_length - 100))
self.process_elem(elem)
self._in_seekhead = False
def process_tracks(self, tracks):
tracksbuf = tracks.get_data()
index = 0
while index < tracks.get_len():
trackelem = EbmlEntity(tracksbuf[index:])
self.process_track(trackelem)
index += trackelem.get_total_len() + trackelem.get_crc_len()
def process_one_level(self, item):
buf = item.get_data()
index = 0
while index < item.get_len():
if len(buf[index:]) == 0:
break
elem = EbmlEntity(buf[index:])
yield elem
index += elem.get_total_len() + elem.get_crc_len()
def process_track(self, track):
# Collapse generator into a list since we need to iterate over it
# twice.
elements = [ x for x in self.process_one_level(track) ]
track_type = [ x.get_value() for x in elements if x.get_id() == MATROSKA_TRACK_TYPE_ID ]
if not track_type:
log.debug('Bad track: no type id found')
return
track_type = track_type[0]
track = None
if track_type == MATROSKA_VIDEO_TRACK:
log.debug("Video track found")
track = self.process_video_track(elements)
elif track_type == MATROSKA_AUDIO_TRACK:
log.debug("Audio track found")
track = self.process_audio_track(elements)
elif track_type == MATROSKA_SUBTITLES_TRACK:
log.debug("Subtitle track found")
track = core.Subtitle()
track.id = len(self.subtitles)
self.subtitles.append(track)
for elem in elements:
self.process_track_common(elem, track)
def process_track_common(self, elem, track):
elem_id = elem.get_id()
if elem_id == MATROSKA_TRACK_LANGUAGE_ID:
track.language = elem.get_str()
log.debug("Track language found: %s" % track.language)
elif elem_id == MATROSKA_NAME_ID:
track.title = elem.get_utf8()
elif elem_id == MATROSKA_TRACK_NUMBER_ID:
track.trackno = elem.get_value()
elif elem_id == MATROSKA_TRACK_FLAG_ENABLED_ID:
track.enabled = bool(elem.get_value())
elif elem_id == MATROSKA_TRACK_FLAG_DEFAULT_ID:
track.default = bool(elem.get_value())
elif elem_id == MATROSKA_CODEC_ID:
track.codec = elem.get_str()
elif elem_id == MATROSKA_CODEC_PRIVATE_ID:
track.codec_private = elem.get_data()
elif elem_id == MATROSKA_TRACK_UID_ID:
self.objects_by_uid[elem.get_value()] = track
def process_video_track(self, elements):
track = core.VideoStream()
# Defaults
track.codec = 'Unknown'
track.fps = 0
for elem in elements:
elem_id = elem.get_id()
if elem_id == MATROSKA_CODEC_ID:
track.codec = elem.get_str()
elif elem_id == MATROSKA_FRAME_DURATION_ID:
try:
track.fps = 1 / (pow(10, -9) * (elem.get_value()))
except ZeroDivisionError:
pass
elif elem_id == MATROSKA_VIDEO_SETTINGS_ID:
d_width = d_height = None
for settings_elem in self.process_one_level(elem):
settings_elem_id = settings_elem.get_id()
if settings_elem_id == MATROSKA_VIDEO_WIDTH_ID:
track.width = settings_elem.get_value()
elif settings_elem_id == MATROSKA_VIDEO_HEIGHT_ID:
track.height = settings_elem.get_value()
elif settings_elem_id == MATROSKA_VIDEO_DISPLAY_WIDTH_ID:
d_width = settings_elem.get_value()
elif settings_elem_id == MATROSKA_VIDEO_DISPLAY_HEIGHT_ID:
d_height = settings_elem.get_value()
elif settings_elem_id == MATROSKA_VIDEO_INTERLACED_ID:
value = int(settings_elem.get_value())
self._set('interlaced', value)
elif settings_elem_id == MATROSKA_VIDEO_STEREO:
value = stereo_map.get(int(settings_elem.get_value()), None)
if value:
self._set('stereo', value)
if None not in (d_width, d_height):
track.aspect = float(d_width) / d_height
else:
self.process_track_common(elem, track)
# convert codec information
# http://haali.cs.msu.ru/mkv/codecs.pdf
if track.codec in FOURCCMap:
track.codec = FOURCCMap[track.codec]
elif '/' in track.codec and track.codec.split('/')[0] + '/' in FOURCCMap:
track.codec = FOURCCMap[track.codec.split('/')[0] + '/']
elif track.codec.endswith('FOURCC') and len(track.codec_private or '') == 40:
track.codec = track.codec_private[16:20]
elif track.codec.startswith('V_REAL/'):
track.codec = track.codec[7:]
elif track.codec.startswith('V_'):
# FIXME: add more video codecs here
track.codec = track.codec[2:]
self.media = core.MEDIA_AV
track.id = len(self.video)
self.video.append(track)
return track
def process_audio_track(self, elements):
track = core.AudioStream()
track.codec = 'Unknown'
for elem in elements:
elem_id = elem.get_id()
if elem_id == MATROSKA_CODEC_ID:
track.codec = elem.get_str()
elif elem_id == MATROSKA_AUDIO_SETTINGS_ID:
for settings_elem in self.process_one_level(elem):
settings_elem_id = settings_elem.get_id()
if settings_elem_id == MATROSKA_AUDIO_SAMPLERATE_ID:
track.samplerate = settings_elem.get_float_value()
elif settings_elem_id == MATROSKA_AUDIO_CHANNELS_ID:
track.channels = settings_elem.get_value()
else:
self.process_track_common(elem, track)
if track.codec in FOURCCMap:
track.codec = FOURCCMap[track.codec]
elif '/' in track.codec and track.codec.split('/')[0] + '/' in FOURCCMap:
track.codec = FOURCCMap[track.codec.split('/')[0] + '/']
elif track.codec.startswith('A_'):
track.codec = track.codec[2:]
track.id = len(self.audio)
self.audio.append(track)
return track
def process_chapters(self, chapters):
elements = self.process_one_level(chapters)
for elem in elements:
if elem.get_id() == MATROSKA_EDITION_ENTRY_ID:
buf = elem.get_data()
index = 0
while index < elem.get_len():
sub_elem = EbmlEntity(buf[index:])
if sub_elem.get_id() == MATROSKA_CHAPTER_ATOM_ID:
self.process_chapter_atom(sub_elem)
index += sub_elem.get_total_len() + sub_elem.get_crc_len()
def process_chapter_atom(self, atom):
elements = self.process_one_level(atom)
chap = core.Chapter()
for elem in elements:
elem_id = elem.get_id()
if elem_id == MATROSKA_CHAPTER_TIME_START_ID:
# Scale timecode to seconds (float)
chap.pos = elem.get_value() / 1000000 / 1000.0
elif elem_id == MATROSKA_CHAPTER_FLAG_ENABLED_ID:
chap.enabled = elem.get_value()
elif elem_id == MATROSKA_CHAPTER_DISPLAY_ID:
# Matroska supports multiple (chapter name, language) pairs for
# each chapter, so chapter names can be internationalized. This
# logic will only take the last one in the list.
for display_elem in self.process_one_level(elem):
if display_elem.get_id() == MATROSKA_CHAPTER_STRING_ID:
chap.name = display_elem.get_utf8()
elif elem_id == MATROSKA_CHAPTER_UID_ID:
self.objects_by_uid[elem.get_value()] = chap
log.debug('Chapter "%s" found', chap.name)
chap.id = len(self.chapters)
self.chapters.append(chap)
def process_attachments(self, attachments):
buf = attachments.get_data()
index = 0
while index < attachments.get_len():
elem = EbmlEntity(buf[index:])
if elem.get_id() == MATROSKA_ATTACHED_FILE_ID:
self.process_attachment(elem)
index += elem.get_total_len() + elem.get_crc_len()
def process_attachment(self, attachment):
elements = self.process_one_level(attachment)
name = desc = ''
mimetype = b''
data = None
for elem in elements:
elem_id = elem.get_id()
if elem_id == MATROSKA_FILE_NAME_ID:
name = elem.get_utf8()
elif elem_id == MATROSKA_FILE_DESC_ID:
desc = elem.get_utf8()
elif elem_id == MATROSKA_FILE_MIME_TYPE_ID:
mimetype = elem.get_data()
elif elem_id == MATROSKA_FILE_DATA_ID:
data = elem.get_data()
# Right now we only support attachments that could be cover images.
# Make a guess to see if this attachment is a cover image.
if mimetype.startswith(b"image/") and "cover" in (name+desc).lower() and data:
self.thumbnail = data
log.debug('Attachment "%s" found' % name)
def process_tags(self, tags):
# Tags spec: http://www.matroska.org/technical/specs/tagging/index.html
# Iterate over Tags children. Tags element children is a
# Tag element (whose children are SimpleTags) and a Targets element
# whose children specific what objects the tags apply to.
for tag_elem in self.process_one_level(tags):
# Start a new dict to hold all SimpleTag elements.
tags_dict = core.Tags()
# A list of target uids this tags dict applies too. If empty,
# tags are global.
targets = []
for sub_elem in self.process_one_level(tag_elem):
if sub_elem.get_id() == MATROSKA_SIMPLE_TAG_ID:
self.process_simple_tag(sub_elem, tags_dict)
elif sub_elem.get_id() == MATROSKA_TARGETS_ID:
# Targets element: if there is no uid child (track uid,
# chapter uid, etc.) then the tags dict applies to the
# whole file (top-level Media object).
for target_elem in self.process_one_level(sub_elem):
target_elem_id = target_elem.get_id()
if target_elem_id in (MATRSOKA_TAGS_TRACK_UID_ID, MATRSOKA_TAGS_EDITION_UID_ID,
MATRSOKA_TAGS_CHAPTER_UID_ID, MATRSOKA_TAGS_ATTACHMENT_UID_ID):
targets.append(target_elem.get_value())
elif target_elem_id == MATROSKA_TARGET_TYPE_VALUE_ID:
# Target types not supported for now. (Unclear how this
# would fit with kaa.metadata.)
pass
if targets:
# Assign tags to all listed uids
for target in targets:
try:
self.objects_by_uid[target].tags.update(tags_dict)
self.tags_to_attributes(self.objects_by_uid[target], tags_dict)
except KeyError:
log.warning('Tags assigned to unknown/unsupported target uid %d', target)
else:
self.tags.update(tags_dict)
self.tags_to_attributes(self, tags_dict)
def process_simple_tag(self, simple_tag_elem, tags_dict):
"""
Returns a dict representing the Tag element.
"""
name = lang = value = children = None
binary = False
for elem in self.process_one_level(simple_tag_elem):
elem_id = elem.get_id()
if elem_id == MATROSKA_TAG_NAME_ID:
name = elem.get_utf8().lower()
elif elem_id == MATROSKA_TAG_STRING_ID:
value = elem.get_utf8()
elif elem_id == MATROSKA_TAG_BINARY_ID:
value = elem.get_data()
binary = True
elif elem_id == MATROSKA_TAG_LANGUAGE_ID:
lang = elem.get_utf8()
elif elem_id == MATROSKA_SIMPLE_TAG_ID:
if children is None:
children = core.Tags()
self.process_simple_tag(elem, children)
if children:
# Convert ourselves to a Tags object.
children.value = value
children.langcode = lang
value = children
else:
# XXX: Python datetime objects have no way to express partial dates
# (e.g. only year), which the Matroska spec allows. Therefore datetime
# is not suitable for this. Until we figure out a proper way to express
# dates, just pass the tag value directly.
#if name.startswith('date_'):
# # Try to convert date to a datetime object.
# value = matroska_date_to_datetime(value)
value = core.Tag(value, lang, binary)
if name in tags_dict:
# Multiple items of this tag name.
if not isinstance(tags_dict[name], list):
# Convert to a list
tags_dict[name] = [tags_dict[name]]
# Append to list
tags_dict[name].append(value)
else:
tags_dict[name] = value
def tags_to_attributes(self, obj, tags):
# Convert tags to core attributes.
for name, tag in list(tags.items()):
if isinstance(tag, dict):
# Nested tags dict, recurse.
self.tags_to_attributes(obj, tag)
continue
elif name not in TAGS_MAP:
continue
attr, filter = TAGS_MAP[name]
if attr not in obj._keys and attr not in self._keys:
# Tag is not in any core attribute for this object or global,
# so skip.
continue
# Pull value out of Tag object or list of Tag objects. We expect scalar values
# so in the case of lists (because there was more than one tag of the same name)
# just pick the first.
value = tag[0].value if isinstance(tag, list) else tag.value
if filter:
try:
value = filter(value)
except Exception as e:
log.warning('Failed to convert tag to core attribute: %s', e)
# Special handling for tv series recordings. The 'title' tag
# can be used for both the series and the episode name. The
# same is true for trackno which may refer to the season
# and the episode number. Therefore, if we find these
# attributes already set we try some guessing.
if attr == 'trackno' and getattr(self, attr) is not None:
# delete trackno and save season and episode
self.season = self.trackno
self.episode = value
self.trackno = None
continue
if attr == 'title' and getattr(self, attr) is not None:
# store current value of title as series and use current
# value of title as title
self.series = self.title
if attr in obj._keys:
setattr(obj, attr, value)
else:
setattr(self, attr, value)
Parser = Matroska
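# A minimal usage sketch (illustrative; assumes this module lives inside a kaa.metadata-style
# package that provides `core`, `EbmlEntity` and the MATROSKA_* constants used above).
if __name__ == '__main__':
    import sys
    with open(sys.argv[1], 'rb') as mkv_file:
        info = Parser(mkv_file)
        print('container: %s, length: %s s' % (info.type, info.length))
        print('%d video / %d audio stream(s)' % (len(info.video), len(info.audio)))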
|
python
|
#Author:D4Vinci
def ip2long(ip):
ip = ip.split("/")[0].split(":")[0]
p = ip.split(".")
return str( ( ( ( ( int(p[0]) * 256 + int(p[1]) ) * 256 ) + int(p[2]) ) * 256 ) + int(p[3]))
#p[0] + "." + str( ( ( ( int( p[1] ) * 256 + int( p[2] ) ) * 256 ) + int( p[3] ) ) * 256 ),
#p[0] + "." + p[1] + str( ( int( p[2] ) *256 ) + int( p[3] ) )
def ip2hex(ip):
ip = ip.split("/")[0].split(":")[0]
p = ip.split(".")
return [str( hex( int(p[0]) ) ) +"."+ str( hex( int(p[1]) ) ) +"."+ str( hex( int(p[2]) ) ) +"."+ str( hex( int(p[3]) ) ),
str( hex( int(p[0]) ) ) +"."+ str( hex( int(p[1]) ) ) +"."+ str( hex( int(p[2]) ) ) +"."+ str( int(p[3]) ),
str( hex( int(p[0]) ) ) +"."+ str( hex( int(p[1]) ) ) +"."+ str( int(p[2]) ) +"."+ str( int(p[3]) ),
str( hex( int(p[0]) ) ) +"."+ str( int(p[1]) ) +"."+ str( int(p[2]) ) +"."+ str( int(p[3]) ),
"0x"+"0"*8+str( hex( int(p[0]) ) ).replace("0x","") +"."+ "0x"+"0"*6+str( hex( int(p[1]) ) ).replace("0x","") +"."+ "0x"+"0"*4+str( hex( int(p[2]) ) ).replace("0x","")+"."+ "0x"+"0"*2+str( hex( int(p[3]) ) ).replace("0x",""),
str( hex( int( ip2long( ip ) ) ) ).replace( "L" , "" )]
def ip2Octal(ip):
return '.'.join(format(int(x), '04o') for x in ip.split('.'))
def ip_as_urlencoded(ip):
ip = ip.split("/")[0]
en=""
for i in ip :
if i.isdigit() :
en += "%3{}".format(i)
elif i == "." :
en += "%2E"
elif i == ":" :
en += "%3A"
return en
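# Worked example (illustrative): for ip = "127.0.0.1"
#   ip2long(ip)  -> "2130706433"            (((127*256 + 0)*256 + 0)*256 + 1)
#   ip2Octal(ip) -> "0177.0000.0000.0001"
# Most browsers accept these decimal/octal spellings, so http://2130706433 and
# http://0177.0000.0000.0001 both point back to 127.0.0.1.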
def ip_as_url(ip):
return [ "http://howsecureismypassword.net@"+str(ip),
"http://google.com@"+str( ip2long( ip ) ),
"http://facebook.com@"+str( ip2hex( ip )[-1] ),
"http://"+str( ip_as_urlencoded(ip) ),
"https://www.google.com@search@"+str( ip_as_urlencoded(ip) ),
"http://anywebsite@"+str( ip2Octal(ip) )]
print "\n Cuteit - Make a malicious ip a bit cuter :D"
print " Note: don't type a long url, because only the ip part is encoded!"
ip = raw_input(" ip > ")
ip=ip.replace("http://","")
print "\n"
for n,i in enumerate( ip2hex(ip) + ip_as_url(ip) ):
if "http" not in i:
print " ["+str(n)+"] "+"http://"+i
else:
print " ["+str(n)+"] "+i
print " [12] http://" + ip2Octal(ip)
print " [13] http://" + ip2long(ip)
|
python
|
import os.path
activate_this = os.path.join(os.path.dirname(os.path.realpath(__file__)), '.pyvenv/bin/activate_this.py')
exec(compile(open(activate_this).read(), activate_this, 'exec'), dict(__file__=activate_this))
import syslog
from dotenv import dotenv_values
from keycloak import KeycloakOpenID
from keycloak.exceptions import KeycloakError
DEFAULT_USER = "nobody"
syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_AUTH)
options = {}
def parse_options(pamh, argv):
global options
for arg in argv[1:]:
args = arg.split('=')
if len(args) > 1:
options[args[0]] = args[1]
else:
options[args[0]] = True
try:
config_file = options.get('config')
if config_file:
if not os.path.isabs(config_file):
config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), config_file)
options.update(dotenv_values(config_file))
except Exception as e:
pam_syslog(syslog.LOG_CRIT, pamh, "auth", "failed to read configuration: %s" % e)
return pamh.PAM_SYSTEM_ERR
def pam_syslog(prio, pamh, choice, message):
#print("pam_keycloak(%s:%s): %s" % (pamh.service, choice, message))
syslog.syslog(prio, "pam_keycloak(%s:%s): %s" % (pamh.service, choice, message))
def pam_sm_authenticate(pamh, flags, argv):
parse_options(pamh, argv)
try:
user = pamh.get_user(None)
except pamh.exception, e:
return e.pam_result
if user is None:
pamh.user = DEFAULT_USER
try:
# Configure client
keycloak_openid = KeycloakOpenID(server_url=options['server_url'],
realm_name=options['realm_name'],
client_id=options['client_id'],
client_secret_key=options['client_secret_key'],
verify=True)
# Get WellKnow
config_well_know = keycloak_openid.well_know()
except KeycloakError, e:
pam_syslog(syslog.LOG_NOTICE, pamh, "auth", "unable to authenticate for %s: %d %s" % (user, e.response_code, e.error_message))
return pamh.PAM_AUTHINFO_UNAVAIL
if pamh.authtok is None:
passmsg = pamh.Message(pamh.PAM_PROMPT_ECHO_OFF,
"Password: ")
res = pamh.conversation(passmsg)
pamh.authtok = res.resp
try:
token = keycloak_openid.token(user, pamh.authtok)
# Potentially fetch the user info and check for specific claims here:
# userinfo = keycloak_openid.userinfo(token['access_token'])
return pamh.PAM_SUCCESS
except KeycloakError as e:
pam_syslog(syslog.LOG_NOTICE, pamh, "auth", "authentication failure for %s: %d %s" % (user, e.response_code, e.error_message))
if e.response_code == 401:
return pamh.PAM_AUTH_ERR
return pamh.PAM_AUTHINFO_UNAVAIL
return pamh.PAM_AUTH_ERR
def pam_sm_setcred(pamh, flags, argv):
return pamh.PAM_SUCCESS
def pam_sm_acct_mgmt(pamh, flags, argv):
return pamh.PAM_SUCCESS
def pam_sm_open_session(pamh, flags, argv):
return pamh.PAM_SUCCESS
def pam_sm_close_session(pamh, flags, argv):
return pamh.PAM_SUCCESS
def pam_sm_chauthtok(pamh, flags, argv):
return pamh.PAM_SUCCESS
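# Illustrative configuration (all values are placeholders). A PAM service file would load
# this module via pam_python and pass the config file name, which parse_options() above
# reads with dotenv_values(); the keys below are the ones used to build KeycloakOpenID.
#
#   auth  requisite  pam_python.so /lib/security/pam_keycloak.py config=pam_keycloak.env
#
#   # pam_keycloak.env
#   server_url=https://keycloak.example.org/auth/
#   realm_name=example-realm
#   client_id=pam-login
#   client_secret_key=CHANGE_ME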
|
python
|
from dagster import execute_pipeline
from docs_snippets.concepts.configuration.config_mapping import example_pipeline
def test_config_mapping():
res = execute_pipeline(example_pipeline)
assert res.success
assert res.result_for_solid("hello_external").output_value() == "Hello, Sam!"
res = execute_pipeline(
example_pipeline, run_config={"solids": {"hello_external": {"config": {"name": "Bob"}}}}
)
assert res.success
assert res.result_for_solid("hello_external").output_value() == "Hello, Bob!"
|
python
|
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
__author__ = 'but0n'
from multiprocessing import Pool, Manager
from bs4 import BeautifulSoup
import time, random, requests, sqlite3, os
server = Manager()
host = 'http://www.80s.tw'
screen = server.dict({'label' : 'NONE', 'url' : 'http://baidu.com', 'title':'none', 'IMG':'none', 'detail':'none', 'link':'none', 'index':0, 'total':10})
def mLog(opt):
os.system('clear')
print('\033[41;30m MESSAGE: %s\033[m' % opt['label'])
print('\033[46;30m PATH: %10s\033[m\n' % opt['url'])
print('\033[0;35m TITLE\033[m:\t%s' % opt['title'])
print('\033[0;35m IMG\033[m:\t%s' % opt['IMG'][:30]+'...')
print('\033[0;34m DETAIL\033[m:%s' % opt['detail'][:60]+'...')
print('\033[0;36m LINK\033[m:\t%s' % opt['link'][:60]+'...')
bar_status = opt['index']*40/opt['total']
status = opt['index']*100/opt['total']
print('\n[%-40s]%s(%d/%d)' % ('>'*bar_status, str(status)+'%', opt['index'], opt['total']))
class domPa(object):
def __init__(self, path, section = 'a', title = '.title', img = '.img', detail = '.detail'):
self.path = path
self.page = requests.get(host+path)
self.status = self.page.status_code
self.section = section
self.img = img
self.title = title
self.detail = detail
self.dom = BeautifulSoup(self.page.text, 'html.parser')
self.p = Pool(5)
def run(self):
screen['url'] = self.path
screen['label'] = self.status
screen['total'] = len(self.dom.select('.me1.clearfix')[0].select('li'))
mLog(screen)
result = []
for e in self.dom.select('.me1.clearfix')[0].select('li'):
result.append(self.p.apply_async(botTask, (e,)))
# self.botTask(i,e)
self.p.close()
self.p.join()
for res in result:
for e in res.get():
dat = (e[0],e[1],e[2],e[3])
try:
db.execute('INSERT INTO movies VALUES(?,?,?,?)',dat)
except Exception as e:
screen['label'] = '*************SAME LINK!************'
mLog(screen)
db.commit()
def botTask(e):
dom_title_path_img = e.select('a')[0]
movieName = dom_title_path_img.get('title')
screen['title'] = movieName
movieImg = dom_title_path_img.select('img')[0].get('_src')[2:]
screen['IMG'] = movieImg
movieDetail = e.select('.tip')[0].get_text().strip()
screen['detail'] = movieDetail[:50]+'...'
urll = host + dom_title_path_img.get('href')
pagee = requests.get(urll)
dom = BeautifulSoup(pagee.text, 'html.parser')
datas = []
for ee in dom.select('span.xunlei')[0].select('a'):
movieLink = ee.get('href')
screen['link'] = movieLink
mLog(screen)
# robLog(i, 'Got it ! [%s]@ %s' % (movieName, movieLink))
datas.append([movieName,movieLink, movieDetail,movieImg])
# end = time.time()
# robLog(i, 'Task done! Cost %0.2fs' % (end-start), '\033[0;36m')
screen['index'] += 1
return (datas)
# mLog(u'but0n,I\'m Running!')
# mLog('Connect Database...')
screen['label'] = 'Connect Database...'
db = sqlite3.connect('mv.db')
if db:
    try:
        db.execute('CREATE TABLE movies(name text, link text primary key, detail text, img text)')
        screen['label'] = 'CREATE TABLE...'
        mLog(screen)
    finally:
        i = 1
        while i:
            bug = domPa('/movie/list/-----p'+str(i))
            if bug.status == 200:
                screen['index'] = 0
                screen['label'] = 'HTTP Connect Succeed! To [p'+str(i)+']'
                mLog(screen)
                i += 1
                bug.run()
            else:
                screen['label'] = 'Checkout your network!'
                mLog(screen)
                i = 0
db.close()
screen['label'] = 'DONE'
mLog(screen)
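# Illustrative: reading the scraped rows back out of mv.db afterwards
# (column layout matches the CREATE TABLE statement above).
#
#   con = sqlite3.connect('mv.db')
#   for name, link, detail, img in con.execute('SELECT * FROM movies LIMIT 5'):
#       print('%s -> %s' % (name, link))
#   con.close()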
|
python
|
import os
from setuptools import setup, find_packages
DESCRIPTION = (
"Graphical interface to manage Flatpak, Snap, AppImage and AUR packages"
)
AUTHOR = "Vinicius Moreira"
AUTHOR_EMAIL = "[email protected]"
NAME = 'bauh'
URL = "https://github.com/vinifmor/" + NAME
file_dir = os.path.dirname(os.path.abspath(__file__))
with open(file_dir + '/requirements.txt', 'r') as f:
requirements = [line.strip() for line in f.readlines() if line]
with open(file_dir + '/{}/__init__.py'.format(NAME), 'r') as f:
exec(f.readlines()[0])
setup(
name=NAME,
version=eval('__version__'),
description=DESCRIPTION,
long_description=DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
python_requires=">=3.5",
url=URL,
packages=find_packages(exclude=["tests.*", "tests"]),
package_data={NAME: ["view/resources/locale/*", "view/resources/img/*", "gems/*/resources/img/*", "gems/*/resources/locale/*"]},
install_requires=requirements,
test_suite="tests",
entry_points={
"console_scripts": [
"{name}={name}.app:main".format(name=NAME)
]
},
include_package_data=True,
license="zlib/libpng",
classifiers=[
'Topic :: Utilities',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
]
)
|
python
|
import pygame as pg
class Snake(object):
def __init__(self, speed, tiles, path, length, message=None):
self.tiles = tiles
self.speed = speed
self.length = length
self.message = message
self.body_color = pg.Color("red")
self.head_color = pg.Color("blue")
self.body_coordinates = [path[0]]
self.path = path
self.path.pop(0)
        # How far the snake has progressed within the current block
self.progress = 0
self.last_dt = 0
def update(self, dt):
self.progress = self.progress + (dt * self.speed)
if self.progress > 1:
self.move()
self.progress = self.progress - 1
def move(self):
if self.path:
self.body_coordinates.append(self.path[0])
self.path.pop(0)
if len(self.body_coordinates) > self.length:
self.body_coordinates.pop(0)
def draw(self, screen):
# draw body parts
for i, position in enumerate(self.body_coordinates[:-1]):
rectangle = self.tiles.get_rectangle(position)
pg.draw.rect(screen, self.body_color, self.tiles.get_rectangle(position))
if self.message:
font = pg.font.SysFont("arial", rectangle.height)
letter_surface = font.render(self.message[i], True, pg.Color("Black"))
screen.blit(letter_surface, rectangle.topleft)
# draw head
head_rectangle = self.tiles.get_rectangle(self.body_coordinates[-1])
pg.draw.rect(screen, self.head_color, head_rectangle)
def clicked_snake(self, position):
for coordinate in self.body_coordinates:
if self.tiles.get_rectangle(coordinate).collidepoint(position):
return True
return False
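# A minimal usage sketch. The Tiles helper below is a hypothetical stand-in that provides
# the get_rectangle(position) interface the Snake class expects; sizes and path are
# illustrative only.
if __name__ == "__main__":
    class Tiles(object):
        def __init__(self, size=20):
            self.size = size
        def get_rectangle(self, position):
            x, y = position
            return pg.Rect(x * self.size, y * self.size, self.size, self.size)

    pg.init()
    surface = pg.Surface((200, 200))
    snake = Snake(speed=5, tiles=Tiles(), path=[(0, 0), (1, 0), (2, 0), (3, 0)], length=3)
    snake.update(0.3)               # 0.3s elapsed at speed 5 -> the snake advances one tile
    snake.draw(surface)
    print(snake.body_coordinates)   # [(0, 0), (1, 0)]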
|
python
|
#-----------------------------------------------------------
# Download videos from YouTube
#
# Program copied for testing, seen on Instagram: @pycodebr
#-----------------------------------------------------------
import os
from termcolor import colored
from pytube import YouTube
# Clear screen on startup
os.system('clear') or None
# Provide the video link and the location where you want to save it
print(colored('1: Copie e cole o endereço do vídeo do Youtbe', 'red'))
print(colored('2: Informe o caminho correto da pasta onde o vídeo será baixado ou digite enter para baixar o vídeo na pasta atual\n', 'red'))
print(colored('3: O formato baixado é MP4\n', 'blue'))
link = input('Digite o link do vídeo que irá baixar: ')
path = input('Digite o diretório onde irá salvar o video OU pressione enter: ')
yt = YouTube(link)
# Video details
print("Título: ", yt.title)
print('Número de views: ', yt.views)
print('Tamanho do Vídeo: ', yt.length)
print('Avaliação do vídeo: ', yt.rating)
# Use the highest resolution
ys = yt.streams.get_highest_resolution()
# Starting download
print('Baixando o vídeo na pasta informada......', path)
ys.download(path)
print(colored('Download finalizado...', 'green'))
|
python
|
import convert
keypointSimilarity = .80
passwordSimilarity = .50
correctness = None
'Carries out the comparison between the stored password and the attempted password'
def compare_data(password, attempt):
global correctness
pass_array = stripOutZeros(password)
attempt_array = stripOutZeros(attempt)
pass_length = len(pass_array)
attempt_length = len(attempt_array)
longest = max(pass_length, attempt_length)
num_matches = longest_common_substring(pass_array, attempt_array)
correctness = float(num_matches)/longest
if correctness >= passwordSimilarity:
return True
else:
return False
'Runs a longest common substring algorithm implemented bottom up, to compare the two arrays of audio key points'
def longest_common_substring(password, attempt):
matrix = []
for g in range(len(password)+1):
horlist = []
for h in range(len(attempt)+1):
horlist.append(0)
matrix.append(horlist)
for i in range(len(password)):
for j in range(len(attempt)):
if closeEnough(password[i][0], attempt[j][0]):
matrix[i+1][j+1] = 1+matrix[i][j]
else:
matrix[i + 1][j + 1] = max(matrix[i+1][j], matrix[i][j+1])
return matrix[-1][-1]
'Gets rid of zeros signifying silence at the beginning and end of the audio data'
def stripOutZeros(array):
    j = 0
    for i in range(len(array)):
        if array[i][0] == 0:
            j += 1
        else:
            break
    g = 0
    # Count trailing zeros from the end; start at 1 so array[-h] never aliases array[0]
    for h in range(1, len(array) + 1):
        if array[-h][0] == 0:
            g += 1
        else:
            break
    return array[j:len(array) - g]
'determines whether two compared frequencies are within the allotted similarity value'
def closeEnough(A, B):
C = min(A,B)
lowerBound = C*keypointSimilarity
upperBound = C*(2-keypointSimilarity)
if lowerBound < A < upperBound:
if lowerBound < B < upperBound:
return True
return False
# password1 = convert.process_audio("Test_Files\\lowHigh.wav")
# attempt1 = convert.process_audio("Test_Files\\lowHigh1.wav")
# attempt2 = convert.process_audio("Test_Files\\high.wav")
# attempt3 = convert.process_audio("Test_Files\\highLow.wav")
#
#
# print(compare_data(password1, password1))
# print correctness
# print(compare_data(password1, attempt1))
# print correctness
# print(compare_data(password1, attempt2))
# print correctness
# print(compare_data(password1, attempt3))
# print correctness
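# A small self-contained sanity check with synthetic key points (illustrative values only;
# each key point is a [frequency] list, matching what the functions above index with [0]).
# Note: running the module still requires the local `convert` module imported at the top.
if __name__ == '__main__':
    stored = [[0], [440.0], [442.0], [880.0], [0]]
    attempted = [[0], [438.0], [880.0], [0]]
    print(compare_data(stored, attempted))   # expect True: 2 of 3 key points match
    print(correctness)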
|
python
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Pocket PiAP
# ......................................................................
# Copyright (c) 2017-2020, Kendrick Walls
# ......................................................................
# Licensed under MIT (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# ......................................................................
# http://www.github.com/reactive-firewall/PiAP-python-tools/LICENSE.rst
# ......................................................................
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ......................................................................
try:
try:
import context
except Exception as ImportErr: # pragma: no branch
ImportErr = None
del ImportErr
from . import context
if context.__name__ is None:
raise ImportError("[CWE-758] Failed to import context")
else:
from context import unittest as unittest
except Exception:
raise ImportError("[CWE-758] Failed to import test context")
class MoreClientChecksTestSuite(unittest.TestCase):
"""More Unit test cases for piaplib.lint.check_clients_status."""
def test_piaplib_lint_import_syntax(self):
"""Test case importing code."""
theResult = False
try:
from .context import piaplib
from piaplib import pocket
from piaplib import lint as lint
from piaplib import pku as pku
from pku import interfaces as interfaces
from lint import check as check
from lint import clients_check_status as clients_check_status
for depends in [piaplib, pocket, pku, interfaces, lint, check, clients_check_status]:
if depends.__name__ is None:
theResult = False
theResult = True
except Exception as impErr:
print(str(type(impErr)))
print(str(impErr))
theResult = False
assert theResult
def test_clients_check_status_aa(self):
"""Test case for piaplib.lint.clients_check_status
with show_client("1.2.3.4", False, False, "eth0")"""
theResult = False
from .context import piaplib
from piaplib import lint as lint
from piaplib import pku as pku
from pku import interfaces as interfaces
from lint import clients_check_status as clients_check_status
for depends in [piaplib, pku, interfaces, lint, clients_check_status]:
if depends.__name__ is None:
theResult = False
try:
temp_val = clients_check_status.show_client(
"1.2.3.4",
False,
False,
interfaces.INTERFACE_CHOICES[0]
)
self.assertIsNotNone(temp_val)
self.assertIsInstance(temp_val, str, "Test output is NOT a string")
theResult = isinstance(temp_val, str)
except Exception as err:
print(str(""))
print(str(type(err)))
print(str(err))
print(str((err.args)))
print(str(""))
err = None
del err
theResult = False
assert theResult
def test_clients_check_status_ab(self):
"""Test case for piaplib.lint.clients_check_status
with show_client("1.2.3.4", True, False, "eth0")"""
theResult = False
from .context import piaplib
from piaplib import lint as lint
from piaplib import pku as pku
from pku import interfaces as interfaces
from lint import clients_check_status as clients_check_status
for depends in [piaplib, pku, interfaces, lint, clients_check_status]:
if depends.__name__ is None:
theResult = False
try:
temp = clients_check_status.show_client(
"1.2.3.4",
True,
False,
interfaces.INTERFACE_CHOICES[0]
)
self.assertIsNotNone(temp)
self.assertIsInstance(temp, str, "Test output is Not a string")
theResult = isinstance(temp, str)
except Exception as err:
print(str(""))
print(str(type(err)))
print(str(err))
print(str((err.args)))
print(str(""))
err = None
del err
theResult = False
assert theResult
def test_clients_check_status_ac(self):
"""Test case for piaplib.lint.clients_check_status
with show_client("1.2.3.4", True, True, "eth0")"""
theResult = False
from .context import piaplib
from piaplib import lint as lint
from piaplib import pku as pku
from pku import interfaces as interfaces
from lint import clients_check_status as clients_check_status
for depends in [piaplib, pku, interfaces, lint, clients_check_status]:
if depends.__name__ is None:
theResult = False
try:
temp = clients_check_status.show_client(
"1.2.3.4",
True,
True,
interfaces.INTERFACE_CHOICES[0]
)
self.assertIsNotNone(temp)
self.assertIsInstance(temp, str, "Test output is Not a string")
theResult = isinstance(temp, str)
except Exception as err:
print(str(""))
print(str(type(err)))
print(str(err))
print(str((err.args)))
print(str(""))
err = None
del err
theResult = False
assert theResult
def test_clients_check_status_ad(self):
"""Test case for piaplib.lint.clients_check_status
with show_client("1.2.3.4", False, True, "eth0")"""
theResult = False
from .context import piaplib
from piaplib import lint as lint
from piaplib import pku as pku
from pku import interfaces as interfaces
from lint import clients_check_status as clients_check_status
for depends in [piaplib, pku, interfaces, lint, clients_check_status]:
if depends.__name__ is None:
theResult = False
try:
temp = clients_check_status.show_client(
"1.2.3.4",
False,
True,
interfaces.INTERFACE_CHOICES[0]
)
self.assertIsNotNone(temp)
self.assertIsInstance(temp, str, "Test output is NOT a string")
theResult = isinstance(temp, str)
except Exception as err:
print(str(""))
print(str(type(err)))
print(str(err))
print(str((err.args)))
print(str(""))
err = None
del err
theResult = False
assert theResult
def test_clients_check_status_ae(self):
"""Test case for piaplib.lint.clients_check_status
with show_client("1.2.3.4", *, *, "JUNK")"""
theResult = False
from .context import piaplib
from piaplib import lint as lint
from lint import clients_check_status as clients_check_status
for depends in [piaplib, lint, clients_check_status]:
if depends.__name__ is None:
theResult = False
try:
self.assertIsNotNone(clients_check_status.show_client("1.2.3.4", False, False, "JUNK"))
self.assertIsNotNone(clients_check_status.show_client("1.2.3.4", False, True, "JUNK"))
self.assertIsNotNone(clients_check_status.show_client("1.2.3.4", True, False, "JUNK"))
temp = clients_check_status.show_client("1.2.3.4", True, True, "JUNK")
self.assertIsNotNone(temp)
self.assertIsInstance(temp, str, "Test output is NOT a string")
theResult = isinstance(temp, str)
except Exception as err:
print(str(""))
print(str(type(err)))
print(str(err))
print(str((err.args)))
print(str(""))
err = None
del err
theResult = False
assert theResult
def test_clients_check_status_ba(self):
"""Test case for piaplib.lint.clients_check_status.get_client_name(None IP)"""
theResult = False
from .context import piaplib
from piaplib import lint as lint
from piaplib import pku as pku
from pku import interfaces as interfaces
from lint import clients_check_status as clients_check_status
for depends in [piaplib, pku, interfaces, lint, clients_check_status]:
if depends.__name__ is None:
theResult = False
try:
temp = clients_check_status.get_client_name(
None,
False,
interfaces.INTERFACE_CHOICES[0]
)
self.assertIsNone(temp)
theResult = True
except Exception as err:
print(str(""))
print(str(type(err)))
print(str(err))
print(str((err.args)))
print(str(""))
err = None
del err
theResult = False
assert theResult
def test_clients_check_status_bb(self):
"""Test case for piaplib.lint.clients_check_status.get_client_name(None Iface)"""
theResult = False
from .context import piaplib
from piaplib import lint as lint
from piaplib import pku as pku
from pku import interfaces as interfaces
from lint import clients_check_status as clients_check_status
for depends in [piaplib, pku, interfaces, lint, clients_check_status]:
if depends.__name__ is None:
theResult = False
try:
temp = clients_check_status.get_client_name(
"1.2.3.4",
False,
None
)
self.assertEqual(temp, str("UNKNOWN"))
theResult = True
except Exception as err:
print(str(""))
print(str(type(err)))
print(str(err))
print(str((err.args)))
print(str(""))
err = None
del err
theResult = False
assert theResult
def test_clients_check_status_raw_cc(self):
"""Test case for piaplib.lint.clients_check_status.get_client_sta_status_raw()"""
theResult = False
from .context import piaplib
from piaplib import lint as lint
from piaplib import pku as pku
from pku import interfaces as interfaces
from lint import clients_check_status as clients_check_status
for depends in [piaplib, pku, interfaces, lint, clients_check_status]:
if depends.__name__ is None:
theResult = False
try:
self.assertIsNotNone(clients_check_status.get_client_sta_status_raw())
theResult = True
except Exception as err:
print(str(""))
print(str(type(err)))
print(str(err))
print(str((err.args)))
print(str(""))
err = None
del err
theResult = False
assert theResult
def test_clients_check_status_raw_ce(self):
"""Test case for piaplib.lint.clients_check_status.get_client_lease_status_raw(None)"""
theResult = False
from .context import piaplib
from piaplib import lint as lint
from piaplib import pku as pku
from pku import interfaces as interfaces
from lint import clients_check_status as clients_check_status
for depends in [piaplib, pku, interfaces, lint, clients_check_status]:
if depends.__name__ is None:
theResult = False
try:
self.assertIsNotNone(clients_check_status.get_client_lease_status_raw())
theResult = True
except Exception as err:
print(str(""))
print(str(type(err)))
print(str(err))
print(str((err.args)))
print(str(""))
err = None
del err
theResult = False
assert theResult
	def test_client_insane_or_no_mac_handled(self):
		"""Tests the impossible state for client mac given bad values."""
theResult = True
try:
from lint import clients_check_status as clients_check_status
except Exception:
import lint.clients_check_status as clients_check_status
if clients_check_status.__name__ is None:
theResult = False
else:
try:
test_mac = str("11:AA:22:33:44:55")
self.assertIsNotNone(
clients_check_status.get_client_lease_status(test_mac)
)
self.assertIsNotNone(
clients_check_status.get_client_lease_status(None)
)
except Exception as err:
print(str(""))
print(str(type(err)))
print(str(err))
print(str((err.args)))
print(str(""))
err = None
del err
theResult = False
assert theResult
if __name__ == '__main__':
unittest.main()
|
python
|
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
import time
import tkinter as tk
import os, sys
browser = None
def login(user, passw, check_browser):
global browser
def resource_path(relative_path):
try:
base_path = sys._MEIPASS
except Exception:
base_path = os.path.dirname(__file__)
return os.path.join(base_path, relative_path)
chrome_driver = resource_path('./driver/chromedriver.exe')
if check_browser == 1:
browser = webdriver.Chrome(chrome_driver)
elif check_browser == 0:
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--window-size=1920x1080")
browser = webdriver.Chrome(options=chrome_options, executable_path=chrome_driver)
# Login
browser.get('https://wfg.xcelsolutions.com')
username = WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="reduxFormInputField1"]')))
password = browser.find_element_by_xpath('//*[@id="reduxFormInputField3"]')
username.send_keys(user)
password.send_keys(passw)
password.send_keys(Keys.RETURN)
# Navigate to course directory
# TODO universal navigation for different courses
# Check for bad login
try:
course_button = WebDriverWait(browser, 10).until\
(EC.element_to_be_clickable((By.XPATH, '/html/body/div/div/div[3]/div/div[2]/div/div[2]/div/div/div[2]')))
course_button.click()
except:
raise NameError('Bad Login')
choose_course = WebDriverWait(browser, 10).until\
(EC.element_to_be_clickable((By.XPATH, '//*[@id="root"]/div/div[3]/div/div[2]/div/div[3]/div[2]/div[1]/div/div/div/div/div[2]/div/div/div[1]/div/button')))
choose_course.click()
# Close dialog box if it exists and open course
try:
choose_course_content = WebDriverWait(browser, 10).until\
(EC.element_to_be_clickable((By.XPATH, '/html/body/div/div/div[3]/div/div[2]/div/div[4]/div[1]/div[2]/div/div[4]/div[1]/div[2]/div/div/div/div')))
choose_course_content.click()
except:
dialog_box = WebDriverWait(browser, 5).until \
(EC.element_to_be_clickable((By.XPATH, '//*[@id="root"]/div/div[4]/div[1]/div[2]/button')))
dialog_box.click()
choose_course_content = WebDriverWait(browser, 10).until \
(EC.element_to_be_clickable((By.XPATH, '/html/body/div/div/div[3]/div/div[2]/div/div[4]/div[1]/div[2]/div/div[4]/div[1]/div[2]/div/div/div/div')))
choose_course_content.click()
get_hours()
# Navigates bot to course page, begin idle
def get_hours():
global after_id
global after_id2
# print('Farm start')
complete_course = WebDriverWait(browser, 10).until\
(EC.element_to_be_clickable((By.XPATH, '//*[@id="root"]/div/div[3]/div/div[2]/div/div[4]/div[1]/div[2]/div/div[3]/div[2]/div[2]/div/div[1]/div/div[2]/div/button')))
complete_course.click()
# Click past identification check
frame = WebDriverWait(browser, 10).until \
(EC.presence_of_element_located((By.XPATH, '//*[@id="root"]/div/div[3]/iframe')))
browser.switch_to.frame(frame)
identify_button = WebDriverWait(browser, 10).until\
(EC.element_to_be_clickable((By.XPATH, '//*[@id="identify_student_submit"]')))
identify_button.click()
after_id = window.after(596000, save)
after_id2 = window.after(605000, get_hours)
# Save and exit from course
def save():
global browser
save_button = WebDriverWait(browser, 10).until \
(EC.element_to_be_clickable((By.XPATH, '//*[@id="exitButton"]')))
save_button.click()
# print('Farmed 10 minutes')
# Start/Stop web bot after button event
def run():
global browser
global btn
global ent_user
global ent_pass
global C2
global user
global passw
global check_browser
global after_id
global after_id2
after_id = None
after_id2 = None
user1 = user.get()
passw1 = passw.get()
check_browser1 = check_browser.get()
if btn.get() == 'Run':
btn.set('Stop')
ent_user.configure(state='disabled')
ent_pass.configure(state='disabled')
C2.configure(state='disabled')
# Run bot, if login error exit, if arbitrary error reset and rerun bot
# If bot is not working, GUI will stay unresponsive
# If wrong login, bot quits and prompts user to try to run again
try:
login(user1, passw1, check_browser1)
except NameError:
browser.quit()
btn.set('Run')
ent_user.configure(state='normal')
ent_pass.configure(state='normal')
C2.configure(state='normal')
except Exception as e:
browser.quit()
if after_id is not None:
window.after_cancel(after_id)
window.after_cancel(after_id2)
btn.set('Run')
run()
elif btn.get() == 'Stop':
btn.set('Run')
ent_user.configure(state='normal')
ent_pass.configure(state='normal')
C2.configure(state='normal')
try:
save()
time.sleep(3)
browser.quit()
window.after_cancel(after_id)
window.after_cancel(after_id2)
except:
browser.quit()
if after_id is not None:
window.after_cancel(after_id)
window.after_cancel(after_id2)
# Create a new window
window = tk.Tk()
window.title("Web Idler")
window.resizable(False, False)
# Create a new frame for data entries and checkboxes
frm = tk.Frame(relief=tk.SUNKEN, borderwidth=3)
frm.pack()
# Username
user = tk.StringVar()
lbl_user = tk.Label(master=frm, text="Username:")
ent_user = tk.Entry(master=frm, width=50, textvariable=user)
lbl_user.grid(row=0, column=0, sticky="e")
ent_user.grid(row=0, column=1)
# Password
passw = tk.StringVar()
lbl_pass = tk.Label(master=frm, text="Password:")
ent_pass = tk.Entry(master=frm, width=50, textvariable=passw, show="*")
lbl_pass.grid(row=1, column=0, sticky="e")
ent_pass.grid(row=1, column=1)
# Toggle show password
def showPass():
if check_pass.get() == 1:
ent_pass.configure(show="")
elif check_pass.get() == 0:
ent_pass.configure(show="*")
# Checkboxes
check_pass = tk.IntVar()
check_browser = tk.IntVar()
C1 = tk.Checkbutton(frm, text="Show password", variable=check_pass, onvalue=1, offvalue=0, command=showPass)
C2 = tk.Checkbutton(frm, text="Display browser", variable=check_browser, onvalue=1, offvalue=0)
C1.grid(row=2, column=1, sticky="w")
C2.grid(row=3, column=1, sticky="w")
# Create a new frame for Run/Stop button
frm_buttons = tk.Frame()
frm_buttons.pack(fill=tk.X, ipadx=5, ipady=0)
# Create the 'Run/Stop' button
btn = tk.StringVar()
btn.set('Run')
btn_run = tk.Button(master=frm_buttons, textvariable=btn, command=run)
btn_run.pack(side=tk.TOP, ipadx=10, pady=2.5)
# Start the application
window.mainloop()
|
python
|
import collections
import importlib
import glob
import h5py
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader, ConcatDataset
from unet3d.utils import get_logger
logger = get_logger('HDF5Dataset')
class HDF5Dataset(Dataset):
def __init__(self, file_path, phase):
assert phase in ['train', 'val', 'test']
print('Phase now: ', phase)
# file_path: train_data path / val_data path / test_data path
self.file_path = file_path # datasets/train_data/
self.data_dir_list = glob.glob(self.file_path + '*')
def __getitem__(self, idx):
raws, labels = self._loader(self.data_dir_list[idx])
# raws = np.rollaxis(raws, 3, 1)
# labels = np.rollaxis(labels, 3, 1)
# raws = self.crop_img(raws)
# labels = self.crop_img(labels)
img_data = self._normalization(raws)
seg_data = labels
img_data = img_data.reshape(1, 16, 256, 256)
seg_data = seg_data.reshape(1, 16, 256, 256)
        # Binary classification: labels 1, 2, 4 vs. the background region
# print('Before: ', seg_data.shape)
# seg_mask = np.zeros((1, 128, 160, 160))
# seg_mask[0] = ( (seg_data[0] + seg_data[1] + seg_data[2]) > 0.1 ).astype(int)
# print('After: ', seg_mask.shape)
        # Binary classification: labels 1, 4 vs. label 2
# print('Before: ', seg_data.shape)
# seg_mask = np.zeros((2, 128, 160, 160))
# seg_mask[0] = ( (seg_data[0] + seg_data[2]) > 0.1 ).astype(int)
# seg_mask[1] = ( seg_data[1] > 0.1 ).astype(int)
# seg_data = ( (seg_data[0] + seg_data[2]) > 0.1 ).astype(int)
# print('After: ', seg_data.shape)
        # Binary classification: label 1 vs. label 4
# print('Before: ', seg_data.shape)
# seg_mask = np.zeros((2, 128, 160, 160))
# seg_mask[0] = seg_data[0]
# seg_mask[1] = seg_data[2]
# seg_data = ( (seg_data[0] + seg_data[2]) > 0.1 ).astype(int)
# print('After: ', seg_data.shape)
return img_data, seg_data
def __len__(self):
return len(self.data_dir_list)
@staticmethod
def _loader(path):
with h5py.File(path, 'r') as input_file:
raws = input_file['raw'][()]
labels = input_file['label'][()]
raws = np.array(raws)
labels = np.array(labels)
return raws, labels
@staticmethod
def _normalization(img_data):
        # Normalization (zero mean, unit std over the non-zero voxels)
img_nonzero = img_data[np.nonzero(img_data)]
img = (img_data - np.mean(img_nonzero)) / np.std(img_nonzero)
img[img == img.min()] = 0
return img
@staticmethod
def crop_img(img_data):
img_data = img_data[:, 13:141, 40:200, 40:200] # shape: (4, 128, 160, 160)
return img_data
def get_train_loaders(config):
assert 'loaders' in config, 'Could not find data loaders configuration'
loaders_config = config['loaders']
logger.info('Creating training and validation set loaders...')
num_workers = loaders_config.get('num_workers', 1)
logger.info(f'Number of workers for train/val dataloader: {num_workers}')
batch_size = loaders_config.get('batch_size', 1)
logger.info(f'Batch size for train/val loader: {batch_size}')
# when training with volumetric data use batch_size of 1 due to GPU memory constraints
train_dataset = HDF5Dataset(loaders_config['train_path'], phase='train')
val_dataset = HDF5Dataset(loaders_config['val_path'], phase='val')
return {
'train': DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers),
'val': DataLoader(val_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
}
def get_test_loaders(config):
assert 'datasets' in config, 'Could not find data sets configuration'
datasets_config = config['datasets']
test_path = datasets_config['test_path']
num_workers = datasets_config.get('num_workers', 1)
logger.info(f'Number of workers for the dataloader: {num_workers}')
batch_size = datasets_config.get('batch_size', 1)
logger.info(f'Batch size for dataloader: {batch_size}')
# construct datasets lazily
test_dataset = HDF5Dataset(test_path, phase='test')
# img_data, seg_data
return {'test': DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)}
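# A minimal illustrative config for the loader factories above (paths are placeholders and
# only the keys actually read by get_train_loaders are shown; running this assumes the
# unet3d package imported at the top is installed).
if __name__ == '__main__':
    example_config = {
        'loaders': {
            'train_path': 'datasets/train_data/',
            'val_path': 'datasets/val_data/',
            'num_workers': 2,
            'batch_size': 1,
        }
    }
    loaders = get_train_loaders(example_config)
    print(len(loaders['train'].dataset), 'training volumes found')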
|
python
|
from .aggregate_representation import AggregateRepresentationTransformation
from .aggregate_representation_softmax import AggregateRepresentationTransformationSoftmax
from .edge_state_update import EdgeStateUpdateTransformation
from .input_sequence_direct import InputSequenceDirectTransformation
from .new_nodes_vote import NewNodesVoteTransformation
from .new_nodes_inform import NewNodesInformTransformation
from .node_state_update import NodeStateUpdateTransformation
from .direct_reference_update import DirectReferenceUpdateTransformation
from .output_category import OutputCategoryTransformation
from .output_sequence import OutputSequenceTransformation
from .output_set import OutputSetTransformation
from .propagation import PropagationTransformation
from .sequence_aggregate_summary import SequenceAggregateSummaryTransformation
|
python
|
import appuifw as ui
import globalui
from pytriloquist import Const
from pytriloquist.btclient import BluetoothError
from pytriloquist.gui import Dialog
from pytriloquist.gui.settings import SettingsDialog
from pytriloquist.gui.app import ApplicationsDialog
from pytriloquist.gui.input import InputDialog
class IntroDialog(Dialog):
"""
Application starting point.
"""
def __init__(self, app):
Dialog.__init__(self, app)
def get_title(self):
"""Returns the dialog title.
"""
return Const.APP_TITLE
def init_ui(self):
"""Initializes the user interface.
"""
self.main_dialog = MainDialog(self.app, self)
self.settings_dialog = SettingsDialog(self.app, self)
self.menu = [
(_(u"Open") , self.opt_list_observe),
(_(u"About"), self.about),
(_(u"Exit") , self.app.exit),
]
self.options = [
(1, _("Connect") , self.connect),
(2, _("Settings"), self.settings)
]
self.opt_list = ui.Listbox([opt[1] for opt in self.options], self.opt_list_observe)
def display(self):
"""Displays the dialog on the device.
"""
ui.app.screen = "normal"
ui.app.set_tabs([], None)
ui.app.menu = self.menu
ui.app.body = self.opt_list
ui.app.exit_key_handler = self.app.exit
def opt_list_observe(self):
"""Function called when a mode is selected from the list.
"""
selected = self.options[self.opt_list.current()]
selected[2]()
def connect(self):
"""Connects to the server.
"""
try:
self.app.btclient.connect()
except BluetoothError, e:
ui.note(_(e.msg), "error")
else:
self.main_dialog.execute()
def settings(self):
"""Opens the Settings dialog.
"""
self.settings_dialog.execute()
def about(self):
"""Opens the About dialog.
"""
data = {
"title" : Const.APP_TITLE,
"version": Const.APP_VERSION,
"year" : Const.APP_YEAR,
"url" : Const.APP_URL,
"author" : Const.APP_AUTHOR,
"lauthor": _(u"Authors:"),
}
text = u"%(title)s v%(version)s (c) %(year)s\n" \
"%(url)s\n\n" \
"%(lauthor)s\n" \
"%(author)s" % data
globalui.global_msg_query(text, _(u"About"), 0)
class MainDialog(Dialog):
"""
This dialog displays the list of applications and input methods.
"""
def __init__(self, app, parent):
Dialog.__init__(self, app, parent)
def get_title(self):
"""Returns the dialog title.
"""
return Const.APP_TITLE
def init_ui(self):
"""Initializes the user interface.
"""
self.tabs = [
(_(u"Apps"), self.open_apps),
]
self.menu = [
(_(u"Orientation"), (
(_(u"Automatic"), self.set_orientation("automatic")),
(_(u"Landscape"), self.set_orientation("landscape")),
(_(u"Portrait") , self.set_orientation("portrait")),
)),
(_(u"Disconnect"), self.back)
]
# Dialogs
self.apps_dialog = ApplicationsDialog(self.app, self)
if ui.touch_enabled():
# Only works with touch-enabled devices
self.input_dialog = InputDialog(self.app, self)
self.tabs.append((_(u"Input"), self.open_input))
def set_orientation(self, orientation):
"""Returns a function that changes the display orientation.
"""
def fn():
ui.app.orientation = orientation
return fn
def display(self):
"""Displays the dialog on the device.
"""
ui.app.set_tabs([t[0] for t in self.tabs], self.tab_handler)
ui.app.exit_key_handler = self.app.exit
self.tab_handler(0)
def back(self):
"""Executes the parent dialog.
"""
Dialog.back(self)
self.disconnect()
def disconnect(self):
"""Disconnects from the server.
"""
try:
self.app.btclient.close()
except BluetoothError, e:
ui.note(_(e.msg), "error")
def open_apps(self):
"""Opens the applications dialog.
"""
self.apps_dialog.execute()
def open_input(self):
"""Opens the input dialog.
"""
if ui.touch_enabled():
self.input_dialog.execute()
else:
ui.note(_(u"Touch not enabled."), "error")
def tab_handler(self, index):
"""Handles tab events.
"""
[t[1] for t in self.tabs][index]()
|
python
|
import tensorflow as tf
def nalu(input_layer, num_outputs, epsilon=1e-6):
""" Calculate the Neural Arithmetic Logic Unit (NALU).
Arguments:
input_layer - the input vector we want to the NALU of.
num_outputs - dimension of the output vector.
epsilon - small shift to prevent log(0)
Returns:
y - vector of dimension (X.shape.dims[0], num_outputs)
"""
shape = (input_layer.shape.dims[-1].value, num_outputs)
with tf.name_scope("NALU"):
W_hat = tf.Variable(tf.truncated_normal(shape, stddev=5), name="W_hat")
M_hat = tf.Variable(tf.truncated_normal(shape, stddev=5), name="M_hat")
G = tf.Variable(tf.truncated_normal(shape, stddev=0.02), name="G")
W = tf.multiply(tf.tanh(W_hat), tf.sigmoid(M_hat))
m = tf.exp(tf.matmul(tf.log(tf.abs(input_layer) + epsilon), W))
a = tf.matmul(input_layer, W)
g = tf.sigmoid(tf.matmul(input_layer, G))
y = tf.multiply(g, a) + tf.multiply(1-g, m)
return y
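# A minimal usage sketch (assumes TensorFlow 1.x, consistent with the tf.log /
# tf.truncated_normal calls above; input values are illustrative only).
if __name__ == "__main__":
    x = tf.placeholder(tf.float32, shape=(None, 2))
    y = nalu(x, num_outputs=1)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(y, feed_dict={x: [[1.0, 2.0], [3.0, 4.0]]}))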
|
python
|
from unifuncnet.fetchers.compound_fetchers.compound_fetcher import *
from unifuncnet.utils.rhea_sqlite_connector import RheaSqliteConnector
class CompoundFetcherRhea(CompoundFetcher, RheaSqliteConnector):
def __init__(self, compound_id, memory_storage=None):
CompoundFetcher.__init__(self, compound_id=compound_id, memory_storage=memory_storage)
self.db = 'rhea'
self.set_convergence_args()
self.compound = self.get_compound_rhea()
self.add_compound()
def set_convergence_args(self):
# args for convergence
self.convergence_args['reactions'] = set()
def get_compound_rhea(self):
compound_instance = Compound({'chebi': self.compound_id})
self.convergence_args['reactions'] = self.fetch_reactions_rhea_from_chebi(self.compound_id)
return compound_instance
def converge_compound_global(self):
self.converge_compound_to_reaction()
def converge_compound_to_reaction(self):
if self.convergence_args['reactions']:
for reaction_id in self.convergence_args['reactions']:
print(f'Linking from compound {self.compound_id} in {self.db} to reaction {reaction_id}')
self.find_reaction(query_id=reaction_id)
if __name__ == '__main__':
search = CompoundFetcherRhea('7580')
search.compound.get_all_info()
|
python
|
import json
from unittest import TestCase
from django.test.client import Client
from mock import patch
from regcore_write.views.notice import *
class ViewsNoticeTest(TestCase):
def test_add_not_json(self):
url = '/notice/docdoc'
response = Client().put(url, content_type='application/json',
data='{Invalid}')
self.assertEqual(400, response.status_code)
@patch('regcore_write.views.notice.db')
def test_add_label_success(self, db):
url = '/notice/docdoc'
response = Client().put(url, content_type='application/json',
data=json.dumps({'some': 'struct'}))
self.assertTrue(db.Notices.return_value.put.called)
args = db.Notices.return_value.put.call_args[0]
self.assertEqual('docdoc', args[0])
self.assertEqual({'some': 'struct', 'cfr_parts': []}, args[1])
response = Client().put(
url, content_type='application/json',
data=json.dumps({'some': 'struct', 'cfr_part': '1111'}))
self.assertTrue(db.Notices.return_value.put.called)
args = db.Notices.return_value.put.call_args[0]
self.assertEqual('docdoc', args[0])
self.assertEqual({'some': 'struct', 'cfr_parts': ['1111']}, args[1])
response = Client().put(
url, content_type='application/json',
data=json.dumps({'some': 'struct', 'cfr_parts': ['111', '222']}))
self.assertTrue(db.Notices.return_value.put.called)
args = db.Notices.return_value.put.call_args[0]
self.assertEqual('docdoc', args[0])
self.assertEqual({'some': 'struct', 'cfr_parts': ['111', '222']},
args[1])
|