from tensorflow.keras import layers, models # (Input, Dense), (Model)
from tensorflow.keras.datasets import mnist
import numpy as np
import matplotlib.pyplot as plt
class AE(models.Model):
def __init__(self, x_nodes=784, h_dim=36):
x_shape = (x_nodes,)
x = layers.Input(shape=x_shape)
h = layers.Dense(h_dim, activation='relu')(x)
y = layers.Dense(x_nodes, activation='sigmoid')(h)
super().__init__(x, y)
self.x = x
self.h = h
self.h_dim = h_dim
self.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
def Encoder(self):
return models.Model(self.x, self.h)
def Decoder(self):
h_shape = (self.h_dim,)
h = layers.Input(shape=h_shape)
y_layer = self.layers[-1]
y = y_layer(h)
return models.Model(h, y)
(X_train, _), (X_test, _) = mnist.load_data()
X_train = X_train.astype('float32') / 255.
X_test = X_test.astype('float32') / 255.
X_train = X_train.reshape((len(X_train), np.prod(X_train.shape[1:])))
X_test = X_test.reshape((len(X_test), np.prod(X_test.shape[1:])))
def show_ae(autoencoder):
encoder = autoencoder.Encoder()
decoder = autoencoder.Decoder()
encoded_imgs = encoder.predict(X_test)
decoded_imgs = decoder.predict(encoded_imgs)
n = 10
plt.figure(figsize=(20, 6))
for i in range(n):
ax = plt.subplot(2, n, i + 1)
plt.imshow(X_test[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(decoded_imgs[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
def main():
x_nodes = 784
h_dim = 36
autoencoder = AE(x_nodes, h_dim)
autoencoder.fit(X_train, X_train,
epochs=30,
batch_size=256,
shuffle=True,
validation_data=(X_test, X_test))
show_ae(autoencoder)
plt.show()
main()
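
# --- A small sketch appended here (not part of the original script): once the
# autoencoder is trained, the standalone Decoder can also be sampled directly.
# The latent magnitudes below are a rough guess, since the ReLU hidden layer is
# otherwise unconstrained.
def sample_decoder(autoencoder, n_samples=10):
    decoder = autoencoder.Decoder()
    codes = np.random.uniform(0.0, 5.0, size=(n_samples, autoencoder.h_dim)).astype('float32')
    imgs = decoder.predict(codes)
    plt.figure(figsize=(2 * n_samples, 2))
    for i in range(n_samples):
        ax = plt.subplot(1, n_samples, i + 1)
        plt.imshow(imgs[i].reshape(28, 28), cmap='gray')
        ax.set_axis_off()
    plt.show()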
|
python
|
"""Find and filter files. Supports BIDSPath objects from `mne-bids`."""
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Sequence, Union
import mne_bids
from pte.filetools.filefinder_abc import DirectoryNotFoundError, FileFinder
def get_filefinder(
datatype: str, hemispheres: Optional[dict] = None, **kwargs
) -> FileFinder:
"""Create and return FileFinder of desired type.
Parameters
----------
datatype : str
Allowed values for `datatype`: ["any", "bids"].
Returns
-------
FileFinder
Instance of FileFinder for reading given `datatype`.
"""
finders = {
"any": DefaultFinder,
"bids": BIDSFinder,
}
datatype = datatype.lower()
if datatype not in finders:
raise FinderNotFoundError(datatype, finders)
return finders[datatype](hemispheres=hemispheres, **kwargs)
@dataclass
class DefaultFinder(FileFinder):
"""Class for finding and handling any type of file."""
def find_files(
self,
directory: Union[Path, str],
extensions: Optional[Union[Sequence, str]] = None,
keywords: Optional[Union[list[str], str]] = None,
hemisphere: Optional[str] = None,
stimulation: Optional[str] = None,
medication: Optional[str] = None,
exclude: Optional[str] = None,
verbose: bool = False,
) -> None:
"""Find files in directory with optional
keywords and extensions.
Args:
directory (string)
keywords (list): e.g. ["SelfpacedRota", "ButtonPress"] (optional)
extensions (list): e.g. [".json", ".tsv"] (optional)
verbose (bool): verbosity level (optional, default=False)
"""
self.directory = Path(directory)
if not self.directory.is_dir():
raise DirectoryNotFoundError(self.directory)
self._find_files(self.directory, extensions)
self._filter_files(
keywords=keywords,
hemisphere=hemisphere,
stimulation=stimulation,
medication=medication,
exclude=exclude,
)
if verbose:
print(self)
def filter_files(
self,
keywords: Optional[list] = None,
hemisphere: Optional[str] = None,
stimulation: Optional[str] = None,
medication: Optional[str] = None,
exclude: Optional[str] = None,
verbose: bool = False,
) -> None:
"""Filter filepaths for given parameters and return filtered list."""
self._filter_files(
keywords=keywords,
hemisphere=hemisphere,
stimulation=stimulation,
medication=medication,
exclude=exclude,
)
if verbose:
print(self)
@dataclass
class BIDSFinder(FileFinder):
"""Class for finding and handling data files in BIDS-compliant format."""
bids_root: str = field(init=False)
def find_files(
self,
directory: str,
extensions: Optional[Union[Sequence, str]] = (".vhdr", ".edf"),
keywords: Optional[list] = None,
hemisphere: Optional[str] = None,
stimulation: Optional[str] = None,
medication: Optional[str] = None,
exclude: Optional[str] = None,
verbose: bool = False,
):
"""Find files in directory with optional keywords and extensions.
Parameters
----------
directory (string)
keywords (list): e.g. ["SelfpacedRota", "ButtonPress"] (optional)
extensions (list): e.g. [".json", ".tsv"] (optional)
verbose (bool): verbosity level (optional, default=False)
"""
self.directory = directory
self._find_files(self.directory, extensions)
self._filter_files(
keywords=keywords,
hemisphere=hemisphere,
stimulation=stimulation,
medication=medication,
exclude=exclude,
)
self.files = self._make_bids_paths(self.files)
if verbose:
print(self)
def filter_files(
self,
keywords: Optional[list] = None,
hemisphere: Optional[str] = None,
stimulation: Optional[str] = None,
medication: Optional[str] = None,
exclude: Optional[str] = None,
verbose: bool = False,
) -> None:
"""Filter list of filepaths for given parameters."""
self.files = [str(file.fpath.resolve()) for file in self.files]
self._filter_files(
keywords=keywords,
hemisphere=hemisphere,
stimulation=stimulation,
medication=medication,
exclude=exclude,
)
self.files = self._make_bids_paths(self.files)
if verbose:
print(self)
def _make_bids_paths(
self, filepaths: list[str]
) -> list[mne_bids.BIDSPath]:
"""Create list of mne-bids BIDSPath objects from list of filepaths."""
bids_paths = []
for filepath in filepaths:
# entities = mne_bids.get_entities_from_fname(filepath)
try:
bids_path = mne_bids.get_bids_path_from_fname(
fname=filepath, verbose=False
)
bids_path.update(root=self.directory)
except ValueError as err:
print(
f"ValueError while creating BIDS_Path object for file "
f"{filepath}: {err}"
)
else:
bids_paths.append(bids_path)
return bids_paths
class FinderNotFoundError(Exception):
"""Exception raised when invalid Finder is passed.
Attributes:
datatype -- input datatype which caused the error
finders -- allowed datatypes
message -- explanation of the error
"""
def __init__(
self,
datatype,
finders,
message="Input ``datatype`` is not an allowed value.",
) -> None:
self.datatype = datatype
self.finders = list(finders.keys())
self.message = message
super().__init__(self.message)
def __str__(self):
return (
f"{{self.message}} Allowed values: {self.finders}."
f" Got: {self.datatype}."
)
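
# Usage sketch (not part of the module); the directory path and keyword below are
# placeholders, and the snippet assumes the surrounding `pte` package is installed.
if __name__ == "__main__":
    finder = get_filefinder(datatype="bids")
    finder.find_files(
        directory="/path/to/bids_root",  # placeholder path
        keywords=["ButtonPress"],        # placeholder keyword
        verbose=True,
    )
    print(finder.files)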
|
python
|
import chex
import jax
import jax.numpy as jnp
from typing import Callable, Optional, Tuple
from .moment import MomentTransform, MomentTransformClass
from chex import Array, dataclass
import tensorflow_probability.substrates.jax as tfp
dist = tfp.distributions
class UnscentedTransform(MomentTransformClass):
def __init__(
self,
gp_pred,
n_features: int,
alpha: float = 1.0,
beta: float = 1.0,
kappa: Optional[float] = None,
):
self.gp_pred = gp_pred
self.sigma_pts = get_unscented_sigma_points(n_features, kappa, alpha)
self.Wm, self.wc = get_unscented_weights(n_features, kappa, alpha, beta)
self.Wc = jnp.diag(self.wc)
def predict_mean(self, x, x_cov):
# cholesky decomposition
L = jnp.linalg.cholesky(x_cov)
# calculate sigma points
# (D,M) = (D,1) + (D,D)@(D,M)
x_sigma_samples = x[..., None] + L @ self.sigma_pts
# ===================
# Mean
# ===================
# function predictions over mc samples
# (P,M) = (D,M)
y_mu_sigma = jax.vmap(self.gp_pred.predict_mean, in_axes=2, out_axes=1)(
x_sigma_samples
)
# mean of mc samples
# (N,M,P) @ (M,) -> (N,P)
y_mu = jnp.einsum("ijk,j->ik", y_mu_sigma, self.Wm)
return y_mu
def predict_f(self, x, x_cov, full_covariance=False):
# cholesky decomposition
L = jnp.linalg.cholesky(x_cov)
# calculate sigma points
# (D,M) = (D,1) + (D,D)@(D,M)
x_sigma_samples = x[..., None] + L @ self.sigma_pts
# ===================
# Mean
# ===================
# function predictions over mc samples
# (N,M,P) = (D,M)
y_mu_sigma = jax.vmap(self.gp_pred.predict_mean, in_axes=2, out_axes=1)(
x_sigma_samples
)
# mean of mc samples
# (N,M,P) @ (M,) -> (N,P)
y_mu = jnp.einsum("ijk,j->ik", y_mu_sigma, self.Wm)
# ===================
# Covariance
# ===================
if full_covariance:
# (N,P,M) - (N,P,1) -> (N,P,M)
dfydx = y_mu_sigma - y_mu[..., None]
# (N,M,P) @ (M,M) @ (N,M,P) -> (N,P,D)
cov = jnp.einsum("ijk,jl,mlk->ikm", dfydx, self.Wc, dfydx.T)
return y_mu, cov
else:
# (N,P,M) - (N,P,1) -> (N,P,M)
dfydx = y_mu_sigma - y_mu[..., None]
# (N,M,P) @ (M,) -> (N,P)
var = jnp.einsum("ijk,j->ik", dfydx ** 2, self.wc)
return y_mu, var
def predict_cov(self, key, x, x_cov):
# cholesky decomposition
L = jnp.linalg.cholesky(x_cov)
# calculate sigma points
# (D,M) = (D,1) + (D,D)@(D,M)
x_sigma_samples = x[..., None] + L @ self.sigma_pts
# ===================
# Mean
# ===================
# function predictions over mc samples
# (N,P,M) = (D,M)
y_mu_sigma = jax.vmap(self.gp_pred.predict_mean, in_axes=2, out_axes=1)(
x_sigma_samples
)
# mean of mc samples
# (N,P,M) @ (M,) -> (N,P)
y_mu = jnp.einsum("ijk,j->ik", y_mu_sigma, self.Wm)
# ===================
# Covariance
# ===================
# (N,P,M) - (N,P,1) -> (N,P,M)
dfydx = y_mu_sigma - y_mu[..., None]
# (N,M,P) @ (M,M) @ (N,M,P) -> (N,P,D)
y_cov = jnp.einsum("ijk,jl,mlk->ikm", dfydx, self.Wc, dfydx.T)
return y_cov
def predict_var(self, key, x, x_cov):
# cholesky decomposition
L = jnp.linalg.cholesky(x_cov)
# calculate sigma points
# (D,M) = (D,1) + (D,D)@(D,M)
x_sigma_samples = x[..., None] + L @ self.sigma_pts
# ===================
# Mean
# ===================
# function predictions over mc samples
# (N,P,M) = (D,M)
y_mu_sigma = jax.vmap(self.gp_pred.predict_mean, in_axes=2, out_axes=1)(
x_sigma_samples
)
# mean of mc samples
# (N,P,M) @ (M,) -> (N,P)
y_mu = jnp.einsum("ijk,j->ik", y_mu_sigma, self.Wm)
# ===================
# Variance
# ===================
# (N,P,M) - (N,P,1) -> (N,P,M)
dfydx = y_mu_sigma - y_mu[..., None]
# (N,M,P) @ (M,) -> (N,P)
var = jnp.einsum("ijk,j->ik", dfydx ** 2, self.wc)
return var
class SphericalTransform(UnscentedTransform):
def __init__(self, gp_pred, n_features: int):
super().__init__(
gp_pred=gp_pred, n_features=n_features, alpha=1.0, beta=0.0, kappa=0.0
)
def get_unscented_sigma_points(
n_features: int, kappa: Optional[float] = None, alpha: float = 1.0
) -> chex.Array:
"""Generate Unscented samples"""
# calculate kappa value
if kappa is None:
kappa = jnp.maximum(3.0 - n_features, 0.0)
lam = alpha ** 2 * (n_features + kappa) - n_features
c = jnp.sqrt(n_features + lam)
return jnp.hstack(
(jnp.zeros((n_features, 1)), c * jnp.eye(n_features), -c * jnp.eye(n_features))
)
def get_unscented_weights(
n_features: int,
kappa: Optional[float] = None,
alpha: float = 1.0,
beta: float = 2.0,
) -> Tuple[chex.Array, chex.Array]:
    """Generate the mean and covariance weights for the unscented sigma points."""
# calculate kappa value
if kappa is None:
kappa = jnp.maximum(3.0 - n_features, 0.0)
lam = alpha ** 2 * (n_features + kappa) - n_features
wm = 1.0 / (2.0 * (n_features + lam)) * jnp.ones(2 * n_features + 1)
wc = wm.copy()
# jax.ops.index_update was removed from JAX; use the functional .at[].set() updates
wm = wm.at[0].set(lam / (n_features + lam))
wc = wc.at[0].set(wm[0] + (1 - alpha ** 2 + beta))
return wm, wc
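
# Quick self-check sketch (not in the original module): for a D-dimensional input
# there are 2*D + 1 sigma points and the mean weights sum to one.
if __name__ == "__main__":
    D = 3
    sigma_pts = get_unscented_sigma_points(D)
    wm_check, wc_check = get_unscented_weights(D)
    assert sigma_pts.shape == (D, 2 * D + 1)
    print("sum(wm) =", float(jnp.sum(wm_check)))  # expected: 1.0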
|
python
|
"""
Core contributors is the cardinality of the smallest set of contributors whose \
total number of commits to a source code repository accounts for 80% or more of \
the total contributions.
"""
from pydriller import Repository
def core_contributors(path_to_repo: str) -> int:
"""
Return the number of core contributors: the smallest set of contributors whose commits account for at least 80% of all commits
:param path_to_repo: the path to the repository to analyze
:return: the number of core contributors
"""
total_commits = 0
contributors = dict()
for commit in Repository(path_to_repo).traverse_commits():
total_commits += 1
contributors[commit.committer.email] = contributors.get(commit.committer.email, 0) + 1
contributions = [v for k, v in sorted(contributors.items(), key=lambda item: item[1], reverse=True)]
i = 0
core_contribution = 0
core_contributors_ = 0
while i < len(contributions) and core_contribution < .8 * total_commits:
core_contribution += contributions[i]
core_contributors_ += 1
i += 1
return core_contributors_
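
# Example usage sketch (not part of the original module); the path below is a
# placeholder and can be a local clone or any repository URL pydriller accepts.
if __name__ == '__main__':
    print(core_contributors('/path/to/some/repo'))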
|
python
|
import numpy as np
from scipy.stats import pearsonr
import argparse
import data
import logging, sys
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(logging.Formatter("[%(asctime)s] %(levelname)s - %(message)s"))
logger.addHandler(ch)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('emb', type=str)
parser.add_argument('wordsim')
args = parser.parse_args()
logger.info('Load embedding')
with open(args.emb) as f:
word2id = {}
n_vocab, dim = map(int, f.readline().split())
emb = np.empty((n_vocab, dim))
for wid, line in enumerate(f):
word, vec_str = line.split(' ', 1)
emb[wid] = np.fromstring(vec_str, sep=' ')
word2id[word] = wid
logger.info('Load wordsim')
wordsim = data.load_wordsim(args.wordsim, word2id)
logger.info('Evaluate')
models = []
golds = []
for word1, word2, sim in wordsim:
vec1 = emb[word1]
vec2 = emb[word2]
models.append(np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2)))
golds.append(sim)
pearson = pearsonr(golds, models)[0]
logger.info('pearson={:.3f}'.format(pearson))
if __name__ == '__main__':
main()
|
python
|
from cocoa.web.main.utils import Messages as BaseMessages
class Messages(BaseMessages):
ChatCompleted = "Great, you reached a final offer!"
ChatIncomplete = "Sorry, you weren't able to reach a deal. :("
Redirect = "Sorry, that chat did not meet our acceptance criteria."
#BetterDeal = "Congratulations, you got the better deal! We'll award you a bonus on Mechanical Turk."
#WorseDeal = "Sorry, your partner got the better deal. :("
|
python
|
class Book:
def __init__(self, content: str):
self.content = content
class Formatter:
def format(self, book: Book) -> str:
return book.content
class Printer:
def get_book(self, book: Book, formatter: Formatter):
formatted_book = formatter.format(book)
return formatted_book
b = Book("Some content")
f = Formatter()
p = Printer()
print(p.get_book(b, f))
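
# Extension sketch (not in the original snippet): because Printer only depends on
# the Formatter interface, the output can be changed by swapping formatters without
# touching Book or Printer. HtmlFormatter is a hypothetical example subclass.
class HtmlFormatter(Formatter):
    def format(self, book: Book) -> str:
        return f"<p>{book.content}</p>"

print(p.get_book(b, HtmlFormatter()))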
|
python
|
import math
import numpy as np
import torch
import torch.nn as nn
def default_conv(in_channels, out_channels, kernel_size=3, bias=True):
return nn.Conv2d(
in_channels, out_channels, kernel_size,
padding=(kernel_size//2), bias=bias)
class MeanShift(nn.Conv2d):
def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):
super(MeanShift, self).__init__(3, 3, kernel_size=1)
std = torch.Tensor(rgb_std)
self.weight.data = torch.eye(3).view(3, 3, 1, 1)
self.weight.data.div_(std.view(3, 1, 1, 1))
self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)
self.bias.data.div_(std)
for param in self.parameters():
    param.requires_grad = False
class Upsampler(nn.Sequential):
def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):
m = []
if (scale & (scale - 1)) == 0: # Is scale = 2^n?
for _ in range(int(math.log(scale, 2))):
m.append(conv(n_feats, 4 * n_feats, 3, bias=bias))
m.append(nn.PixelShuffle(2))
if bn: m.append(nn.BatchNorm2d(n_feats))
if act == 'relu':
m.append(nn.ReLU(True))
elif act == 'prelu':
m.append(nn.PReLU(n_feats))
elif scale == 3:
m.append(conv(n_feats, 9 * n_feats, 3, bias=bias))
m.append(nn.PixelShuffle(3))
if bn: m.append(nn.BatchNorm2d(n_feats))
if act == 'relu':
m.append(nn.ReLU(True))
elif act == 'prelu':
m.append(nn.PReLU(n_feats))
else:
raise NotImplementedError
super(Upsampler, self).__init__(*m)
class CALayer(nn.Module):
def __init__(self, channel, reduction=16):
super(CALayer, self).__init__()
# global average pooling: feature --> point
self.avg_pool = nn.AdaptiveAvgPool2d(1)
# feature channel downscale and upscale --> channel weight
self.conv_du = nn.Sequential(
nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),
nn.Sigmoid()
)
def forward(self, x):
y = self.avg_pool(x)
y = self.conv_du(y)
return x * y
class DenseBlock(nn.Module):
def __init__(self, depth=8, rate=8, input_dim=64, out_dims=64):
super(DenseBlock, self).__init__()
self.depth = depth
filters = out_dims - rate * depth
self.dense_module = [
nn.Sequential(
nn.Conv2d(input_dim, filters+rate, kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True)
)
]
for i in range(1, depth):
self.dense_module.append(
nn.Sequential(
nn.Conv2d(filters+i*rate, rate, kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True)
)
)
self.dense_module = nn.ModuleList(self.dense_module)
def forward(self, x):
    # Dense connectivity: each layer receives the concatenation of all previous
    # layer outputs, so the final concatenation has `out_dims` channels.
    features = [self.dense_module[0](x)]
    for idx in range(1, self.depth):
        features.append(self.dense_module[idx](torch.cat(features, 1)))
    return torch.cat(features, 1)
class CADensenet(nn.Module):
def __init__(self, conv, n_feat, n_CADenseBlocks=5):
super(CADensenet, self).__init__()
self.n_blocks = n_CADenseBlocks
denseblock = [
DenseBlock(input_dim=n_feat, out_dims=64) for _ in range(n_CADenseBlocks)]
calayer = []
# The rest upsample blocks
for _ in range(n_CADenseBlocks):
calayer.append(CALayer(n_feat, reduction=16))
self.CADenseblock = nn.ModuleList()
for idx in range(n_CADenseBlocks):
self.CADenseblock.append(nn.Sequential(denseblock[idx], calayer[idx]))
self.CADenseblock.append(nn.Conv2d((n_CADenseBlocks+1)*n_feat, n_feat, kernel_size=1))
def forward(self, x):
feat = [x]
for idx in range(self.n_blocks):
x = self.CADenseblock[idx](feat[-1])
feat.append(x)
x = torch.cat(feat[:], 1)
x = self.CADenseblock[-1](x)
return x
class RCAB(nn.Module):
def __init__(self, conv, n_feat, kernel_size, reduction=16, bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
super(RCAB, self).__init__()
modules_body = []
for i in range(2):
modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias))
if bn: modules_body.append(nn.BatchNorm2d(n_feat))
if i == 0: modules_body.append(act)
modules_body.append(CALayer(n_feat, reduction))
self.body = nn.Sequential(*modules_body)
self.res_scale = res_scale
def forward(self, x):
res = self.body(x)
res += x
return res
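
# Shape sanity-check sketch (not part of the original module): the residual
# channel-attention block should preserve the (N, C, H, W) shape of its input.
if __name__ == '__main__':
    block = RCAB(default_conv, n_feat=64, kernel_size=3)
    dummy = torch.randn(1, 64, 32, 32)
    print(block(dummy).shape)  # expected: torch.Size([1, 64, 32, 32])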
|
python
|
from calendar import timegm
from datetime import datetime, timedelta
import jwt
import pytest
from oauthlib.oauth2 import (
InvalidClientError,
InvalidGrantError,
InvalidRequestFatalError,
)
from h.services.oauth._errors import (
InvalidJWTGrantTokenClaimError,
MissingJWTGrantTokenClaimError,
)
from h.services.oauth._jwt_grant_token import JWTGrantToken, VerifiedJWTGrantToken
class TestJWTGrantToken:
def test_init_decodes_token_without_verifying(self, patch):
jwt_decode = patch("h.services.oauth._jwt_grant_token.jwt.decode")
JWTGrantToken("abcdef123456")
jwt_decode.assert_called_once_with(
"abcdef123456", options={"verify_signature": False}
)
def test_init_raises_for_invalid_token(self):
with pytest.raises(InvalidRequestFatalError) as exc:
JWTGrantToken("abcdef123456")
assert exc.value.description == "Invalid JWT grant token format."
def test_issuer_returns_iss_claim(self):
jwttok = jwt_token({"iss": "test-issuer", "foo": "bar"})
grant_token = JWTGrantToken(jwttok)
assert grant_token.issuer == "test-issuer"
def test_issuer_raises_for_missing_iss_claim(self):
jwttok = jwt_token({"foo": "bar"})
grant_token = JWTGrantToken(jwttok)
with pytest.raises(MissingJWTGrantTokenClaimError) as exc:
_ = grant_token.issuer
assert exc.value.description == "Missing claim 'iss' (issuer) from grant token."
def test_verified_initializes_verified_token(self, patch):
verified_token = patch(
"h.services.oauth._jwt_grant_token.VerifiedJWTGrantToken"
)
jwttok = jwt_token({"iss": "test-issuer"})
grant_token = JWTGrantToken(jwttok)
grant_token.verified("top-secret", "test-audience")
verified_token.assert_called_once_with(jwttok, "top-secret", "test-audience")
def test_verified_returns_verified_token(self, patch):
verified_token = patch(
"h.services.oauth._jwt_grant_token.VerifiedJWTGrantToken"
)
jwttok = jwt_token({"iss": "test-issuer"})
grant_token = JWTGrantToken(jwttok)
actual = grant_token.verified("top-secret", "test-audience")
assert actual == verified_token.return_value
class TestVerifiedJWTGrantToken:
def test_init_returns_token_when_valid(self, claims):
jwttok = jwt_token(claims)
actual = VerifiedJWTGrantToken(jwttok, "top-secret", "test-audience")
assert isinstance(actual, VerifiedJWTGrantToken)
def test_init_raises_for_none_key(self, claims):
jwttok = jwt_token(claims)
with pytest.raises(InvalidClientError) as exc:
VerifiedJWTGrantToken(jwttok, None, "test-audience")
assert exc.value.description == "Client is invalid."
def test_init_raises_for_empty_key(self, claims):
    jwttok = jwt_token(claims)
    with pytest.raises(InvalidClientError) as exc:
        VerifiedJWTGrantToken(jwttok, "", "test-audience")
    assert exc.value.description == "Client is invalid."
def test_init_raises_for_too_long_token_lifetime(self, claims):
claims["exp"] = epoch(delta=timedelta(minutes=15))
jwttok = jwt_token(claims)
with pytest.raises(InvalidGrantError) as exc:
VerifiedJWTGrantToken(jwttok, "top-secret", "test-audience")
assert exc.value.description == "Grant token lifetime is too long."
def test_init_raises_for_invalid_signature(self, claims):
jwttok = jwt_token(claims)
with pytest.raises(InvalidGrantError) as exc:
VerifiedJWTGrantToken(jwttok, "wrong-secret", "test-audience")
assert exc.value.description == "Invalid grant token signature."
def test_init_raises_for_invalid_signature_algorithm(self, claims):
jwttok = jwt_token(claims, alg="HS512")
with pytest.raises(InvalidGrantError) as exc:
VerifiedJWTGrantToken(jwttok, "top-secret", "test-audience")
assert exc.value.description == "Invalid grant token signature algorithm."
@pytest.mark.parametrize(
"claim,description",
[["aud", "audience"], ["exp", "expiry"], ["nbf", "start time"]],
)
def test_init_raises_for_missing_claims(self, claims, claim, description):
del claims[claim]
jwttok = jwt_token(claims)
with pytest.raises(InvalidGrantError) as exc:
VerifiedJWTGrantToken(jwttok, "top-secret", "test-audience")
assert (
exc.value.description
== f"Missing claim '{claim}' ({description}) from grant token."
)
def test_init_raises_for_invalid_aud(self, claims):
claims["aud"] = "different-audience"
jwttok = jwt_token(claims)
with pytest.raises(InvalidJWTGrantTokenClaimError) as exc:
VerifiedJWTGrantToken(jwttok, "top-secret", "test-audience")
assert exc.value.description == "Invalid claim 'aud' (audience) in grant token."
@pytest.mark.parametrize(
"claim,description", [["exp", "expiry"], ["nbf", "start time"]]
)
def test_init_raises_for_invalid_timestamp_types(self, claims, claim, description):
claims[claim] = "wut"
jwttok = jwt_token(claims)
with pytest.raises(InvalidJWTGrantTokenClaimError) as exc:
VerifiedJWTGrantToken(jwttok, "top-secret", "test-audience")
assert (
exc.value.description
== f"Invalid claim '{claim}' ({description}) in grant token."
)
def test_init_returns_token_when_expired_but_in_leeway(self, claims):
claims["exp"] = epoch(delta=timedelta(seconds=-8))
jwttok = jwt_token(claims)
VerifiedJWTGrantToken(jwttok, "top-secret", "test-audience")
def test_init_raises_when_expired_with_leeway(self, claims):
claims["exp"] = epoch(delta=timedelta(minutes=-2))
jwttok = jwt_token(claims)
with pytest.raises(InvalidGrantError) as exc:
VerifiedJWTGrantToken(jwttok, "top-secret", "test-audience")
assert exc.value.description == "Grant token is expired."
def test_init_raises_for_nbf_claim_in_future(self, claims):
claims["nbf"] = epoch(delta=timedelta(minutes=2))
jwttok = jwt_token(claims)
with pytest.raises(InvalidGrantError) as exc:
VerifiedJWTGrantToken(jwttok, "top-secret", "test-audience")
assert exc.value.description == "Grant token is not yet valid."
def test_expiry_returns_exp_claim(self, claims):
now = datetime.utcnow().replace(microsecond=0)
delta = timedelta(minutes=2)
claims["exp"] = epoch(timestamp=now, delta=delta)
jwttok = jwt_token(claims)
grant_token = VerifiedJWTGrantToken(jwttok, "top-secret", "test-audience")
assert grant_token.expiry == (now + delta)
def test_not_before_returns_nbf_claim(self, claims):
now = datetime.utcnow().replace(microsecond=0)
delta = timedelta(minutes=-2)
claims["nbf"] = epoch(timestamp=now, delta=delta)
jwttok = jwt_token(claims)
grant_token = VerifiedJWTGrantToken(jwttok, "top-secret", "test-audience")
assert grant_token.not_before == (now + delta)
def test_subject_returns_sub_claim(self, claims):
jwttok = jwt_token(claims)
grant_token = VerifiedJWTGrantToken(jwttok, "top-secret", "test-audience")
assert grant_token.subject == "test-subject"
def test_subject_raises_for_missing_sub_claim(self, claims):
del claims["sub"]
jwttok = jwt_token(claims)
grant_token = VerifiedJWTGrantToken(jwttok, "top-secret", "test-audience")
with pytest.raises(InvalidGrantError) as exc:
_ = grant_token.subject
assert (
exc.value.description == "Missing claim 'sub' (subject) from grant token."
)
def test_subject_raises_for_empty_sub_claim(self, claims):
claims["sub"] = ""
jwttok = jwt_token(claims)
grant_token = VerifiedJWTGrantToken(jwttok, "top-secret", "test-audience")
with pytest.raises(InvalidGrantError) as exc:
_ = grant_token.subject
assert (
exc.value.description == "Missing claim 'sub' (subject) from grant token."
)
@pytest.fixture
def claims(self):
"""Return claims for a valid JWT token."""
return {
"aud": "test-audience",
"exp": epoch(delta=timedelta(minutes=5)),
"iss": "test-issuer",
"nbf": epoch(),
"sub": "test-subject",
}
def epoch(timestamp=None, delta=None):
if timestamp is None:
timestamp = datetime.utcnow()
if delta is not None:
timestamp = timestamp + delta
return timegm(timestamp.utctimetuple())
def jwt_token(claims, alg="HS256"):
return jwt.encode(claims, "top-secret", algorithm=alg)
|
python
|
import logging
from django.core.management import BaseCommand
from oldp.apps.laws.models import LawBook
LAW_BOOK_ORDER = {
'bgb': 10,
'agg': 9,
'bafog': 9,
}
# Get an instance of a logger
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Assign predefined order values to law books based on slug'
def __init__(self):
super(Command, self).__init__()
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
order_mapping = LAW_BOOK_ORDER
for book_slug in order_mapping:
try:
book = LawBook.objects.get(slug=book_slug)
book.order = order_mapping[book_slug]
book.save()
logger.info('Updated %s' % book)
except LawBook.DoesNotExist:
logger.debug('Does not exist: %s' % book_slug)
pass
logger.info('done')
|
python
|
# -*- coding: utf-8 -*-
from faker.providers import BaseProvider
class CustomProvider(BaseProvider):
sentences = (
'Hello world',
'Hi there',
'Ciao Bello',
)
def greeting(self):
return self.random_element(self.sentences)
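
# Usage sketch (not part of the original module), assuming the `Faker` package is
# installed; the provider is registered on a Faker instance before calling it.
if __name__ == '__main__':
    from faker import Faker
    fake = Faker()
    fake.add_provider(CustomProvider)
    print(fake.greeting())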
|
python
|
from dcgan import *
from resnet import *
|
python
|
import socket, os, json, tqdm
import shutil
SERVER_PORT = 8800
SERVER_HOST = "0.0.0.0"
BUFFER_SIZE = 1024
root_dir = "/home/hussein/dfs_dir"
def connect_to_name_server(sock, command, name_server):
print(f"[+] Connecting to {name_server[0]}:{name_server[1]}")
sock.connect(name_server)
print("[+] Connected.")
data = {"command_type": "system", "command": command, "params": []}
json_data = json.dumps(data)
sock.send(json_data.encode())
# receive response from the name server
received_msg = sock.recv(BUFFER_SIZE).decode()
print(received_msg)
def create_file(path):
filepath = root_dir + path
if not os.path.exists(os.path.dirname(filepath)):
os.makedirs(os.path.dirname(filepath))
try:
with open(filepath, "w") as file:
pass
return {"status": "OK",
"details": "File created Successfully"}
except Exception as e:
return {"message": {"status": "FAILED",
"details": 'Failed to create %s. Reason: %s' % (filepath, e)}}
def receive_file(sock, path, filesize):
filepath = root_dir + path
# progress = tqdm.tqdm(range(filesize), f"Receiving {path}", unit="B", unit_scale=True, unit_divisor=1024)
try:
    with open(filepath, "wb") as f:
        # recv() may return fewer bytes than requested, so loop until the whole file arrives
        bytes_received = 0
        while bytes_received < filesize:
            chunk = sock.recv(min(BUFFER_SIZE, filesize - bytes_received))
            if not chunk:
                break
            f.write(chunk)
            bytes_received += len(chunk)
return {"status": "OK",
"details": "File Dowonloaded Successfully"}
except Exception as e:
return {"status": "FAILED",
"details": 'Failed to download %s. Reason: %s' % (path, e)}
def send_file(sock, filename):
filename = root_dir + filename
try:
filesize = os.path.getsize(filename)
response = {"status": "OK",
"details": "File Found",
"size": filesize}
except Exception as e:
response = {"status": "FAILED",
"details": 'Failed to find file %s. Reason: %s' % (filename, e)}
sock.send((json.dumps(response) + ' ' * (1024 - len(json.dumps(response).encode()))).encode())
# sock.send((' ' * (1024 - len(json.dumps(response).encode()))).encode())
with open(filename, "rb") as f:
sock.send(f.read())
def delete_file(path):
filepath = root_dir + path
try:
os.remove(filepath)
return {"message": "File deleted Successfully"}
except:
return {"message": "Failed to delete file"}
def get_file_info(path):
filepath = root_dir + path
os.path.getsize(filepath)
def copy_file(src, dst):
try:
shutil.copyfile(root_dir + src, root_dir + dst)
return {"message": "File copied Successfully"}
except:
return {"message": "Failed to copy file"}
def move_file(src, dst):
try:
shutil.move(root_dir + src, root_dir + dst)
return {"message": "File moved Successfully"}
except:
return {"message": "Failed to move file"}
def delete_directory(dir_path):
dir_path = root_dir + dir_path
if not os.path.isdir(dir_path):
return {"message": 'No such file or directory'}
for filename in os.listdir(dir_path):
    file_path = os.path.join(dir_path, filename)
    try:
        if os.path.isfile(file_path) or os.path.islink(file_path):
            os.unlink(file_path)
        elif os.path.isdir(file_path):
            shutil.rmtree(file_path)
    except Exception as e:
        return {"message": 'Failed to delete %s. Reason: %s' % (file_path, e)}
# return success only after every entry has been removed
return {"message": "The storage is initialized"}
if __name__ == '__main__':
# command = 'register-storage-server'
# param = (input('Enter name server IP: '), int(input('Enter name server port: ')))
# registration_sock = socket.socket()
# connect_to_name_server(registration_sock, command, param)
# registration_sock.close()
s = socket.socket()
s.bind((SERVER_HOST, SERVER_PORT))
s.listen(5)
while True:
print(f"[*] Listening as {SERVER_HOST}:{SERVER_PORT}")
client_socket, address = s.accept()
print(f"[+] {address} is connected.")
data = client_socket.recv(BUFFER_SIZE).decode()
data = json.loads(data)
if data["command_type"] == "file":
if data["command"] == "create":
print(data["params"][0])
print(json.dumps(create_file(data["params"][0])))
client_socket.send(json.dumps(create_file(data["params"][0])).encode())
if data["command"] == "write":
filepath = data["params"][0]
filesize = data["params"][1]
received = receive_file(client_socket, filepath, filesize)
client_socket.send(json.dumps(received).encode())
if data["command"] == "delete":
filepath = data["params"][0]
deleted = delete_file(filepath)
client_socket.send(json.dumps(deleted).encode())
if data["command"] == "info":
filepath = data["params"][0]
get_file_info(filepath)
if data["command"] == "read":
filepath = data["params"][0]
send_file(client_socket, filepath)
if data["command"] == "copy":
source = data["params"][0]
destination = data["params"][1]
copied = copy_file(source, destination)
client_socket.send(json.dumps(copied).encode())
if data["command"] == "move":
current_path = data["params"][0]
new_path = data["params"][1]
moved = move_file(current_path, new_path)
client_socket.send(json.dumps(moved).encode())
if data["command_type"] == "directory":
if data["command"] == "delete":
dir_path = data["params"][0]
deleted = delete_directory(dir_path)
client_socket.send(json.dumps(deleted).encode())
if data["command_type"] == "system":
if data["command"] == "init":
if os.path.exists(root_dir):
shutil.rmtree(root_dir)
response = {
"status": "OK",
"details": f"The storage is initialized"
}
client_socket.send(json.dumps(response).encode())
client_socket.close()
s.close()
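
# Client-side sketch (not part of the server script) illustrating the JSON protocol
# the server expects: one message containing "command_type", "command" and "params".
# Host and path below are placeholders.
def example_create_file(host="127.0.0.1", port=SERVER_PORT, path="/demo.txt"):
    client = socket.socket()
    client.connect((host, port))
    request = {"command_type": "file", "command": "create", "params": [path]}
    client.send(json.dumps(request).encode())
    print(client.recv(BUFFER_SIZE).decode())
    client.close()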
|
python
|
from rest_framework import serializers, status
from rest_framework.exceptions import APIException
from projects.serializers.base_serializers import ProjectReferenceWithMemberSerializer
from users.serializers import UserSerializer
from teams.serializers import TeamSerializer
from teams.models import Team
from .base_serializers import OrganizationReferenceSerializer
from ..models import OrganizationUser, OrganizationUserRole, ROLES
class OrganizationSerializer(OrganizationReferenceSerializer):
pass
class OrganizationDetailSerializer(OrganizationSerializer):
projects = ProjectReferenceWithMemberSerializer(many=True)
teams = TeamSerializer(many=True)
openMembership = serializers.BooleanField(source="open_membership")
scrubIPAddresses = serializers.BooleanField(source="scrub_ip_addresses")
class Meta(OrganizationSerializer.Meta):
fields = OrganizationSerializer.Meta.fields + (
"projects",
"openMembership",
"scrubIPAddresses",
"teams",
)
class HTTP409APIException(APIException):
status_code = status.HTTP_409_CONFLICT
class OrganizationUserSerializer(serializers.ModelSerializer):
user = UserSerializer(required=False, read_only=True)
role = serializers.CharField(source="get_role")
roleName = serializers.CharField(source="get_role_display", read_only=True)
dateCreated = serializers.DateTimeField(source="created", read_only=True)
teams = serializers.SlugRelatedField(
many=True, write_only=True, slug_field="slug", queryset=Team.objects.none()
)
class Meta:
model = OrganizationUser
fields = (
"role",
"id",
"user",
"roleName",
"dateCreated",
"email",
"teams",
"pending",
)
def __init__(self, *args, request_user=None, **kwargs):
super().__init__(*args, **kwargs)
if "request" in self.context:
organization_slug = self.context["view"].kwargs.get("organization_slug")
self.fields["teams"].child_relation.queryset = Team.objects.filter(
organization__slug=organization_slug
)
def get_extra_kwargs(self):
""" email should be read only when updating """
extra_kwargs = super().get_extra_kwargs()
if self.instance is not None:
extra_kwargs["email"] = {"read_only": True}
extra_kwargs["user"] = {"read_only": True}
return extra_kwargs
def create(self, validated_data):
role = OrganizationUserRole.from_string(validated_data.get("get_role"))
email = validated_data.get("email")
organization = validated_data.get("organization")
teams = validated_data.get("teams")
if organization.organization_users.filter(email=email).exists():
raise HTTP409APIException(f"The user {email} is already invited", "email")
if organization.organization_users.filter(user__email=email).exists():
raise HTTP409APIException(f"The user {email} is already a member", "email")
org_user = super().create(
{"role": role, "email": email, "organization": organization}
)
org_user.team_set.add(*teams)
return org_user
def update(self, instance, validated_data):
get_role = validated_data.pop("get_role", None)
if get_role:
role = OrganizationUserRole.from_string(get_role)
validated_data["role"] = role
return super().update(instance, validated_data)
def to_representation(self, obj):
""" Override email for representation to potientially show user's email """
self.fields["email"] = serializers.SerializerMethodField()
return super().to_representation(obj)
def get_email(self, obj):
""" Prefer user primary email over org user email (which is only for invites) """
if obj.user:
return obj.user.email
return obj.email
class OrganizationUserDetailSerializer(OrganizationUserSerializer):
teams = serializers.SlugRelatedField(
source="team_set", slug_field="slug", read_only=True, many=True
)
roles = serializers.SerializerMethodField()
class Meta(OrganizationUserSerializer.Meta):
fields = OrganizationUserSerializer.Meta.fields + ("roles",)
def get_roles(self, obj):
return ROLES
class OrganizationUserProjectsSerializer(OrganizationUserSerializer):
projects = serializers.SerializerMethodField()
class Meta(OrganizationUserSerializer.Meta):
fields = OrganizationUserSerializer.Meta.fields + ("projects",)
def get_projects(self, obj):
return obj.organization.projects.filter(team__members=obj).values_list(
"slug", flat=True
)
class ReinviteSerializer(serializers.Serializer):
reinvite = serializers.IntegerField()
def update(self, instance, validated_data):
if validated_data.get("reinvite"):
pass
# Send email
return instance
class OrganizationUserOrganizationSerializer(OrganizationUserSerializer):
""" Organization User Serializer with Organization info """
organization = OrganizationSerializer()
class Meta(OrganizationUserSerializer.Meta):
fields = OrganizationUserSerializer.Meta.fields + ("organization",)
class AcceptInviteSerializer(serializers.Serializer):
accept_invite = serializers.BooleanField()
org_user = OrganizationUserOrganizationSerializer(read_only=True)
|
python
|
from django.conf import settings
from django.urls import include, path, re_path
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
# CVAT engine.urls is redirecting 'unknown url' to /dashboard/ which
# messes with our routing of unknown paths to index.html for reactjs
# so we have to strip the client entry point from cvat url patterns
from cvat.apps.engine.urls import urlpatterns as cvat_urlpatterns
cvat_urlpatterns = cvat_urlpatterns[1:]
urlpatterns = [
# Django Admin, use {% url 'admin:index' %}
path(settings.ADMIN_URL, admin.site.urls),
# use CVAT for labeling
#path("", include("cvat.apps.engine.urls")),
path("", include(cvat_urlpatterns)),
path("cvat-ui", include("opentpod.cvat_ui_adapter.urls")),
# use rest_auth for authentication and registration
path("auth/", include("rest_auth.urls")),
path("auth/registration/", include('rest_auth.registration.urls')),
path("django-rq/", include('django_rq.urls')),
path("", include("opentpod.object_detector.urls")),
# React SPA
path("manifest.json", TemplateView.as_view(template_name="manifest.json")),
path("favicon.ico", default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")}),
re_path(".*", TemplateView.as_view(template_name="index.html")),
] + static(
settings.STATIC_URL,
document_root=settings.STATIC_ROOT
)
# + static( # django only serves static files when DEBUG=True
# settings.DATA_URL,
# document_root=settings.DATA_ROOT
# )
# + static(
# settings.MEDIA_URL,
# document_root=settings.MEDIA_ROOT
# )
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
path(
"400/",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
path(
"403/",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
path(
"404/",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
path("500/", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
|
python
|
# -*- coding:utf-8 -*-
import re
def parse(s):
l = re.sub(r'\s+', ', ', (' '+s.lower()+' ').replace('(', '[').replace(')', ']'))[2:-2]
return eval(re.sub(r'(?P<symbol>[\w#%\\/^*+_\|~<>?!:-]+)', lambda m : '"%s"' % m.group('symbol'), l))
def cons(a, d):
if atom(d):
return (a, d)
return (lambda *args : list(args))(a, *d)
def car(s):
return s[0]
def cdr(s):
if isinstance(s, tuple):
return s[1]
if len(s) == 1:
return []
return s[1:]
def atom(s):
return not isinstance(s, list)
def eq(s, t):
return s == t
def cond(l, d):
for [p, e] in cdr(l):
if eval_(p, d):
return eval_(e, d)
class lambda_object:
count = 0
def __init__(self, l, d):
self.dic = d
self.li = l[1]
self.ex = l[2]
lambda_object.count += 1
self.serial = lambda_object.count
def __call__(self, *args):
for i in range(len(self.li)):
self.dic[self.li[i]] = args[i]
return eval_(self.ex, self.dic)
def __str__(self):
return '<COMPOND-PROCEDURE-#%d>' % self.serial
__repr__ = __str__
def label(l, d):
d[l[1]] = eval_(l[2])
try:
if re.match(r'<COMPOND-PROCEDURE-#\d+>', str(d[l[1]])):
symbol_t[str(d[l[1]])] = '%s' % l[1]
finally:
pass
def quote(l, d):
return l[1]
symbol_s = {'cons':cons, 'car':car, 'cdr':cdr, 'atom?':atom, 'eq?':eq, '#t':True, '#f':False}
syntax_s = {'cond':cond, 'lambda':lambda_object, 'quote':quote, 'label':label}
symbol_t = dict()
for k, v in symbol_s.items():
symbol_t[str(v)] = '%s' % k
symbol_t[True] = '#t'
symbol_t[False] = '#f'
def sstring(l, align=0):
if atom(l):
if str(l) in symbol_t:
return symbol_t[str(l)]
elif l == None:
return 'unspecific-return-value'
elif isinstance(l, tuple):
return '%s . %s' % (l[0], l[1])
else:
return str(l)
elif l == []:
return '()'
s = '('
for x in l:
s += sstring(x, align) + ' '
return s[:-1] + ')'
def eval_(l, s=symbol_s):
if atom(l):
return symbol_s[l]
eval_.depth += 1
print '; ='+'>'*eval_.depth, sstring(l)
if atom(l[0]) and l[0] in syntax_s:
u = syntax_s[l[0]](l, s)
print '; ='+'|'*eval_.depth, sstring(u), '--', l[0]
eval_.depth -= 1
return u
else:
operator = eval_(l[0], s)
operands = map(lambda e: eval_(e,s), l[1:])
#print 'sval ='+'|'*eval_.depth, sstring(cons(operator, operands))
u = operator(*operands)
print '; -' +'|'*eval_.depth, sstring(u), '<<', '%s[%s]' % (sstring(operator), (len(operands) > 1) and str.join(', ', map(sstring, operands)) or sstring(*operands))
eval_.depth -= 1
return u
eval_.depth = 0
if __name__ == '__main__':
code = '''
(label ff
(lambda (s)
(cond
((atom? s) s)
(#t (ff (car s))))))
'''
print eval_(parse(code))
print symbol_s
print symbol_t
print sstring(eval_(parse("(cons (ff (quote (((a b) c)))) (quote (d)))")))
eval_(parse('''
((cond (#f cdr) (#t car)) (quote a b c))'''))
|
python
|
from flatland.envs.agent_utils import RailAgentStatus
from flatland.envs.malfunction_generators import malfunction_from_params
from flatland.envs.observations import GlobalObsForRailEnv
from flatland.envs.rail_env import RailEnv
from flatland.envs.rail_generators import sparse_rail_generator
from flatland.envs.schedule_generators import sparse_schedule_generator
import random
import r2_solver
from flatland.utils.rendertools import RenderTool
import sys
import time
class stoch_data:
def __init__(self):
self.malfunction_rate = malfunction_rate
self.min_duration = malfunction_min_duration
self.max_duration = malfunction_max_duration
def GetTestParams(tid):
seed = tid * 19997 + 0
random.seed(seed)
width = 50 #+ random.randint(0, 100)
height = 50 #+ random.randint(0, 100)
nr_cities = 4 + random.randint(0, (width + height) // 10)
nr_trains = min(nr_cities * 20, 100 + random.randint(0, 100))
max_rails_between_cities = 2
max_rails_in_cities = 3 + random.randint(0, 5)
malfunction_rate = 30 + random.randint(0, 100)
malfunction_min_duration = 3 + random.randint(0, 7)
malfunction_max_duration = 20 + random.randint(0, 80)
return (seed, width, height, nr_trains, nr_cities, max_rails_between_cities, max_rails_in_cities, malfunction_rate, malfunction_min_duration, malfunction_max_duration)
def ShouldRunTest(tid):
# return tid >= 7
#return tid >= 3
return True
DEFAULT_SPEED_RATIO_MAP = {1.: 0.25,
1. / 2.: 0.25,
1. / 3.: 0.25,
1. / 4.: 0.25}
NUM_TESTS = 10
d_base = {}
f = open("scores.txt", "r")
for line in f.readlines():
lsplit = line.split(" ")
if len(lsplit) >= 4:
test_id = int(lsplit[0])
num_done_agents = int(lsplit[1])
percentage_num_done_agents = float(lsplit[2])
score = float(lsplit[3])
d_base[test_id] = (num_done_agents, score)
f.close()
f = open("tmp-scores.txt", "w")
total_percentage_num_done_agents = 0.0
total_score = 0.0
total_base_percentage_num_done_agents = 0.0
total_base_score = 0.0
num_tests = 0
for test_id in range(NUM_TESTS):
seed, width, height, nr_trains, nr_cities, max_rails_between_cities, max_rails_in_cities, malfunction_rate, malfunction_min_duration, malfunction_max_duration = GetTestParams(test_id)
if not ShouldRunTest(test_id):
continue
rail_generator = sparse_rail_generator(max_num_cities=nr_cities,
seed=seed,
grid_mode=False,
max_rails_between_cities=max_rails_between_cities,
max_rails_in_city=max_rails_in_cities,
)
schedule_generator = sparse_schedule_generator(DEFAULT_SPEED_RATIO_MAP)
stochastic_data = {'malfunction_rate': malfunction_rate,
'min_duration': malfunction_min_duration,
'max_duration': malfunction_max_duration
}
observation_builder = GlobalObsForRailEnv()
env = RailEnv(width=width,
height=height,
rail_generator=rail_generator,
schedule_generator=schedule_generator,
number_of_agents=nr_trains,
malfunction_generator_and_process_data=malfunction_from_params(stoch_data()),
obs_builder_object=observation_builder,
remove_agents_at_target=True
)
obs = env.reset()
env_renderer = RenderTool(env)
solver = r2_solver.Solver(test_id)
score = 0.0
num_steps = 8 * (width + height + 20)
print("test_id=%d seed=%d nr_trains=%d nr_cities=%d num_steps=%d" % (test_id, seed, nr_trains, nr_cities, num_steps))
for step in range(num_steps):
moves = solver.GetMoves(env.agents, obs[0])
print(moves)
next_obs, all_rewards, done, _ = env.step(moves)
env_renderer.render_env(show=True, frames=False, show_observations=False)
time.sleep(0.3)
for a in range(env.get_num_agents()):
score += float(all_rewards[a])
obs = next_obs.copy()
if done['__all__']:
break
num_done_agents = 0
for aid, agent in enumerate(env.agents):
if agent.status == RailAgentStatus.DONE_REMOVED:
num_done_agents += 1
percentage_num_done_agents = 100.0 * num_done_agents / len(env.agents)
total_percentage_num_done_agents += percentage_num_done_agents
total_score += score
num_tests += 1
base_num_done_agents = 0
base_score = -1e9
if test_id in d_base:
base_num_done_agents, base_score = d_base[test_id]
base_percentage_num_done_agents = 100.0 * base_num_done_agents / len(env.agents)
total_base_percentage_num_done_agents += base_percentage_num_done_agents
total_base_score += base_score
avg_nda = total_percentage_num_done_agents / num_tests
avg_nda_dif = (total_percentage_num_done_agents - total_base_percentage_num_done_agents) / num_tests
print("\n### test_id=%d nda=%d(dif=%d) pnda=%.6f(dif=%.6f) score=%.6f(dif=%.6f) avg_nda=%.6f(dif=%.6f) avg_sc=%.6f(dif=%.6f)\n" % (test_id, num_done_agents, num_done_agents - base_num_done_agents, percentage_num_done_agents, percentage_num_done_agents - base_percentage_num_done_agents, score, score - base_score, avg_nda, avg_nda_dif, total_score / num_tests, (total_score - total_base_score) / num_tests))
f.write("%d %d% .10f %.10f %d %.10f %.10f\n" % (test_id, num_done_agents, percentage_num_done_agents, score, num_done_agents - base_num_done_agents, percentage_num_done_agents - base_percentage_num_done_agents, avg_nda_dif))
f.flush()
f.close()
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class FeatureMap():
"""FeatureMap class."""
# TODO: is a class for this necessary?
def __init__(self, mapping):
"""Initialize a FeatureMap.
Args:
mapping : dict
Dictionary of (qubit, features for qubit) key-value pairs.
"""
self.map = mapping
def _has_all_features(self):
"""Checks to make sure all features are present."""
# TODO: implement
pass
def _is_valid_mapping(self):
"""Returns True if the mapping is valid."""
# TODO: implement
# TODO: what defines a valid mapping?
pass
def direct(num_features):
"""Returns a FeatureMap with the direct encoding
Feature[i] --> Qubit[i].
"""
mapping = dict((k, (k,)) for k in range(num_features))
return FeatureMap(mapping)
def nearest_neighbor(num_features, num_qubits):
"""Returns a FeatureMap with nearest neighbor encoding.
Examples:
nearest_neighbor(4, 2) --> {0 : (0, 1), 1 : (2, 3)}
"""
bin_size = num_features // num_qubits
mapping = dict((k, tuple(range(k * bin_size, (k + 1) * bin_size))) for k in range(0, num_qubits))
return FeatureMap(mapping)
def group_biggest(data, num_features, num_qubits):
"""Returns a FeatureMap with the biggest features in the
first qubits.
Examples:
"""
# TODO: implement
def group_smallest(data, num_features, num_qubits):
"""Returns a FeatureMap with smallest features
in the first qubits.
Examples:
"""
# TODO: implement
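
# Quick usage sketch (not in the original module), matching the docstring example:
# four features packed onto two qubits, two features per qubit.
if __name__ == "__main__":
    fmap = nearest_neighbor(4, 2)
    print(fmap.map)  # expected: {0: (0, 1), 1: (2, 3)}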
|
python
|
# Generated by Django 3.2.12 on 2022-03-31 23:23
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='LFPost',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('author', models.CharField(max_length=32)),
('date', models.DateField()),
('place', models.CharField(max_length=64)),
('name', models.CharField(max_length=64)),
('text', models.CharField(blank=True, default=None, max_length=256)),
('pic1', models.FileField(blank=True, upload_to='')),
('pic2', models.FileField(blank=True, upload_to='')),
('pic3', models.FileField(blank=True, upload_to='')),
('public', models.BooleanField()),
('time', models.DateTimeField()),
('status', models.SmallIntegerField(default=0)),
('type', models.CharField(choices=[('L', 'Lost'), ('F', 'Found')], max_length=1)),
],
),
migrations.CreateModel(
name='LFReply',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('post_id', models.IntegerField()),
('author', models.CharField(max_length=32)),
('text', models.CharField(blank=True, default=None, max_length=256)),
('pic1', models.FileField(blank=True, upload_to='')),
('pic2', models.FileField(blank=True, upload_to='')),
('pic3', models.FileField(blank=True, upload_to='')),
('public', models.BooleanField()),
('time', models.DateTimeField()),
],
),
]
|
python
|
from hamcrest import *
from utils import *
from vinyldns_context import VinylDNSTestContext
from vinyldns_python import VinylDNSClient
class ListGroupsTestContext(object):
def __init__(self):
self.client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, access_key='listGroupAccessKey',
secret_key='listGroupSecretKey')
self.support_user_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, 'supportUserAccessKey',
'supportUserSecretKey')
def build(self):
try:
for runner in range(0, 50):
new_group = {
'name': "test-list-my-groups-{0:0>3}".format(runner),
'email': '[email protected]',
'members': [{'id': 'list-group-user'}],
'admins': [{'id': 'list-group-user'}]
}
self.client.create_group(new_group, status=200)
except:
# teardown if there was any issue in setup
try:
self.tear_down()
except:
pass
raise
def tear_down(self):
clear_zones(self.client)
clear_groups(self.client)
|
python
|
#!/usr/bin/env python3
# Hackish script to automatically generate some parts of JSON file required for android-prepare-vendor
# Requires 4 arguments:
# (1) device name
# (2) module-info.json from build (3) below, can be found under out/target/product/<device>/module-info.json
# (3) listing of extracted files from a build of AOSP for device with a minimal vendor directory (see autogenerate.nix)
# (4) listing of extracted files from the upstream factory image for device
import sys
import json
from typing import List
def main() -> None:
def _replace_system_system(s: str) -> str:
if s.startswith('system/system/'):
return s[len('system/'):]
else:
return s
device_name = sys.argv[1]
module_json = json.load(open(sys.argv[2]))
built_files = set(_replace_system_system(s) for s in open(sys.argv[3]).read().split('\n'))
upstream_files = set(_replace_system_system(s) for s in open(sys.argv[4]).read().split('\n'))
filename_prefix = f'out/target/product/{device_name}/'
file_module_lookup = {
filename: modulename
for modulename, data in module_json.items()
for filename in data['installed']
if filename.startswith(filename_prefix)
}
needed_files = set()
needed_modules = set()
for filename in upstream_files:
if filename not in built_files:
key = filename_prefix + filename
if key in file_module_lookup:
# if filename.startswith('vendor/') or filename.startswith('system_ext/'):
if filename.startswith('vendor/'):
needed_modules.add(file_module_lookup[key])
else:
if not filename.startswith('vendor/lib/modules/'):
needed_files.add(filename)
modules_files = set()
for modulename in needed_modules:
for filename in module_json[modulename]['installed']:
if filename.startswith(filename_prefix):
modules_files.add(filename[len(filename_prefix):])
def _is_bytecode(s: str) -> bool:
return s.endswith('.apk') or s.endswith('.jar')
DEP_DSOS: List[str] = [
"vendor/lib/libadsprpc.so",
"vendor/lib/libsdsprpc.so",
"vendor/lib64/libadsprpc.so",
"vendor/lib64/libsdsprpc.so",
]
SKIP_MODULES: List[str] = []
vendor_skip_files = set(filename[len('vendor/'):] for filename in modules_files
if filename.startswith('vendor/')
)
vendor_skip_files.update(filename[len('vendor/'):] for filename in built_files
if filename in upstream_files and filename.startswith('vendor/')
)
# Manual addition. Might not be needed if we include the corresponding stuff in system_ext
vendor_skip_files.add('etc/vintf/manifest/manifest_wifi_ext.xml')
naked_config = {
# 'new-modules': [],
'dep-dso': [
dso for dso in DEP_DSOS
if dso in needed_files
],
# 'rro-overlays': [],
'forced-modules': sorted(set(modulename for modulename in needed_modules if modulename not in SKIP_MODULES)),
'vendor-skip-files': sorted(vendor_skip_files),
'system-bytecode': sorted(
filename for filename in needed_files
if (filename.startswith('system/') and _is_bytecode(filename)
and not ('Google/' in filename or '/Google' in filename))
),
# 'system-other': sorted(
# filename for filename in needed_files
# if (filename.startswith('system/') and not _is_bytecode(filename)
# and not (filename.endswith('.odex') or filename.endswith('.vdex') or filename.endswith('.apex')))
# ),
# 'system_ext-bytecode': sorted(
# filename for filename in needed_files
# if (filename.startswith('system_ext/') and _is_bytecode(filename)
# and not ('Google/' in filename or '/Google' in filename))
# ),
# 'system_ext-other': sorted(
# filename for filename in needed_files
# if (filename.startswith('system_ext/') and not _is_bytecode(filename)
# and not (filename.endswith('.odex') or filename.endswith('.vdex')))
# ),
# 'product-bytecode': sorted(
# filename for filename in needed_files
# if filename.startswith('product/') and _is_bytecode(filename)
# ),
# 'product-other': sorted(
# filename for filename in needed_files
# if (filename.startswith('product/') and not _is_bytecode(filename)
# and not (filename.endswith('.odex') or filename.endswith('.vdex')))
# ),
}
apv_config = {
'api-30': {
'naked': naked_config
}
}
print(json.dumps(apv_config, sort_keys=True, indent=2, separators=(',', ': ')))
if __name__ == "__main__":
main()
|
python
|
import csv
from math import *
class Point:
def __init__(self, x, y, z, mag=0):
self.x = x
self.y = y
self.z = z
self.mag = mag
class Polar:
def __init__(self, r, theta, vphi, mag=0):
self.r = r
self.theta = theta # 0 < theta < pi
self.vphi = vphi # -pi < vphi < pi
self.mag = mag
def get_polar_from(point):
r = sqrt(point.x**2 + point.y**2 + point.z**2)
theta = acos(point.z/r)
varphi = atan2(point.y,point.x)
return Polar(r, theta, varphi, point.mag)
def get_data_from_csv(filename):
points = []
with open(filename, 'rb') as f:
f.readline()
reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_NONE)
for row in reader:
try:
data = row[17:20]
data.append(row[13])
data = map(float,data)
except:
    # skip rows that cannot be parsed instead of appending stale data
    continue
points.append(Point(*data))
return points
cartesian_stars = get_data_from_csv('hygxyz_bigger9.csv')
polar_stars = map(get_polar_from, cartesian_stars)
stereographic_stars = map(lambda x: [1/(tan(x.theta)*2), x.vphi], polar_stars) # R, phi
normalized_cartesian = map(lambda x: Point(x.x/10000000,x.y/10000000,x.z/10000000), cartesian_stars)
import matplotlib.pyplot as plt
import pylab
a1 = map(lambda x: x.theta, polar_stars)
a2 = map(lambda x: x.vphi, polar_stars)
a3 = map(lambda x: x.mag, polar_stars)
plt.clf()
#plt.scatter(a1, a2, s=0.01)
#plt.scatter(a1,a2,s=map(lambda x: x/21.0,a3))
#plt.scatter(a1,a2,s=map(lambda x: x/210.0,a3))
plt.scatter(a1,a2,s=map(lambda x: exp(x)/exp(21.0),a3))
plt.show()
plt.clf()
plt.scatter(a1, a2, s=0.01)
F = pylab.gcf()
F.patch.set_facecolor('black')
DPI = F.get_dpi()
DefaultSize = F.get_size_inches()
F.set_size_inches( (DefaultSize[0]*2, DefaultSize[1]*2) )
F.savefig("s1.eps")
plt.clf()
fig = plt.figure(figsize=(10,5),dpi=300,facecolor='black')
fig.subplots_adjust(wspace=.001,hspace=.001,left=.001,bottom=.001)
ax = fig.add_subplot(1,1,1,axisbg='black')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.scatter(a1, a2, s=0.01, color='white', linewidth=0)
fig.patch.set_visible(False)
ax.axis('off')
with open('starfield.eps', 'w') as outfile:
fig.canvas.print_eps(outfile, dpi=300)
plt.savefig("scatter.eps", facecolor=fig.get_facecolor(), transparent=True)
|
python
|
"""
The MIT License (MIT)
Copyright (c) 2014 NTHUOJ team
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from json import dumps
from django.contrib import messages
from django.contrib.auth import authenticate, login, update_session_auth_hash
from django.contrib.auth.decorators import login_required
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from axes.decorators import *
from django.http import Http404
from django.shortcuts import redirect
from contest.public_user import is_public_user
from problem.models import Problem
from users.admin import UserCreationForm, AuthenticationForm
from users.forms import CodeSubmitForm
from users.forms import UserProfileForm, UserLevelForm, UserForgetPasswordForm
from users.models import UserProfile, Notification
from users.templatetags.profile_filters import can_change_userlevel
from utils.log_info import get_logger
from utils.user_info import get_user_statistics, send_activation_email, send_forget_password_email
from utils.render_helper import render_index, get_current_page, get_next_page
from utils.config_info import get_config
# Create your views here.
logger = get_logger()
def user_profile(request, username):
try:
profile_user = User.objects.get(username=username)
piechart_data = get_user_statistics(profile_user)
render_data = {}
render_data['profile_user'] = profile_user
render_data['piechart_data'] = dumps(piechart_data)
if request.user == profile_user and not is_public_user(profile_user):
render_data['profile_form'] = UserProfileForm(
instance=profile_user)
if can_change_userlevel(request.user, profile_user):
render_data['userlevel_form'] = UserLevelForm(instance=profile_user,
request_user=request.user)
if request.user == profile_user and request.method == 'POST' \
and 'profile_form' in request.POST:
profile_form = UserProfileForm(request.POST, instance=profile_user)
render_data['profile_form'] = profile_form
if profile_form.is_valid() and request.user == profile_user:
logger.info('User %s update profile' % username)
profile_form.save()
update_session_auth_hash(request, profile_user)
request.user = profile_user
messages.success(request, 'Update profile successfully!')
if request.method == 'POST' and 'userlevel_form' in request.POST:
userlevel_form = UserLevelForm(
request.POST, request_user=request.user)
if can_change_userlevel(request.user, profile_user):
if userlevel_form.is_valid(request.user):
user_level = userlevel_form.cleaned_data['user_level']
logger.info("User %s update %s's user level to %s" %
(request.user, username, user_level))
profile_user.user_level = user_level
profile_user.save()
render_data['userlevel_form'] = userlevel_form
messages.success(
request, 'Update user level successfully!')
else:
user_level = userlevel_form.cleaned_data['user_level']
messages.warning(request, "You can't switch user %s to %s" %
(profile_user, user_level))
return render_index(request, 'users/profile.html', render_data)
except User.DoesNotExist:
logger.warning('User %s does not exist' % username)
raise Http404('User %s does not exist' % username)
def user_create(request):
args = {}
args.update(csrf(request))
if request.method == 'POST':
user_form = UserCreationForm(request.POST)
args['user_form'] = user_form
if user_form.is_valid():
user = user_form.save()
send_activation_email(request, user)
user.backend = 'django.contrib.auth.backends.ModelBackend'
logger.info('user %s created' % str(user))
return redirect(reverse('index:alert', kwargs={'alert_info': 'mailbox'}))
else:
return render_index(request, 'users/auth.html', {'form': user_form, 'title': 'Sign Up'})
return render_index(request, 'users/auth.html', {'form': UserCreationForm(), 'title': 'Sign Up'})
def user_logout(request):
logger.info('user %s logged out' % str(request.user))
logout(request)
return redirect(reverse('index:index'))
def user_login(request):
next_page = get_next_page(request.GET.get('next'))
if request.user.is_authenticated():
return redirect(next_page)
if request.method == 'POST':
user_form = AuthenticationForm(data=request.POST)
if user_form.is_valid():
user = authenticate(
username=user_form.cleaned_data['username'],
password=user_form.cleaned_data['password'])
user.backend = 'django.contrib.auth.backends.ModelBackend'
ip = get_ip(request)
logger.info('user %s @ %s logged in' % (str(user), ip))
hours = int(get_config('session_expiry', 'expiry'))
expiry = hours * 60 * 60
request.session.set_expiry(expiry)
logger.info('user %s set session timeout %d-hour' %
(str(user), hours))
login(request, user)
return redirect(next_page)
else:
return render_index(request, 'users/auth.html', {'form': user_form, 'title': 'Login'})
return render_index(request, 'users/auth.html', {'form': AuthenticationForm(), 'title': 'Login'})
def user_forget_password(request):
if request.user.is_authenticated():
return redirect(reverse('index:index'))
if request.method == 'POST':
user_form = UserForgetPasswordForm(data=request.POST)
if user_form.is_valid():
user = User.objects.get(
username=user_form.cleaned_data['username'])
send_forget_password_email(request, user)
messages.success(request, 'Confirm email has sent to you.')
else:
return render_index(request, 'users/auth.html', {'form': user_form, 'title': 'Forget Password'})
return render_index(request, 'users/auth.html',
{'form': UserForgetPasswordForm(), 'title': 'Forget Password'})
def forget_password_confirm(request, activation_key):
"""check if user is already logged in and if he
is redirect him to some other url, e.g. home
"""
if request.user.is_authenticated():
HttpResponseRedirect(reverse('index:index'))
'''check if there is UserProfile which matches
the activation key (if not then display 404)
'''
# clear expired activation_key
UserProfile.objects.filter(
active_time__lte=datetime.datetime.now()).delete()
user_profile = get_object_or_404(
UserProfile, activation_key=activation_key)
user = user_profile.user
user.backend = 'django.contrib.auth.backends.ModelBackend'
user.is_active = True
user.save()
# Let user login, so as to modify password
login(request, user)
logger.info('User %s is ready to reset his/her password' % user.username)
return redirect(reverse('users:profile', kwargs={'username': user.username}))
def user_block_wrong_tries(request):
"""Block login for over 3 wrong tries."""
attempts = AccessAttempt.objects.filter(ip_address=get_ip(request))
for attempt in attempts:
if attempt.failures_since_start >= FAILURE_LIMIT:
unblock_time = attempt.attempt_time + COOLOFF_TIME
return render_index(request, 'users/blockWrongTries.html', {'unblock_time': unblock_time})
# No block attempt
return redirect(reverse('index:index'))
@login_required()
def submit(request, pid=None):
render_data = {}
render_data['form'] = CodeSubmitForm(initial={'pid': pid})
if request.method == 'POST':
codesubmitform = CodeSubmitForm(request.POST, user=request.user)
render_data['form'] = codesubmitform
if codesubmitform.is_valid():
codesubmitform.submit()
return redirect('%s?username=%s' % (reverse('status:status'), request.user.username))
# Get problem name
try:
pid = request.POST.get('pid', pid)
render_data['problem_name'] = str(Problem.objects.get(id=pid))
except:
logger.warning('Submit pid %s does not exist!' % pid)
return render_index(request, 'users/submit.html', render_data)
def register_confirm(request, activation_key):
"""check if user is already logged in and if he
is redirect him to some other url, e.g. home
"""
if request.user.is_authenticated():
HttpResponseRedirect(reverse('index:index'))
'''check if there is UserProfile which matches
the activation key (if not then display 404)
'''
# clear expired activation_key
UserProfile.objects.filter(
active_time__lte=datetime.datetime.now()).delete()
user_profile = get_object_or_404(
UserProfile, activation_key=activation_key)
user = user_profile.user
user.is_active = True
user.save()
logger.info('user %s has already been activated' % user.username)
user.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, user)
return render_index(request, 'users/confirm.html', {'username': user.username})
@login_required()
def user_notification(request, current_tab='none'):
unread_notifications = Notification.objects. \
filter(receiver=request.user, read=False).order_by('-id')
all_notifications = Notification.objects. \
filter(receiver=request.user).order_by('-id')
return render_index(request, 'users/notification.html',
{'all_notifications': all_notifications,
'unread_notifications': unread_notifications, 'current_tab': current_tab})
@login_required()
def user_readify(request, read_id, current_tab):
try:
Notification.objects.filter(
id=long(read_id), receiver=request.user).update(read=True)
logger.info('Notification id %ld updates successfully!' %
long(read_id))
except Notification.DoesNotExist:
logger.warning('Notification id %ld does not exist!' % long(read_id))
return HttpResponseRedirect(reverse('users:tab', kwargs={'current_tab': current_tab}))
@login_required()
def user_delete_notification(request, delete_ids, current_tab):
id_list = delete_ids.split(',')
if delete_ids != '':
for delete_id in id_list:
try:
Notification.objects.filter(
id=long(delete_id), receiver=request.user).delete()
logger.info(
'Notification id %ld deletes successfully!' % long(delete_id))
except Notification.DoesNotExist:
logger.warning(
'Notification id %ld does not exist!' % long(delete_id))
return HttpResponseRedirect(reverse('users:tab', kwargs={'current_tab': current_tab}))
|
python
|
from tests.utils import W3CTestCase
class TestBottomOffsetPercentage(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'bottom-offset-percentage-'))
|
python
|
from collections import defaultdict
def top_3_words(text):
    # Strip punctuation per word; str.strip on the whole text only touches its ends.
    counter = defaultdict(int)
    for word in text.lower().split():
        word = word.strip(',."\'')
        if word:
            counter[word] += 1
    # Return only the three most frequent words, as the function name promises.
    return sorted(
        counter.keys(),
        reverse=True,
        key=lambda item: counter[item]
    )[:3]
if __name__ == '__main__':
print(top_3_words("a a a b c c d d d d e e e e e"))
|
python
|
# Q. Take input from the user to get fibonacci series till the number entered
# Ex - input:3
# Output: 0, 1, 1
nterms = int(input("How many terms do you want? "))
# first two terms
n1, n2 = 0, 1
count = 0
# check if the number of terms is valid
if nterms <= 0:
print("Please enter a positive integer")
# if there is only one term, return n1
elif nterms == 1:
print("Fibonacci sequence upto",nterms,":")
print(n1)
# generate fibonacci sequence
else:
print(f"Fibonacci sequence upto {nterms} digits :")
while count < nterms:
print(n1)
nth = n1 + n2
# update values
n1 = n2
n2 = nth
count += 1
|
python
|
import topologylayer.nn
import topologylayer.functional
from topologylayer.functional.persistence import SimplicialComplex
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 27 08:01:36 2020
@author: Ardhendu
"""
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 13 10:33:32 2019
@author: Ardhendu
"""
from keras.layers import Layer
#from keras import layers
from keras import backend as K
import tensorflow as tf
#from SpectralNormalizationKeras import ConvSN2D
def hw_flatten(x) :
x_shape = K.shape(x)
return K.reshape(x, [x_shape[0], -1, x_shape[-1]]) # return [BATCH, W*H, CHANNELS]
class SelfAttention(Layer):
def __init__(self, filters, **kwargs):
self.dim_ordering = K.image_dim_ordering()
assert self.dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
self.filters = filters
#self.f = f
#self.g = g
#self.h = h
#self.gamma_name = gamma_name
super(SelfAttention, self).__init__(**kwargs)
def build(self, input_shape):
#self.f = ConvSN2D(self.filters // 8, kernel_size=1, strides=1, padding='same')# [bs, h, w, c']
#self.g = ConvSN2D(self.filters // 8, kernel_size=1, strides=1, padding='same') # [bs, h, w, c']
#self.h = ConvSN2D(self.filters, kernel_size=1, strides=1, padding='same') # [bs, h, w, c]
#self.f = layers.Conv2D(self.filters // 8, kernel_size=1, strides=1, padding='same')# [bs, h, w, c']
#self.g = layers.Conv2D(self.filters // 8, kernel_size=1, strides=1, padding='same') # [bs, h, w, c']
#self.h = layers.Conv2D(self.filters, kernel_size=1, strides=1, padding='same') # [bs, h, w, c]
#self.gamma = tf.get_variable(self.gamma_name, [1], initializer=tf.constant_initializer(0.0))
self.gamma = self.add_weight(shape=(1,),
name='{}_b'.format(self.name),
initializer='zeros', trainable=True)
super(SelfAttention, self).build(input_shape) # Be sure to call this at the end
def call(self,x):
assert(len(x) == 4)
img = x[0]
f = x[1]
g = x[2]
h = x[3]
# N = h * w
s = tf.matmul(hw_flatten(g), hw_flatten(f), transpose_b=True) # # [bs, N, N]
beta = K.softmax(s) # attention map
o = tf.matmul(beta, hw_flatten(h)) # [bs, N, C]
o = K.reshape(o, shape=[K.shape(img)[0], K.shape(img)[1], K.shape(img)[2], self.filters]) # [bs, h, w, C]
#o = K.reshape(o, shape=[K.shape(x)[0], K.shape(x)[1], K.shape(x)[2], self.filters // 2]) # [bs, h, w, C]
#print(o.shape[0])
#print(o.shape[1])
#print(o.shape[2])
#print(o.shape[3])
#o = ConvSN2D(self.filters, kernel_size=1, strides=1, padding='same')(o)
img = self.gamma * o + img
return img
def compute_output_shape(self, input_shape):
return input_shape[0]
def get_config(self):
config = {'filters': self.filters}
base_config = super(SelfAttention, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
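# --- Illustrative usage sketch (an assumption, not part of the original file) ---
# Assumes the legacy Keras 2.x / TF1 API that this layer targets (it relies on
# K.image_dim_ordering above). The layer expects a list of four tensors
# [img, f, g, h], where f, g and h are 1x1 convolutions of the feature map,
# mirroring the Conv2D calls commented out in build(); all names below are hypothetical.
if __name__ == '__main__':
    from keras import layers, models
    inp = layers.Input(shape=(32, 32, 64))
    f = layers.Conv2D(64 // 8, kernel_size=1, padding='same')(inp)  # reduced-channel projection
    g = layers.Conv2D(64 // 8, kernel_size=1, padding='same')(inp)  # reduced-channel projection
    h = layers.Conv2D(64, kernel_size=1, padding='same')(inp)       # full-channel projection
    out = SelfAttention(filters=64)([inp, f, g, h])
    models.Model(inp, out).summary()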
|
python
|
from typing import Union, List, Optional
from pyspark.sql.types import (
StructType,
StructField,
StringType,
ArrayType,
DateType,
DataType,
)
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class DeviceSchema:
"""
This resource identifies an instance or a type of a manufactured item that is
used in the provision of healthcare without being substantially changed
through that activity. The device may be a medical or non-medical device.
Medical devices include durable (reusable) medical equipment, implantable
devices, as well as disposable equipment used for diagnostic, treatment, and
research for healthcare and public health. Non-medical devices may include
items such as a machine, cellphone, computer, application, etc.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueQuantity",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
) -> Union[StructType, DataType]:
"""
This resource identifies an instance or a type of a manufactured item that is
used in the provision of healthcare without being substantially changed
through that activity. The device may be a medical or non-medical device.
Medical devices include durable (reusable) medical equipment, implantable
devices, as well as disposable equipment used for diagnostic, treatment, and
research for healthcare and public health. Non-medical devices may include
items such as a machine, cellphone, computer, application, etc.
id: The logical id of the resource, as used in the URL for the resource. Once
assigned, this value never changes.
extension: May be used to represent additional information that is not part of the basic
definition of the resource. In order to make the use of extensions safe and
manageable, there is a strict set of governance applied to the definition and
use of extensions. Though any implementer is allowed to define an extension,
there is a set of requirements that SHALL be met as part of the definition of
the extension.
meta: The metadata about the resource. This is content that is maintained by the
infrastructure. Changes to the content may not always be associated with
version changes to the resource.
implicitRules: A reference to a set of rules that were followed when the resource was
constructed, and which must be understood when processing the content.
language: The base language in which the resource is written.
text: A human-readable narrative that contains a summary of the resource, and may be
used to represent the content of the resource to a human. The narrative need
not encode all the structured data, but is required to contain sufficient
detail to make it "clinically safe" for a human to just read the narrative.
Resource definitions may define what content should be represented in the
narrative to ensure clinical safety.
contained: These resources do not have an independent existence apart from the resource
that contains them - they cannot be identified independently, and nor can they
have their own independent transaction scope.
resourceType: This is a Device resource
identifier: Unique instance identifiers assigned to a device by manufacturers other
organizations or owners.
udi: [Unique device identifier (UDI)](device.html#5.11.3.2.2) assigned to device
label or package.
status: Status of the Device availability.
type: Code or identifier to identify a kind of device.
lotNumber: Lot number assigned by the manufacturer.
manufacturer: A name of the manufacturer.
manufactureDate: The date and time when the device was manufactured.
expirationDate: The date and time beyond which this device is no longer valid or should not be
used (if applicable).
model: The "model" is an identifier assigned by the manufacturer to identify the
product by its type. This number is shared by the all devices sold as the same
type.
version: The version of the device, if the device has multiple releases under the same
model, or if the device is software or carries firmware.
patient: Patient information, If the device is affixed to a person.
owner: An organization that is responsible for the provision and ongoing maintenance
of the device.
contact: Contact details for an organization or a particular human that is responsible
for the device.
location: The place where the device can be found.
url: A network address on which the device may be contacted directly.
note: Descriptive information, usage information or implantation information that is
not captured in an existing element.
safety: Provides additional safety characteristics about a medical device. For
example devices containing latex.
"""
from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.stu3.complex_types.meta import MetaSchema
from spark_fhir_schemas.stu3.complex_types.narrative import NarrativeSchema
from spark_fhir_schemas.stu3.simple_types.resourcelist import ResourceListSchema
from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema
from spark_fhir_schemas.stu3.complex_types.device_udi import Device_UdiSchema
from spark_fhir_schemas.stu3.complex_types.codeableconcept import (
CodeableConceptSchema,
)
from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema
from spark_fhir_schemas.stu3.complex_types.contactpoint import (
ContactPointSchema,
)
from spark_fhir_schemas.stu3.complex_types.annotation import AnnotationSchema
if (
max_recursion_limit and nesting_list.count("Device") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["Device"]
schema = StructType(
[
# The logical id of the resource, as used in the URL for the resource. Once
# assigned, this value never changes.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the resource. In order to make the use of extensions safe and
# manageable, there is a strict set of governance applied to the definition and
# use of extensions. Though any implementer is allowed to define an extension,
# there is a set of requirements that SHALL be met as part of the definition of
# the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The metadata about the resource. This is content that is maintained by the
# infrastructure. Changes to the content may not always be associated with
# version changes to the resource.
StructField(
"meta",
MetaSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# A reference to a set of rules that were followed when the resource was
# constructed, and which must be understood when processing the content.
StructField("implicitRules", StringType(), True),
# The base language in which the resource is written.
StructField("language", StringType(), True),
# A human-readable narrative that contains a summary of the resource, and may be
# used to represent the content of the resource to a human. The narrative need
# not encode all the structured data, but is required to contain sufficient
# detail to make it "clinically safe" for a human to just read the narrative.
# Resource definitions may define what content should be represented in the
# narrative to ensure clinical safety.
StructField(
"text",
NarrativeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# These resources do not have an independent existence apart from the resource
# that contains them - they cannot be identified independently, and nor can they
# have their own independent transaction scope.
StructField(
"contained",
ArrayType(
ResourceListSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# This is a Device resource
StructField("resourceType", StringType(), True),
# Unique instance identifiers assigned to a device by manufacturers other
# organizations or owners.
StructField(
"identifier",
ArrayType(
IdentifierSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# [Unique device identifier (UDI)](device.html#5.11.3.2.2) assigned to device
# label or package.
StructField(
"udi",
Device_UdiSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Status of the Device availability.
StructField("status", StringType(), True),
# Code or identifier to identify a kind of device.
StructField(
"type",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Lot number assigned by the manufacturer.
StructField("lotNumber", StringType(), True),
# A name of the manufacturer.
StructField("manufacturer", StringType(), True),
# The date and time when the device was manufactured.
StructField("manufactureDate", DateType(), True),
# The date and time beyond which this device is no longer valid or should not be
# used (if applicable).
StructField("expirationDate", DateType(), True),
# The "model" is an identifier assigned by the manufacturer to identify the
# product by its type. This number is shared by the all devices sold as the same
# type.
StructField("model", StringType(), True),
# The version of the device, if the device has multiple releases under the same
# model, or if the device is software or carries firmware.
StructField("version", StringType(), True),
# Patient information, If the device is affixed to a person.
StructField(
"patient",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# An organization that is responsible for the provision and ongoing maintenance
# of the device.
StructField(
"owner",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Contact details for an organization or a particular human that is responsible
# for the device.
StructField(
"contact",
ArrayType(
ContactPointSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The place where the device can be found.
StructField(
"location",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# A network address on which the device may be contacted directly.
StructField("url", StringType(), True),
# Descriptive information, usage information or implantation information that is
# not captured in an existing element.
StructField(
"note",
ArrayType(
AnnotationSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Provides additional safety characteristics about a medical device. For
# example devices containing latex.
StructField(
"safety",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
return schema
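# --- Illustrative usage sketch (an assumption appended for documentation; the file
# itself is auto-generated, so this is not part of the generator output) ---
# Assumes a local PySpark installation and that the spark_fhir_schemas package is
# importable; the app name is hypothetical.
if __name__ == "__main__":
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[1]").appName("device-schema-demo").getOrCreate()
    # Build the Device schema and show the generated structure via an empty DataFrame.
    device_schema = DeviceSchema.get_schema()
    spark.createDataFrame([], device_schema).printSchema()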
|
python
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
import numpy as np
from dace.frontend.python.common import DaceSyntaxError
@dace.program
def for_loop():
A = dace.ndarray([10], dtype=dace.int32)
A[:] = 0
for i in range(0, 10, 2):
A[i] = i
return A
def test_for_loop():
A = for_loop()
A_ref = np.array([0, 0, 2, 0, 4, 0, 6, 0, 8, 0], dtype=np.int32)
assert (np.array_equal(A, A_ref))
@dace.program
def for_loop_with_break_continue():
A = dace.ndarray([10], dtype=dace.int32)
A[:] = 0
for i in range(20):
if i >= 10:
break
if i % 2 == 1:
continue
A[i] = i
return A
def test_for_loop_with_break_continue():
A = for_loop_with_break_continue()
A_ref = np.array([0, 0, 2, 0, 4, 0, 6, 0, 8, 0], dtype=np.int32)
assert (np.array_equal(A, A_ref))
@dace.program
def nested_for_loop():
A = dace.ndarray([10, 10], dtype=dace.int32)
A[:] = 0
for i in range(20):
if i >= 10:
break
if i % 2 == 1:
continue
for j in range(20):
if j >= 10:
break
if j % 2 == 1:
continue
A[i, j] = j
return A
def test_nested_for_loop():
A = nested_for_loop()
A_ref = np.zeros([10, 10], dtype=np.int32)
for i in range(0, 10, 2):
A_ref[i] = [0, 0, 2, 0, 4, 0, 6, 0, 8, 0]
assert (np.array_equal(A, A_ref))
@dace.program
def while_loop():
A = dace.ndarray([10], dtype=dace.int32)
A[:] = 0
i = 0
while (i < 10):
A[i] = i
i += 2
return A
def test_while_loop():
A = while_loop()
A_ref = np.array([0, 0, 2, 0, 4, 0, 6, 0, 8, 0], dtype=np.int32)
assert (np.array_equal(A, A_ref))
@dace.program
def while_loop_with_break_continue():
A = dace.ndarray([10], dtype=dace.int32)
A[:] = 0
i = -1
while i < 20:
i += 1
if i >= 10:
break
if i % 2 == 1:
continue
A[i] = i
return A
def test_while_loop_with_break_continue():
A = while_loop_with_break_continue()
A_ref = np.array([0, 0, 2, 0, 4, 0, 6, 0, 8, 0], dtype=np.int32)
assert (np.array_equal(A, A_ref))
@dace.program
def nested_while_loop():
A = dace.ndarray([10, 10], dtype=dace.int32)
A[:] = 0
i = -1
while i < 20:
i += 1
if i >= 10:
break
if i % 2 == 1:
continue
j = -1
while j < 20:
j += 1
if j >= 10:
break
if j % 2 == 1:
continue
A[i, j] = j
return A
def test_nested_while_loop():
A = nested_while_loop()
A_ref = np.zeros([10, 10], dtype=np.int32)
for i in range(0, 10, 2):
A_ref[i] = [0, 0, 2, 0, 4, 0, 6, 0, 8, 0]
assert (np.array_equal(A, A_ref))
@dace.program
def nested_for_while_loop():
A = dace.ndarray([10, 10], dtype=dace.int32)
A[:] = 0
for i in range(20):
if i >= 10:
break
if i % 2 == 1:
continue
j = -1
while j < 20:
j += 1
if j >= 10:
break
if j % 2 == 1:
continue
A[i, j] = j
return A
def test_nested_for_while_loop():
A = nested_for_while_loop()
A_ref = np.zeros([10, 10], dtype=np.int32)
for i in range(0, 10, 2):
A_ref[i] = [0, 0, 2, 0, 4, 0, 6, 0, 8, 0]
assert (np.array_equal(A, A_ref))
@dace.program
def nested_while_for_loop():
A = dace.ndarray([10, 10], dtype=dace.int32)
A[:] = 0
i = -1
while i < 20:
i += 1
if i >= 10:
break
if i % 2 == 1:
continue
for j in range(20):
if j >= 10:
break
if j % 2 == 1:
continue
A[i, j] = j
return A
def test_nested_while_for_loop():
A = nested_while_for_loop()
A_ref = np.zeros([10, 10], dtype=np.int32)
for i in range(0, 10, 2):
A_ref[i] = [0, 0, 2, 0, 4, 0, 6, 0, 8, 0]
assert (np.array_equal(A, A_ref))
@dace.program
def map_with_break_continue():
A = dace.ndarray([10], dtype=dace.int32)
A[:] = 0
for i in dace.map[0:20]:
if i >= 10:
break
if i % 2 == 1:
continue
A[i] = i
return A
def test_map_with_break_continue():
try:
map_with_break_continue()
except Exception as e:
if isinstance(e, DaceSyntaxError):
return 0
assert (False)
@dace.program
def nested_map_for_loop():
A = np.ndarray([10, 10], dtype=np.int64)
for i in dace.map[0:10]:
for j in range(10):
A[i, j] = i * 10 + j
return A
def test_nested_map_for_loop():
ref = np.zeros([10, 10], dtype=np.int64)
for i in range(10):
for j in range(10):
ref[i, j] = i * 10 + j
val = nested_map_for_loop()
assert (np.array_equal(val, ref))
@dace.program
def nested_map_for_for_loop():
A = np.ndarray([10, 10, 10], dtype=np.int64)
for i in dace.map[0:10]:
for j in range(10):
for k in range(10):
A[i, j, k] = i * 100 + j * 10 + k
return A
def test_nested_map_for_for_loop():
ref = np.zeros([10, 10, 10], dtype=np.int64)
for i in range(10):
for j in range(10):
for k in range(10):
ref[i, j, k] = i * 100 + j * 10 + k
val = nested_map_for_for_loop()
assert (np.array_equal(val, ref))
@dace.program
def nested_for_map_for_loop():
A = np.ndarray([10, 10, 10], dtype=np.int64)
for i in range(10):
for j in dace.map[0:10]:
for k in range(10):
A[i, j, k] = i * 100 + j * 10 + k
return A
def test_nested_for_map_for_loop():
ref = np.zeros([10, 10, 10], dtype=np.int64)
for i in range(10):
for j in range(10):
for k in range(10):
ref[i, j, k] = i * 100 + j * 10 + k
val = nested_for_map_for_loop()
assert (np.array_equal(val, ref))
@dace.program
def nested_map_for_loop_with_tasklet():
A = np.ndarray([10, 10], dtype=np.int64)
for i in dace.map[0:10]:
for j in range(10):
@dace.tasklet
def comp():
out >> A[i, j]
out = i * 10 + j
return A
def test_nested_map_for_loop_with_tasklet():
ref = np.zeros([10, 10], dtype=np.int64)
for i in range(10):
for j in range(10):
ref[i, j] = i * 10 + j
val = nested_map_for_loop_with_tasklet()
assert (np.array_equal(val, ref))
@dace.program
def nested_map_for_for_loop_with_tasklet():
A = np.ndarray([10, 10, 10], dtype=np.int64)
for i in dace.map[0:10]:
for j in range(10):
for k in range(10):
@dace.tasklet
def comp():
out >> A[i, j, k]
out = i * 100 + j * 10 + k
return A
def test_nested_map_for_for_loop_with_tasklet():
ref = np.zeros([10, 10, 10], dtype=np.int64)
for i in range(10):
for j in range(10):
for k in range(10):
ref[i, j, k] = i * 100 + j * 10 + k
val = nested_map_for_for_loop_with_tasklet()
assert (np.array_equal(val, ref))
@dace.program
def nested_for_map_for_loop_with_tasklet():
A = np.ndarray([10, 10, 10], dtype=np.int64)
for i in range(10):
for j in dace.map[0:10]:
for k in range(10):
@dace.tasklet
def comp():
out >> A[i, j, k]
out = i * 100 + j * 10 + k
return A
def test_nested_for_map_for_loop_with_tasklet():
ref = np.zeros([10, 10, 10], dtype=np.int64)
for i in range(10):
for j in range(10):
for k in range(10):
ref[i, j, k] = i * 100 + j * 10 + k
val = nested_for_map_for_loop_with_tasklet()
assert (np.array_equal(val, ref))
@dace.program
def nested_map_for_loop_2(B: dace.int64[10, 10]):
A = np.ndarray([10, 10], dtype=np.int64)
for i in dace.map[0:10]:
for j in range(10):
A[i, j] = 2 * B[i, j] + i * 10 + j
return A
def test_nested_map_for_loop_2():
B = np.ones([10, 10], dtype=np.int64)
ref = np.zeros([10, 10], dtype=np.int64)
for i in range(10):
for j in range(10):
ref[i, j] = 2 + i * 10 + j
val = nested_map_for_loop_2(B)
assert (np.array_equal(val, ref))
@dace.program
def nested_map_for_loop_with_tasklet_2(B: dace.int64[10, 10]):
A = np.ndarray([10, 10], dtype=np.int64)
for i in dace.map[0:10]:
for j in range(10):
@dace.tasklet
def comp():
inp << B[i, j]
out >> A[i, j]
out = 2 * inp + i * 10 + j
return A
def test_nested_map_for_loop_with_tasklet_2():
B = np.ones([10, 10], dtype=np.int64)
ref = np.zeros([10, 10], dtype=np.int64)
for i in range(10):
for j in range(10):
ref[i, j] = 2 + i * 10 + j
val = nested_map_for_loop_with_tasklet_2(B)
assert (np.array_equal(val, ref))
if __name__ == "__main__":
test_for_loop()
test_for_loop_with_break_continue()
test_nested_for_loop()
test_while_loop()
test_while_loop_with_break_continue()
test_nested_while_loop()
test_nested_for_while_loop()
test_nested_while_for_loop()
test_map_with_break_continue()
test_nested_map_for_loop()
test_nested_map_for_for_loop()
test_nested_for_map_for_loop()
test_nested_map_for_loop_with_tasklet()
test_nested_map_for_for_loop_with_tasklet()
test_nested_for_map_for_loop_with_tasklet()
test_nested_map_for_loop_2()
test_nested_map_for_loop_with_tasklet_2()
|
python
|
import WebArticleParserCLI
urls = ['http://lenta.ru/news/2013/03/dtp/index.html',
'https://lenta.ru/news/2017/02/11/maroder/',
'https://lenta.ru/news/2017/02/10/polygon/',
'https://russian.rt.com/world/article/358299-raketa-koreya-ssha-yaponiya-kndr-tramp',
'https://russian.rt.com/russia/news/358337-sk-proverka-gibel-devochki',
'https://www.gazeta.ru/lifestyle/style/2017/02/a_10521767.shtml',
'http://www.vedomosti.ru/realty/articles/2017/02/11/677217-moskva-zarabotala-na-parkovkah']
for url in urls:
argv = ['-a', url, '-c', './../webarticleparser.ini', '-v']
WebArticleParserCLI.main(argv)
#argv = ['-a','https://www.gazeta.ru/lifestyle/style/2017/02/a_10521767.shtml', '-c', './../webarticleparser.ini']
#WebArticleParserCLI.main(argv)
|
python
|
#
# ida_kernelcache/build_struct.py
# Brandon Azad
#
# A module to build an IDA structure automatically from code accesses.
#
import collections
import idc
import idautils
import idaapi
from . import ida_utilities as idau
_log = idau.make_log(3, __name__)
def field_name(offset):
"""Automatically generated IDA structs have their fields named by their absolute offset."""
return 'field_{:x}'.format(offset)
def create_struct_fields(sid=None, name=None, accesses=None, create=False, base=0):
"""Create an IDA struct with fields corresponding to the specified access pattern.
Given a sequence of (offset, size) tuples designating the valid access points to the struct,
create fields in the struct at the corresponding positions.
Options:
sid: The struct id, if the struct already exists.
name: The name of the struct to update or create.
accesses: The set of (offset, size) tuples representing the valid access points in the
struct.
create: If True, then the struct will be created with the specified name if it does not
already exist. Default is False.
base: The base offset for the struct. Offsets smaller than this are ignored, otherwise the
field is created at the offset minus the base. Default is 0.
Either sid or name must be specified.
"""
# Get the struct id.
if sid is None:
        sid = idau.struct_open(name, create=create)
if sid is None:
_log(0, 'Could not open struct {}', name)
return False
else:
name = idc.GetStrucName(sid)
if name is None:
_log(0, 'Invalid struct id {}', sid)
return False
# Now, for each (offset, size) pair, create a struct member. Right now we completely ignore the
# possibility that some members will overlap (for various reasons; it's actually more common
# than I initially thought, though I haven't investigated why).
# TODO: In the future we should address this by either automatically generating sub-unions or
# choosing the most appropriate member when permissible (e.g. (0, 8), (0, 2), (4, 4) might
# create (0, 2), (2, 2), (4, 4)). I think the most reasonable default policy is to create the
# biggest members that satisfy all accesses.
success = True
for offset, size in accesses:
if offset < base:
continue
member = field_name(offset)
ret = idau.struct_add_word(sid, member, offset - base, size)
if ret != 0:
if ret == idc.STRUC_ERROR_MEMBER_OFFSET:
_log(2, 'Could not add {}.{} for access ({}, {})', name, member, offset, size)
else:
success = False
_log(1, 'Could not add {}.{} for access ({}, {}): {}', name, member, offset, size,
ret)
return success
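# --- Illustrative usage sketch (an assumption, not part of the original module) ---
# This only runs inside an IDA Pro session with ida_kernelcache loaded; the struct
# name and the (offset, size) access tuples below are hypothetical.
#
#   accesses = {(0x00, 8), (0x08, 8), (0x10, 4)}
#   create_struct_fields(name='my_observed_struct', accesses=accesses, create=True)
#
# This opens (or creates) the struct and adds members named by absolute offset, here
# field_0, field_8 and field_10, each sized to match the observed access.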
|
python
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
#
# data_test.py
# Jun/05/2018
# ---------------------------------------------------------------
import cgi
import json
# ---------------------------------------------------------------
form = cgi.FieldStorage()
#
message = []
data_in = ""
message.append("start")
#
if "arg" in form:
message.append ("*** arg exist ***")
arg = form["arg"].value
message.append(arg)
#
if "aa" in form:
message.append ("*** aa exist ***")
aa = form["aa"].value
message.append(aa)
#
if "bb" in form:
message.append ("*** bb exist ***")
bb = form["bb"].value
message.append(bb)
#
if "cc" in form:
message.append ("*** cc exist ***")
cc = form["cc"].value
message.append(cc)
#
if "data_bb" in form:
message.append ("*** data_bb exist ***")
data_bb = form["data_bb"].value
message.append(data_bb)
#
rvalue = {}
message.append("end")
rvalue['message'] = message
print("Content-Type: text/json")
print("")
print(json.dumps(rvalue))
# ---------------------------------------------------------------
|
python
|
import numpy as np
IS_AUTONOMOUS = False
X_TARGET = 2.0
Y_TARGET = 2.0
STOP_THRESHOLD = 0.03 # Unit: m
ROBOT_MARGIN = 130 # Unit: mm
THRESHOLD = 3.6 # Unit: m
MIN_THRESHOLD = 0.1
THRESHOLD_STEP = 0.25
THRESHOLD_ANGLE = 95 # Unit: deg, has to be greater than 90 deg
ANGLE_TO_START_MOVING = 10 / 180*np.pi # Unit: rad
class Colour:
    # OpenCV uses BGR tuples
def __init__(self):
self.blue = (255, 0, 0)
self.green = (0, 255, 0)
self.light_green = (0, 255, 110)
self.red = (0, 0, 255)
self.yellow = (0, 220, 255)
self.orange = (0, 120, 255)
self.black = (0, 0, 0)
self.white = (255, 255, 255)
def grey(self, percentage):
level = int(percentage/100*255)
return (level, level, level)
def Green(self, percentage=100):
level = int(percentage/100*255)
return (0, level, 0)
|
python
|
# -*- coding: utf-8 -*-
def relu(name, bottom, top, type="ReLU"):
layer = "layer {\n"
layer += " name: \"" + name + "\"\n"
if type not in ["ReLU", "ReLU6", "CReLU"]:
raise Exception("unknown relu: %s" % type)
layer += " type: \"" + type + "\"\n"
layer += " bottom: \"" + bottom + "\"\n"
layer += " top: \"" + top + "\"\n"
layer += "}"
return layer, top
def softmax(name, bottom, top=None, axis=-1):
if not top:
top = name
layer = "layer {\n"
layer += " name: \"" + name + "\"\n"
layer += " type: \"Softmax\"\n"
layer += " bottom: \"" + bottom + "\"\n"
layer += " top: \"" + top + "\"\n"
if axis > 0:
layer += " softmax_param {\n"
layer += " axis: " + str(axis) + "\n"
layer += " }\n"
layer += "}"
return layer, top
def sigmoid(name, bottom, top=None):
if not top:
top = name
layer = "layer {\n"
layer += " name: \"" + name + "\"\n"
layer += " type: \"Sigmoid\"\n"
layer += " bottom: \"" + bottom + "\"\n"
layer += " top: \"" + top + "\"\n"
layer += "}"
return layer, top
def test_layer():
layer, top = relu("relu1", "conv1", "conv1", type="ReLU6")
print(layer)
if __name__ == '__main__':
test_layer()
|
python
|
# Introduction
# In this tutorial you learned how to build a reasonably intelligent agent using the minimax algorithm. In
# this exercise you will check your understanding and submit your agent to the competition.
# 1) A closer look
# The heuristic from the tutorial considers every group of four adjacent grid locations in the same row,
# column, or diagonal and assigns points for each occurrence of the following patterns:
#
# Is it really necessary to use that many numbers to define the heuristic? Try to simplify it, as shown in
# the image below.
# How does each heuristic score the potential moves in the example below (where, in this case, the agent
# looks only one step ahead)? Which heuristic lets the agent choose the better move?
# Solution: the first heuristic is guaranteed to pick column 2 to block the opponent's win. The second
# heuristic picks either column 2 or column 3 (each chosen with 50% probability). So for this board it is
# better to use the first heuristic. In general, we can expect the first heuristic to be the better one,
# since we cannot trust the second heuristic to stop the opponent from winning.
# 2) Count the leaves
# In the tutorial we worked with a small game tree.
# The game tree above has 8 leaf nodes, which appear at the bottom of the tree. By definition, "leaf nodes"
# in a game tree are nodes that have no nodes below them.
#
# In the ConnectX competition the game trees will be much bigger!
#
# To see this, consider a minimax agent that is trying to plan its first move, when all columns of the game
# board are empty. Suppose the agent builds a game tree of depth 3. How many leaf nodes are in the game tree?
#
# Use your answer to fill in the blank below.
# # Fill in the blank
# (7 possible moves at each of the 3 plies, so 7 * 7 * 7 = 343 leaf nodes)
num_leaves = 7*7*7
# 3) Which move will the agent select?
# In this question you will check your understanding of the minimax algorithm. Remember that with this
# algorithm the agent chooses moves to make the score as high as possible, and it assumes the opponent will
# counter by choosing moves to make the score as low as possible.
# Consider the toy example game tree below, which the agent will use to choose its next move.
#
# Which move will the agent select? Use your answer to set the value of the selected_move variable below.
# Your answer should be one of 1, 2, or 3.
#
selected_move = 3
#
# 4) Examine the assumptions
# The minimax agent assumes that its opponent plays optimally (with respect to the heuristic, using a
# depth-limited game tree). But in practice this is almost never the case: it is far more likely that the
# agent will face a suboptimal (that is, worse than optimal) opponent.
#
# Say the minimax agent meets a suboptimal opponent. Should we expect the minimax agent to still play the
# game well, despite the contradiction with its assumptions? If so, why?
# Solution: we can still expect the minimax agent to perform well. At a high level, assuming an optimal
# opponent simply overestimates the opponent, but it does not break the algorithm. The effect of
# overestimating the opponent is just that the minimax agent will need more time to win than if it had a
# more accurate understanding of its opponent. For example, it is very unlikely that the minimax agent will
# choose the same column three times in its first three moves (since it assumes an optimal opponent who would
# be sure to block the winning play on the next move), but that is not a bad opening strategy against an
# agent that picks columns at random.
# 5) Submit to the competition
# Now it is time to enter your agent in the competition! Use the following code cell to define the agent.
# (You can see an example of how to write a working agent in this notebook.)
#
# If you decide to use the minimax code from the tutorial, you can add alpha-beta pruning to reduce
# computation time (that is, to make the minimax algorithm run much faster!). Here "alpha" and "beta" refer
# to two values that are maintained while the algorithm runs and that help identify early-stopping
# conditions.
#
# Without alpha-beta pruning, minimax evaluates every leaf node. With alpha-beta pruning, minimax evaluates
# only those nodes that could provide information affecting the agent's choice of action. In other words, it
# identifies nodes that cannot affect the final result and avoids evaluating them.
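# A minimal, self-contained sketch of minimax with alpha-beta pruning (an illustration
# added here, not part of the original exercise). It works on a toy game tree encoded as
# nested lists whose leaves are numeric scores, and it is not wired into the ConnectX
# obs/config objects used by my_agent below; the function name and tree format are
# hypothetical.
def alphabeta(node, maximizing, alpha=float('-inf'), beta=float('inf')):
    if not isinstance(node, list):
        # Leaf node: return its heuristic score.
        return node
    if maximizing:
        value = float('-inf')
        for child in node:
            value = max(value, alphabeta(child, False, alpha, beta))
            alpha = max(alpha, value)
            if alpha >= beta:
                # The minimizing player already has a better option elsewhere,
                # so the remaining children cannot change the result.
                break
        return value
    else:
        value = float('inf')
        for child in node:
            value = min(value, alphabeta(child, True, alpha, beta))
            beta = min(beta, value)
            if alpha >= beta:
                break
        return value
# Example: alphabeta([[3, 5], [2, 9]], maximizing=True) returns 3 while skipping the
# leaf with value 9, which cannot influence the root decision.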
def my_agent(obs, config):
# Your code here: Amend the agent!
import random
valid_moves = [col for col in range(config.columns) if obs.board[col] == 0]
return random.choice(valid_moves)
# subm
import inspect
import os
def write_agent_to_file(function, file):
with open(file, "a" if os.path.exists(file) else "w") as f:
f.write(inspect.getsource(function))
print(function, "written to", file)
write_agent_to_file(my_agent, "submission.py")
|
python
|
# Copyright 2017 Insurance Australia Group Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Module for creating the bakery environment CloudFormation file.
"""
import os
import common
from configuration.initialise_config import BAKERY_VARS
TEMPLATE_SOURCE = os.environ["LOCATION_CORE"] + \
"/deploy_cloudformation/bakery/templates/bakery_env.tmpl"
TEMPLATE_DESTINATION = os.environ["LOCATION_CORE"] + "/deploy_cloudformation/bakery/bakery_env.yml"
def get_roles(environment, access_type):
"""Gets the role arns for the specified environment and access type.
Args:
environment: Environment, e.g. NonProd, Prod, Stg
access_type: Access type, e.g. Admin, PowerUser, ReadOnly
Returns:
String with the role arns
"""
roles = ""
for account in environment["Accounts"]:
if roles:
roles += "\n"
roles += "{}- arn:aws:iam::{}:role/{}-{}-{}".format(
" " * 14,
account["Id"],
account["Name"],
environment["Environment"],
access_type
)
return roles
def get_groups_policies():
"""Gets the CloudFormation snippet for IAM groups and IAM managed policies.
Returns:
String with the CloudFormation snippet for IAM groups and IAM policies.
"""
groups_policies = ""
for environment in BAKERY_VARS.Environments:
for access_type in BAKERY_VARS.AccessTypes:
snippet = \
""" Group{1}{2}:
Type: AWS::IAM::Group
Properties:
GroupName: {0}{1}{2}
""".format(BAKERY_VARS.TeamName, environment["Environment"], access_type["Type"])
snippet += \
""" Policy{1}{2}:
Type: AWS::IAM::ManagedPolicy
Properties:
ManagedPolicyName: {0}{1}{2}
Description: This policy allows to assume a role
Groups:
- !Ref Group{1}{2}
PolicyDocument:
Version: "2012-10-17"
Statement:
- Effect: Allow
Action: sts:AssumeRole
Resource:
__roles__
""".format(
BAKERY_VARS.TeamName,
environment["Environment"],
access_type["Type"]
).replace(
"__roles__",
get_roles(environment, access_type["Type"])
)
groups_policies += snippet
return groups_policies
def main():
"""Main function."""
template = common.get_template(TEMPLATE_SOURCE).replace(
"{{groups_policies}}",
get_groups_policies()
)
common.generate_file(TEMPLATE_DESTINATION, template)
if __name__ == "__main__":
main()
|
python
|
import translate, command
while 1:
b=raw_input(">>>")
a=open("console.txt","w")
a.write(b)
a.close()
print(command.run(b))
|
python
|
import xmlrpclib
from tornado.options import options
from ate_logger import AteLogger
class BaseXmlRpcProcess(object):
def __init__(self):
self.logger = AteLogger('XmlRpcProcess')
self._tf_status = False
def status(self):
self.logger.debug('Calling status')
traceback = None
try:
c = xmlrpclib.ServerProxy('http://127.0.0.1:{}'.format(options.xmlrpc_server_port))
process, tf, traceback = c.sys.status()
self._tf_status = tf
tf_health, _ = self._tf_health(force=False)
xmlrpc = True
except Exception as exc:
process = False
tf = False
tf_health = {}
xmlrpc = False
traceback = str(exc)
self.logger.warning("Can't access XML RPC")
return {
'error': traceback,
'type': 'system',
'result': [
{'type': 'process', 'status': process, 'description': 'TF Server process'},
{'type': 'xmlrpc', 'status': xmlrpc, 'description': 'TF Network connection'},
{'type': 'test_fixture', 'status': tf, 'description': 'TF Object status'},
{'type': 'test_fixture_health',
'status': tf_health.get('fixture_status', False),
'description': 'TF Hardware status'},
]
}
def _tf_health(self, force=False):
self.logger.debug('Calling tf_health')
traceback = None
tf_health = {}
if self._tf_status:
try:
c = xmlrpclib.ServerProxy('http://127.0.0.1:{}'.format(options.xmlrpc_server_port))
tf_health = c.sys.tf_health(force)
except Exception as exc:
tf_health = {}
traceback = str(exc)
self.logger.warning("Can't access XML RPC")
return tf_health, traceback
def tf_health(self, force=False):
tf_health, traceback = self._tf_health(force=force)
return {
'type': 'test_fixture_health',
'error': traceback,
'result': tf_health
}
def cavities(self):
self.logger.debug('Calling cavities')
traceback = None
cavities_list = []
tf_health, traceback = self._tf_health(force=False)
cavities = tf_health.get('cavities', {})
for cavity_name, cavity in cavities.items():
cavity['name'] = cavity_name
devices = cavity.get('devices', {})
for key, device in devices.items():
cavity['devices'][key].pop('error', 0)
cavity['devices'][key].pop('traceback', 0)
cavities_list.append(cavity)
return {
'type': 'cavities',
'error': traceback,
'result': cavities_list
}
|
python
|
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import plotly.express as px
import price_loader as pl
import spy_investments as spy
from datetime import datetime
import pandas as pd
def format_date_from_current_investments(date):
return datetime.strptime(date, '%b %d, %Y').strftime('%Y-%m-%d')
def format_date_from_base_stock(date):
return datetime.strptime(str(date), '%Y-%m-%d').strftime('%Y-%m-%d')
class InvestmentChartGenerator:
def __init__(self, df):
self.df = df
self.base_stock = 'VOO'
self.price_loader = pl.PriceLoader('2019-10-23', '30m')
self.comparison_data = None
self.get_comparison_data()
self.base_investment = spy.BaseStockInvestmentCalculator(self.comparison_data, self.df)
self.investment_history = pd.DataFrame()
def get_comparison_data(self):
try:
self.comparison_data = self.price_loader.getData(self.base_stock)
self.comparison_data['Date'] = self.comparison_data['Date'].apply(format_date_from_base_stock)
except:
print('Personal-Finance -> Error in downloading Comparison Data from Yahoo Finance')
def generate_line(self, df, x, y, title,):
Line = go.Figure(px.area(df, x=x, y=y, title=title))
lowest_point = y.min() * 0.98
highest_point = y.max() * 1.02
Line.update_layout(
yaxis=dict(range=[lowest_point, highest_point])
)
return Line
def generate_base_stock_plot(self):
self.comparison_data = self.base_investment.getBaseInvestmentHoldings()
purchase_price = self.comparison_data['Weighted Average Price'] * self.comparison_data['Shares Available']
equity_close = self.comparison_data['Equity Close']
equity_low = self.comparison_data['Equity Low']
equity_high = self.comparison_data['Equity High']
equity_purchase_amount = self.comparison_data.tail(1)['Invested Amount'].values[0]
fig = go.Figure([
go.Scatter(
name='Daily Close',
x=self.comparison_data['Date'],
y=equity_purchase_amount + equity_close - purchase_price,
mode='lines',
line=dict(color='rgb(102, 166, 30)'),
),
go.Scatter(
name='Daily High',
x=self.comparison_data['Date'],
y=equity_purchase_amount + equity_high - purchase_price,
mode='lines',
marker=dict(color='rgba(166, 216, 84, 0.5)'),
line=dict(width=0),
showlegend=False
),
go.Scatter(
name='Daily Low',
x=self.comparison_data['Date'],
y=equity_purchase_amount + equity_low - purchase_price,
marker=dict(color='rgba(166, 216, 84, 0.5)'),
line=dict(width=0),
mode='lines',
fillcolor='rgba(166, 216, 84, 0.5)',
fill='tonexty',
showlegend=False
)
])
fig.update_layout(
yaxis_title='Invested Amount',
# title=self.base_stock,
hovermode="x"
)
return fig
def generate_current_investment_plot(self):
base_stock_plot = self.generate_base_stock_plot()
self.comparison_data = self.base_investment.getBaseInvestmentHoldings()
purchase_price = self.comparison_data['Weighted Average Price'] * self.comparison_data['Shares Available']
equity_close = self.comparison_data['Equity Close']
equity_low = self.comparison_data['Equity Low']
equity_high = self.comparison_data['Equity High']
equity_purchase_amount = self.comparison_data.tail(1)['Invested Amount'].values[0]
self.df['Date'] = self.df['Date'].apply(format_date_from_current_investments)
stocks_held = self.df['Ticker Tag'].unique()
for stock in stocks_held:
success, stock_file_name = self.price_loader.storeData(stock)
if success == False:
print('Error fetching data for ' + stock)
continue
self.investment_history['Date'] = self.comparison_data['Date']
for stock in stocks_held:
profit_per_stock = []
for index, row in self.comparison_data.iterrows():
date = row.Date
if datetime.strptime(date, '%Y-%m-%d') < datetime.strptime(self.df[self.df['Ticker Tag'] == stock].Date.iloc[0], '%Y-%m-%d'):
profit_per_stock.append(0.0)
continue
stock_data = pd.read_csv('Stock Information/' + stock + '.csv')
df_date = self.df[self.df['Date'] <= date]
df_company = df_date[df_date['Ticker Tag'] == stock]
stock_close_price = stock_data[stock_data['Date'] == date].Close.values
if len(stock_close_price) > 0:
stock_close_price = stock_close_price[0]
else:
profit_per_stock.append(0.0)
continue
if df_company.Quantity.sum() <= 1e-5:
profit_per_stock.append(0.0)
else:
profit_per_stock.append(stock_close_price * df_company.Quantity.sum() - df_company.Amount.sum())
self.investment_history[stock] = profit_per_stock
self.investment_history['Return'] = self.investment_history.sum(axis=1)
self.investment_history['Return'] += self.df.Amount.sum()
return go.Scatter(x=self.investment_history['Date'],
y=self.investment_history['Return'], name='Current Investments')
def generate_comparison_plot(self):
base_stock_plot = self.generate_base_stock_plot()
self.comparison_data = self.base_investment.getBaseInvestmentHoldings()
purchase_price = self.comparison_data['Weighted Average Price'] * self.comparison_data['Shares Available']
equity_close = self.comparison_data['Equity Close']
equity_low = self.comparison_data['Equity Low']
equity_high = self.comparison_data['Equity High']
equity_purchase_amount = self.comparison_data.tail(1)['Invested Amount'].values[0]
base_stock_plot.add_trace(self.generate_current_investment_plot())
return base_stock_plot
def generate_pie(self, labels, values, sym='₹'):
Pie = go.Figure(
go.Pie(
labels=labels,
values=values,
hole=0.4,
texttemplate="%{label}<br>%{percent}",
textposition='inside',
insidetextorientation='radial',
# direction="counterclockwise",
hovertemplate="Category: %{label}<br>"
+ sym + "%{value:,.2f}<br>"
"%{percent}<extra></extra>"))
Pie.update_layout(margin=dict(t=0, b=0, l=0, r=0), legend=dict(
yanchor="middle",
y=0.5,
xanchor="right",
x=0.99
))
return Pie
def generate_bar(self, labels, values, df_category, sym='₹'):
Bar = go.Figure(
go.Bar(
x=labels,
y=values,
hovertext="Name: " +
df_category.Name +
"<br>Payment Mode: " +
df_category['Payment Mode'] +
"<br>Tags: " +
df_category.Tags,
hovertemplate="Date: %{x}<br>"
"Amount: " + sym + "%{y} <br>"
"%{hovertext}<extra></extra>",
hoverinfo="skip",
showlegend=False),
)
Bar.update_layout(bargap=0.5)
Bar.update_xaxes(tickformat="%b %e, %Y", tickangle=45, dtick='0')
return Bar
def pie(self, currency_symbol='₹'):
return self.generate_pie(self.df['Sector'], self.df['Amount'], currency_symbol)
def bar(self, currency, currency_symbol='₹'):
graphs = []
for selector in self.df[self.category].unique():
if selector == 'Artificial':
continue
df_category = self.df[self.df[self.category].eq(selector) |
self.df[self.category].eq('Artificial')]
fig_bar=self.generate_bar(df_category.Date,
df_category[currency],
df_category, currency_symbol)
graphs.append(
dbc.Card(
[
html.H4(selector, className="card-title",
style={'textAlign':'center'}),
html.H6(df_category['Date'].tolist()[0].month_name() +
' ' +
str(df_category['Date'].tolist()[0].year),
className="card-subtitle",
style={'textAlign':'center'}),
dcc.Graph(id='bargraph', figure=go.Figure(fig_bar)),
], body=True)
)
graphs.append(html.H4('', className="card-title", style={'padding':20}))
return graphs
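# Hedged usage sketch (not part of the original module): assuming `df` is the
# transactions DataFrame loaded elsewhere with 'Ticker Tag', 'Date', 'Quantity',
# 'Amount' and 'Sector' columns, the chart generator could be driven like this:
#
#     charts = InvestmentChartGenerator(df)
#     comparison_fig = charts.generate_comparison_plot()  # VOO baseline vs. own portfolio
#     allocation_pie = charts.pie(currency_symbol='₹')    # sector allocation donut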
|
python
|
from gym_network_intrusion.envs.network_intrusion_env_1 import NetworkIntrusionEnv
from gym_network_intrusion.envs.network_intrusion_extrahard_env_1 import NetworkIntrusionExtraHardEnv
|
python
|
from koans_plugs import *
def test_has_true_literal():
"""
    The boolean type has a literal that denotes truth
"""
    a = True  # try these variants: TRUE, true, True
assert a
def test_has_false_literal():
"""
    The boolean type has a literal that denotes falsehood
"""
    a = False  # try these variants: FALSE, false, False
assert not a
def test_python_can_calculate_bool_expressions():
"""
    Python can check whether an expression is true or false
"""
    assert (3 > 2) == True  # is "3 > 2" true (True) or false (False)?
def test_can_assign_bool_expressions_to_variable():
"""
    Boolean expressions can be assigned to a variable.
    The variable will then hold True or False depending on whether
    the expression is true or false.
"""
a = 3 < 2
assert a == False
def test_assert_accepts_bool():
"""
    The assert statement expects a bool right after the word assert.
    If that bool holds a truthy value, everything works.
"""
    a = 3 < 14  # pick any number so that a holds True
assert bool(a)
def test_can_use_not():
"""
    not turns True into False, and False into True.
"""
a = True
assert not a == False
def test_can_use_equality_check():
"""
    == returns True if the value on the left is the same as the value on the right.
    Otherwise it returns False.
"""
assert 3 + 2 == 1 + 4
def test_can_assign_equality_check_to_variable():
"""
    The result of a comparison can be stored in a variable.
"""
a = 3 + 2 == 1 + 4
assert a == True
|
python
|
import csv
import cv2
import numpy as np
import re
np.random.seed(0)
# Load data csv file
def read_csv(file_name):
lines = []
with open(file_name) as driving_log:
reader = csv.reader(driving_log)
next(reader, None)
for line in reader:
lines.append(line)
return lines
def load_image(image_path):
pattern = re.compile(r'/|\\')
file_name = pattern.split(image_path)[-1]
current_path = 'data/IMG/' + file_name
#print(current_path)
image_bgr = cv2.imread(current_path)
image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
return image_rgb
def preprocess_data(lines):
images = []
steerings = []
for line in lines:
# centre
images.append(load_image(line[0]))
# left
images.append(load_image(line[1]))
# right
images.append(load_image(line[2]))
centre_steering = float(line[3])
correction = 0.2
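        # side-camera frames get the centre steering shifted by +/-0.2 so the model
        # learns to steer back towards the lane centre (e.g. centre 0.05 -> left 0.25, right -0.15)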
# centre
steerings.append(centre_steering)
# left
steerings.append(centre_steering+correction)
# right
steerings.append(centre_steering-correction)
return images, steerings
def random_translate(image, steering, range_x=100, range_y=10):
trans_x = range_x * (np.random.rand() - 0.5)
trans_y = range_y * (np.random.rand() - 0.5)
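    # the horizontal shift is converted into a steering offset at 0.002 per pixel,
    # e.g. a +50 px translation adds 50 * 0.002 = 0.1 to the steering angle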
steering += trans_x * 0.002
trans_m = np.float32([[1, 0, trans_x], [0, 1, trans_y]])
height, width = image.shape[:2]
image = cv2.warpAffine(image, trans_m, (width, height))
return image, steering
def random_exposure(image):
image1 = cv2.cvtColor(image,cv2.COLOR_RGB2HSV)
random_bright = .25+np.random.uniform()
image1[:,:,2] = image1[:,:,2]*random_bright
image1 = cv2.cvtColor(image1,cv2.COLOR_HSV2RGB)
return image1
def random_shadow(image, strength=0.50):
top_y = 320*np.random.uniform()
top_x = 0
bot_x = 160
bot_y = 320*np.random.uniform()
image_hls = cv2.cvtColor(image,cv2.COLOR_RGB2HLS)
shadow_mask = 0*image_hls[:,:,1]
X_m = np.mgrid[0:image.shape[0],0:image.shape[1]][0]
Y_m = np.mgrid[0:image.shape[0],0:image.shape[1]][1]
shadow_mask[((X_m-top_x)*(bot_y-top_y) -(bot_x - top_x)*(Y_m-top_y) >=0)]=1
if np.random.randint(2)==1:
        random_bright = strength
cond1 = shadow_mask==1
cond0 = shadow_mask==0
if np.random.randint(2)==1:
image_hls[:,:,1][cond1] = image_hls[:,:,1][cond1]*random_bright
else:
image_hls[:,:,1][cond0] = image_hls[:,:,1][cond0]*random_bright
image = cv2.cvtColor(image_hls,cv2.COLOR_HLS2RGB)
return image
def augment_data(images, steerings):
augmented_images = []
augmented_steerings = []
for image, steering in zip(images, steerings):
# add original
augmented_images.append(image)
augmented_steerings.append(steering)
# add horizontally flipped
augmented_images.append(cv2.flip(image, 1))
augmented_steerings.append(steering*-1.0)
# add randomly translated
image_augmented, steering_augmented = random_translate(image, steering)
# add random exposure
image_augmented = random_exposure(image_augmented)
# add random shadow
rand_shadow = np.random.uniform(0,1)
if rand_shadow > 0.6:
image_augmented = random_shadow(image_augmented)
augmented_images.append(image_augmented)
augmented_steerings.append(steering_augmented)
return augmented_images, augmented_steerings
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.callbacks import ModelCheckpoint, EarlyStopping
def model_LeNet():
model = Sequential()
model.add(Lambda(lambda x : (x / 255.0) - 0.5, input_shape=(160,320,3)))
model.add(Conv2D(6, (5,5), activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(6, (5,5), activation='relu'))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(128))
model.add(Dense(84))
model.add(Dense(1))
return model
def model_nvidia():
model = Sequential()
model.add(Lambda(lambda x : (x / 255.0) - 0.5, input_shape=(160,320,3)))
model.add(Cropping2D(cropping=((70,25), (0,0))))
    model.add(Conv2D(24, (5, 5), strides=(2,2), activation='relu'))
    model.add(Conv2D(36, (5, 5), strides=(2,2), activation='relu'))
    model.add(Conv2D(48, (5, 5), strides=(2,2), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))
return model
import sklearn
import threading
from math import ceil
from random import shuffle
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
class threadsafe_iter:
"""Takes an iterator/generator and makes it thread-safe by
serializing call to the `next` method of given iterator/generator.
"""
def __init__(self, it):
self.it = it
self.lock = threading.Lock()
def __iter__(self):
return self
def __next__(self):
with self.lock:
return self.it.__next__()
def threadsafe_generator(f):
"""A decorator that takes a generator function and makes it thread-safe.
"""
def g(*a, **kw):
return threadsafe_iter(f(*a, **kw))
return g
@threadsafe_generator
def generator(samples, batch_size = 128):
print('generator initialized')
num_samples = len(samples)
while 1:
shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images, steerings = preprocess_data(batch_samples)
images, steerings = augment_data(images, steerings)
X_train = np.array(images)
y_train = np.array(steerings)
yield sklearn.utils.shuffle(X_train, y_train)
if __name__ == '__main__':
print("Loading csv file ...")
csv_file_name = 'data/driving_log.csv'
lines = read_csv(csv_file_name)
csv_file_name = 'data1/driving_log.csv'
lines.extend(read_csv(csv_file_name))
print("Finished loading csv file")
# This should be adjusted according to memory size
batch_size = 64
train_samples, validation_samples = train_test_split(lines, test_size=0.2)
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)
print("Finished Preprocessing images")
# Hyper parameters
epochs_num = 10
model = model_nvidia()
model.summary()
model.compile(loss='mse', optimizer='adam')
checkpoint = ModelCheckpoint("model.h5", monitor='val_loss', verbose=1,
save_best_only=True, mode='min')
early_stop = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=4,
verbose=1, mode='min')
history_object = model.fit_generator(train_generator,
steps_per_epoch=ceil(len(train_samples)/batch_size),
validation_data=validation_generator,
validation_steps=ceil(len(validation_samples)/batch_size),
callbacks=[checkpoint, early_stop],
                        epochs=epochs_num, verbose=1)
# Plot the training and validation loss for each epoch
print('Generating loss chart...')
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.savefig('model.png')
print('Finished')
|
python
|
from copy import deepcopy
import json
import os
from uuid import uuid4
import pytest
from starlette.testclient import TestClient
from hetdesrun.utils import get_uuid_from_seed
from hetdesrun.service.webservice import app
from hetdesrun.models.code import CodeModule
from hetdesrun.models.component import (
ComponentRevision,
ComponentInput,
ComponentOutput,
ComponentNode,
)
from hetdesrun.models.workflow import (
WorkflowNode,
WorkflowConnection,
WorkflowInput,
WorkflowOutput,
)
from hetdesrun.models.wiring import OutputWiring, InputWiring, WorkflowWiring
from hetdesrun.models.run import (
ConfigurationInput,
ExecutionEngine,
WorkflowExecutionInput,
WorkflowExecutionResult,
)
from hetdesrun.runtime.context import execution_context
from hetdesrun.utils import load_data, file_pathes_from_component_json
async def run_workflow_with_client(workflow_json, open_async_test_client):
response = await open_async_test_client.post("/runtime", json=workflow_json)
return response.status_code, response.json()
def gen_execution_input_from_single_component(
component_json_path, direct_provisioning_data_dict=None, wf_wiring=None
):
"""Wraps a single component into a workflow and generates the execution input json
input data is provided directly
"""
if (direct_provisioning_data_dict is None) == (wf_wiring is None):
raise ValueError(
"Excatly one of direct_provisioning_data_dict or wf_wiring must be provided"
)
# Load component stuff
(
base_name,
path_to_component_json,
component_doc_file,
component_code_file,
) = file_pathes_from_component_json(component_json_path)
info, doc, code = load_data(
path_to_component_json, component_doc_file, component_code_file
)
# Build up execution input Json
code_module_uuid = str(get_uuid_from_seed("code_module_uuid"))
component_uuid = str(get_uuid_from_seed("component_uuid"))
comp_inputs = [
ComponentInput(id=str(uuid4()), name=inp["name"], type=inp["type"])
for inp in info["inputs"]
]
comp_outputs = [
ComponentOutput(id=str(uuid4()), name=outp["name"], type=outp["type"])
for outp in info["outputs"]
]
component_node_id = "component_node_id"
return WorkflowExecutionInput(
code_modules=[CodeModule(code=code, uuid=code_module_uuid)],
components=[
ComponentRevision(
uuid=component_uuid,
name=info["name"],
code_module_uuid=code_module_uuid,
function_name="main",
inputs=comp_inputs,
outputs=comp_outputs,
)
],
workflow=WorkflowNode(
id="root_node",
sub_nodes=[
ComponentNode(component_uuid=component_uuid, id=component_node_id)
],
connections=[],
inputs=[
WorkflowInput(
id=str(get_uuid_from_seed(str(comp_input.id) + "_as_wf_input")),
id_of_sub_node=component_node_id,
name=comp_input.name,
name_in_subnode=comp_input.name,
type=comp_input.type,
)
for comp_input in comp_inputs
],
outputs=[
WorkflowOutput(
id=str(get_uuid_from_seed(str(comp_output.id) + "_as_wf_output")),
id_of_sub_node=component_node_id,
name=comp_output.name,
name_in_subnode=comp_output.name,
type=comp_output.type,
)
for comp_output in comp_outputs
],
name="root node",
),
configuration=ConfigurationInput(engine="plain", run_pure_plot_operators=True),
workflow_wiring=WorkflowWiring(
input_wirings=[
InputWiring(
workflow_input_name=comp_input.name,
adapter_id=1,
filters={"value": direct_provisioning_data_dict[comp_input.name]},
)
for comp_input in comp_inputs
],
output_wirings=[
OutputWiring(
workflow_output_name=comp_output.name,
adapter_id=1,
)
for comp_output in comp_outputs
],
)
if wf_wiring is None
else wf_wiring,
)
async def run_single_component(
component_json_file_path, input_data_dict, open_async_test_client
):
response = await open_async_test_client.post(
"/runtime",
json=json.loads(
gen_execution_input_from_single_component(
component_json_file_path,
input_data_dict,
).json()
),
)
return WorkflowExecutionResult(**response.json())
@pytest.mark.asyncio
async def test_null_values_pass_any_pass_through(async_test_client):
async with async_test_client as client:
exec_result = await run_single_component(
"./components/Connectors/pass_through.json",
{"input": {"a": 1.5, "b": None}},
client,
)
assert exec_result.output_results_by_output_name["output"] == {
"a": 1.5,
"b": None,
}
@pytest.mark.asyncio
async def test_null_list_values_pass_any_pass_through(async_test_client):
async with async_test_client as client:
exec_result = await run_single_component(
"./components/Connectors/pass_through.json", {"input": [1.2, None]}, client
)
assert exec_result.output_results_by_output_name["output"] == [1.2, None]
@pytest.mark.asyncio
async def test_null_values_pass_series_pass_through(async_test_client):
async with async_test_client as client:
exec_result = await run_single_component(
"./components/Connectors/pass_through_series.json",
{"input": {"2020-01-01T00:00:00Z": 1.5, "2020-01-02T00:00:00Z": None}},
client,
)
assert exec_result.output_results_by_output_name["output"] == {
"2020-01-01T00:00:00.000Z": 1.5,
"2020-01-02T00:00:00.000Z": None,
}
exec_result = await run_single_component(
"./components/Connectors/pass_through_series.json",
{"input": [1.2, 2.5, None]},
client,
)
assert exec_result.output_results_by_output_name["output"] == {
"0": 1.2,
"1": 2.5,
"2": None,
}
@pytest.mark.asyncio
async def test_all_null_values_pass_series_pass_through(async_test_client):
async with async_test_client as client:
exec_result = await run_single_component(
"./components/Connectors/pass_through_series.json",
{"input": {"2020-01-01T00:00:00Z": None, "2020-01-02T00:00:00Z": None}},
client,
)
assert exec_result.output_results_by_output_name["output"] == {
"2020-01-01T00:00:00.000Z": None,
"2020-01-02T00:00:00.000Z": None,
}
@pytest.mark.asyncio
async def test_nested_wf_execution(async_test_client):
async with async_test_client as client:
with open(os.path.join("tests", "data", "nested_wf_execution_input.json")) as f:
loaded_workflow_exe_input = json.load(f)
response_status_code, response_json = await run_workflow_with_client(
loaded_workflow_exe_input, client
)
assert response_status_code == 200
assert response_json["result"] == "ok"
assert response_json["output_results_by_output_name"][
"limit_violation_timestamp"
].startswith("2020-05-28T20:16:41")
|
python
|
'''
Title : Day 25: Running Time and Complexity
Domain : Tutorials
Author : Ahmedur Rahman Shovon
Created : 03 April 2019
'''
def is_prime(n):
if n == 2:
return True
if n%2 == 0 or n < 2:
return False
max_limit = int(n**0.5) + 1
for i in range(3, max_limit):
if n % i == 0:
return False
return True
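# worked example: is_prime(91) -> max_limit = int(91 ** 0.5) + 1 = 10, the loop
# hits 91 % 7 == 0 and returns False, so 91 is reported as "Not prime"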
t = int(input())
for k in range(t):
n = int(input())
if is_prime(n):
print("Prime")
else:
print("Not prime")
|
python
|
from cassandra.cluster import Cluster
def create_connection():
# TO DO: Fill in your own contact point
cluster = Cluster(['127.0.0.1'])
return cluster.connect('demo')
def set_user(session, lastname, age, city, email, firstname):
# TO DO: execute SimpleStatement that inserts one user into the table
session.execute("INSERT INTO users (lastname, age, city, email, firstname) VALUES (%s,%s,%s,%s,%s)", [lastname, age, city, email, firstname])
def get_user(session, lastname):
# TO DO: execute SimpleStatement that retrieves one user from the table
# TO DO: print firstname and age of user
result = session.execute("SELECT * FROM users WHERE lastname = %s", [lastname]).one()
    print(result.firstname, result.age)
def update_user(session, new_age, lastname):
# TO DO: execute SimpleStatement that updates the age of one user
session.execute("UPDATE users SET age =%s WHERE lastname = %s", [new_age, lastname])
def delete_user(session, lastname):
# TO DO: execute SimpleStatement that deletes one user from the table
session.execute("DELETE FROM users WHERE lastname = %s", [lastname])
def main():
session = create_connection()
lastname = "Jones"
age = 35
city = "Austin"
email = "[email protected]"
firstname = "Bob"
new_age = 36
set_user(session, lastname, age, city, email, firstname)
get_user(session, lastname)
update_user(session, new_age, lastname)
get_user(session, lastname)
delete_user(session, lastname)
if __name__ == "__main__":
main()
|
python
|
"""
Tests for todos module
"""
import random
import string
from django.test import TestCase
DEFAULT_LABELS = [
'low-energy',
'high-energy',
'vague',
'work',
'home',
'errand',
'mobile',
'desktop',
'email',
'urgent',
'5 minutes',
'25 minutes',
'60 minutes',
]
class AnyArg(): # pylint: disable=R0903
"""
Arg matcher which matches everything
"""
def __eq__(self, other):
return True
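# e.g. AnyArg() == 42 and AnyArg() == 'anything' both evaluate to True, so the expected
# payloads below can ignore volatile fields such as ids and timestamps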
def _generate_random_string():
return ''.join(random.choices(string.ascii_uppercase + string.digits, k=5))
def _stub_todo_matcher(description, labels):
return {
'id': AnyArg(),
'description': description,
'archived': False,
'archived_at': AnyArg(),
'completed': False,
'completed_at': AnyArg(),
'created_at': AnyArg(),
'labels': labels,
}
def _stub_label_matcher(name):
return {
'id': AnyArg(),
'name': name,
}
class ServiceTests(TestCase):
"""
Tests for todo view
"""
maxDiff = None
def test_todos_api(self):
"""
Basic test which creates, updates, & deletes todos
and fetches them to ensure they're persisted.
"""
todo_description1 = _generate_random_string()
labels1 = ['desktop', 'home']
todo_description2 = _generate_random_string()
labels2 = ['work']
# Create a todo
todo1_id = self._create_todo({
'description': todo_description1,
'labels': labels1,
})['id']
# Create another todo
self._create_todo({
'description': todo_description2,
'labels': labels2,
})
# Fetch todos and verify they match expectations
fetched_data = self._fetch_todos()
expected_data = [
_stub_todo_matcher(todo_description1, labels1),
_stub_todo_matcher(todo_description2, labels2),
]
self.assertCountEqual(fetched_data, expected_data)
# Update first todo
patch = {
'description': _generate_random_string(),
'labels': ['urgent'],
}
self._update_todo(todo1_id, patch)
# Fetch todos and verify they match expectations
# Expect created_at to be unchanged
expected_data[0]['created_at'] = fetched_data[0]['created_at']
expected_data[1]['created_at'] = fetched_data[1]['created_at']
expected_data[0].update(patch)
fetched_data = self._fetch_todos()
self.assertCountEqual(fetched_data, expected_data)
# Delete first todo
self._delete_todo(todo1_id)
# Fetch todos and verify they match expectations
expected_data = [expected_data[1]]
fetched_data = self._fetch_todos()
self.assertCountEqual(fetched_data, expected_data)
def test_labels_api(self):
"""
Basic test which creates, updates, & deletes labels
and fetches them to ensure they're persisted.
"""
new_label = _generate_random_string()
# Fetch and verify expectations
fetched_data = self._fetch_labels()
expected_data = [_stub_label_matcher(label) for label in DEFAULT_LABELS]
self.assertCountEqual(fetched_data, expected_data)
# Create
label_id = self._create_label({
'name': new_label,
})['id']
# Fetch and verify expectations
fetched_data = self._fetch_labels()
expected_data.append(_stub_label_matcher(new_label))
self.assertCountEqual(fetched_data, expected_data)
# Update label
patch = {
'name': _generate_random_string(),
}
self._update_label(label_id, patch)
# Fetch and verify expectations
expected_data[-1].update(patch)
fetched_data = self._fetch_labels()
self.assertCountEqual(fetched_data, expected_data)
# Delete label
self._delete_label(label_id)
# Fetch and verify expectations
expected_data = [_stub_label_matcher(label) for label in DEFAULT_LABELS]
fetched_data = self._fetch_labels()
self.assertCountEqual(fetched_data, expected_data)
def _create_todo(self, data):
return self._create_entity(data, 'todos')
def _fetch_todos(self):
return self._fetch_entity('todos')
def _update_todo(self, entry_id, patch):
return self._update_entity(entry_id, patch, 'todos')
def _delete_todo(self, entry_id):
return self._delete_entity(entry_id, 'todos')
def _create_label(self, data):
return self._create_entity(data, 'labels')
def _fetch_labels(self):
return self._fetch_entity('labels')
def _update_label(self, entry_id, patch):
return self._update_entity(entry_id, patch, 'labels')
def _delete_label(self, entry_id):
return self._delete_entity(entry_id, 'labels')
def _create_entity(self, data, route):
response = self.client.post(f'/api/todos/{route}/',
data,
content_type='application/json')
self._assert_status_code(201, response)
return response.json()
def _fetch_entity(self, route):
response = self.client.get(f'/api/todos/{route}/')
self._assert_status_code(200, response)
return response.json()
def _update_entity(self, entry_id, patch, route):
response = self.client.patch(f'/api/todos/{route}/{entry_id}/',
patch,
content_type='application/json')
self._assert_status_code(200, response)
return response.json()
def _delete_entity(self, entry_id, route):
response = self.client.delete(f'/api/todos/{route}/{entry_id}/')
self._assert_status_code(204, response)
def _assert_status_code(self, expected_code, response):
self.assertEqual(
response.status_code, expected_code,
(f'Expected status {expected_code}, '
f'received {response.status_code}. {response.content}'))
|
python
|
from .singleton import Singleton
from .visitor import visitor
|
python
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.options
import tornado.gen
import tornado.escape
import os.path
from tornado.options import define, options
from cache_module import CacheHandler
define("port", default=8888, help="run on the given port", type=int)
class BaseHandler(tornado.web.RequestHandler):
def get_current_user(self):
currentUser = self.get_secure_cookie("user")
cacheHandler = CacheHandler()
return cacheHandler.get_cache(self, currentUser)
class MainHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
self.render('index.html')
class LoginHandler(BaseHandler):
@tornado.gen.coroutine
def get(self):
incorrect = self.get_secure_cookie("incorrect")
if incorrect and int(incorrect) > 20:
self.write('<center>blocked</center>')
return
self.render('login.html')
@tornado.gen.coroutine
def post(self):
incorrect = self.get_secure_cookie("incorrect")
if incorrect and int(incorrect) > 20:
self.write('<center>blocked</center>')
return
getusername = tornado.escape.xhtml_escape(self.get_argument("username"))
getpassword = tornado.escape.xhtml_escape(self.get_argument("password"))
if "demo" == getusername and "demo" == getpassword:
username = self.get_argument("username")
self.set_secure_cookie("user", username)
cacheHandler = CacheHandler()
cacheHandler.set_cache(self, username)
self.set_secure_cookie("incorrect", "0")
self.redirect(self.reverse_url("main"))
else:
incorrect = self.get_secure_cookie("incorrect") or 0
increased = str(int(incorrect)+1)
self.set_secure_cookie("incorrect", increased)
self.write("""<center>
Something Wrong With Your Data (%s)<br />
<a href="/">Go Home</a>
</center>""" % increased)
class LogoutHandler(BaseHandler):
def get(self):
self.clear_cookie("user")
self.redirect(self.get_argument("next", self.reverse_url("main")))
class Application(tornado.web.Application):
def __init__(self):
base_dir = os.path.dirname(__file__)
settings = {
"cookie_secret": "bZJc2sWbQLKos6GkHn/VB9oXwQt8S0R0kRvJ5/xJ89E=",
"login_url": "/login",
'template_path': os.path.join(base_dir, "templates"),
'static_path': os.path.join(base_dir, "static"),
'debug':True,
"xsrf_cookies": True,
}
tornado.web.Application.__init__(self, [
tornado.web.url(r"/", MainHandler, name="main"),
tornado.web.url(r'/login', LoginHandler, name="login"),
tornado.web.url(r'/logout', LogoutHandler, name="logout"),
], **settings)
def main():
tornado.options.parse_command_line()
Application().listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
python
|
"""
Exposes commonly used classes and functions.
"""
from .bencode import Bencode
from .torrent import Torrent
from .utils import upload_to_cache_server, get_open_trackers_from_local, get_open_trackers_from_remote
|
python
|
import numpy as np
import re
#------------------------------------------------------------------------------
"""
Correct positions in the vicon data to center on the middle of the enclosure.
This is due to inexact positioning of the enclosure and/or the wand during
motion capture, so this is only necessary to perform on raw vicon data.
"""
def correct_position(x, dims=(1,2)):
for dim in dims:
mx = max(x[:,dim])
mn = min(x[:,dim])
x[:,dim] -= mx-(mx-mn)/2.0
return x
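# e.g. if a coordinate column spans 1.0 .. 3.0, the offset is 3.0 - (3.0 - 1.0) / 2 = 2.0,
# so after correction the column spans -1.0 .. 1.0, centred on the middle of the enclosure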
"""
Parse position information from the data format used in the raw wb vicon files
"""
def parse_raw_vicon_position(line):
a = re.split( ':', line )
a = re.split( ',', a[1] )
    a = np.array(list(map(float, a)))
#print( a )
return a
"""
Parse rotation information from the data format used in the raw wb vicon files
"""
def parse_raw_vicon_rotation(line):
a = re.split( ':', line )
a = re.split( ',', a[1] )
    a = np.array(list(map(float, a)))
#print( a )
return a
"""
Read a raw wb vicon file and extract all state data as a numpy array
"""
def read(path, center=True):
try:
f = open(path)
except Exception:
        return []
content = [x.strip('\r\n') for x in f.readlines() ]
f.close()
state = np.array([0,0,0,0,0,0,0,0])
t = 0
dt = 0.01
i = 0
for line in content:
if i == 0:
i = i + 1
elif i == 1:
i = i + 1
pos = parse_raw_vicon_position(line)
elif i == 2:
i = i + 1
rot = parse_raw_vicon_rotation(line)
x = np.array([t, pos[0], pos[1], pos[2], rot[0], rot[1], rot[2], rot[3]])
state = np.vstack([state, x])
else:
i = 0
t = t + dt
state = np.delete(state, (0), axis=0)
if center:
state = correct_position(state)
return state
|
python
|
import csv
import sys
import urllib3
import json
from urllib.parse import quote
METAMAP = 'https://knowledge.ncats.io/ks/umls/metamap'
DISAPI = 'https://disease-knowledge.ncats.io/api'
DISEASE = DISAPI + '/search'
def parse_disease_map (codes, data):
if len(data) > 0:
for d in data:
if 'I_CODE' in d:
value = d['I_CODE']
if isinstance (value, list):
for v in value:
codes[v] = None
else:
codes[value] = None
def fetch_codes (http, url, codes):
r = http.request('GET', url)
data = json.loads(r.data.decode('utf-8'))['contents']
parse_disease_map(codes, data)
def map_cui (cui, name):
http = urllib3.PoolManager()
codes = {}
fetch_codes (http, DISEASE+'/UMLS:'+cui, codes)
if len(codes) == 0:
fetch_codes (http, DISEASE+'/'+quote(name, safe=''), codes)
omim = []
gard = []
for k in codes.keys():
if k.startswith('GARD:'):
gard.append(k)
elif k.startswith('OMIM:'):
omim.append(k)
if len(gard) == 0:
# do expansion around omim
for id in omim:
fetch_codes (http, DISEASE+'/'+id, codes)
codes = list(codes.keys())
codes.sort()
return codes
def fetch_node (path, node):
if 'label' in node:
path.append(node['label'])
if 'children' in node:
for n in node['children']:
fetch_node(path, n)
def mondo_hierarchies (id):
http = urllib3.PoolManager()
r = http.request('GET', DISAPI+'/tree/'+id, fields={'field': 'label'})
data = json.loads(r.data.decode('utf-8'))
categories = []
if 'children' in data:
for n in data['children']:
path = []
fetch_node(path, n)
# for now we only care about rare genetic disease
if (len(path) > 0 and len(path) < 20
and (path[0] == 'rare genetic disease'
or path[0] == 'inherited genetic disease')):
categories.append(list(reversed(path)))
return categories
def parse_metamap (data, *args):
mapped = {}
types = {}
for st in args:
types[st] = None
for sent in data['utteranceList']:
for token in sent['pcmlist']:
if 'mappingList' in token:
text = token['phrase']['phraseText']
concepts = []
seen = {}
for map in token['mappingList']:
for ev in map['evList']:
cui = ev['conceptId']
name = ev['preferredName']
## see this https://mmtx.nlm.nih.gov/MMTx/semanticTypes.shtml
for st in ev['semanticTypes']:
if st in types and cui not in seen:
if name != '0%':
c = {
'cui': cui,
'name': name,
'sty': st
}
if st == 'dsyn' or st == 'neop':
maps = map_cui(cui, name)
for id in maps:
if (id.startswith('MONDO:')
and id != 'MONDO:0000001'):
cat = mondo_hierarchies(id)
if len(cat) > 0:
c['categories'] = cat
c['mapping'] = maps
concepts.append(c)
seen[cui] = None
if len(concepts) > 0:
mapped[text] = concepts
#print ('... %s => %s' % (text, concepts))
return mapped
def parse_oopd_file (file):
http = urllib3.PoolManager()
cache = {}
with open (file) as f:
reader = csv.reader(f, delimiter='\t', quotechar='"')
header = {}
count = 0
jstr = ''
print ('[',end='')
for row in reader:
if len(header) == 0:
for i,n in enumerate (row):
header[n] = i
if not 'Orphan Drug Status' in header:
raise Exception ('Not an OOPD file; please download from here https://www.accessdata.fda.gov/scripts/opdlisting/oopd/index.cfm!')
else:
designation = row[header['Designation']]
#print (designation)
resp = ''
if designation in cache:
resp = cache[designation]
else:
r = http.request(
'POST', METAMAP,
body=designation,
headers={'Content-Type': 'text/plain'}
)
resp = json.loads(r.data.decode('utf-8'))
cache[designation] = resp
data = {'row': count+1}
for k,v in header.items():
if v < len(row):
data[k] = row[v]
data['DesignationMapped'] = parse_metamap (resp,
'dsyn',
'neop',
'fndg',
'gngm',
'comd',
'aapp',
'patf',
'ortf',
'fngs');
indication = data['Approved Indication']
if len(indication) > 0:
r = http.request(
'POST', METAMAP, body=indication,
headers={'Content-Type': 'text/plain'}
)
resp = json.loads(r.data.decode('utf-8'))
data['ApprovedIndicationMapped'] = parse_metamap(
resp, 'dsyn', 'neop')
if len(jstr) > 0:
print (jstr, end=',')
jstr = json.dumps(data, indent=4,separators=(',',': '))
count += 1
# if count > 10:
# break
if len(jstr) > 0:
print (jstr, end='')
print (']')
if __name__ == "__main__":
if len(sys.argv) == 1:
print ('usage: %s FILE' % (sys.argv[0]))
sys.exit(1)
parse_oopd_file (sys.argv[1])
|
python
|
##
# @file elasticsearch.py
# @author Lukas Koszegy
# @brief Elasticsearch client
##
from elasticsearch import Elasticsearch
import json
from time import time
from database.schemes.mapping import mappingApp, mappingEvent
import logging
class ElasticsearchClient():
def __init__(self, host="127.0.0.1", port=9200, ssl=False):
self.db = self.connect(host, port, ssl)
self.manageIndex = 'manage'
self.manageDocType = 'app'
self.eventDocType = 'event'
logging.getLogger('elasticsearch').setLevel(logging.CRITICAL)
logging.getLogger('urllib3').setLevel(logging.CRITICAL)
try:
self.initManageIndex()
        except Exception as exc:
            raise RuntimeError('Cannot init manage index in database') from exc
    # Initialise the basic structures for an empty database
def initManageIndex(self):
if self.db.indices.exists(index=self.manageIndex):
return
self.db.indices.create(index=self.manageIndex)
self.db.indices.put_mapping(index=self.manageIndex, doc_type=self.manageDocType, body=mappingApp)
def connect(self, host, port, ssl):
return Elasticsearch(['{}:{}'.format(host, port)],
use_ssl=ssl,
max_retries=0)
def createEvent(self, msg):
appId = msg['appId']
self.existApp(appId, False)
result = self.db.index(index=appId, doc_type=self.eventDocType, body=msg)
return result['_shards']['failed'] == 0
def existApp(self, appId, noexist=True):
result = self.db.exists(index=self.manageIndex, id=appId, doc_type=self.manageDocType)
if noexist and result:
raise Exception('Application ' + appId + ' exist')
if (not noexist) and (not result):
raise Exception('Invalid application name '+ appId)
def createApp(self, msg):
id = msg['id']
self.existApp(id)
del msg['id']
result = self.db.index(index=self.manageIndex, doc_type=self.manageDocType, id=id, body=msg);
self.db.indices.create(index=id)
self.db.indices.put_mapping(index=id, doc_type=self.eventDocType, body=mappingEvent)
return result['_shards']['failed'] == 0
def deleteApp(self, msg):
id = msg['id']
self.existApp(id, False)
self.db.indices.delete(index=id, ignore=[400, 404])
self.db.indices.delete(index='result-{}-*'.format(id), expand_wildcards='all', ignore=[400, 404])
self.db.delete(index=self.manageIndex, doc_type=self.manageDocType, id=id);
return True
def delete(self, type, msg): pass
def update(self, type, msg): pass
def setLastTestId(self, msg):
query = {'doc': { 'scenarios': { msg['scenarioId']: { 'lastTestId': msg['testId']}}}}
result = self.db.update(index=self.manageIndex, doc_type=self.manageDocType, id=msg['appId'], body=query)
return result['_shards']['failed'] == 0
def setTestState(self, msg):
query = {'doc': {'scenarios': {msg['scenarioId']: {'state': msg['state']}}}}
if msg['testId'] != 0:
query['doc']['scenarios'][msg['scenarioId']]['tests'] = {msg['testId']: {'state': msg['state']}}
result = self.db.update(index=self.manageIndex, doc_type=self.manageDocType, id=msg['appId'], body=query)
return result['_shards']['failed'] == 0
def setRegressTest(self, msg):
query = {'doc': {'scenarios': {msg['scenarioId']: {'regressTestId': msg['testId']}}}}
result = self.db.update(index=self.manageIndex, doc_type=self.manageDocType, id=msg['appId'], body=query)
return result['_shards']['failed'] == 0
def setRegressTestForTest(self, msg):
result = self.db.update(index=self.manageIndex, doc_type=self.manageDocType, id=msg['appId'],
body={'doc': {'scenarios': {msg['scenarioId']: {'tests': {msg['testId']:
{'regressTestId': msg['regressTestId']}}}}}})
return result['_shards']['failed'] == 0
def setScenarioName(self, msg):
query = {'doc': {'scenarios': {msg['scenarioId']: {'name': msg['name']}}}}
result = self.db.update(index=self.manageIndex, doc_type=self.manageDocType, id=msg['appId'], body=query)
return result['_shards']['failed'] == 0
def createTest(self, msg):
id = 'result-' + msg['appId'] + '-' + msg['scenarioId']
del msg['appId']
del msg['scenarioId']
result = self.db.create(index=id, doc_type='result', id=time(), body=msg)
return result['_shards']['failed'] == 0
def getResultAgg(self, msg):
resultIndex = 'result-{}-{}'.format(msg['appId'], msg['scenarioId'])
indexes = '{},{}'.format(self.manageIndex, resultIndex)
query = ('{"index": "' + self.manageIndex + '"}\n'
'{"query": {"term": {"_id": "' + msg['appId'] + '"}}}\n'
'{"index": "' + resultIndex + '"}\n'
'{"size": 0, "aggs": {"results": {"terms": {"field": "testId", "size": 10000}}}}\n'
)
filter = ['responses.hits.hits', 'responses.aggregations.results', 'error']
result = self.db.msearch(index=indexes, filter_path=filter, body=query)
if 'error' in result:
raise RuntimeError(result['error']['reason'])
answer = []
if not 'scenarios' in result['responses'][0]['hits']['hits'][0]['_source']:
return answer
generalInfo = result['responses'][0]['hits']['hits'][0]['_source']['scenarios'][msg['scenarioId']]['tests']
bucketIter = result['responses'][1]['aggregations']['results']['buckets']
noExistInfo = {'regressTestId': 0, 'state': -1}
for item in bucketIter:
testInfo = generalInfo[str(item['key'])]
tmpObj = {'testId': item['key'], 'events': item['doc_count']}
tmpObj.update(noExistInfo)
for element in ['regressTestId', 'state']:
try:
tmpObj[element] = testInfo[element]
except:
pass
answer.append(tmpObj)
return answer
def getResult(self, msg):
index = 'result-{}-{}'.format(msg['appId'], msg['scenarioId'])
filter = ['hits.hits', 'error']
if 'testId' in msg:
query = {'query': {'terms': {'testId': msg['testId']}}, 'size': 10000}
else:
query = {'query': {'match_all': {}}, 'size': 10000}
result = self.db.search(index=index, filter_path=filter,
body=query)
if 'error' in result:
raise RuntimeError(result['error']['reason'])
answer = []
noExistInfo = {'score': -1, 'regressTestId': -1, 'events': -1, 'state': -1, 'performTime': -1, 'image': ''}
for item in result['hits']['hits']:
tmp = {'id': item['_id']}
tmp.update(noExistInfo)
for key, value in item['_source'].items():
tmp[key] = value
answer.append(tmp)
answer.sort(key=lambda x: x['id'])
return answer
def setImgScore(self, msg):
index = 'result-{}-{}'.format(msg['appId'], msg['scenarioId'])
id = msg['id']
query = {'doc': {'score': msg['score'], 'regressTestId': msg['regressTestId']}}
result = self.db.update(index=index, doc_type='result', id=id, body=query)
return result['_shards']['failed'] == 0
def getTest(self, msg):
answer = []
indexes = self.manageIndex + ',' + msg['appId']
filter=['responses.hits', 'error']
query = ('{"index": "' + self.manageIndex + '"}\n'
'{"query": {"exists": {"field": "scenarios.' + msg['scenarioId'] + '"}}}\n'
'{"index": "' + msg['appId'] + '"}\n'
'{"query": {"term": {"scenarioId": "' + msg['scenarioId'] + '"}}, "size": 10000}\n'
)
result = self.db.msearch(index=indexes, body=query, filter_path=filter)
if 'error' in result:
raise RuntimeError(result['error']['reason'])
manage = None
if result['responses'][0]['hits']['total'] != 0:
manage = result['responses'][0]['hits']['hits'][0]['_source']['scenarios'][msg['scenarioId']]
for item in result['responses'][1]['hits']['hits']:
item['_source']['_id'] = item['_id']
answer.append(item['_source'])
answer.sort(key=lambda x: x['timestamp'])
return (manage, answer)
def getApp(self, msg):
answer = []
filter=['hits.hits', 'error']
if msg:
query = {'query': {'terms': {'_id': [msg['id']] }}}
else:
query = {'query': {'match_all': {}}}
result = self.db.search(index=self.manageIndex, body=query, filter_path=filter, request_cache=False, size=100)
if 'error' in result:
raise RuntimeError(result['error']['reason'])
for item in result['hits']['hits']:
answer.append({'id': item['_id'], 'domain': item['_source']['domain'],
'created': item['_source']['created']})
return answer
def getScenarios(self, msg):
answer = []
indexes = '{},{}'.format(self.manageIndex, msg['scenarioId'])
filter=['responses.aggregations.scenarios', 'responses.hits', 'error']
query = ('{"index": "' + self.manageIndex + '"}\n'
'{"query": {"term": {"_id": "' + msg['scenarioId'] + '"}}}\n'
'{"index": "' + msg['scenarioId'] + '"}\n'
'{"size": 0, "aggs": {"scenarios": {"terms": {"field": "scenarioId.keyword", "size": 10000}}}}\n'
)
result = self.db.msearch(index=indexes, body=query, filter_path=filter)
if 'error' in result:
raise RuntimeError(result['error']['reason'])
testInfoIter = []
if 'scenarios' in result['responses'][0]['hits']['hits'][0]['_source']:
testInfoIter = result['responses'][0]['hits']['hits'][0]['_source']['scenarios']
bucketIter = result['responses'][1]['aggregations']['scenarios']['buckets']
noExistInfo = {'lastTestId': 0, 'regressTestId': 0, 'name': '', 'state': -1}
for item in bucketIter:
tmpObj = {'scenarioId': item['key'], 'events': item['doc_count']}
tmpObj.update(noExistInfo)
if item['key'] in testInfoIter:
tmpObj.update(testInfoIter[item['key']])
if 'tests' in tmpObj:
del tmpObj['tests']
answer.append(tmpObj)
return answer
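# Hedged usage sketch (illustrative only, not part of the original client):
#
#     client = ElasticsearchClient(host='127.0.0.1', port=9200)
#     client.createApp({'id': 'shop', 'domain': 'shop.example.com', 'created': time()})
#     client.createEvent({'appId': 'shop', 'scenarioId': 'checkout', 'event': 'click'})
#
# createApp() registers the application in the 'manage' index and creates a dedicated
# per-application event index; createEvent() then indexes raw events into that index.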
|
python
|
from argparse import ArgumentParser
import numpy as np
import pytorch_lightning as pl
from scipy.io import savemat
from torch.utils.data import DataLoader
from helmnet import IterativeSolver
from helmnet.dataloaders import get_dataset
class Evaluation:
def __init__(self, path, testset, gpus):
self.path = path
self.testset = get_dataset(testset)
self.testloader = DataLoader(
self.testset, batch_size=32, num_workers=32, shuffle=False
)
self.gpus = gpus
self.model = self.get_model()
self.model.eval()
self.model.freeze()
def move_model_to_gpu(self):
self.model.to("cuda:" + str(self.gpus[0]))
def results_on_test_set(self):
trainer = pl.Trainer(gpus=self.gpus)
trainer.test(self.model, self.testloader)
def compare_to_gmres(self):
# self.testset.dataset.save_for_matlab('testset.mat')
savemat("test_indices.mat", {"test_indices": np.array(self.testset.indices)})
def single_example(self, idx, get_wavefield=True, get_states=True, iterations=1000):
sos_map = self.testset[idx].unsqueeze(0).to("cuda:" + str(self.gpus[0]))
output = self.model.forward(
sos_map,
num_iterations=iterations,
return_wavefields=get_wavefield,
            return_states=get_states,
)
# Get loss
losses = [self.model.test_loss_function(x) for x in output["residuals"]]
return output, losses
def get_model(self, domain_size=None, source_location=None):
# Loading model and its hyperparams
model = IterativeSolver.load_from_checkpoint(self.path, strict=False, test_data_path=None)
hparams = model.hparams
# Customizing hparams if needed
if domain_size is not None:
hparams["domain_size"] = domain_size
if source_location is not None:
hparams["source_location"] = source_location
new_model = IterativeSolver(**hparams)
# loading weights and final setup
new_model.f.load_state_dict(model.f.state_dict())
new_model.set_laplacian()
new_model.set_source()
new_model.freeze()
print("--- MODEL HYPERPARAMETERS ---")
print(new_model.hparams)
return new_model
def set_domain_size(self, domain_size, source_location=None, source_map=None):
self.model.hparams.domain_size = domain_size
self.model.f.domain_size = self.model.hparams.domain_size
self.model.set_laplacian()
if source_location is not None:
self.model.set_multiple_sources([source_location])
else:
self.model.set_source_maps(source_map)
self.model.f.init_by_size()
for enc, size in zip(self.model.f.enc, self.model.f.states_dimension):
enc.domain_size = size
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument(
"--model_checkpoint",
type=str,
default="checkpoints/trained_weights.ckpt",
help="Checkpoint file with model weights",
)
parser.add_argument(
"--test_set",
type=str,
default="datasets/splitted_96/testset.ph",
help="Test-set file",
)
parser.add_argument(
"--gpu",
type=int,
default=1,
help="Which gpu to use",
)
args = parser.parse_args()
evaluator = Evaluation(
path=args.model_checkpoint, testset=args.test_set, gpus=[args.gpu]
)
# Making results on the test set
evaluator.results_on_test_set()
|
python
|
import cv2
import numpy as np
import pandas as pd
import datetime
date=datetime.datetime.now().strftime("%d/%m/20%y")
faceDetect = cv2.CascadeClassifier("C:/Users/Administrator/Desktop/haarcascade_frontalface_default.xml")
cam = cv2.VideoCapture(0)
rec = cv2.face.LBPHFaceRecognizer_create()
rec.read("C:/Users/Administrator/Desktop/trainningData.yml")
id = 0
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
df=pd.read_csv("C:/Users/Administrator/Desktop/at1.csv")
while (True):
ret, img = cam.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = faceDetect.detectMultiScale(gray, 2, 4)
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
id, conf = rec.predict(gray[y:y + h,x:x + w])
id1 = df['Name'][df.Id==id]
df[date][df.Id==id]='P'
df.to_csv("C:/Users/Administrator/Desktop/at1.csv",index=False)
cv2.putText(img, str(id1), (x, y + h), font, 3, 255)
cv2.imshow("Face", img)
if (cv2.waitKey(1) & 0xFF == ord('q')):
break
cam.release()
cv2.destroyAllWindows()
|
python
|
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import time
import os
import shutil
import copy
import datetime
from layers import ConvNet
import network_operators
import utils
# hyperparameters
mutation_time_limit_hours = 23
n_models = 8 # number of child models per generation
n_mutations = 5 # number of mutations applied per generation
budgets = 5 # budget for training all models combined (in epochs)
n_epochs_between = 10 # epochs for warm restart of learning rate
epoch_final = 200 # epochs for final training
lr_final = 0.025
n_experiments = 8
max_n_params = 20*10**6
expfolder = "./results_shr/"
shutil.rmtree('./results_shr', ignore_errors=True)
os.makedirs(expfolder)
# data
trainloader, validloader, testloader = utils.prepare_data(batch_size=128, valid_frac=0.1)
# one batch which will use for many computations
for batch_idx, (inputs, targets) in enumerate(trainloader):
data, target = Variable(inputs.cuda()), Variable(targets.cuda())
batch = data
batch_y = target
break
# basic data structure
layer0 = {'type': 'input', 'params': {'shape': (32,32,3)}, 'input': [-1],'id': 0}
layer1 = {'type': 'conv', 'params': {'channels': 64, 'ks1': 3, 'ks2': 3, "in_channels": 3}, 'input': [0], 'id': 1}
layer1_1 = {'type': 'batchnorm', 'params': {"in_channels": 64}, 'input': [1], 'id': 2}
layer1_2 = {'type': 'activation', 'params': {}, 'input': [2], 'id': 3}
layer4 = {'type': 'pool', 'params': {'pooltype': 'max', 'poolsize': 2}, 'input': [3],'id': 10}
layer5 = {'type': 'conv', 'params': {'channels': 128, 'ks1': 3, 'ks2': 3, "in_channels": 64}, 'input': [10], 'id': 11}
layer5_1 = {'type': 'batchnorm', 'params': {"in_channels": 128}, 'input': [11], 'id': 12}
layer5_2 = {'type': 'activation', 'params': {}, 'input' : [12], 'id': 13}
layer8 = {'type': 'pool', 'params': {'pooltype': 'max', 'poolsize': 2}, 'input': [13],'id': 20}
layer9 = {'type': 'conv', 'params': {'channels': 256, 'ks1': 3, 'ks2': 3, "in_channels": 128}, 'input': [20], 'id': 21}
layer9_1 = {'type': 'batchnorm', 'params': {"in_channels": 256}, 'input': [21], 'id': 22}
layer9_2 = {'type': 'activation', 'params': {}, 'input' : [22], 'id': 23}
layer11 = {'type': 'dense', 'params': {'units': 10, "in_channels": 256, "in_size": 8}, 'input': [23], 'id': 27}
lr_vanilla = 0.01
opt_algo = {'name': optim.SGD, 'lr': lr_vanilla, 'momentum': 0.9, 'weight_decay': 0.0005, 'alpha': 1.0}
sch_algo = {'name': optim.lr_scheduler.CosineAnnealingLR, 'T_max': 5, 'eta_min': 0, 'last_epoch': -1}
comp = {'optimizer': opt_algo, 'scheduler': sch_algo, 'loss': nn.CrossEntropyLoss, 'metrics': ['accuracy']}
model_descriptor = {}
model_descriptor['layers'] = [layer0, layer1, layer1_1, layer1_2,
layer4, layer5, layer5_1, layer5_2,
layer8, layer9, layer9_1, layer9_2, layer11]
model_descriptor['compile']= comp
# create a new basic model
mod = ConvNet(model_descriptor)
mod.cuda()
vanilla_model = {'pytorch_model': mod, 'model_descriptor': model_descriptor, 'topo_ordering': mod.topo_ordering}
# train initially our vanilla model and save
vanilla_model['pytorch_model'].fit_vanilla(trainloader, epochs=20)
# save vanilla model weights
torch.save(vanilla_model['pytorch_model'].state_dict(), expfolder + "vanilla_model")
def SpecialChild(n_models, n_mutations, n_epochs_total, initial_model, savepath, folder_out):
"""
generate and train children, update best model
n_models = number of child models
n_mutations = number of mutations/network operators to be applied per model_descriptor
n_epochs_total = number of epochs for training in total
initial model = current best model_descriptor
savepath = where to save stuff
folder_out = where to save the general files for one run
"""
n_epochs_each = int(n_epochs_total)
print('Train all models for', int(n_epochs_each), 'epochs.')
init_weights_path = savepath + 'ini_weights'
torch.save(initial_model['pytorch_model'].state_dict(), init_weights_path)
performance = np.zeros(shape=(n_models,))
descriptors = []
for model_idx in range(0, n_models):
print('\nmodel idx ' + str(model_idx))
# save some data
time_overall_s = time.time()
pytorch_model = ConvNet(initial_model['model_descriptor'])
pytorch_model.cuda()
pytorch_model.load_state_dict(torch.load(init_weights_path), strict=False)
model = {'pytorch_model': pytorch_model,
'model_descriptor': copy.deepcopy(initial_model['model_descriptor']),
'topo_ordering': pytorch_model.topo_ordering}
descriptors.append(model['model_descriptor'])
mutations_applied = []
# overall , mutations, training
times = [0, 0, 0]
# apply operators
for i in range(0, n_mutations):
time_mut_s = time.time()
# we don't mutate the first child!
if model_idx != 0:
mutations_probs = np.array([1, 1, 1, 1, 1, 0])
[model, mutation_type, params] = network_operators.MutateNetwork(model, batch,
mutation_probs=mutations_probs)
mutations_applied.append(mutation_type)
time_mut_e = time.time()
times[1] = times[1] + (time_mut_e - time_mut_s)
pytorch_total_params = sum(p.numel() for p in model['pytorch_model'].parameters() if p.requires_grad)
if pytorch_total_params > max_n_params:
break
# train
time_train_s = time.time()
# initial short training of the children
model['pytorch_model'].fit(trainloader, epochs=n_epochs_each)
time_train_e = time.time()
times[2] = times[2] + (time_train_e - time_train_s)
performance[model_idx] = model['pytorch_model'].evaluate(validloader)
pytorch_total_params_child = sum(p.numel() for p in model['pytorch_model'].parameters() if p.requires_grad)
torch.save(model['pytorch_model'].state_dict(), savepath + 'model_' + str(model_idx))
with open(folder_out + "performance.txt", "a+") as f_out:
f_out.write('child ' + str(model_idx) + ' performance ' +str(performance[model_idx])+' num params '+str(pytorch_total_params_child) +'\n')
descriptors[model_idx] = copy.deepcopy(model['model_descriptor'])
time_overall_e = time.time()
times[0] = times[0] + (time_overall_e - time_overall_s)
np.savetxt(savepath + 'model_' + str(model_idx) + '_times', times)
descriptor_file = open(savepath + 'model_' + str(model_idx) + '_model_descriptor.txt', 'w')
for layer in model['model_descriptor']['layers']:
layer_str = str(layer)
descriptor_file.write(layer_str + "\n")
descriptor_file.close()
# delete the model (attempt to clean the memory)
del model['pytorch_model']
del model
torch.cuda.empty_cache()
# continue SH steps
sorted_children = np.argsort(performance)
n_children = len(sorted_children)
n_epochs_train_children = n_epochs_each
while n_children > 1:
# pick the best halve of the children
best_children = sorted_children[(n_children // 2):]
# increase the training budget for them
n_epochs_train_children = n_epochs_train_children * 2
print("\nbest_children", best_children)
print("n_epochs_train_children", n_epochs_train_children)
for child in best_children:
print("child ", child)
# load the child parameters
pytorch_model = ConvNet(descriptors[child])
pytorch_model.cuda()
pytorch_model.load_state_dict(torch.load(savepath + 'model_' + str(child)), strict=False)
model = {'pytorch_model': pytorch_model,
'model_descriptor': copy.deepcopy(descriptors[child]),
'topo_ordering': pytorch_model.topo_ordering}
# train a child
model['pytorch_model'].fit(trainloader, epochs=n_epochs_train_children)
# evaluate a child
performance[child] = model['pytorch_model'].evaluate(validloader)
pytorch_total_params_child = sum(p.numel() for p in model['pytorch_model'].parameters() if p.requires_grad)
with open(folder_out + "performance.txt", "a+") as f_out:
f_out.write('child ' + str(child) + ' performance ' +str(performance[child])+' num params '+str(pytorch_total_params_child) +'\n')
# update a child model
torch.save(model['pytorch_model'].state_dict(), savepath + 'model_' + str(child))
# delete the model (attempt to clean the memory)
del model['pytorch_model']
del model
torch.cuda.empty_cache()
print("\nperformance", performance)
temp_children_array = np.argsort(performance)
sorted_children = []
for i, t in enumerate(temp_children_array):
if t in best_children:
sorted_children.append(t)
print("sorted_children", sorted_children)
n_children = len(sorted_children)
print("it should be the winner", sorted_children[0])
print("it should be the best performance", performance[sorted_children[0]])
# load the best child
the_best_child = sorted_children[0]
pytorch_model = ConvNet(descriptors[the_best_child])
pytorch_model.cuda()
pytorch_model.load_state_dict(torch.load(savepath + 'model_' + str(the_best_child)), strict=False)
model = {'pytorch_model': pytorch_model,
'model_descriptor': copy.deepcopy(descriptors[the_best_child]),
'topo_ordering': pytorch_model.topo_ordering}
with open(folder_out + "performance.txt", "a+") as f_out:
f_out.write("****************************\n")
return model, performance[sorted_children[0]]
# main part
for outeriter_idx in range(0, n_experiments):
# the start point of the run
start_run = datetime.datetime.now()
# create folder for this best model
folder_out = expfolder + 'run_' + str(outeriter_idx) + '/'
os.mkdir(folder_out)
# load vanilla model
initial_model = vanilla_model
# load the vanilla model parameters
initial_model['pytorch_model'].load_state_dict(torch.load(expfolder + "vanilla_model"), strict=False)
# the counter for steps in one particular run
sh_idx = 0
while True:
# create a folder for all models in this iteration
savepath = folder_out + str(sh_idx) + '/'
os.mkdir(savepath)
st = time.time()
initial_model, perf = SpecialChild(n_models, n_mutations,
budgets, initial_model, savepath, folder_out)
end = time.time()
print("\n\n" + 20 * "*")
print("Performance before final train for run " + str(outeriter_idx) + " model " + str(
sh_idx) + " performance:" + str(perf))
print(20 * "*" + "\n\n")
# check the number of params
pytorch_total_params = sum(p.numel() for p in initial_model['pytorch_model'].parameters() if p.requires_grad)
        # stop either when we reach the limit on the number of parameters
if pytorch_total_params > max_n_params:
break
# or we reach the limit of mutation duration
if datetime.datetime.now() > (start_run + datetime.timedelta(hours=mutation_time_limit_hours)):
break
sh_idx += 1
print('final training')
# load training data without validation part before final training
trainloader_final, _, testloader_final = utils.prepare_data(valid_frac=0.0)
    # change the learning rate for the final training run
initial_model['pytorch_model'].optimizer.param_groups[0]['initial_lr'] = lr_final
initial_model['pytorch_model'].optimizer.param_groups[0]['lr'] = lr_final
# train the model
initial_model['pytorch_model'].fit(trainloader_final, epochs=epoch_final)
# evaluate the performance
performance = initial_model['pytorch_model'].evaluate(testloader_final)
final_num_params = sum(p.numel() for p in initial_model['pytorch_model'].parameters() if p.requires_grad)
# save everything
with open(folder_out + "performance.txt", "a+") as f_out:
f_out.write('final perf ' + str(performance) + ' final number of params ' + str(final_num_params))
torch.save(initial_model['pytorch_model'].state_dict(), folder_out + 'best_model')
descriptor_file = open(folder_out + 'best_model_descriptor.txt', 'w')
for layer in initial_model['model_descriptor']['layers']:
layer_str = str(layer)
descriptor_file.write(layer_str + "\n")
descriptor_file.close()
|
python
|
import subprocess , os
from tkinter import messagebox
from tkinter import filedialog
class Cmd:
def __init__(self,tk, app):
self.app = app
self.tk = tk
self.default_compileur_path_var = os.path.join(os.path.dirname(os.path.abspath(__file__)), "dart-sass\\sass.bat")
self.option = tk.IntVar()
self.css_path_var = tk.StringVar()
self.sass_path_var = tk.StringVar()
self.label_error_var = tk.StringVar()
def simple_compilation(self ,css_file:str ,sass_file:str):
output = subprocess.Popen(f'{self.default_compileur_path_var} "{sass_file}" "{css_file}"', shell=True , stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout , stderr = output.communicate()
print(output.returncode)
if output.returncode == 0:
# print(stdout.decode())
# print(stderr.decode().split()[0])
self.label_error_var.set("")
else:
stderrlist = stderr.decode().split()
file = stderrlist[-4].split('\\')[-1]
            message_error = f"{stderrlist[0]} {file} {stderrlist[1]} {stderrlist[2]} line: {stderrlist[-3].split(':')[0]}"
print(stderr.decode())
self.label_error_var.set(f"{message_error.lower()}")
# print(message_error)
    def watch_compilation(self ,css_file:str ,sass_file:str):
        # Launch dart-sass in watch mode in a separate console window, then
        # write a reusable batch file next to the sass source for later runs.
        cmd = f'@echo off\n{self.default_compileur_path_var} "{sass_file}" "{css_file}" --watch'
        path = os.path.dirname(sass_file)+"/watch-file.bat"
        os.system(f'start {self.default_compileur_path_var} "{sass_file}" "{css_file}" --watch')
with open(path, "w+") as watch_file:
watch_file.write(cmd)
def compressor_css_file(self ,css_file:str ,sass_file:str):
output = subprocess.Popen(f'{self.default_compileur_path_var} "{sass_file}" "{css_file}" --style=compressed --no-source-map',shell=True,stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout , stderr = output.communicate()
if output.returncode == 0:
self.label_error_var.set("")
else:
stderrlist = stderr.decode().split()
file = stderrlist[-4].split('\\')[-1]
            message_error = f"{stderrlist[0]} {file} {stderrlist[1]} {stderrlist[2]} line: {stderrlist[-3].split(':')[0]}"
print(stderr.decode())
self.label_error_var.set(f"{message_error.lower()}")
# print(message_error)
def select_method(self , option=1):
css_file = self.css_path_var.get()
sass_file = self.sass_path_var.get()
controle = self.message_error(css_file=css_file , sass_file=sass_file )
if controle == True:
if option == 1:
self.watch_compilation(css_file ,sass_file)
elif option == 0:
self.simple_compilation(css_file ,sass_file)
elif option == 2:
self.compressor_css_file(css_file ,sass_file)
def open_css_file(self):
self.app.cssfile = filedialog.askopenfilename(filetypes=(("css files" , "*.css"),("alls" , "*.*"),))
if self.app.cssfile.split('.')[-1] == "css":
self.css_path_var.set(self.app.cssfile)
print(self.css_path_var.get())
elif self.app.sassfile == "":
pass
else:
print("[error] You must select the css file")
messagebox.showinfo("css" , "You must select the css file")
def open_sass_file(self):
self.app.sassfile = filedialog.askopenfilename(filetypes=( ("scss files" , "*.scss"),("sass files" , "*.sass") ,("alls" , "*.*"),))
if(self.app.sassfile.split('.')[-1] == "scss") or (self.app.sassfile.split('.')[-1] == "sass"):
self.sass_path_var.set(self.app.sassfile)
print(self.sass_path_var.get())
elif self.app.sassfile == "":
pass
else:
print("[error] You must select the scss or sass file")
messagebox.showinfo("sass" , "You must select the scss or sass file")
def message_error(self, sass_file:str , css_file:str) -> bool:
if css_file == "" and sass_file == "":
messagebox.showinfo("scss and css" , "You must select the css and scss file")
return False
elif sass_file == "" and css_file != "":
messagebox.showinfo("scss" , "You must select the scss file")
return False
elif css_file == "" and sass_file != "":
messagebox.showinfo("css" , "You must select the css file")
return False
return True
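# --- Hypothetical usage sketch (not part of the original module) ---
# Assumes tkinter is available, a Tk root window has been created, and the
# dart-sass bundle sits at the default path computed in Cmd.__init__; the
# paths and the App stand-in below are illustrative only.
#
# import tkinter as tk
#
# root = tk.Tk()
# root.withdraw()                      # no window needed for a one-shot build
#
# class App:                           # mimics the attributes Cmd touches
#     cssfile = ""
#     sassfile = ""
#
# cmd = Cmd(tk, App())
# cmd.sass_path_var.set(r"C:\project\style.scss")
# cmd.css_path_var.set(r"C:\project\style.css")
# cmd.select_method(option=0)          # 0 = one-shot compilation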
|
python
|
import math
from typing import Optional
import torch
from torch import nn, Tensor
from torch.autograd import grad
from torch.nn import functional as F
from adv_lib.utils.visdom_logger import VisdomLogger
def ddn(model: nn.Module,
inputs: Tensor,
labels: Tensor,
targeted: bool = False,
steps: int = 100,
γ: float = 0.05,
init_norm: float = 1.,
levels: Optional[int] = 256,
callback: Optional[VisdomLogger] = None) -> Tensor:
"""
Decoupled Direction and Norm attack from https://arxiv.org/abs/1811.09600.
Parameters
----------
model : nn.Module
Model to attack.
inputs : Tensor
Inputs to attack. Should be in [0, 1].
labels : Tensor
Labels corresponding to the inputs if untargeted, else target labels.
targeted : bool
Whether to perform a targeted attack or not.
steps : int
Number of optimization steps.
γ : float
Factor by which the norm will be modified. new_norm = norm * (1 + or - γ).
init_norm : float
Initial value for the norm of the attack.
levels : int
If not None, the returned adversarials will have quantized values to the specified number of levels.
callback : Optional
Returns
-------
adv_inputs : Tensor
Modified inputs to be adversarial to the model.
"""
if inputs.min() < 0 or inputs.max() > 1: raise ValueError('Input values should be in the [0, 1] range.')
device = inputs.device
batch_size = len(inputs)
batch_view = lambda tensor: tensor.view(batch_size, *[1] * (inputs.ndim - 1))
# Init variables
multiplier = -1 if targeted else 1
δ = torch.zeros_like(inputs, requires_grad=True)
ε = torch.full((batch_size,), init_norm, device=device, dtype=torch.float)
worst_norm = torch.max(inputs, 1 - inputs).flatten(1).norm(p=2, dim=1)
# Init trackers
best_l2 = worst_norm.clone()
best_δ = torch.zeros_like(inputs)
adv_found = torch.zeros(batch_size, dtype=torch.bool, device=device)
for i in range(steps):
α = torch.tensor(0.01 + (1 - 0.01) * (1 + math.cos(math.pi * i / steps)) / 2, device=device)
l2 = δ.data.flatten(1).norm(p=2, dim=1)
adv_inputs = inputs + δ
logits = model(adv_inputs)
pred_labels = logits.argmax(1)
ce_loss = F.cross_entropy(logits, labels, reduction='none')
loss = multiplier * ce_loss
is_adv = (pred_labels == labels) if targeted else (pred_labels != labels)
is_smaller = l2 < best_l2
is_both = is_adv & is_smaller
adv_found.logical_or_(is_adv)
best_l2 = torch.where(is_both, l2, best_l2)
best_δ = torch.where(batch_view(is_both), δ.detach(), best_δ)
δ_grad = grad(loss.sum(), δ, only_inputs=True)[0]
# renorming gradient
grad_norms = δ_grad.flatten(1).norm(p=2, dim=1)
δ_grad.div_(batch_view(grad_norms))
# avoid nan or inf if gradient is 0
if (zero_grad := (grad_norms < 1e-12)).any():
δ_grad[zero_grad] = torch.randn_like(δ_grad[zero_grad])
if callback is not None:
cosine = F.cosine_similarity(δ_grad.flatten(1), δ.data.flatten(1), dim=1).mean()
callback.accumulate_line('ce', i, ce_loss.mean())
callback_best = best_l2.masked_select(adv_found).mean()
callback.accumulate_line(['ε', 'l2', 'best_l2'], i, [ε.mean(), l2.mean(), callback_best])
callback.accumulate_line(['cosine', 'α', 'success'], i, [cosine, α, adv_found.float().mean()])
if (i + 1) % (steps // 20) == 0 or (i + 1) == steps:
callback.update_lines()
δ.data.add_(δ_grad, alpha=α)
ε = torch.where(is_adv, (1 - γ) * ε, (1 + γ) * ε)
ε = torch.minimum(ε, worst_norm)
δ.data.mul_(batch_view(ε / δ.data.flatten(1).norm(p=2, dim=1)))
δ.data.add_(inputs).clamp_(0, 1)
if levels is not None:
δ.data.mul_(levels - 1).round_().div_(levels - 1)
δ.data.sub_(inputs)
return inputs + best_δ
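# --- Hypothetical usage sketch (not part of the original module) ---
# Assumes a trained classifier `net` and a batch `(x, y)` already scaled to
# [0, 1] and moved to the same device; the step count is illustrative.
#
# net.eval()
# x_adv = ddn(net, x, y, steps=100, init_norm=1.0)
# l2 = (x_adv - x).flatten(1).norm(p=2, dim=1)   # per-sample perturbation size
# fooled = net(x_adv).argmax(1) != y             # untargeted success mask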
|
python
|
#!/usr/bin/python
def get_memory(file_name):
# vmstat
#procs - ----------memory - --------- ---swap - - -----io - --- -system - - ------cpu - ----
# r;b; swpd;free;buff;cache; si;so; bi;bo; in;cs; us;sy;id;wa;st
memory_swpd = list()
memory_free = list()
memory_buff = list()
memory_cache = list()
swap_si = list()
swap_so = list()
io_bi = list()
io_bo = list()
system_sin = list()
system_scs = list()
cpu_us = list()
cpu_sy = list()
cpu_id = list()
cpu_wa = list()
cpu_st = list()
try:
with open(file_name) as f:
for line in f:
l = str(line).replace("\'", "").replace("\n", "").split(";")
memory_swpd.append(int(l[2]))
memory_free.append(int(l[3]))
memory_buff.append(int(l[4]))
memory_cache.append(int(l[5]))
swap_si.append(int(l[6]))
swap_so.append(int(l[7]))
io_bi.append(int(l[8]))
io_bo.append(int(l[9]))
system_sin.append(int(l[10]))
system_scs.append(int(l[11]))
cpu_us.append(int(l[12]))
cpu_sy.append(int(l[13]))
cpu_id.append(int(l[14]))
cpu_wa.append(int(l[15]))
cpu_st.append(int(l[16]))
except:
print "Could not open file: " + file_name
return
def get_memory(file_name):
# vmstat
#procs - ----------memory - --------- ---swap - - -----io - --- -system - - ------cpu - ----
# r;b; swpd;free;buff;cache; si;so; bi;bo; in;cs; us;sy;id;wa;st
memory_swpd = list()
memory_free = list()
memory_buff = list()
memory_cache = list()
try:
with open(file_name) as f:
for line in f:
l = str(line).replace("\'", "").replace("\n", "").split(";")
memory_swpd.append(int(l[2]))
memory_free.append(int(l[3]))
memory_buff.append(int(l[4]))
memory_cache.append(int(l[5]))
except:
print "Could not open file: " + file_name
return memory_swpd, memory_free, memory_buff, memory_cache
def get_swap(file_name):
# vmstat
#procs - ----------memory - --------- ---swap - - -----io - --- -system - - ------cpu - ----
# r;b; swpd;free;buff;cache; si;so; bi;bo; in;cs; us;sy;id;wa;st
swap_si = list()
swap_so = list()
try:
with open(file_name) as f:
for line in f:
l = str(line).replace("\'", "").replace("\n", "").split(";")
swap_si.append(int(l[6]))
swap_so.append(int(l[7]))
except:
print "Could not open file: " + file_name
return swap_si, swap_so
def get_io(file_name):
# vmstat
#procs - ----------memory - --------- ---swap - - -----io - --- -system - - ------cpu - ----
# r;b; swpd;free;buff;cache; si;so; bi;bo; in;cs; us;sy;id;wa;st
io_bi = list()
io_bo = list()
try:
with open(file_name) as f:
for line in f:
l = str(line).replace("\'", "").replace("\n", "").split(";")
io_bi.append(int(l[8]))
io_bo.append(int(l[9]))
except:
print "Could not open file: " + file_name
return io_bi, io_bo
def get_cpu(file_name):
# vmstat
#procs - ----------memory - --------- ---swap - - -----io - --- -system - - ------cpu - ----
# r;b; swpd;free;buff;cache; si;so; bi;bo; in;cs; us;sy;id;wa;st
cpu_us = list()
cpu_sy = list()
cpu_id = list()
cpu_wa = list()
cpu_st = list()
try:
with open(file_name) as f:
for line in f:
l = str(line).replace("\'", "").replace("\n", "").split(";")
cpu_us.append(int(l[12]))
cpu_sy.append(int(l[13]))
cpu_id.append(int(l[14]))
cpu_wa.append(int(l[15]))
cpu_st.append(int(l[16]))
except:
print "Could not open file: " + file_name
return cpu_us, cpu_sy, cpu_id, cpu_wa, cpu_st
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
if __name__ == '__main__':
sns.set_context("paper", font_scale=1.5)
sns.set_style("whitegrid")
sns.set_style("ticks")
# sizes = ["1", "4", "64", "128"]
sizes = ["64"]
# gpuset = ["0", "0,1", "0,2", "1,2,3"]
gpuset = ["0"]
gpusetname= {"0": "0", "0,1": "0-1", "0,2": "0-2", "1,2,3": "1-2-3"}
# applications = ["bvlc_alexnet", "bvlc_googlenet", "bvlc_reference_caffenet"]
applications = ["bvlc_alexnet"]
fancyName = {"bvlc_alexnet": "AlexNet", "bvlc_googlenet": "GoogLeNet"}
folder = "/home/mamaral/power8/multi-gpus/minsky/minsky-results/varying-gpu-number/results"
placement = "solo"
for app in applications:
for size in sizes:
for gpus in gpuset:
ylim = 0
fignum = -1
# for gpus in ["0-2", "1-3"]:
# data_bandwidth = list()
# data_L3_misses = list()
array_length = 0
# for run in range(1, 3):
# solo/bvlc_alexnet/gpus-0/batch-size-1/run1/metrics
for algo in ["bf", "fcfs", "utilityaware-policy-neutral-postponed-False", "utilityaware-policy-neutral-postponed-True"]:
file_name = "../../../results/workloads-5/31-03-17--18-22-06-real/algo-" + \
algo + "/logs/vmstat-formatted.out"
cpu_us, cpu_sy, cpu_id, cpu_wa, cpu_st = get_cpu(file_name)
                    print(cpu_us)
if len(cpu_us) > 0:
fig, ax = plt.subplots(1, 1)
# print data
x = [i * 5 for i in range(len(cpu_sy))]
ax = sns.tsplot(time=x, data=cpu_us, condition="cpu_us", color="g", linestyle=":")
ax = sns.tsplot(time=x, data=cpu_sy, condition="cpu_sy", color="b", linestyle="--")
ax.set_xticks(x)
x_ticket = int(size)
if x_ticket == 1:
x_ticket = 4
ax.xaxis.set_major_locator(matplotlib.ticker.MultipleLocator(x_ticket))
ax.grid()
ax.set_xlabel('Time (s)', alpha=0.8)
ax.set_ylabel('CPU usage (%)', alpha=0.8)
ax.set_title(algo)
plt.legend()
# folde_plot_tmp = folde_plot + "/memory-bandwidth/"
# if not os.path.exists(folde_plot_tmp):
# os.makedirs(folde_plot_tmp)
# plt.savefig(folde_plot_tmp + '/memory-bandwidth-' + label + ".pdf", bbox_inches='tight')
plt.show()
|
python
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
class CouponReward(models.Model):
_name = 'coupon.reward'
_description = "Coupon Reward"
_rec_name = 'reward_description'
# VFE FIXME multi company
"""Rewards are not restricted to a company...
You could have a reward_product_id limited to a specific company A.
But still use this reward as reward of a program of company B...
"""
reward_description = fields.Char('Reward Description')
reward_type = fields.Selection([
('discount', 'Discount'),
('product', 'Free Product'),
], string='Reward Type', default='discount',
help="Discount - Reward will be provided as discount.\n" +
"Free Product - Free product will be provide as reward \n" +
"Free Shipping - Free shipping will be provided as reward (Need delivery module)")
# Product Reward
reward_product_id = fields.Many2one('product.product', string="Free Product",
help="Reward Product")
reward_product_quantity = fields.Integer(string="Quantity", default=1, help="Reward product quantity")
# Discount Reward
discount_type = fields.Selection([
('percentage', 'Percentage'),
('fixed_amount', 'Fixed Amount')], default="percentage",
help="Percentage - Entered percentage discount will be provided\n" +
"Amount - Entered fixed amount discount will be provided")
discount_percentage = fields.Float(string="Discount", default=10,
help='The discount in percentage, between 1 and 100')
discount_apply_on = fields.Selection([
('on_order', 'On Order'),
('cheapest_product', 'On Cheapest Product'),
('specific_products', 'On Specific Products')], default="on_order",
help="On Order - Discount on whole order\n" +
"Cheapest product - Discount on cheapest product of the order\n" +
"Specific products - Discount on selected specific products")
discount_specific_product_ids = fields.Many2many('product.product', string="Products",
help="Products that will be discounted if the discount is applied on specific products")
discount_max_amount = fields.Float(default=0,
help="Maximum amount of discount that can be provided")
discount_fixed_amount = fields.Float(string="Fixed Amount", help='The discount in fixed amount')
reward_product_uom_id = fields.Many2one(related='reward_product_id.product_tmpl_id.uom_id', string='Unit of Measure', readonly=True)
discount_line_product_id = fields.Many2one('product.product', string='Reward Line Product', copy=False,
help="Product used in the sales order to apply the discount. Each coupon program has its own reward product for reporting purpose")
@api.constrains('discount_percentage')
def _check_discount_percentage(self):
if self.filtered(lambda reward: reward.discount_type == 'percentage' and (reward.discount_percentage < 0 or reward.discount_percentage > 100)):
raise ValidationError(_('Discount percentage should be between 1-100'))
def name_get(self):
"""
Returns a complete description of the reward
"""
result = []
for reward in self:
reward_string = ""
if reward.reward_type == 'product':
reward_string = _("Free Product - %s", reward.reward_product_id.name)
elif reward.reward_type == 'discount':
if reward.discount_type == 'percentage':
reward_percentage = str(reward.discount_percentage)
if reward.discount_apply_on == 'on_order':
reward_string = _("%s%% discount on total amount", reward_percentage)
elif reward.discount_apply_on == 'specific_products':
if len(reward.discount_specific_product_ids) > 1:
reward_string = _("%s%% discount on products", reward_percentage)
else:
reward_string = _(
"%(percentage)s%% discount on %(product_name)s",
percentage=reward_percentage,
product_name=reward.discount_specific_product_ids.name
)
elif reward.discount_apply_on == 'cheapest_product':
reward_string = _("%s%% discount on cheapest product", reward_percentage)
elif reward.discount_type == 'fixed_amount':
program = self.env['coupon.program'].search([('reward_id', '=', reward.id)])
reward_string = _(
"%(amount)s %(currency)s discount on total amount",
amount=reward.discount_fixed_amount,
currency=program.currency_id.name
)
result.append((reward.id, reward_string))
return result
|
python
|
"""
=====================
Configuration Manager
=====================
"""
import os
from configparser import ConfigParser
from typing import Optional, Dict, TypeVar, Callable
from PySide2 import QtWidgets
WIDGET = TypeVar('QWidget')
########################################################################
class ConfigManager(ConfigParser):
"""File based configurations manager."""
# ----------------------------------------------------------------------
def __init__(self, filename='.bciframework'):
""""""
super().__init__()
if os.path.isabs(filename):
self.filename = filename
else:
user_dir = os.path.join(os.getenv('BCISTREAM_HOME'))
os.makedirs(user_dir, exist_ok=True)
self.filename = os.path.join(user_dir, filename)
self.load()
# ----------------------------------------------------------------------
def load(self) -> None:
"""Load the filename with configirations."""
        assert os.path.exists(
            self.filename), f'"{self.filename}" does not exist!'
self.read(self.filename)
# ----------------------------------------------------------------------
def set(self, section: str, option: str, value: Optional[str] = '', save: Optional[bool] = False) -> None:
"""Write and save configuration option."""
if not self.has_section(section):
self.add_section(section)
super().set(section, option, value)
if save:
self.save()
# ----------------------------------------------------------------------
    def get(self, section: str, option: str, default: Optional[str] = None, *args, **kwargs) -> Optional[str]:
"""Read a configuration value, if not exists then save the default."""
if self.has_option(section, option):
return super().get(section, option, *args, **kwargs)
else:
self.set(section, option, default)
return default
# ----------------------------------------------------------------------
def save(self) -> None:
"""Save configurations."""
with open(self.filename, 'w') as configfile:
self.write(configfile)
# ----------------------------------------------------------------------
def save_widgets(self, section: str, config: Dict[str, WIDGET]) -> None:
"""Automatically save values from widgets."""
for option in config:
widget = config[option]
# QComboBox
if isinstance(widget, QtWidgets.QComboBox):
self.set(section, option, widget.currentText())
# QCheckBox
elif isinstance(widget, QtWidgets.QCheckBox):
self.set(section, option, str(widget.isChecked()))
# QSpinBox
elif isinstance(widget, QtWidgets.QSpinBox):
self.set(section, option, str(widget.value()))
            else:
                pass  # unsupported widget type: nothing to persist
self.save()
# ----------------------------------------------------------------------
def load_widgets(self, section: str, config: Dict[str, WIDGET]) -> None:
"""Automatically load values from configurations and set them in widgets."""
for option in config:
widget = config[option]
if not (self.has_section(section) and self.has_option(section, option)):
return
# QComboBox
if isinstance(widget, QtWidgets.QComboBox):
widget.setCurrentText(self.get(section, option))
# QCheckBox
elif isinstance(widget, QtWidgets.QCheckBox):
widget.setChecked(self.getboolean(section, option))
# QSpinBox
elif isinstance(widget, QtWidgets.QSpinBox):
widget.setValue(int(self.get(section, option)))
            else:
                pass  # unsupported widget type: nothing to restore
# ----------------------------------------------------------------------
def connect_widgets(self, method: Callable, config: Dict[str, WIDGET]) -> None:
"""Automatically connect widgets with events."""
for option in config:
widget = config[option]
# QComboBox
if isinstance(widget, QtWidgets.QComboBox):
widget.activated.connect(method)
# QCheckBox
elif isinstance(widget, QtWidgets.QCheckBox):
widget.clicked.connect(method)
# QSpinBox
elif isinstance(widget, QtWidgets.QSpinBox):
widget.valueChanged.connect(method)
            else:
                pass  # unsupported widget type: nothing to connect
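# --- Hypothetical usage sketch (not part of the original module) ---
# Assumes BCISTREAM_HOME points at a writable directory and that the
# configuration file already exists (load() asserts its presence).
#
# config = ConfigManager('.bciframework')
# config.set('framework', 'theme', 'dark', save=True)
# theme = config.get('framework', 'theme', default='light')
#
# Widgets can be persisted and restored in bulk, e.g. with a QComboBox:
# widgets = {'last_montage': combo_montage}
# config.save_widgets('montages', widgets)
# config.load_widgets('montages', widgets)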
|
python
|
import json
import yaml
from flask import jsonify, Blueprint, redirect
from flask_restless import APIManager
from flask_restless.helpers import *
sqlalchemy_swagger_type = {
'INTEGER': ('integer', 'int32'),
'SMALLINT': ('integer', 'int32'),
'NUMERIC': ('number', 'double'),
'DECIMAL': ('number', 'double'),
'VARCHAR': ('string', ''),
'TEXT': ('string', ''),
'DATE': ('string', 'date'),
'BOOLEAN': ('boolean', ''),
'BLOB': ('string', 'binary'),
'BYTE': ('string', 'byte'),
'BINARY': ('string', 'binary'),
'VARBINARY': ('string', 'binary'),
'FLOAT': ('number', 'float'),
'REAL': ('number', 'float'),
'DATETIME': ('string', 'date-time'),
'BIGINT': ('integer', 'int64'),
'ENUM': ('string', ''),
'INTERVAL': ('string', 'date-time'),
}
class SwagAPIManager(object):
swagger = {
'openapi': '3.0.0',
'info': {
        'description': 'API definition: model fields marked with * are '
                       'required, but created_at & updated_at are set to the '
                       'current timestamp automatically. The id field should '
                       'also be ignored in POST. The del_flag and state '
                       'fields are enums with server defaults, so they can '
                       'be treated as optional. Any field of the form '
                       'Xxxxx_id is a foreign key referring to table '
                       'Xxxxx.',
'version': 'v1'
},
'servers': [{'url': 'http://localhost:5000/'}],
'tags': [],
'paths': {
'/upload': {
'post': {
'requestBody': {
'content': {
'multipart/form-data': {
'schema': {
'type': 'object',
'properties': {
'file': {
'type': 'array',
'items': {
'type': 'string',
'format': 'binary'
}
}
}
}
}
}
},
'responses': {
'200': {
'content': {
'application/json': {
'schema': {
'items': {
'upload_file': {
'type': 'string'
},
'download_file': {
'type': 'string'
}
},
'type': 'array'
}
}
}
}
},
"tags": [
"upload_file"
]
}
}
},
# global security setting enabled for all endpoints
# 'security': {
# 'bearerAuth': []
# },
'components': {
'securitySchemes': {
'bearerAuth': {
'type': 'http',
'scheme': 'bearer',
'bearerFormat': 'JWT'
}
},
'schemas': {}
}
}
def setup_swagger_blueprint(self, method, url, model_name, description):
self.swagger['paths'][url] = {}
self.swagger['paths'][url][method.lower()] = {
'description': description,
'requestBody': {
'content': {
'application/json': {
'schema': {
'$ref': '#/components/schemas/' +
model_name + '_req'
}
}
}
},
'responses': {
'200': {
'content': {
'application/json': {
'schema': {
'$ref':
'#/components/schemas/' +
model_name + '_res'
}
}
},
'description': 'Success',
}
},
'tags': [url.split('/')[1]]
}
def __init__(self, app=None, **kwargs):
self.app = None
self.manager = None
        # iterate all urls, if its docstring contains a swagger spec,
        # add it to /swagger
        url_rules = app.url_map.iter_rules() if app is not None else []
        for url_mapping in url_rules:
doc_string = app.view_functions[url_mapping.endpoint].__doc__
if doc_string:
# app.logger.debug('-----------------------')
# app.logger.debug(url_mapping)
# app.logger.debug(url_mapping.methods)
# app.logger.debug(url_mapping.endpoint)
# app.logger.debug(app.view_functions[url_mapping.endpoint])
index = doc_string.find('swagger-doc:')
if index == -1:
continue
swagger_doc = doc_string.replace('swagger-doc:', 'description:')
                swagger_dict = yaml.safe_load(swagger_doc)
url = str(url_mapping)
model_name = url.replace('/', '_')
self.swagger['components']['schemas'][
model_name + "_req"] = {
'required': swagger_dict['required'],
'properties': swagger_dict['req']
}
self.swagger['components']['schemas'][
model_name + "_res"] = {
'properties': swagger_dict['res']
}
if 'POST' in url_mapping.methods:
self.setup_swagger_blueprint('POST', url, model_name,
swagger_dict['description'])
if 'GET' in url_mapping.methods:
self.setup_swagger_blueprint('GET', url, model_name,
swagger_dict['description'])
if 'PUT' in url_mapping.methods:
pass
if app is not None:
self.init_app(app, **kwargs)
def to_json(self, **kwargs):
return json.dumps(self.swagger, **kwargs)
def to_yaml(self, **kwargs):
return yaml.dump(self.swagger, **kwargs)
def __str__(self):
return self.to_json(indent=4)
@property
def version(self):
if 'version' in self.swagger['info']:
return self.swagger['info']['version']
return None
@version.setter
def version(self, value):
self.swagger['info']['version'] = value
@property
def title(self):
if 'title' in self.swagger['info']:
return self.swagger['info']['title']
return None
@title.setter
def title(self, value):
self.swagger['info']['title'] = value
@property
def description(self):
if 'description' in self.swagger['info']:
return self.swagger['info']['description']
return None
@description.setter
def description(self, value):
self.swagger['info']['description'] = value
def add_path(self, model, **kwargs):
name = model.__tablename__
schema = model.__name__
path = kwargs.get('url_prefix', "") + '/' + name
id_path = "{0}/{{{1}Id}}".format(path, schema.lower())
self.swagger['paths'][path] = {}
tag = {
'name': schema,
'description': 'Table restful endpoint of ' + name
}
self.swagger['tags'].append(tag)
for method in [m.lower() for m in kwargs.get('methods', ['GET'])]:
if method == 'get':
self.swagger['paths'][path][method] = {
'tags': [schema],
'parameters': [{
'name': 'q',
'in': 'query',
'description': 'searchjson',
'required': False,
'schema': {'type': 'string'}
}],
'responses': {
200: {
'description': 'List ' + schema,
'content': {
'application/json': {
'schema': {
'title': name,
'type': 'array',
'items': {
'$ref': '#/components/schemas/' +
name
}
}
}
}
}
}
}
if model.__doc__:
self.swagger['paths'][path]['description'] = model.__doc__
if id_path not in self.swagger['paths']:
self.swagger['paths'][id_path] = {}
self.swagger['paths'][id_path][method] = {
'tags': [schema],
'parameters': [{
'name': schema.lower() + 'Id',
'in': 'path',
'description': 'ID of ' + schema,
'required': False,
'schema': {
'type': 'integer',
'format': 'int64'
}
}],
'responses': {
200: {
'description': 'Success',
'content': {
'application/json': {
'schema': {
'title': name,
'type': 'array',
'items': {
'$ref': '#/components/schemas/' +
name
}
}
}
}
}
}
}
if model.__doc__:
self.swagger['paths'][id_path][
'description'] = model.__doc__
elif method == 'delete':
if id_path not in self.swagger['paths']:
self.swagger['paths'][id_path] = {}
self.swagger['paths'][
"{0}/{{{1}Id}}".format(path, schema.lower())][method] = {
'tags': [schema],
'parameters': [{
'name': schema.lower() + 'Id',
'in': 'path',
'description': 'ID of ' + schema,
'required': True,
'schema': {
'type': 'integer',
'format': 'int64'
}
}],
'responses': {
200: {
'description': 'Success'
}
}
}
if model.__doc__:
self.swagger['paths'][id_path][
'description'] = model.__doc__
elif method == 'post':
self.swagger['paths'][path][method] = {
'tags': [schema],
'requestBody': {
'content': {
'application/json': {
'schema': {
'$ref': '#/components/schemas/' + name
}
}
}
},
'responses': {
200: {
'description': 'Success'
}
}
}
if model.__doc__:
self.swagger['paths'][path]['description'] = model.__doc__
elif method == 'put' or method == 'patch':
if model.__doc__:
self.swagger['paths'][path]['description'] = model.__doc__
if id_path not in self.swagger['paths']:
self.swagger['paths'][id_path] = {}
self.swagger['paths'][id_path][method] = {
'tags': [schema],
'parameters': [{
'name': schema.lower() + 'Id',
'in': 'path',
'description': 'ID of ' + schema,
'required': False,
'schema': {
'type': 'integer',
'format': 'int64'
}
}],
'requestBody': {
'content': {
'application/json': {
'schema': {
'$ref': '#/components/schemas/' + name
}
}
}
},
'responses': {
200: {
'description': 'Success'
}
}
}
else:
pass
def add_defn(self, model, **kwargs):
name = model.__tablename__
self.swagger['components']['schemas'][name] = {
'properties': {}
}
columns = [c for c in get_columns(model).keys()]
required = []
for column_name, column in get_columns(model).items():
if column_name in kwargs.get('exclude_columns', []):
continue
try:
column_type = str(column.type)
if '(' in column_type:
column_type = column_type.split('(')[0]
column_defn = sqlalchemy_swagger_type[column_type]
column_val = {'type': column_defn[0]}
if column_defn[1]:
column_val['format'] = column_defn[1]
t = column.type
if hasattr(t, 'native_enum') and t.native_enum:
column_val['enum'] = t.enums
if not column.nullable:
required.append(column_name)
if hasattr(column, 'comment'):
column_val['description'] = getattr(column, 'comment')
self.swagger['components']['schemas'][name]['properties'][
column_name] = column_val
except AttributeError:
schema = get_related_model(model, column_name)
associates = schema.__tablename__
column_defn = {
'type': 'array',
'items': {
'$ref': '#/components/schemas/' + associates
}
}
if associates + '_id' not in columns:
self.swagger['components']['schemas'][name]['properties'][
column_name] = column_defn
self.swagger['components']['schemas'][name]['required'] = required
def init_app(self, app, **kwargs):
self.app = app
self.manager = APIManager(self.app, **kwargs)
if app and app.debug:
host = app.config['HOST']
if host == '0.0.0.0':
host = '127.0.0.1'
self.swagger['servers'][0]['url'] = 'http://{}:{}/'.format(
host, app.config['PORT'])
if app.config['ESHOST']:
self.swagger['servers'][0]['url'] = 'http://{}:{}/'.format(
app.config['ESHOST'], app.config['PORT'])
# self.swagger['servers'].append({
# 'url': 'http://127.0.0.1:5000/'
# })
swaggerbp = Blueprint('swagger', __name__,
static_folder='swagger_ui')
@swaggerbp.route('/swagger')
def swagger_ui():
return redirect('/swagger_ui/index.html')
@swaggerbp.route('/swagger.json')
def swagger_json():
# I can only get this from a request context
return jsonify(self.swagger)
app.register_blueprint(swaggerbp)
def create_api(self, model, **kwargs):
self.manager.create_api(model, **kwargs)
self.add_defn(model, **kwargs)
self.add_path(model, **kwargs)
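# --- Hypothetical usage sketch (not part of the original module) ---
# Assumes a Flask app configured with HOST/PORT/ESHOST, a SQLAlchemy session,
# and a declarative model `User`; flask_restless's APIManager handles the
# actual endpoint creation.
#
# manager = SwagAPIManager(app, session=db.session)
# manager.create_api(User, methods=['GET', 'POST', 'PUT', 'DELETE'],
#                    url_prefix='/api')
# # The generated OpenAPI document is served at /swagger.json and the UI
# # redirect at /swagger.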
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 12:35:04 2018
@author: Matthias N.
"""
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
import asyncio
import concurrent.futures
import json, codecs
async def speedcrawl(pages):
data = []
for p in range(1,pages+1):
data.append ({'page': p})
with concurrent.futures.ThreadPoolExecutor(max_workers=min(pages,100)) as executor:
loop = asyncio.get_event_loop()
futures = [
loop.run_in_executor(
executor,
requests.get,
'https://www.dndbeyond.com/monsters',
d
)
for d in data
]
        return await asyncio.gather(*futures)
ua = UserAgent()
header = {'User-Agent':str(ua.chrome)}
url = 'https://www.dndbeyond.com/monsters'
htmlContent = requests.get(url, headers=header)
soup = BeautifulSoup(htmlContent.text, "html.parser")
uldiv = soup.find_all("a", class_="b-pagination-item")
pages = int(uldiv[-1].text)
print('{} pages found.'.format(pages))
loop = asyncio.get_event_loop()
r = loop.run_until_complete(speedcrawl(pages))
monster_type_url_dict = {'aberration': 'https://i.imgur.com/qI39ipJ.jpg',
'beast': 'https://i.imgur.com/GrjN1HL.jpg',
'celestial': 'https://i.imgur.com/EHaX5Pz.jpg',
'construct': 'https://i.imgur.com/me0a3la.jpg',
'dragon': 'https://i.imgur.com/92iC5ga.jpg',
'elemental': 'https://i.imgur.com/egeiuFf.jpg',
'fey': 'https://i.imgur.com/hhSXx7Y.jpg',
'fiend': 'https://i.imgur.com/OWTsHDl.jpg',
'giant': 'https://i.imgur.com/lh3eZGN.jpg',
'humanoid': 'https://i.imgur.com/ZSH9ikY.jpg',
'monstrosity': 'https://i.imgur.com/5iY8KhJ.jpg',
'ooze': 'https://i.imgur.com/WDHbliU.jpg',
'plant': 'https://i.imgur.com/FqEpGiQ.jpg',
'undead': 'https://i.imgur.com/MwdXPAX.jpg'}
monsters = {}
for p in r:
soup = BeautifulSoup(p.text, "html.parser")
infos = soup.find_all('div', class_='info')
#css_links = [link["href"] for link in soup.findAll("link") if "stylesheet" in link.get("rel", [])]
for info in infos:
divs = info.find_all('div')
for d in divs:
c = d.get('class')
if 'monster-icon' in c:
a = d.find('a')
                if a is None:
creature_type = d.find('div').get('class')[1]
img_url = monster_type_url_dict[creature_type]
else:
img_url = a.get('href')
elif 'monster-challenge' in c:
cr = d.find('span').text
elif 'monster-name' in c:
name = d.find('a').text
source = d.find('span', class_="source").text
elif 'monster-type' in c:
monster_type = d.find('span').text
elif 'monster-size' in c:
size = d.find('span').text
elif 'monster-alignment' in c:
alignment = d.find('span').text
#monsters[name] = {'name': name, 'source': source, 'type': monster_type, 'size': size, 'alignment': alignment, 'CR': cr, 'img_url': img_url}
monsters[name] = {'name': name,'size': size,'img_url': img_url}
with open('monsters.json', 'wb') as f:
json.dump(monsters, codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4, sort_keys=True)
|
python
|
from .src import dwarfgen
def process(*args, **kwargs):
return dwarfgen.process(*args, **kwargs)
|
python
|
import tesseract
api = tesseract.TessBaseAPI()
api.SetOutputName("outputName")
api.Init("E:\\Tesseract-OCR\\test-slim","eng",tesseract.OEM_DEFAULT)
api.SetPageSegMode(tesseract.PSM_AUTO)
mImgFile = "eurotext.jpg"
pixImage=tesseract.pixRead(mImgFile)
api.SetImage(pixImage)
outText=api.GetUTF8Text()
print("OCR output:\n%s"%outText);
api.End()
|
python
|
import math
import torch
from torch import Tensor
from .optimizer import Optimizer
from typing import List, Optional
class RAdam(Optimizer):
r"""Implements RAdam algorithm.
.. math::
\begin{aligned}
&\rule{110mm}{0.4pt} \\
&\textbf{input} : \gamma \text{ (lr)}, \: \beta_1, \beta_2
\text{ (betas)}, \: \theta_0 \text{ (params)}, \:f(\theta) \text{ (objective)}, \:
\lambda \text{ (weightdecay)}, \\
&\hspace{13mm} \epsilon \text{ (epsilon)} \\
&\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)},
v_0 \leftarrow 0 \text{ ( second moment)}, \\
&\hspace{18mm} \rho_{\infty} \leftarrow 2/(1-\beta_2) -1 \\[-1.ex]
&\rule{110mm}{0.4pt} \\
&\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
&\hspace{6mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
&\hspace{5mm} \textbf{if} \: \lambda \neq 0 \\
&\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\
&\hspace{6mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\
&\hspace{6mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\
&\hspace{6mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\
&\hspace{6mm}\rho_t \leftarrow \rho_{\infty} -
                2 t \beta^t_2 /\big(1-\beta_2^t \big) \\[0.1ex]
&\hspace{6mm}\textbf{if} \: \rho_t > 5 \\
&\hspace{12mm} l_t \leftarrow \sqrt{ (1-\beta^t_2) / \big( v_t +\epsilon \big) } \\
&\hspace{12mm} r_t \leftarrow
\sqrt{\frac{(\rho_t-4)(\rho_t-2)\rho_{\infty}}{(\rho_{\infty}-4)(\rho_{\infty}-2) \rho_t}} \\
&\hspace{12mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t} r_t l_t \\
&\hspace{6mm}\textbf{else} \\
&\hspace{12mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t} \\
&\rule{110mm}{0.4pt} \\[-1.ex]
&\bf{return} \: \theta_t \\[-1.ex]
&\rule{110mm}{0.4pt} \\[-1.ex]
\end{aligned}
For further details regarding the algorithm we refer to `On the variance of the adaptive learning rate and beyond`_.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
foreach (bool, optional): whether foreach implementation of optimizer
is used (default: None)
.. _On the variance of the adaptive learning rate and beyond:
https://arxiv.org/abs/1908.03265
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, foreach: Optional[bool] = None):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
foreach=foreach)
super(RAdam, self).__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
group.setdefault('foreach', None)
state_values = list(self.state.values())
step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['step'])
if not step_is_tensor:
for s in state_values:
s['step'] = torch.tensor(float(s['step']))
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
state_steps = []
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is not None:
params_with_grad.append(p)
if p.grad.is_sparse:
raise RuntimeError('RAdam does not support sparse gradients')
grads.append(p.grad)
state = self.state[p]
# Lazy state initialization
if len(state) == 0:
state['step'] = torch.tensor(0.)
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avgs.append(state['exp_avg'])
exp_avg_sqs.append(state['exp_avg_sq'])
state_steps.append(state['step'])
radam(params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
state_steps,
beta1=beta1,
beta2=beta2,
lr=group['lr'],
weight_decay=group['weight_decay'],
eps=group['eps'],
foreach=group['foreach'])
return loss
def radam(params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
state_steps: List[Tensor],
# kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
# setting this as kwarg for now as functional API is compiled by torch/distributed/optim
          foreach: Optional[bool] = None,
*,
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float):
r"""Functional API that performs RAdam algorithm computation.
See :class:`~torch.optim.RAdam` for details.
"""
if not all([isinstance(t, torch.Tensor) for t in state_steps]):
raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors")
if foreach is None:
# Placeholder for more complex foreach logic to be added when value is not set
foreach = False
if foreach and torch.jit.is_scripting():
raise RuntimeError('torch.jit.script not supported with foreach optimizers')
if foreach and not torch.jit.is_scripting():
func = _multi_tensor_radam
else:
func = _single_tensor_radam
func(params,
grads,
exp_avgs,
exp_avg_sqs,
state_steps,
beta1=beta1,
beta2=beta2,
lr=lr,
weight_decay=weight_decay,
eps=eps)
def _single_tensor_radam(params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
state_steps: List[Tensor],
*,
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float):
for i, param in enumerate(params):
grad = grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
step_t = state_steps[i]
# update step
step_t += 1
step = step_t.item()
bias_correction1 = 1 - beta1 ** step
bias_correction2 = 1 - beta2 ** step
if weight_decay != 0:
grad = grad.add(param, alpha=weight_decay)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
# correcting bias for the first moving moment
bias_corrected_exp_avg = exp_avg / bias_correction1
# maximum length of the approximated SMA
rho_inf = 2 / (1 - beta2) - 1
# compute the length of the approximated SMA
rho_t = rho_inf - 2 * step * (beta2 ** step) / bias_correction2
if rho_t > 5.:
# Compute the variance rectification term and update parameters accordingly
rect = math.sqrt((rho_t - 4) * (rho_t - 2) * rho_inf / ((rho_inf - 4) * (rho_inf - 2) * rho_t))
adaptive_lr = math.sqrt(bias_correction2) / exp_avg_sq.sqrt().add_(eps)
param.add_(bias_corrected_exp_avg * lr * adaptive_lr * rect, alpha=-1.0)
else:
param.add_(bias_corrected_exp_avg * lr, alpha=-1.0)
def _multi_tensor_radam(params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
state_steps: List[Tensor],
*,
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float):
if len(params) == 0:
return
# Update steps
torch._foreach_add_(state_steps, 1)
# maximum length of the approximated SMA
rho_inf = 2 / (1 - beta2) - 1
# compute the length of the approximated SMA
rho_t_list = [rho_inf - 2 * step.item() * (beta2 ** step.item()) / (1 - beta2 ** step.item()) for step in state_steps]
bias_correction1 = [1 - beta1 ** step.item() for step in state_steps]
bias_correction2 = [1 - beta2 ** step.item() for step in state_steps]
if weight_decay != 0:
torch._foreach_add_(grads, params, alpha=weight_decay)
# Decay the first and second moment running average coefficient
torch._foreach_mul_(exp_avgs, beta1)
torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)
torch._foreach_mul_(exp_avg_sqs, beta2)
torch._foreach_addcmul_(exp_avg_sqs, grads, grads, 1 - beta2)
rect = [math.sqrt((rho_t - 4) * (rho_t - 2) * rho_inf / ((rho_inf - 4) * (rho_inf - 2) * rho_t))
if rho_t > 5 else 0 for rho_t in rho_t_list]
    unrectified = [0 if r > 0 else 1. for r in rect]
exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs)
bias_correction_sqrt = [math.sqrt(bc) for bc in bias_correction2]
denom = torch._foreach_div(exp_avg_sq_sqrt, bias_correction_sqrt)
step_size = [(lr * rect / bc) * -1 for rect, bc in zip(rect, bias_correction1)]
torch._foreach_addcdiv_(params, exp_avgs, denom, step_size)
denom = [torch.ones_like(exp_av, memory_format=torch.preserve_format) for exp_av in exp_avgs]
step_size = [(lr * rect / bc) * -1 for rect, bc in zip(unrectified, bias_correction1)]
torch._foreach_addcdiv_(params, exp_avgs, denom, step_size)
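# --- Hypothetical usage sketch (not part of the original module) ---
# Assumes a model, data loader and loss function are already defined; the
# hyperparameters shown are simply the documented defaults.
#
# optimizer = RAdam(model.parameters(), lr=1e-3, betas=(0.9, 0.999))
# for inputs, targets in loader:
#     optimizer.zero_grad()
#     loss = criterion(model(inputs), targets)
#     loss.backward()
#     optimizer.step()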
|
python
|
"""
Author: Darren
Date: 30/05/2021
Solving https://adventofcode.com/2016/day/6
Part 1:
Need to get most frequent char in each column, given many rows of data.
Transpose columns to rows.
Find the Counter d:v that has the max value, keyed using v from the k,v tuple.
Part 2:
As part 1, but using min instead of max.
"""
import logging
import os
import time
from collections import Counter
SCRIPT_DIR = os.path.dirname(__file__)
INPUT_FILE = "input/input.txt"
SAMPLE_INPUT_FILE = "input/sample_input.txt"
def main():
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s:%(levelname)s:\t%(message)s")
# input_file = os.path.join(SCRIPT_DIR, SAMPLE_INPUT_FILE)
input_file = os.path.join(SCRIPT_DIR, INPUT_FILE)
with open(input_file, mode="rt") as f:
data = f.read().splitlines()
# First, we need to transpose columns to rows
transposed = list(zip(*data))
most_common_chars = [] # Part 1
least_common_chars = [] # Part 2
for line in transposed:
char_counts = Counter(line)
# Get the least / most frequent char
most_common_chars.append(max(char_counts.items(), key=lambda x: x[1])[0])
least_common_chars.append(min(char_counts.items(), key=lambda x: x[1])[0])
# Convert to str representation
least_common = "".join(str(char) for char in least_common_chars)
most_common = "".join(str(char) for char in most_common_chars)
logging.info(f"Part 1 message: {most_common}")
logging.info(f"Part 2 message: {least_common}")
if __name__ == "__main__":
t1 = time.perf_counter()
main()
t2 = time.perf_counter()
print(f"Execution time: {t2 - t1:0.4f} seconds")
|
python
|
from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo, ObjectId
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import ValidationError, URL
app = Flask(__name__)
app.config['SECRET_KEY'] = 'test'
app.config["MONGO_URI"] = "mongodb://149.129.79.176:27017/Video"
db = PyMongo(app)
@app.route('/', methods=['GET', 'POST'])
def index():
ref = None
form = RefForm()
finished_live = db.db.Video.find()
queues = db.db.Queues.find()
if form.validate_on_submit():
ref = form.ref.data
form.ref.data = ''
        db.db.Queues.insert_one({'Link': ref})
return redirect('/')
return render_template('index.html', form=form, ref=ref, queues=queues, finished_live=finished_live)
@app.route('/delete/<_id>')
def delete(_id):
db.db.Queues.delete_one({"_id": ObjectId(_id)})
return redirect('/')
class RefForm(FlaskForm):
    ref = StringField('YouTube link', validators=[URL()])
    submit = SubmitField('Submit')
def validate_ref(self, field):
data = field.data
if 'www.youtube.com/watch?v=' not in data:
raise ValidationError("Error: You need to input a Youtube LIVE link")
if 'https://' not in data:
raise ValidationError("Error: You need to input a link with 'https://'")
if __name__ == '__main__':
app.run(debug=True)
|
python
|
"""added date_read_date
Revision ID: a544d948cd1b
Revises: 5c5bf645c104
Create Date: 2021-11-28 20:52:31.230691
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a544d948cd1b'
down_revision = '5c5bf645c104'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('article', schema=None) as batch_op:
batch_op.add_column(sa.Column('date_read_date', sa.Date(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('article', schema=None) as batch_op:
batch_op.drop_column('date_read_date')
# ### end Alembic commands ###
|
python
|
from django.contrib.auth import get_user_model
from django.test import TestCase
from src.activities.models import Activity
from src.questions.models import Question, Answer
class QuestionVoteTest(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
username='test_user',
email='[email protected]',
password='top_secret'
)
self.other_user = get_user_model().objects.create_user(
username='other_test_user',
email='[email protected]',
password='top_secret'
)
self.question_one = Question.objects.create(
user=self.user, title='This is a sample question',
description='This is a sample question description',
tags='test1,test2')
self.question_two = Question.objects.create(
user=self.user,
title='A Short Title',
description='''This is a really good content, just if somebody
published it, that would be awesome, but no, nobody wants to
publish it, because they know this is just a test, and you
know than nobody wants to publish a test, just a test;
everybody always wants the real deal.''',
favorites=0,
has_accepted_answer=True
)
self.answer = Answer.objects.create(
user=self.user,
question=self.question_two,
description='A reaaaaally loooong content',
votes=0,
is_accepted=True
)
def test_can_up_vote_question(self):
activity = Activity.objects.create(user=self.user, activity_type='U',
question=self.question_one.id)
activity = Activity.objects.create(user=self.user, activity_type='U',
question=self.question_one.id)
self.assertTrue(isinstance(activity, Activity))
self.assertEqual(self.question_one.calculate_votes(), 2)
def test_can_down_vote_question(self):
votes = self.question_one.calculate_votes()
activity = Activity.objects.create(user=self.user, activity_type='D',
question=self.question_one.id)
self.assertTrue(isinstance(activity, Activity))
self.assertEqual(self.question_one.calculate_votes(), votes - 1)
def test_question_str_return_value(self):
self.assertTrue(isinstance(self.question_one, Question))
self.assertEqual(str(self.question_one), 'This is a sample question')
def test_question_non_answered_question(self):
self.assertEqual(self.question_one, Question.get_unanswered()[0])
def test_question_answered_question(self):
self.assertEqual(self.question_two, Question.get_answered()[0])
def test_question_answers_returns(self):
self.assertEqual(self.answer, self.question_two.get_answers()[0])
def test_question_answer_count(self):
self.assertEqual(self.question_two.get_answers_count(), 1)
def test_question_accepted_answer(self):
self.assertEqual(self.question_two.get_accepted_answer(), self.answer)
def test_question_markdown_return(self):
self.assertEqual(self.question_one.get_description_as_markdown(),
'<p>This is a sample question description</p>')
self.assertEqual(self.question_two.get_description_as_markdown(),
'''<p>This is a really good content, just if somebody
published it, that would be awesome, but no, nobody wants to
publish it, because they know this is just a test, and you
know than nobody wants to publish a test, just a test;
everybody always wants the real deal.</p>''')
def test_question_return_summary(self):
self.assertEqual(len(self.question_two.get_description_preview()), 258)
self.assertEqual(self.question_two.get_description_preview(),
'''This is a really good content, just if somebody
published it, that would be awesome, but no, nobody wants to
publish it, because they know this is just a test, and you
know than nobody wants to publish a test, just a te...''')
self.assertEqual(self.question_one.get_description_preview(),
'This is a sample question description')
def test_question_markdown_description_preview(self):
self.assertTrue(
self.question_two.get_description_preview_as_markdown(),
'''<p>This is a really good content, just if somebody
published it, that would be awesome, but no, nobody wants to
publish it, because they know this is just a test, and you
know than nobody wants to publish a test, just a te...</p>''')
def test_favorite_question(self):
activity = Activity.objects.create(
user=self.user,
activity_type='F',
question=self.question_one.id
)
self.assertTrue(isinstance(activity, Activity))
self.assertEqual(self.question_one.calculate_favorites(), 1)
def test_question_favoriters(self):
activity = Activity.objects.create(
user=self.user,
activity_type='F',
question=self.question_one.id
)
self.assertTrue(isinstance(activity, Activity))
self.assertEqual(self.user, self.question_one.get_favoriters()[0].user)
    def test_question_voters_return_values(self):
activity = Activity.objects.create(user=self.user, activity_type='U',
question=self.question_one.id)
activity = Activity.objects.create(user=self.other_user,
activity_type='D',
question=self.question_one.id)
self.assertTrue(isinstance(activity, Activity))
self.assertEqual(self.question_one.get_up_voters()[0].user, self.user)
self.assertEqual(
self.question_one.get_down_voters()[0].user, self.other_user)
# Answer model tests
def test_answer_return_value(self):
self.assertEqual(str(self.answer), 'A reaaaaally loooong content')
def test_answer_accept_method(self):
answer_one = Answer.objects.create(
user=self.user,
question=self.question_one,
description='A reaaaaally loooonger content'
)
answer_two = Answer.objects.create(
user=self.user,
question=self.question_one,
description='A reaaaaally even loooonger content'
)
answer_three = Answer.objects.create(
user=self.user,
question=self.question_one,
description='Even a reaaaaally loooonger content'
)
self.assertFalse(answer_one.is_accepted)
self.assertFalse(answer_two.is_accepted)
self.assertFalse(answer_three.is_accepted)
self.assertFalse(self.question_one.has_accepted_answer)
answer_one.accept()
self.assertTrue(answer_one.is_accepted)
self.assertFalse(answer_two.is_accepted)
self.assertFalse(answer_three.is_accepted)
self.assertTrue(self.question_one.has_accepted_answer)
self.assertEqual(self.question_one.get_accepted_answer(), answer_one)
def test_answers_vote_calculation(self):
activity = Activity.objects.create(user=self.user, activity_type='U',
answer=self.answer.id)
activity = Activity.objects.create(user=self.other_user,
activity_type='U',
answer=self.answer.id)
self.assertTrue(isinstance(activity, Activity))
self.assertEqual(self.answer.calculate_votes(), 2)
def test_answer_voters_return_values(self):
activity = Activity.objects.create(user=self.user, activity_type='U',
answer=self.answer.id)
activity = Activity.objects.create(user=self.other_user,
activity_type='D',
answer=self.answer.id)
self.assertTrue(isinstance(activity, Activity))
self.assertEqual(self.answer.get_up_voters()[0].user, self.user)
self.assertEqual(
self.answer.get_down_voters()[0].user, self.other_user)
def test_answer_description_markdown(self):
self.assertEqual(self.answer.get_description_as_markdown(),
'<p>A reaaaaally loooong content</p>')
|
python
|
#!/usr/bin/env python
import astropy.units as u
from typing import Union
from dataclasses import dataclass, field, is_dataclass
from cached_property import cached_property
import copy
from typing import ClassVar
from schema import Or
from tollan.utils.dataclass_schema import add_schema
from tollan.utils.log import get_logger, logit, log_to_file
from tollan.utils.fmt import pformat_yaml
from tollan.utils import rupdate
from ..utils.common_schema import PhysicalTypeSchema
from ..utils.config_registry import ConfigRegistry
from ..utils.config_schema import add_config_schema
from ..utils.runtime_context import RuntimeContext, RuntimeContextError
from ..utils import config_from_cli_args
__all__ = ['SimulatorRuntime', 'SimulatorRuntimeError']
@add_schema
@dataclass
class ObsParamsConfig(object):
"""The config class for ``simu.obs_params``."""
t_exp: Union[u.Quantity, None] = field(
default=None,
metadata={
'description': 'The duration of the observation to simulate.',
'schema': Or(PhysicalTypeSchema('time'), None),
}
)
f_smp_mapping: u.Quantity = field(
default=12. << u.Hz,
metadata={
'description': 'The sampling frequency to '
'evaluate mapping models.',
'schema': PhysicalTypeSchema("frequency"),
}
)
f_smp_probing: u.Quantity = field(
default=120. << u.Hz,
metadata={
'description': 'The sampling frequency '
'to evaluate detector signals.',
'schema': PhysicalTypeSchema("frequency"),
}
)
class Meta:
schema = {
'ignore_extra_keys': False,
'description': 'The parameters related to observation.'
}
@add_schema
@dataclass
class PerfParamsConfig(object):
"""The config class for ``simu.pef_params``."""
chunk_len: u.Quantity = field(
default=10 << u.s,
metadata={
'description': 'Chunk length to split the simulation to '
'reduce memory footprint.',
'schema': PhysicalTypeSchema("time"),
}
)
catalog_model_render_pixel_size: u.Quantity = field(
default=0.5 << u.arcsec,
metadata={
'description': 'Pixel size to render catalog source model.',
'schema': PhysicalTypeSchema("angle"),
}
)
mapping_eval_interp_len: Union[u.Quantity, None] = field(
default=None,
metadata={
'description': 'Interp length to speed-up mapping evaluation.',
'schema': PhysicalTypeSchema("time"),
}
)
mapping_erfa_interp_len: u.Quantity = field(
default=300 << u.s,
metadata={
'description': 'Interp length to speed-up AltAZ to '
'ICRS coordinate transformation.',
'schema': PhysicalTypeSchema("time"),
}
)
aplm_eval_interp_alt_step: u.Quantity = field(
default=60 << u.arcmin,
metadata={
'description': (
'Interp altitude step to speed-up '
'array power loading model eval.'),
'schema': PhysicalTypeSchema("angle"),
}
)
pre_eval_sky_bbox_padding_size: u.Quantity = field(
default=3. << u.arcmin,
metadata={
'description': (
'Padding size to add to the sky bbox for '
'pre-eval calculations.'),
'schema': PhysicalTypeSchema("angle"),
}
)
pre_eval_t_grid_size: int = field(
default=100,
metadata={
'description': 'Size of time grid used for pre-eval calculations.',
            'schema': int,
}
)
anim_frame_rate: u.Quantity = field(
default=300 << u.s,
metadata={
'description': 'Frame rate for plotting animation.',
'schema': PhysicalTypeSchema("frequency"),
}
)
class Meta:
schema = {
'ignore_extra_keys': False,
'description': 'The parameters related to performance tuning.'
}
mapping_registry = ConfigRegistry.create(
name='MappingConfig',
dispatcher_key='type',
dispatcher_description='The mapping type.'
)
"""The registry for ``simu.mapping``."""
instrument_registry = ConfigRegistry.create(
name='InstrumentConfig',
dispatcher_key='name',
dispatcher_description='The instrument name.'
)
"""The registry for ``simu.instrument``."""
sources_registry = ConfigRegistry.create(
name='SourcesConfig',
dispatcher_key='type',
dispatcher_description='The simulator source type.'
)
"""The registry for ``simu.sources``."""
plots_registry = ConfigRegistry.create(
name='PlotsConfig',
dispatcher_key='type',
dispatcher_description='The plot type.'
)
"""The registry for ``simu.plots``."""
exports_registry = ConfigRegistry.create(
name='ExportsConfig',
dispatcher_key='type',
dispatcher_description='The export type.'
)
"""The registry for ``simu.exports``."""
# Load submodules here to populate the registries
from . import mapping as _ # noqa: F401, E402, F811
from . import sources as _ # noqa: F401, E402, F811
from . import plots as _ # noqa: F401, E402, F811
from . import exports as _ # noqa: F401, E402, F811
from . import toltec as _ # noqa: F401, E402, F811
# from . import lmt as _ # noqa: F401, E402, F811
@add_config_schema
@add_schema
@dataclass
class SimuConfig(object):
"""The config for `tolteca.simu`."""
jobkey: str = field(
metadata={
            'description': 'The unique identifier of the job.'
}
)
instrument: dict = field(
metadata={
'description': 'The dict contains the instrument setting.',
'schema': instrument_registry.schema,
'pformat_schema_type': f'<{instrument_registry.name}>',
})
mapping: dict = field(
metadata={
'description': "The simulator mapping trajectory config.",
'schema': mapping_registry.schema,
'pformat_schema_type': f'<{mapping_registry.name}>'
}
)
obs_params: ObsParamsConfig = field(
metadata={
'description': 'The dict contains the observation parameters.',
})
sources: list = field(
default_factory=list,
metadata={
'description': 'The list contains input sources for simulation.',
'schema': list(sources_registry.item_schemas),
'pformat_schema_type': f"[<{sources_registry.name}>, ...]"
})
perf_params: PerfParamsConfig = field(
default_factory=PerfParamsConfig,
metadata={
'description': 'The dict contains the performance related'
' parameters.',
})
plots: list = field(
default_factory=list,
metadata={
'description': 'The list contains config for plotting.',
'schema': list(plots_registry.item_schemas),
'pformat_schema_type': f"[<{plots_registry.name}>, ...]"
})
exports: list = field(
default_factory=list,
metadata={
'description': 'The list contains config for exporting.',
'schema': list(exports_registry.item_schemas),
'pformat_schema_type': f"[<{exports_registry.name}>, ...]"
})
plot_only: bool = field(
default=False,
metadata={
'description': 'Make plots of those defined in `plots`.'
})
export_only: bool = field(
default=False,
metadata={
'description': 'Export the simu config as defined in `exports`.'
})
class Meta:
schema = {
'ignore_extra_keys': True,
'description': 'The config dict for the simulator.'
}
config_key = 'simu'
logger: ClassVar = get_logger()
def get_or_create_output_dir(self):
logger = get_logger()
rootpath = self.runtime_info.config_info.runtime_context_dir
output_dir = rootpath.joinpath(self.jobkey)
if not output_dir.exists():
with logit(logger.debug, 'create output dir'):
output_dir.mkdir(parents=True, exist_ok=True)
return output_dir
def get_log_file(self):
return self.runtime_info.logdir.joinpath('simu.log')
@cached_property
def mapping_model(self):
return self.mapping(self)
@cached_property
def source_models(self):
return [s(self) for s in self.sources]
@cached_property
def simulator(self):
return self.instrument(self)
@cached_property
def t_simu(self):
"""The length of the simulation.
        It equals ``obs_params.t_exp`` when set, otherwise ``t_pattern``
        of the mapping pattern is used.
"""
t_simu = self.obs_params.t_exp
if t_simu is None:
t_pattern = self.mapping_model.t_pattern
self.logger.debug(f"mapping pattern time: {t_pattern}")
t_simu = t_pattern
self.logger.info(f"use t_simu={t_simu} from mapping pattern")
else:
self.logger.info(f"use t_simu={t_simu} from obs_params")
return t_simu
class SimulatorRuntimeError(RuntimeContextError):
"""Raise when errors occur in `SimulatorRuntime`."""
pass
class SimulatorRuntime(RuntimeContext):
"""A class that manages the runtime context of the simulator.
This class drives the execution of the simulator.
"""
config_cls = SimuConfig
logger = get_logger()
@cached_property
def simu_config(self):
"""Validate and return the simulator config object..
The validated config is cached. :meth:`SimulatorRuntime.update`
should be used to update the underlying config and re-validate.
"""
return self.config_cls.from_config(
self.config, rootpath=self.rootpath,
runtime_info=self.runtime_info)
def update(self, config):
self.config_backend.update_override_config(config)
if 'simu_config' in self.__dict__:
del self.__dict__['simu_config']
def cli_run(self, args=None):
"""Run the simulator with CLI as save the result.
"""
if args is not None:
_cli_cfg = config_from_cli_args(args)
# note the cli_cfg is under the namespace simu
cli_cfg = {self.config_cls.config_key: _cli_cfg}
if _cli_cfg:
self.logger.info(
f"config specified with commandline arguments:\n"
f"{pformat_yaml(cli_cfg)}")
self.update(cli_cfg)
cfg = self.simu_config.to_config()
# here we recursively check the cli_cfg and report
# if any of the key is ignored by the schema and
# throw an error
def _check_ignored(key_prefix, d, c):
if isinstance(d, dict) and isinstance(c, dict):
ignored = set(d.keys()) - set(c.keys())
ignored = [f'{key_prefix}.{k}' for k in ignored]
if len(ignored) > 0:
raise SimulatorRuntimeError(
f"Invalid config items specified in "
f"the commandline: {ignored}")
for k in set(d.keys()).intersection(c.keys()):
_check_ignored(f'{key_prefix}{k}', d[k], c[k])
_check_ignored('', cli_cfg, cfg)
return self.run()
def run(self):
cfg = self.simu_config
self.logger.debug(
f"run simu with config dict: "
f"{pformat_yaml(cfg.to_config())}")
if cfg.plot_only:
return self._run_plot()
if cfg.export_only:
return self._run_export()
return self._run_simu()
def _run_plot(self):
cfg = self.simu_config
self.logger.info(
f"make simu plots:\n"
f"{pformat_yaml(cfg.to_dict()['plots'])}")
results = []
for plotter in cfg.plots:
result = plotter(cfg)
results.append(result)
if plotter.save:
# TODO handle save here
pass
return results
def _run_export(self):
cfg = self.simu_config
self.logger.info(
f"export simu:\n"
f"{pformat_yaml(cfg.to_dict()['exports'])}")
results = []
for exporter in cfg.exports:
result = exporter(cfg)
results.append(result)
return results
def _run_simu(self):
"""Run the simulator."""
cfg = self.simu_config
simu = cfg.simulator
t_simu = cfg.t_simu
mapping_model = cfg.mapping_model
source_models = cfg.source_models
output_dir = cfg.get_or_create_output_dir()
self.logger.debug(
f'run {simu} with:{{}}\n'.format(
pformat_yaml({
'obs_params': cfg.obs_params.to_dict(),
'perf_params': cfg.perf_params.to_dict(),
})))
self.logger.debug(
'mapping:\n{}\n\nsources:\n{}\n'.format(
mapping_model,
'\n'.join(str(s) for s in source_models)
)
)
self.logger.debug(
f'simu output dir: {output_dir}\nsimu length={t_simu}'
)
# run the simulator
log_file = cfg.get_log_file()
self.logger.info(f'setup logging to file {log_file}')
with log_to_file(
filepath=log_file,
level='DEBUG',
disable_other_handlers=False
):
output_ctx = simu.output_context(dirpath=output_dir)
with output_ctx.open():
self.logger.info(
f"write output to {output_ctx.rootpath}")
# save the config file as YAML
config_filepath = output_ctx.make_output_filename(
'tolteca', '.yaml')
with open(config_filepath, 'w') as fo:
config = copy.deepcopy(self.config)
rupdate(config, self.simu_config.to_config())
self.yaml_dump(config, fo)
with simu.iter_eval_context(cfg) as (iter_eval, t_chunks):
# save mapping model meta
output_ctx.write_mapping_meta(
mapping=mapping_model, simu_config=cfg)
# save simulator meta
output_ctx.write_sim_meta(simu_config=cfg)
# run simulator for each chunk and save the data
n_chunks = len(t_chunks)
for ci, t in enumerate(t_chunks):
self.logger.info(
f"simulate chunk {ci}/{n_chunks} "
f"t_min={t.min()} t_max={t.max()}")
output_ctx.write_sim_data(iter_eval(t))
return output_dir
def plot(self, type, **kwargs):
"""Make plot of type `type`."""
if type not in plots_registry:
raise ValueError(
f"Invalid plot type {type}. "
f"Available types: {plots_registry.keys()}")
plotter = plots_registry[type].from_dict(kwargs)
return plotter(self.simu_config)
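# Illustrative (hypothetical) driver usage of the runtime class above; the
# constructor argument is a placeholder, since the RuntimeContext signature is
# defined elsewhere:
#
#   rt = SimulatorRuntime(<rootpath or config>)
#   rt.cli_run(args=sys.argv[1:])   # validate the config, then run/plot/export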
# make a list of all simu config item types
_locals = list(locals().values())
simu_config_item_types = list()
for v in _locals:
if is_dataclass(v) and hasattr(v, 'schema'):
simu_config_item_types.append(v)
elif isinstance(v, ConfigRegistry):
simu_config_item_types.append(v)
|
python
|
from hashlib import md5
def mine(secret: str, zero_count=5) -> int:
    """Return the smallest positive integer i such that the MD5 hex digest of
    ``secret + str(i)`` starts with ``zero_count`` leading zeros."""
i = 0
target = '0' * zero_count
while True:
i += 1
dig = md5((secret + str(i)).encode()).hexdigest()
if dig.startswith(target):
return i
if __name__ == '__main__':
key = 'bgvyzdsv'
print(f'Part1: {mine(key)}')
print(f'Part2: {mine(key, 6)}')
|
python
|
import json
import os
def setAccount( persAcc, money, input=True ):
    """Apply a deposit (input=True) or withdrawal (input=False) of money to the
    personal account persAcc; return the new balance and a success flag."""
rez = True
if input == True:
persAcc += money
else:
if persAcc < money:
rez = False
else:
persAcc -= money
return persAcc, rez
# **********************************************
def setHistory( product, money, pers_acc, history, accept ):
dic={}
dic['step'] = product
dic['money'] = str(money)
dic['account'] = str(pers_acc)
dic['accept'] = accept
history.append( dic )
return history
# **********************************************
def list_history( history ):
lst = []
for d in history:
        lst.append( d['step']+', '+d['money']+' RUB, balance: '+d['account']+' RUB - '+d['accept'] )
return lst
# *********************************************
def getAccount( history ):
if len( history ) > 0:
d = history[-1]
r = float( d['account'] )
else:
r = 0
return r
# *********************************************
def read_history():
if os.path.exists( 'history.json' ):
with open( 'history.json', 'r' ) as f:
history = json.load( f )
else:
history = []
return history
# **********************************************
def write_history( h ):
if len(h) > 0:
with open( 'history.json', 'w' ) as f:
json.dump( h, f )
# **********************************************
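# Minimal usage sketch (not part of the original module); the step labels and
# amounts below are hypothetical and only illustrate how the helpers combine.
if __name__ == '__main__':
    history = read_history()
    acc = getAccount( history )
    acc, ok = setAccount( acc, 100, input=True )             # deposit 100
    history = setHistory( 'deposit', 100, acc, history, 'ok' if ok else 'rejected' )
    acc, ok = setAccount( acc, 250, input=False )            # try to withdraw 250
    history = setHistory( 'withdrawal', 250, acc, history, 'ok' if ok else 'rejected' )
    for line in list_history( history ):
        print( line )
    write_history( history )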
|
python
|
# Databricks notebook source
import pandas as pd
import random
# COMMAND ----------
# columns = ['id','amount_currency','amount_value','channel','deviceDetails_browser',
# 'deviceDetails_device','deviceDetails_deviceIp','merchantRefTransactionId','paymentMethod_apmType',
# 'paymentMethod_cardNumber','paymentMethod_cardSubType','paymentMethod_cardType','paymentMethod_cvv',
# 'paymentMethod_encodedPaymentToken','paymentMethod_expiryMonth','paymentMethod_expiryYear',
# 'shopperDetails_address_addressLine1','shopperDetails_address_addressLine2',
# 'shopperDetails_address_city','shopperDetails_address_country','shopperDetails_address_postalCode',
# 'shopperDetails_address_state','shopperDetails_email','shopperDetails_firstName','shopperDetails_lastName',
# 'shopperDetails_phoneNumber','shopperDetails_shopperKey']
columns = ['InvoiceNo', 'StockCode', 'Description', 'Quantity', 'InvoiceDate', 'UnitPrice', 'CustomerID', 'Country']
# COMMAND ----------
dbutils.fs.mounts()
# COMMAND ----------
import random
def initial_values(rows):
amounts = []
for i in range(rows):
amounts.append({'id':i, 'amount_value':round(random.uniform(0.00, 100000), 2)})
return amounts
# COMMAND ----------
numbers_of_rows_in_dataset = 3000000
data = initial_values(numbers_of_rows_in_dataset)
ml_set = pd.DataFrame(data)
# COMMAND ----------
ml_set.shape
# COMMAND ----------
ml_set['amount_currency'] = 'USD'
# COMMAND ----------
def random_channel(row):
channel = ''
rnd = random.randrange(101)
if 0 < rnd <= 10:
channel = 'pos'
elif 10 < rnd <= 50:
channel = 'online'
    elif 50 < rnd <= 100:
channel = 'virtual'
else:
channel = 'mobile'
return channel
# COMMAND ----------
ml_set['channel'] = ml_set.apply(random_channel, axis = 1)
# COMMAND ----------
ml_set.channel.unique()
# COMMAND ----------
def deviceDetails_browser(row):
browser = ''
rnd = random.randrange(101)
if 0 < rnd <= 10:
browser = 'mozilla'
elif 10 < rnd <= 50:
browser = 'chrome'
    elif 50 < rnd <= 100:
        browser = 'edge'
    else:
        browser = 'chromium'
return browser
# COMMAND ----------
ml_set['deviceDetails_browser'] = ml_set.apply(deviceDetails_browser, axis = 1)
# COMMAND ----------
def deviceDetails_device(row):
device = ''
rnd = random.randrange(101)
if 0 < rnd <= 10:
device = 'mobile'
elif 10 < rnd <= 50:
device = 'pc'
else:
device = 'pos'
return device
# COMMAND ----------
ml_set['deviceDetails_device'] = ml_set.apply(deviceDetails_device, axis = 1)
# COMMAND ----------
import socket
import struct
def deviceDetails_deviceIp(row):
ip = socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff)))
return ip
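# The helper above packs a random 32-bit unsigned integer big-endian and formats
# it as a dotted quad, e.g. 0x01020304 -> '1.2.3.4' (illustrative value).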
# COMMAND ----------
ml_set['deviceDetails_deviceIp'] = ml_set.apply(deviceDetails_deviceIp, axis = 1)
# COMMAND ----------
import string
def merchantRefTransactionId(row):
letters = string.digits
return ''.join(random.choice(letters) for i in range(10))
# COMMAND ----------
ml_set['merchantRefTransactionId'] = ml_set.apply(merchantRefTransactionId, axis = 1)
# COMMAND ----------
def paymentMethod_apmType(row):
amp = ''
rnd = random.randrange(101)
if 0 < rnd <= 10:
amp = 'chip'
elif 10 < rnd <= 50:
        amp = 'magstripe'
else:
amp = 'nfcc'
return amp
# COMMAND ----------
ml_set['paymentMethod_apmType'] = ml_set.apply(paymentMethod_apmType, axis = 1)
# COMMAND ----------
import string
def paymentMethod_cardNumber(row):
card_number = ''
numbers = string.digits
for part in range(4):
card_number += ''.join((random.choice(numbers) for i in range(4))) + '-'
return card_number[:-1]
# COMMAND ----------
ml_set['paymentMethod_cardNumber'] = ml_set.apply(paymentMethod_cardNumber, axis = 1)
# COMMAND ----------
def paymentMethod_cardType(row):
card_type = ''
rnd = random.randrange(201)
if 0 < rnd <= 10:
card_type = 'MasterCard'
    elif 10 < rnd <= 50:
        card_type = 'Visa'
    elif 50 < rnd <= 100:
        card_type = 'Discover'
    elif 100 < rnd <= 150:
        card_type = 'JCB'
else:
card_type = 'American Express'
return card_type
# COMMAND ----------
ml_set['paymentMethod_cardType'] = ml_set.apply(paymentMethod_cardType, axis = 1)
# COMMAND ----------
def paymentMethod_cardSubType(row):
card_subtype = ''
rnd = random.randrange(201)
if 0 < rnd <= 10:
card_subtype = 'Secured'
    elif 10 < rnd <= 50:
        card_subtype = 'Prepaid'
    elif 50 < rnd <= 100:
        card_subtype = 'Business'
    elif 100 < rnd <= 150:
        card_subtype = 'Student'
else:
card_subtype = 'Generic'
return card_subtype
# COMMAND ----------
ml_set['paymentMethod_cardSubType'] = ml_set.apply(paymentMethod_cardSubType, axis = 1)
# COMMAND ----------
import string
def paymentMethod_cvv(row):
numbers = string.digits
return ''.join((random.choice(numbers) for i in range(3)))
# COMMAND ----------
ml_set['paymentMethod_cvv'] = ml_set.apply(paymentMethod_cvv, axis = 1)
# COMMAND ----------
def paymentMethod_encodedPaymentToken(row):
letters = string.ascii_letters
return ''.join(random.choice(letters) for i in range(8))
# COMMAND ----------
ml_set['paymentMethod_encodedPaymentToken'] = ml_set.apply(paymentMethod_encodedPaymentToken, axis = 1)
# COMMAND ----------
ml_set.head(5)
# COMMAND ----------
from random import randrange
def paymentMethod_expiryMonth(row):
    return randrange(1, 13)
# COMMAND ----------
ml_set['paymentMethod_expiryMonth'] = ml_set.apply(paymentMethod_expiryMonth, axis = 1)
# COMMAND ----------
from random import randrange
def paymentMethod_expiryYear(row):
start_year = randrange(2018,2020)
return randrange(start_year, start_year + 10)
# COMMAND ----------
ml_set['paymentMethod_expiryYear'] = ml_set.apply(paymentMethod_expiryYear, axis = 1)
# COMMAND ----------
ml_set.head(5)
# COMMAND ----------
ml_set.to_csv('ml_dataset.csv')
# COMMAND ----------
|
python
|
import numpy as np
import cv2
def handle_image(input_image, width=60, height=60):
"""
Function to preprocess input image and return it in a shape accepted by the model.
Default arguments are set for facial landmark model requirements.
"""
preprocessed_image = cv2.resize(input_image, (width, height))
preprocessed_image = preprocessed_image.transpose((2,0,1))
preprocessed_image = preprocessed_image.reshape(1, 3, height, width)
return preprocessed_image
def get_eyes_crops(face_crop, right_eye, left_eye, relative_eye_size=0.20):
    """
    Crop the right and left eye regions from a face crop, given eye landmark
    coordinates normalised to the face crop size. Returns both crops together
    with their [y_min, y_max, x_min, x_max] pixel bounds.
    """
crop_w = face_crop.shape[1]
crop_h = face_crop.shape[0]
x_right_eye = right_eye[0]*crop_w
y_right_eye = right_eye[1]*crop_h
x_left_eye = left_eye[0]*crop_w
y_left_eye = left_eye[1]*crop_h
relative_eye_size_x = crop_w*relative_eye_size
relative_eye_size_y = crop_h*relative_eye_size
right_eye_dimensions = [int(y_right_eye-relative_eye_size_y/2), int(y_right_eye+relative_eye_size_y/2),
int(x_right_eye-relative_eye_size_x/2), int(x_right_eye+relative_eye_size_x/2)]
left_eye_dimensions = [int(y_left_eye-relative_eye_size_y/2), int(y_left_eye+relative_eye_size_y/2),
int(x_left_eye-relative_eye_size_x/2), int(x_left_eye+relative_eye_size_x/2)]
right_eye_crop = face_crop[right_eye_dimensions[0]:right_eye_dimensions[1],
right_eye_dimensions[2]:right_eye_dimensions[3]]
left_eye_crop = face_crop[left_eye_dimensions[0]:left_eye_dimensions[1],
left_eye_dimensions[2]:left_eye_dimensions[3]]
return right_eye_crop, left_eye_crop, right_eye_dimensions, left_eye_dimensions
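# Minimal usage sketch (not part of the original module): a dummy face crop and
# hypothetical normalised eye landmarks, just to show the expected call pattern.
if __name__ == '__main__':
    dummy_face = np.zeros((120, 120, 3), dtype=np.uint8)
    right_eye = (0.3, 0.4)  # hypothetical landmark (x, y), normalised to the crop
    left_eye = (0.7, 0.4)
    r_crop, l_crop, r_dims, l_dims = get_eyes_crops(dummy_face, right_eye, left_eye)
    print(r_crop.shape, l_crop.shape)        # e.g. (24, 24, 3) for a 120x120 crop
    print(handle_image(dummy_face).shape)    # (1, 3, 60, 60), ready for the model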
|
python
|
class Solution:
def isPalindrome(self, x: int) -> bool:
if x < 0 or (x != 0 and x % 10 == 0):
return False
res = 0
        # Build the reversed second half of x digit by digit, e.g. for 1221:
        # x=122, res=1 -> x=12, res=12 -> loop stops once x <= res.
while x > res:
res *= 10
res += x % 10
x //= 10
        # Even digit count: x == res; odd digit count: drop the middle digit via res // 10.
        if x == res or x == res // 10:
return True
return False
if __name__ == "__main__":
    print(Solution().isPalindrome(1230))
|
python
|
from model.contact import Contact
from random import randrange
def test_delete_some_contact(app):
app.contact.ensure_contact_exists(Contact(fname="contact to delete"))
old_contacts = app.contact.get_contact_list()
index = randrange(len(old_contacts))
app.contact.delete_contact_by_index(index)
assert len(old_contacts) - 1 == app.contact.count()
new_contacts = app.contact.get_contact_list()
old_contacts[index:index+1] = []
assert old_contacts == new_contacts
|
python
|
"""
Number Mind
"""
|
python
|
#!/usr/bin/env python3
# Up / Down ISC graph for ALL bins
# Like SfN Poster Figure 2
# But now for all ROIs in Yeo atlas
import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import deepdish as dd
from HMM_settings import *
ISCdir = ISCpath+'shuff_Yeo_outlier_'
figdir = figurepath+'up_down_outlier/'
pvals = dd.io.load(pvals_file)
task = 'DM'
n_time = 750
bins = np.arange(nbinseq)
nbins = len(bins)
subh = [[[],[]]]
subh[0][0] = np.concatenate((np.arange(0,minageeq[0]//2),
minageeq[0]+np.arange(0,minageeq[1]//2)))
subh[0][1] = np.concatenate((np.arange(minageeq[0]//2,minageeq[0]),
minageeq[0]+np.arange(minageeq[1]//2,minageeq[1])))
plt.rcParams.update({'font.size': 15})
xticks = [str(int(round(eqbins[i])))+\
' - '+str(int(round(eqbins[i+1])))+' y.o.' for i in range(len(eqbins)-1)]
for roi in pvals['roidict'].keys():
if pvals['roidict'][roi]['ISC_e']['q'] < 0.05:
print(roi)
vall = pvals['seeddict']['0'][roi]['vall']
n_vox = len(vall)
ISC_w = np.zeros((len(seeds),nbins,n_vox))
for si,seed in tqdm.tqdm(enumerate(seeds)):
for b in bins:
D,Age,Sex = load_D(roidir+seed+'/'+roi+'.h5',task,[b])
ISC_w_,_ = ISC_w_calc(D,n_vox,n_time,nsub,subh)
ISC_w[si,b] = np.reshape(ISC_w_,n_vox)
ISC_w = np.mean(ISC_w,axis=0)
plt.rcParams.update({'font.size': 30})
fig,ax = plt.subplots()
ax.plot(np.arange(len(xticks)),np.mean(ISC_w,axis=1), linestyle='-', marker='o', color='k')
#ax.axes.errorbar(np.arange(len(xticks)),
# np.mean(ISC_w,axis=1),
# yerr = np.std(ISC_w,axis=1),
# xerr = None, ls='none',capsize=10, elinewidth=1,fmt='.k',
# markeredgewidth=1)
ax.set_xticks(np.arange(len(xticks)))
ax.set_xticklabels(xticks,rotation=45, fontsize=20)
ax.set_xlabel('Age',fontsize=20)
ax.set_ylabel('ISC',fontsize=20)
plt.show()
fig.savefig(figdir+roi+'.png', bbox_inches="tight")
plt.rcParams.update({'font.size': 20})
fig,ax = plt.subplots(figsize=(2, 4))
parts = ax.violinplot(pvals['roidict'][roi]['ISC_e']['shuff'], showmeans=False, showmedians=False,showextrema=False)
for pc in parts['bodies']:
pc.set_facecolor('k')
#pc.set_edgecolor('black')
#pc.set_alpha(1)
ax.scatter(1,pvals['roidict'][roi]['ISC_e']['val']*-1,color='k',s=80)
ax.set_xticks([])
ax.set_ylabel('ISC difference',fontsize=30)
fig.savefig(figdir+roi+'_ISC_difference.png', bbox_inches="tight")
|
python
|
"""
Created Aug 2020
@author: TheDrDOS
"""
# Clear the Spyder console and variables
try:
from IPython import get_ipython
#get_ipython().magic('clear')
get_ipython().magic('reset -f')
except:
pass
from time import time as now
from time import perf_counter as pnow
import pickle
import numpy as np
import pandas as pd
from bokeh.models import ColumnDataSource # for interfacing with Pandas
import multiprocessing as mp
# mp_dic = {} # will use this for multi processing as a simple passing of input data
import progress_bar as pbar
T0 = now()
# %%
'''
------------------------------------------------------------------------------
Load Data
------------------------------------------------------------------------------
'''
print("Load Data:")
t0=pnow()
data_path = './tmp_data/'
data = pickle.load(open(data_path+'tmp_data_and_maps.p','rb'))
print(" Completed in :{} sec".format(pnow()-t0))
# From: (assign the keys as variables in the workspace)
# data = {
# 'covid_data': covid_data,
# 'GraphData':GraphData,
# 'MapData':MapData,
# 'Type_to_LocationNames':Type_to_LocationNames,
# 'LocationName_to_Type':LocationName_to_Type,
# }
for d in data:
globals()[d] = data[d]
del data
# %%
'''
------------------------------------------------------------------------------
Process Data for GraphData
------------------------------------------------------------------------------
'''
print("Process for GraphData:")
t0=pnow()
# Support Functions
def diff(x):
return np.diff(x)
def pdiff(x):
return np.clip(np.diff(x),0,None)
def cds_to_jsonreadydata(cds,nan_code):
data = {}
for k in cds.data:
data[k] = np.nan_to_num(cds.data[k].tolist(),nan=nan_code).tolist() # replace NaNs and cast to float using tolist, the first tolist is needed for string arrays with NaNs to be processed (will replace with 'nan')
return data
def dic_to_jsonreadydata(dic,nan_code):
data = {}
for k in dic:
data[k] = np.nan_to_num(dic[k],nan=nan_code).tolist() # replace NaNs and cast to float using tolist, the first tolist is needed for string arrays with NaNs to be processed (will replace with 'nan')
return data
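# Illustrative example (hypothetical values, assuming nan_code = -1):
#   dic_to_jsonreadydata({'positive': np.array([1.0, np.nan, 2.5])}, -1)
#   -> {'positive': [1.0, -1.0, 2.5]}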
# CPT key definitions: https://covidtracking.com/about-data/data-definitions
# JH only has: 'positive','death'
# List of keys all will have
keys_all = ['positive',
'positiveIncrease',
'positiveIncreaseMAV',
'recovered',
'recoveredIncrease',
'recoveredIncreaseMAV',
'positiveActive',
'positiveActiveIncrease',
'positiveActiveIncreaseMAV',
'death',
'deathIncrease',
'deathIncreaseMAV10',
]
popnorm_list = keys_all # list of keys to normalized
popnorm_postfix = 'PerMil' # postfix applied to normalized names
def mp_df_processing(l):
''' Process a given location, using Pandas for computation '''
df = covid_data[l]['dataframe'].copy()
# Remove dates with no reported cases and calculate active positive cases
df = df[df['positive']!=0]
# Fill in x day old cases as recovered if no recovery data is available
    rdays = 15  # assumed number of days it takes to recover
if df['recovered'].count()<7: # if less than one week of recovered reporting, then ignore it
df['recovered'] = 0
if df['recovered'].fillna(0).sum()==0:
stmp = df['positive']
df['recovered']=stmp.shift(rdays).fillna(0)-df['death']
df['recovered'] = df['recovered'].replace(0, float('NaN'))
# Calculate recovered increase
df['recoveredIncrease'] = df['recovered'].rolling(2).apply(pdiff)
df['recoveredIncreaseMAV'] = df['recoveredIncrease'].rolling(7).mean()
# Calculate positive active cases
df["positiveActive"] = df["positive"].fillna(0)-df["recovered"].fillna(0)-df["death"].fillna(0)
# Calculate actual and averaged increase
if 'positiveIncrease' not in df:
df['positiveIncrease'] = df['positive'].rolling(2).apply(pdiff)
df['positiveIncreaseMAV'] = df['positiveIncrease'].rolling(7).mean()
df['positiveActiveIncrease'] = df['positiveActive'].rolling(2).apply(diff)
# Remove active calculations from when recovered data was not available, and one more entry to avoid the resultant cliff
if len(df['positive'].values)>1:
df.loc[df['recovered'].isnull(),'positiveActiveIncrease'] = float('NaN')
try:
df.loc[df['positiveActiveIncrease'].first_valid_index(),'positiveActiveIncrease']=float('NaN')
except:
pass
df['positiveActiveIncreaseMAV'] = df['positiveActiveIncrease'].rolling(7).mean()
if len(df['positive'].values)>1:
df.loc[df['recovered'].isnull(),'positiveActiveIncrease'] = float('NaN')
# Calculate positiveIncreaseMAV/(positiveIncreaseMAV+negativeIncreaseMAV)
# Calculate actual and averaged increase
if 'negative' in df:
if 'negativeIncrease' not in df:
df['negativeIncrease'] = df['negative'].rolling(2).apply(pdiff)
df['negativeIncreaseMAV'] = df['negativeIncrease'].rolling(7).mean()
df['pospercentMAV_PosMAVoverPosPlusNegMAV'] = df['positiveIncreaseMAV'].div(df['positiveIncreaseMAV']+df['negativeIncreaseMAV'])
else:
df['negative'] = 0
df['negativeIncrease'] = 0
df['pospercentMAV_PosMAVoverPosPlusNegMAV'] = 0
# Calculate deaths
if 'deathIncrease' not in df:
df['deathIncrease'] = df['death'].rolling(2).apply(diff)
df['deathIncreaseMAV10'] = df['deathIncrease'].rolling(7).mean()*10
# Normalize wrt population
if covid_data[l]['population']>0:
pnorm = 1000000/covid_data[l]['population']
else:
pnorm = np.nan
for k in popnorm_list:
df[k+popnorm_postfix] = df[k]*pnorm
# Convert dataframe to ColumnDataSource
cds = ColumnDataSource(df)
# Convert the data in the ColumnDataSource to encoded float arrays ready to be json
extra = {
'population': covid_data[l]['population'],
'name': covid_data[l]['name'],
}
out = {
'l':l,
'data': cds_to_jsonreadydata(cds,GraphData[l]['nan_code']),
'extra': dic_to_jsonreadydata(extra,GraphData[l]['nan_code']),
}
return out
# N = len(GraphData)
# for n,l in enumerate(GraphData):
# # mp_df_processing(l)
# if n%10==0:
# pbar.progress_bar(n,N-1)
# pbar.progress_bar(n,N-1)
# Use multi processing to process the dataframes
N = len(GraphData)
Ncpu = min([mp.cpu_count(),N]) # use maximal number of local CPUs
chunksize = 1
pool = mp.Pool(processes=Ncpu)
for n,d in enumerate(pool.imap_unordered(mp_df_processing,GraphData,chunksize=chunksize)):
#pbar.progress_bar(n,-(-N/chunksize)-1)
#pbar.progress_bar(n,N-1)
GraphData[d['l']]['data'] = d['data']
GraphData[d['l']]['extra'] = d['extra']
if n%15==0:
pbar.progress_bar(n,N-1)
pass
pbar.progress_bar(n,N-1)
pool.terminate()
print(" Completed in :{} sec".format(pnow()-t0))
# %%
'''
------------------------------------------------------------------------------
Process Data for MapData
------------------------------------------------------------------------------
'''
print("Process for MapData:")
t0=pnow()
# Add a key with the latest data point of each of the data fields
latest_keys = keys_all+[k+popnorm_postfix for k in keys_all]
def mp_mapdata_processing(l):
data = {k:[] for k in latest_keys}
latestDate = []
population = []
for ll in MapData[l]['data']['location']:
if len(GraphData[ll]['data']['date'])>0:
latestDate.append(GraphData[ll]['data']['date'][-1])
for k in data:
data[k].append(GraphData[ll]['data'][k][-1])
else:
latestDate.append(GraphData[ll]['nan_code'])
for k in data:
data[k].append(GraphData[ll]['nan_code'])
population.append(GraphData[ll]['extra']['population'])
data['latestDate'] = latestDate
data['population'] = population
out = {
'l':l,
'data': data}
return out
for l in MapData:
d = mp_mapdata_processing(l)
MapData[d['l']]['data'].update(d['data'])
print(" Completed in :{} sec".format(pnow()-t0))
t0=pnow()
# Use multi processing to process the dataframes - Slower
# N = len(MapData)
# Ncpu = min([mp.cpu_count(),N]) # use maximal number of local CPUs
# chunksize = 1
# pool = mp.Pool(processes=Ncpu)
# for n,d in enumerate(pool.imap_unordered(mp_mapdata_processing,MapData,chunksize=chunksize)):
# #pbar.progress_bar(n,-(-N/chunksize)-1)
# #pbar.progress_bar(n,N-1)
# MapData[d['l']]['data'].update(d['data'])
# if n%15==0:
# pbar.progress_bar(n,N-1)
# pass
# pbar.progress_bar(n,N-1)
# pool.terminate()
# print(" Completed in :{} sec".format(pnow()-t0))
print("Pickling COVID Data and Maps After Matching:")
t0=pnow()
data_path = './tmp_data/'
data = {
'covid_data': covid_data,
'GraphData':GraphData,
'MapData':MapData,
'Type_to_LocationNames':Type_to_LocationNames,
'LocationName_to_Type':LocationName_to_Type,
}
pickle.dump(data,open(data_path+'tmp_data_and_maps.p','wb'))
print(" Completed in :{} sec".format(pnow()-t0))
t0=pnow()
print("Script Completed in :{} sec".format(now()-T0))
|
python
|
# -*- coding: utf-8 -*-
import cv2
# Callback: rebuild and show the colour mask each time a trackbar value changes
def changeColor(val):
r_min = cv2.getTrackbarPos("R_min", "img")
r_max = cv2.getTrackbarPos("R_max", "img")
g_min = cv2.getTrackbarPos("G_min", "img")
g_max = cv2.getTrackbarPos("G_max", "img")
b_min = cv2.getTrackbarPos("B_min", "img")
b_max = cv2.getTrackbarPos("B_max", "img")
    mask_image = cv2.inRange(img, (b_min, g_min, r_min), (b_max, g_max, r_max))  # the image is BGR, so the bounds tuples are also in BGR order
    # show the result in the "img" window
cv2.namedWindow("img", cv2.WINDOW_NORMAL)
cv2.imshow("img", mask_image)
# load the image
# img = cv2.imread("../../../../resources/capture_l_plane.png", 1)
img = cv2.imread("../../../../resources/result.png", 1)
# img = cv2.resize(img , (int(img.shape[1]*0.5), int(img.shape[0]*0.5)))
# make the window resizable
cv2.namedWindow("img", cv2.WINDOW_NORMAL)
# create the trackbars (min/max for each of R, G, B)
cv2.createTrackbar("R_min", "img", 0, 255, changeColor)
cv2.createTrackbar("R_max", "img", 0, 255, changeColor)
cv2.createTrackbar("G_min", "img", 0, 255, changeColor)
cv2.createTrackbar("G_max", "img", 0, 255, changeColor)
cv2.createTrackbar("B_min", "img", 0, 255, changeColor)
cv2.createTrackbar("B_max", "img", 0, 255, changeColor)
# keep the window open until 'q' is pressed
while (True):
# cv2.imshow("img", mask_image)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
cv2.destroyAllWindows()
|
python
|
# Developed by Joseph M. Conti and Joseph W. Boardman on 1/21/19 6:29 PM.
# Last modified 1/21/19 6:29 PM
# Copyright (c) 2019. All rights reserved.
import logging
import logging.config as lc
import os
from pathlib import Path
from typing import Union, Dict
import numpy as np
from yaml import safe_load
class Singleton(type):
"""A metaclass that can used to turn your class in a Singleton"""
__instances = dict()
def __call__(cls, *args, **kwargs):
if cls not in cls.__instances:
cls.__instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls.__instances[cls]
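# Minimal usage sketch (illustrative only; the metaclass is not used elsewhere
# in this module):
#
#   class AppConfig(metaclass=Singleton):
#       pass
#
#   assert AppConfig() is AppConfig()   # every call returns the same instance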
class LogMessage(object):
def __init__(self, fmt, args):
self.fmt = fmt
self.args = args
def __str__(self):
return self.fmt.format(*self.args)
class Logger(logging.LoggerAdapter):
def __init__(self, logger, extra=None):
super().__init__(logger, extra or {})
def log(self, level, msg, *args, **kwargs):
if self.isEnabledFor(level):
msg, kwargs = self.process(msg, kwargs)
self.logger._log(level, LogMessage(msg, args), (), **kwargs)
class LogHelper:
__logger:Logger = None
@staticmethod
def __initialize():
# TODO set this from config options
logging.raiseExceptions = True
path:str = os.path.abspath(os.path.dirname(__file__))
file:str = os.path.join(path, "config/logging.conf")
if file is not None and len(file) > 0:
config_file:Path = Path(file)
if config_file.exists() and config_file.is_file():
conf = load(config_file.open("r"))
lc.dictConfig(conf)
LogHelper.__logger = Logger(logging.getLogger("openSpectra"))
logger = LogHelper.logger("Logger")
logger.info("Logger initialize from default configuration, {0}", file)
else:
LogHelper.__fallback_initialize()
else:
LogHelper.__fallback_initialize()
@staticmethod
def __fallback_initialize():
logging.basicConfig(format="{asctime} [{levelname}] [{name}] {message}",
style="{", level=logging.DEBUG)
LogHelper.__logger = logging.getLogger("openSpectra")
logger = LogHelper.logger("Logger")
logger.info("Could not find default logger configuration file, using fallback config.")
@staticmethod
def logger(name:str) -> logging.Logger:
if LogHelper.__logger is None:
LogHelper.__initialize()
return Logger(logging.getLogger("openSpectra").getChild(name))
class OpenSpectraDataTypes:
Floats = (np.float32, np.float64,)
Ints = (np.uint8, np.int16, np.int32, np.uint16,np.uint32, np.int64, np.uint64)
Complexes = (np.complex64, np.complex128)
class OpenSpectraProperties:
__LOG: Logger = LogHelper.logger("OpenSpectraProperties")
__properties = None
def __init__(self):
self.__prop_map:Dict[str, Union[str, int, float, bool]] = dict()
self.__load_properties()
def __load_properties(self, file_name:str=None):
file:str = file_name
if file_name is None:
path: str = os.path.abspath(os.path.dirname(__file__))
file: str = os.path.join(path, "config/openspectra.properties")
if file is not None and len(file) > 0:
config_file: Path = Path(file)
if config_file.exists() and config_file.is_file():
OpenSpectraProperties.__LOG.info("Loading configuration properties from {}".format(config_file))
with config_file.open() as properties_file:
for line in properties_file:
line = line.strip()
# ignore # as a comment
if not line.startswith("#") and len(line) > 0:
nv_pair = line.split('=')
if len(nv_pair) == 2:
name: str = nv_pair[0]
value: Union[str, int, float] = OpenSpectraProperties.__get_typed_value(nv_pair[1])
self.__prop_map[name] = value
OpenSpectraProperties.__LOG.info("name: {}, value: {}".format(name, value))
else:
OpenSpectraProperties.__LOG.warning("Ignore malformed line [{}] in file {}".
format(line, file))
else:
OpenSpectraProperties.__LOG.error("Failed to load configuration file {}, exists {}, is file {}".
format(file, config_file.exists(), config_file.is_file()))
def __get_property_value(self, name:str) -> Union[str, int, float, bool]:
return self.__prop_map.get(name)
@staticmethod
def __get_typed_value(value:str) -> Union[str, int, float, bool]:
result:Union[str, int, float] = None
if all(s.isalpha() or s.isspace() for s in value):
if value == "True":
result = True
elif value == "False":
result = False
else:
result = value
elif value.count(".") == 1:
try:
result = float(value)
except ValueError:
result = value
elif value.isdigit():
try:
result = int(value)
except ValueError:
result = value
return result
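    # Illustrative conversions performed by __get_typed_value (hypothetical inputs):
    #   "True" -> True (bool)      "3.5" -> 3.5 (float)
    #   "42"   -> 42 (int)         "abc" -> "abc" (str)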
@staticmethod
def __get_instance():
if OpenSpectraProperties.__properties is None:
OpenSpectraProperties.__properties = OpenSpectraProperties()
return OpenSpectraProperties.__properties
@staticmethod
    def get_property(name:str, default:Union[str, int, float, bool]=None) -> Union[str, int, float, bool]:
        result = OpenSpectraProperties.__get_instance().__get_property_value(name)
        if result is None:
            return default
else:
return result
@staticmethod
def add_properties(file_name:str):
"""Add properties in addition to the default properties over writing any duplicates"""
OpenSpectraProperties.__get_instance().__load_properties(file_name)
|
python
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.15.0)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x07\x10\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x40\x00\x00\x00\x40\x08\x06\x00\x00\x00\xaa\x69\x71\xde\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x06\x8d\x49\x44\
\x41\x54\x78\x9c\xed\x9a\x69\x6c\x54\x55\x14\xc7\x7f\xb7\xed\x0c\
\x53\x28\xd3\x0e\x9b\xb4\x80\x80\x2c\x6d\xb5\x60\xa4\x6c\x0a\x04\
\x81\xb0\x08\x2a\x04\x88\x4b\x48\x88\x26\x1a\xb7\x0f\xb8\x07\x13\
\x43\x14\x35\x88\x60\xa2\x48\xdc\xbe\x80\x41\x41\x8d\x02\x6a\x62\
\x24\x18\x11\xc2\x26\xb4\x18\x11\x2d\x50\x29\xb2\xb4\x15\x0a\xed\
\x00\x5d\xa7\x9d\x39\x7e\x98\x99\xf6\xcd\x9b\x37\xed\xb4\x9d\x37\
\xcf\xca\xfc\x93\x9b\xe9\x3d\xf7\xbe\xfb\xfe\xe7\x3f\xef\xdd\xce\
\x3d\xe7\x28\x11\xe1\x7a\x46\x92\xd5\x04\xac\x46\x42\x00\xab\x09\
\x58\x8d\x84\x00\x56\x13\xb0\x1a\x09\x01\xac\x26\x60\x35\x12\x02\
\x58\x4d\xc0\x6a\x24\x04\xb0\x9a\x80\xd5\x48\xb1\xea\xc6\x4a\xa9\
\x64\x20\x1d\xf0\x02\x57\xc5\xa2\x43\x89\x32\xeb\xbe\x4a\x29\x3b\
\x30\x1a\x18\x1b\x68\x43\x01\x17\xd0\x2b\xf0\xd9\x13\x50\x81\xe9\
\x3e\xc0\x0d\x54\x05\x5a\x25\x70\x02\x28\x08\xb4\xe3\x22\xe2\x33\
\x85\x67\x2c\x05\x50\x4a\x4d\x04\x1e\x00\x26\xe1\x77\xde\x1e\xa3\
\xa5\xab\x81\x5f\x81\x5d\xc0\x66\x11\x39\x11\xa3\x75\x41\x44\x3a\
\xd5\x80\x6c\x60\x25\xf0\x17\x20\x71\x6a\x05\xc0\x33\x40\x66\xa7\
\xf9\x77\xc2\xf1\x19\xc0\xfe\x8e\x3a\x91\xac\x94\xf4\x4a\xb5\x4b\
\x7a\x37\x9b\xa8\x8e\x0b\xe1\x05\xb6\x03\x79\x1d\xf5\xa3\xdd\xaf\
\x80\x52\xea\x36\x60\x35\x30\xb3\xad\xb9\xfd\xd3\x1c\x8c\xcd\xca\
\x20\x3f\x33\x83\xfc\xac\x0c\x06\x3a\x53\x71\x39\x6c\xb8\x52\x6d\
\x38\xbb\xd9\x5a\x36\x00\x11\xdc\xf5\x8d\x54\xd5\x37\x52\x59\xe7\
\xe1\x74\x55\x2d\x05\x65\x6e\x0a\xcb\xdd\x1c\x29\x77\xe3\xae\x6f\
\x6c\xeb\x56\x3e\x60\x13\xb0\x42\x44\xce\xb6\xcb\x9f\x68\x05\x50\
\x4a\xf5\x01\xde\x05\x1e\xa4\x65\xf3\x0a\x41\x0f\x5b\x32\xf3\x73\
\x32\x59\x7c\xf3\x00\x26\x0c\x70\x91\xd5\xd3\xd1\x1e\x2e\x86\x10\
\xe0\x54\x65\x0d\xfb\xcf\x55\xf2\xf9\xb1\xf3\xec\x2c\xb9\x48\x93\
\x2f\x22\xe7\x86\x00\xc7\x15\x22\xd2\x10\xcd\xfa\x51\x09\xa0\x94\
\xca\x07\xb6\x02\x37\x86\x8d\x01\x73\x86\xdf\xc0\x92\xd1\x03\x59\
\x90\x93\x45\x0f\x5b\x72\x34\xf7\xed\x30\x2a\x6a\x1b\xf8\xe2\x58\
\x29\x9b\x8e\x9e\xe3\x50\x69\x55\xa4\x69\x87\x80\x85\x22\x52\xda\
\xd6\x7a\x6d\x0a\xa0\x94\x5a\x0a\x7c\x04\x84\x7d\x9d\xd3\x87\xf6\
\x65\xed\xac\x3c\x6e\xeb\x9f\xde\x36\x73\x13\xf0\x7d\xf1\x05\x5e\
\xdc\x79\x8c\x3f\x2a\xae\x19\x0d\x5f\x00\x16\x8b\xc8\xde\xd6\xd6\
\x68\x55\x00\xa5\xd4\x5a\xe0\x39\xbd\xfd\x96\xbe\x3d\x79\x6b\x66\
\x1e\x73\x47\xdc\xd0\x4e\xca\xb1\x87\x57\x84\x0d\xbf\x9e\x65\xc5\
\xae\x22\xca\xab\xeb\xf5\xc3\x8d\xc0\x63\x22\xb2\x21\xd2\xf5\x11\
\x05\x50\x4a\x3d\x0f\xac\xd1\xdb\x97\x4f\x1e\xc9\xeb\xd3\x73\x49\
\x56\x86\xdb\x80\x65\xa8\xf6\x34\xf1\xd0\xf6\x23\x7c\x5d\x54\xa6\
\x1f\xf2\x02\xf3\x44\x64\x87\xd1\x75\x86\x02\x28\xa5\xe6\x01\xdf\
\xa2\x39\x2b\xa4\xd9\x53\xd8\xb8\x60\x0c\x8b\x72\xb3\x62\xc7\xda\
\x04\xac\xda\x7b\x92\x97\x7f\x2a\xc2\x17\xea\xd7\x15\x60\xa2\x88\
\x1c\xd7\xcf\x0f\x13\x40\x29\x95\x0b\x1c\x04\x9c\x41\x5b\x86\xc3\
\xc6\x9e\x87\xa7\x30\xaa\x9f\x93\xae\x80\x6f\x4e\x94\xb3\xe8\x8b\
\x43\x78\x43\x7d\x2b\x06\xc6\x8b\x88\x5b\x6b\x34\x3a\x0d\x7e\x80\
\xc6\xf9\x94\x24\xc5\x57\xf7\x8d\xef\x32\xce\x03\xcc\xcf\xce\xe4\
\xed\xd9\x79\x7a\xf3\x08\xe0\x15\xbd\x31\x44\x00\xa5\xd4\x24\x60\
\xaa\xd6\xb6\x7e\xee\xad\xcc\x18\xda\x37\xc6\x14\xcd\xc7\xb2\x09\
\xc3\x78\x7c\xec\x50\xbd\xf9\x51\xa5\x54\x88\x33\xfa\x27\xe0\x25\
\x6d\x67\xf6\xb0\x7e\x3c\x96\x3f\x24\xf6\xec\xe2\x84\x75\x77\x8d\
\x62\x90\x33\x55\x6b\xea\x0e\x2c\xd3\x1a\x9a\x05\x50\x4a\xdd\x02\
\xcc\xd3\x0e\x2e\x9f\x3c\xd2\x4c\x7e\xa6\xc3\x96\x94\xc4\xb3\xb7\
\x0f\xd7\x9b\x9f\x52\x4a\xf5\x08\x76\xb4\x4f\xc0\x1c\xed\xac\x71\
\x59\x2e\xee\x1c\xd2\xc7\x44\x7a\xf1\xc1\x23\x63\x06\xe3\x72\xd8\
\xb4\xa6\x0c\x60\x62\xb0\xa3\x15\x20\x47\x3b\xeb\xc1\x51\x03\xcd\
\x65\x16\x27\xa4\xd9\x53\xb8\x27\xbb\xbf\xde\xdc\xec\xab\x56\x80\
\x6c\xed\x8c\xec\xde\x69\x26\xd2\x8a\x2f\xb2\x7b\xf7\x0c\x33\x05\
\xff\x88\x28\x40\x4e\x9f\xff\x8f\x00\x06\xbe\x18\x0a\xd0\x5d\x3b\
\xc3\xd9\x2d\xe4\xbd\xe9\xd2\x30\xf0\xa5\xd9\x57\xad\x00\xa7\xb4\
\x33\xfe\xaa\xac\x36\x91\x52\x7c\x61\xe0\x4b\xb3\xaf\x5a\x01\x42\
\x02\x8d\xc5\x95\x35\x26\x52\x8a\x2f\x0c\x7c\x69\xf6\x55\x2b\x40\
\xc8\x41\x61\xf7\xdf\x97\x4c\xa4\x14\x5f\x18\xf8\xd2\xec\xab\x56\
\x80\x02\xed\x8c\x4d\x47\xcf\x51\x7a\x2d\xec\x7c\xdd\xe5\xb0\xb3\
\xe4\x22\x85\xe5\x21\xe7\x1f\x01\x0a\x83\x1d\xad\x00\xdf\x03\x25\
\xc1\x8e\xc7\xeb\x63\xcd\xbe\x62\xd3\x09\x9a\x8d\x37\xf6\x9c\xd4\
\x9b\xbe\xd3\x06\x4e\x9b\x05\x10\x11\x2f\xba\x00\xc8\xc7\x85\x7f\
\x47\x0a\x37\x75\x09\x6c\x2d\x2a\x63\xf7\x99\xb0\xc7\x7f\x95\xb6\
\xa3\x3f\x0c\x6d\x00\xfe\x09\x76\xea\x9a\xbc\xdc\xb3\xf9\x00\x97\
\x6a\x3d\xe6\x30\x34\x11\x47\x2f\x5c\x65\xe9\xb6\x42\xbd\xf9\x67\
\x11\x39\xa8\x35\x84\x08\x10\x08\x25\x2f\xd7\xda\x4e\xbb\x6b\x59\
\xf4\xe5\x2f\x34\xfa\x4c\x49\xcd\x99\x82\x8b\x35\x0d\xdc\xbb\xe5\
\x20\x35\x8d\x5e\xad\xd9\x03\xbc\xa8\x9f\x1b\x16\x10\x11\x91\x4f\
\x80\xf5\x5a\xdb\x9e\x33\x97\xb9\xeb\xd3\xae\xf1\x24\x14\x5d\xba\
\xc6\xd4\x8d\x7b\x39\x73\xa5\x56\x3f\xf4\xa4\x88\x1c\xd6\x1b\x23\
\xc5\x04\x53\x80\x1d\xc0\x74\xad\x7d\x90\x33\x95\xaf\xef\x1f\xcf\
\xb8\x2c\x57\x0c\x29\xc7\x0e\x5f\xfd\x59\xc6\xc3\xdf\x1c\xa1\xda\
\xd3\xa4\x1f\x5a\x27\x22\xcb\x8c\xae\x69\x2d\x2a\xdc\x0b\x38\x00\
\x84\x04\x05\xba\x25\x27\xf1\xea\xb4\x5c\x96\x4d\xb8\x09\x47\x8a\
\xb9\x49\x90\x68\x71\xb9\xce\xc3\x8a\x5d\x45\xbc\x7f\xf8\xb4\xd1\
\xf0\x0f\xc0\xdd\x81\x4d\x3e\x0c\x6d\xe5\x05\x7a\x01\x5b\x80\x59\
\xfa\xb1\x41\xce\x54\x56\x4e\xcb\x65\xe9\xad\x83\x48\xb2\x28\x44\
\x5e\xdb\xe8\xe5\x9d\x83\xa7\x58\xbd\xef\x24\x57\x1b\xc2\xbe\x75\
\x80\xf7\x81\xa7\x45\x24\x62\x72\x31\x9a\xcc\x50\x12\xf0\x1a\xfe\
\x70\x59\x98\xa7\x79\xfd\x9c\xbc\x70\xc7\x08\x16\xe6\x66\x92\x66\
\x8f\x4f\xc1\xc9\x85\x9a\x06\x36\xff\x7e\x9e\x35\xfb\x8a\x8d\x92\
\x21\x00\xf5\xc0\xe3\x81\xfd\xac\x55\xb4\x27\x39\xba\x00\xd8\x88\
\xbf\xac\x25\x0c\xdd\x6d\xc9\xdc\x9b\x9d\xc9\x92\x51\x03\x99\x3d\
\xbc\x1f\xb6\xa4\xd8\x96\x1f\x55\x7b\x9a\xd8\x76\xbc\x9c\xcf\x8e\
\x9e\xe3\xc7\x92\x0a\x7d\xc8\x5b\x8b\xd3\xf8\x53\x62\x47\xa2\x59\
\xb7\x5d\xe9\xf1\x40\x86\xf8\x65\xe0\x09\x5a\xa9\xfe\x70\x39\x6c\
\x8c\x1f\xe0\x22\x5f\x93\x1a\x1f\x9c\xde\x3d\xd2\xf4\x30\xf8\x44\
\x38\x71\xb9\x9a\xc2\x32\x7f\x7a\xbc\xb0\xdc\xcd\xe1\x52\x37\x75\
\x4d\x86\xaf\x71\x10\x55\xc0\x9b\xf8\x37\xbc\xa8\x7f\xc3\x77\xa8\
\x44\x46\x29\x35\x04\xff\x6b\xb1\x84\x08\xa9\x72\x3d\x7a\xa7\xda\
\x19\xe0\x74\xe0\x72\xd8\x71\xa5\xda\x02\x75\x02\x76\xbc\x3e\xa1\
\xaa\xde\x43\x55\x5d\x4b\x7d\xc0\xd9\x2b\x75\x46\x3b\x79\x24\xd4\
\x03\xef\x01\xab\x44\x24\x62\xba\x38\x22\x3a\x59\x1e\x93\x83\xff\
\xa7\xe5\x19\xe2\x57\x1e\x13\x6c\xbf\xe1\xff\x61\xd3\xa9\x32\x99\
\x4e\xd7\x08\x05\x84\x50\xc0\x14\xe0\x43\xa0\xc2\x44\xa7\x4b\x02\
\x82\x77\xb8\x24\xa6\xd3\x25\x32\xd1\x40\x29\x35\x0c\x18\x47\x4b\
\x89\xdc\x18\xfc\x65\x71\xed\x41\x05\x2d\x65\x72\x05\x40\x81\x88\
\x84\xa5\x7e\x3b\x0b\xd3\xea\x04\x43\x6e\xe2\xff\x57\x9a\x85\xbf\
\x3e\x30\x1d\x7f\x6c\x3e\xf8\xe9\xc5\x9f\xbd\x75\x07\xda\x15\xa0\
\x52\x44\xfe\x31\x5e\x2d\xc6\xdc\xe2\x21\xc0\x7f\x19\xd7\x7d\xad\
\x70\x42\x00\xab\x09\x58\x8d\x84\x00\x56\x13\xb0\x1a\x09\x01\xac\
\x26\x60\x35\x12\x02\x58\x4d\xc0\x6a\xfc\x0b\x97\xa0\x11\x24\x79\
\xaa\x96\xd6\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\xce\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x40\x00\x00\x00\x40\x08\x06\x00\x00\x00\xaa\x69\x71\xde\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x01\x4b\x49\x44\
\x41\x54\x78\x9c\xed\x9b\x21\x52\xc3\x40\x18\x46\xdf\x02\x77\x00\
\x81\x42\x20\x30\x1c\x01\xcf\x11\xea\x50\x1c\x86\x13\x80\xa8\xe3\
\x2a\xdc\x80\x01\x81\x43\x60\x38\x01\xa2\x8b\x08\x22\x5b\x76\x06\
\xd3\xe4\x31\xcd\xf7\x66\x56\x64\x63\xde\xbe\xe9\x6c\x4d\xfe\x52\
\x6b\x65\xc9\x1c\xd8\x02\x36\x09\x60\x0b\xd8\x1c\xf5\x36\x4b\x29\
\xa7\xc0\x0d\x70\x06\x94\x59\x8d\xa6\x61\x03\xbc\x02\xeb\x5a\xeb\
\x67\xf3\xa6\xd6\xda\x2c\x86\x43\xbf\x03\x75\x0f\xd7\x0b\x70\xdc\
\x9c\xb7\x13\x60\xfd\x0f\x44\xa7\x5c\x77\xe3\xf3\xf6\xee\x80\xcb\
\xce\xde\x3e\x71\x31\x7e\xe8\xdd\x01\x87\xe3\x87\x5b\xe0\x64\x4a\
\x9d\x89\x79\x03\x1e\xdb\xad\xe6\x7c\xdd\x4b\x70\xcc\x15\x70\xbe\
\x4b\xa3\x99\x79\xe2\x57\x80\x86\xc5\xff\x0d\x26\x80\x2d\x60\x93\
\x00\xb6\x80\x4d\x02\xd8\x02\x36\x09\x60\x0b\xd8\x24\x80\x2d\x60\
\x93\x00\xb6\x80\x4d\x02\xd8\x02\x36\x09\x60\x0b\xd8\x24\x80\x2d\
\x60\x93\x00\xb6\x80\x4d\x02\xd8\x02\x36\x09\x60\x0b\xd8\x24\x80\
\x2d\x60\x93\x00\xb6\x80\x4d\x02\xd8\x02\x36\x09\x60\x0b\xd8\x24\
\x80\x2d\x60\x93\x00\xb6\x80\x4d\x02\xd8\x02\x36\x09\x60\x0b\xd8\
\x24\x80\x2d\x60\x93\x00\xb6\x80\xcd\xe2\x03\xfc\xf9\xb9\xfc\x6a\
\x0e\x0b\x91\xde\x2f\xe0\x63\x76\x8b\x79\x69\x86\xa6\x7a\x01\xee\
\x19\x66\x6b\xf6\x91\x2f\xe0\xa1\xd9\xd9\x1e\x9a\xfa\x19\x9c\x5a\
\x31\x4c\x9b\xd8\x03\x4e\xbb\x5a\x1b\xe0\x19\xb8\xde\x3e\x6b\xc9\
\xec\xf0\xc2\x49\x00\x5b\xc0\xe6\x1b\x5d\x1e\xff\x8f\xc6\x8f\xa9\
\x97\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x05\
\x00\x6f\xa6\x53\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x73\
\x00\x0b\
\x07\x50\x31\x47\
\x00\x65\
\x00\x6c\x00\x6c\x00\x69\x00\x70\x00\x73\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0d\
\x0f\x55\x06\x27\
\x00\x72\
\x00\x65\x00\x63\x00\x74\x00\x61\x00\x6e\x00\x67\x00\x6c\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x02\
\x00\x00\x00\x10\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x2c\x00\x00\x00\x00\x00\x01\x00\x00\x07\x14\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x10\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x6d\xf2\xc8\xe0\x90\
\x00\x00\x00\x2c\x00\x00\x00\x00\x00\x01\x00\x00\x07\x14\
\x00\x00\x01\x6d\xf2\xc8\xe4\x78\
"
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
python
|
#!/usr/bin/python
# (c) 2018 Jim Hawkins. MIT licensed, see https://opensource.org/licenses/MIT
# Part of Blender Driver, see https://github.com/sjjhsjjh/blender-driver
"""Path Store unit test module. Tests in this module can be run like:
python3 path_store/test.py TestInterceptProperty
"""
# Exit if run other than as a module.
if __name__ == '__main__':
print(__doc__)
raise SystemExit(1)
# Standard library imports, in alphabetic order.
#
# Unit test module.
# https://docs.python.org/3.5/library/unittest.html
import unittest
#
# Local imports.
#
# Utilities.
import path_store.test.principal
principal = path_store.test.principal
#
# Modules under test.
from hosted import InterceptProperty, InterceptCast
class ReadOnly(list):
"""Class with same behaviour as the KX_GameObject.worldScale property:
Has __setitem__ and __delitem__ but is read only, so raises an error when
either is called.
"""
def __setitem__(self, specifier, value):
raise AttributeError("ReadOnly __setitem__ called.")
def __delitem__(self, specifier):
raise TypeError("ReadOnly __delitem__ called.")
class Destination(object):
destinationTuple = None
destinationList = None
destinationReadOnly = None
def __init__(self, tupleValue, listValue, readOnlyValue):
self.destinationTuple = tuple(tupleValue)
self.destinationList = listValue
self.destinationReadOnly = readOnlyValue
class Principal(principal.Principal):
"""Subclass of Principal that uses InterceptProperty to access properties of
an object that is itself a property, like a sub-property.
"""
@property
def destination(self):
return self._destination
@InterceptProperty(InterceptCast.IFDIFFERENTTHEN)
def destinationTuple(self):
return self.destination.destinationTuple
@destinationTuple.intercept_getter
def destinationTuple(self):
return self._destinationTuple
@destinationTuple.intercept_setter
def destinationTuple(self, value):
self._destinationTuple = value
@destinationTuple.destination_setter
def destinationTuple(self, value):
self.destination.destinationTuple = value
@InterceptProperty(InterceptCast.IFDIFFERENTNOW)
def destinationList(self):
return self.destination.destinationList
@destinationList.intercept_getter
def destinationList(self):
return self._destinationList
@destinationList.intercept_setter
def destinationList(self, value):
self._destinationList = value
@destinationList.destination_setter
def destinationList(self, value):
self.destination.destinationList = value
@InterceptProperty(InterceptCast.IFDIFFERENTTHEN)
def destinationReadOnly(self):
return self.destination.destinationReadOnly
@destinationReadOnly.intercept_getter
def destinationReadOnly(self):
return self._destinationReadOnly
@destinationReadOnly.intercept_setter
def destinationReadOnly(self, value):
self._destinationReadOnly = value
@destinationReadOnly.destination_setter
def destinationReadOnly(self, value):
self.destination.destinationReadOnly = value
def __init__(self, tupleValue, listValue, readOnlyValue, value=None):
self._destination = Destination(tupleValue, listValue, readOnlyValue)
super().__init__(value)
class Base(object):
@property
def destinationTuple(self):
return self._destinationTuple
@destinationTuple.setter
def destinationTuple(self, value):
self._destinationTuple = value
@property
def destinationList(self):
return self._destinationList
@destinationList.setter
def destinationList(self, value):
self._destinationList = value
@property
def destinationReadOnly(self):
return self._destinationReadOnly
@destinationReadOnly.setter
def destinationReadOnly(self, value):
self._destinationReadOnly = value
@property
def destinationStr(self):
return self._destinationStr
@destinationStr.setter
def destinationStr(self, value):
self._destinationStr = value
def __init__(self, tupleValue, listValue, readOnlyValue, strValue):
self._destinationTuple = tuple(tupleValue)
self._destinationList = listValue
self._destinationReadOnly = readOnlyValue
self._destinationStr = strValue
class InterceptSuper(Base):
"""Subclass of Base that uses InterceptProperty with super() in its
intercept setter and getter. It also has properties that bypass the
intercept.
This class has to use internal properties with different names than the base
class internal property names.
"""
@InterceptProperty(InterceptCast.IFDIFFERENTTHEN)
def destinationTuple(self):
return super().destinationTuple
@destinationTuple.intercept_getter
def destinationTuple(self):
return self._destinationTupleIntercept
@destinationTuple.intercept_setter
def destinationTuple(self, value):
self._destinationTupleIntercept = value
@destinationTuple.destination_setter
def destinationTuple(self, value):
# It'd be nice to do this:
#
# super(self).destinationTuple = value
#
# But see this issue: http://bugs.python.org/issue14965
# So instead, we have the following.
super(self.__class__, self.__class__
).destinationTuple.__set__(self, value)
#
# The following would also work and wouldn't incur instantiation of a
# super object.
# Base.destinationTuple.__set__(self, value)
@InterceptProperty(InterceptCast.IFDIFFERENTTHEN)
def destinationList(self):
return super().destinationList
@destinationList.intercept_getter
def destinationList(self):
return self._destinationListIntercept
@destinationList.intercept_setter
def destinationList(self, value):
self._destinationListIntercept = value
@destinationList.destination_setter
def destinationList(self, value):
Base.destinationList.__set__(self, value)
@InterceptProperty(InterceptCast.IFDIFFERENTTHEN)
def destinationReadOnly(self):
return super().destinationReadOnly
@destinationReadOnly.intercept_getter
def destinationReadOnly(self):
return self._destinationReadOnlyIntercept
@destinationReadOnly.intercept_setter
def destinationReadOnly(self, value):
self._destinationReadOnlyIntercept = value
@destinationReadOnly.destination_setter
def destinationReadOnly(self, value):
Base.destinationReadOnly.__set__(self, value)
# Properties for access to base properties without interception, for testing
# only.
@property
def baseTuple(self):
return super().destinationTuple
@property
def baseList(self):
return super().destinationList
@property
def baseReadOnly(self):
return super().destinationReadOnly
class InterceptAlternative(Base):
"""Subclass of Base that uses InterceptProperty with different property
    names. The names have Items appended as a reminder that the main reason for
    interception is to make the items accessible.
This class uses conventionally named internal properties.
"""
@InterceptProperty(InterceptCast.IFDIFFERENTTHEN)
def destinationTupleItems(self):
return self.destinationTuple
@destinationTupleItems.intercept_getter
def destinationTupleItems(self):
return self._destinationTupleItems
@destinationTupleItems.intercept_setter
def destinationTupleItems(self, value):
self._destinationTupleItems = value
@destinationTupleItems.destination_setter
def destinationTupleItems(self, value):
self.destinationTuple = value
@InterceptProperty(InterceptCast.IFDIFFERENTTHEN)
def destinationListItems(self):
return self.destinationList
@destinationListItems.intercept_getter
def destinationListItems(self):
return self._destinationListItems
@destinationListItems.intercept_setter
def destinationListItems(self, value):
self._destinationListItems = value
@destinationListItems.destination_setter
def destinationListItems(self, value):
self.destinationList = value
@InterceptProperty(InterceptCast.IFDIFFERENTTHEN)
def destinationReadOnlyItems(self):
return self.destinationReadOnly
@destinationReadOnlyItems.intercept_getter
def destinationReadOnlyItems(self):
return self._destinationReadOnlyItems
@destinationReadOnlyItems.intercept_setter
def destinationReadOnlyItems(self, value):
self._destinationReadOnlyItems = value
@destinationReadOnlyItems.destination_setter
def destinationReadOnlyItems(self, value):
self.destinationReadOnly = value
class InterceptCastOptions(Base):
@InterceptProperty(InterceptCast.NONE)
def destinationStrCastNo(self):
return self.destinationStr
@destinationStrCastNo.intercept_getter
def destinationStrCastNo(self):
return self._destinationStrCastNo
@destinationStrCastNo.intercept_setter
def destinationStrCastNo(self, value):
self._destinationStrCastNo = value
@destinationStrCastNo.destination_setter
def destinationStrCastNo(self, value):
self.destinationStr = value
class TestInterceptProperty(unittest.TestCase):
def test_destination_class(self):
tuple_ = (0,)
list_ = [1]
readonly = ReadOnly([2])
destination = Destination(tuple_, list_, readonly)
self.assertIs(destination.destinationTuple, tuple_)
self.assertIs(destination.destinationList, list_)
self.assertIs(destination.destinationReadOnly, readonly)
def test_destination_setter(self):
tuple0 = (1,2)
self.assertIs(tuple0, tuple(tuple0))
self.assertIs(tuple0, tuple0.__class__(tuple0))
list0 = [3,4]
self.assertIsNot(list0, list(list0))
self.assertIsNot(list0, list0.__class__(list0))
principal = Principal(tuple0, list0, tuple())
self.assertIs(principal.destination.destinationTuple, tuple0)
self.assertIs(principal.destination.destinationList.__class__
, list0.__class__)
self.assertIs(principal.destination.destinationList, list0)
tuple1 = (5,6)
destination = principal.destinationTuple
principal.destinationTuple = tuple1
#
# Check that the Holder object persists through the set.
self.assertIs(principal.destinationTuple, destination)
self.assertIs(principal.destination.destinationTuple, tuple1)
self.assertIsNot(principal.destination.destinationTuple, tuple0)
list1 = [7,8]
destination = principal.destinationList
principal.destinationList = list1
self.assertIs(principal.destinationList, destination)
self.assertIs(principal.destination.destinationList, list1)
self.assertIsNot(principal.destination.destinationList, list0)
destination = principal.destinationList
principal.destinationList = tuple1
# Getting principal.destinationList returns the holder, which doesn't
# change.
self.assertIs(principal.destinationList, destination)
self.assertEqual(principal.destination.destinationList, list(tuple1))
self.assertIsInstance(tuple1, tuple)
self.assertIsInstance(principal.destination.destinationList, list)
def test_tuple_destination_setitem(self):
principal = Principal([1,2], tuple(), tuple())
intercept = principal.destinationTuple
underlaying = principal.destination.destinationTuple
self.assertIsInstance(underlaying, tuple)
self.assertEqual((1,2), principal.destinationTuple[:])
principal.destinationTuple[1] = 3
#
# In all cases, the underlying property must change, because it is a
# tuple and therefore immutable, hence assertIsNot.
self.assertIsNot(underlaying, principal.destination.destinationTuple)
underlaying = principal.destination.destinationTuple
self.assertIsInstance(underlaying, tuple)
#
# The intercept variable is a reference to the holder, so it doesn't
# change in any case, hence assertIs.
self.assertIs(intercept, principal.destinationTuple)
self.assertEqual([1,3], list(principal.destinationTuple[:]))
principal.destinationTuple[2:2] = (4,)
self.assertIsNot(underlaying, principal.destination.destinationTuple)
underlaying = principal.destination.destinationTuple
self.assertIsInstance(underlaying, tuple)
self.assertIs(intercept, principal.destinationTuple)
self.assertEqual([1,3,4], list(principal.destinationTuple[:]))
principal.destinationTuple[0:1] = (5, 6, 7)
self.assertIsNot(underlaying, principal.destination.destinationTuple)
underlaying = principal.destination.destinationTuple
self.assertIsInstance(underlaying, tuple)
self.assertIs(intercept, principal.destinationTuple)
self.assertEqual([5,6,7,3,4], list(principal.destinationTuple[:]))
del principal.destinationTuple[1]
self.assertIsNot(underlaying, principal.destination.destinationTuple)
underlaying = principal.destination.destinationTuple
self.assertIsInstance(underlaying, tuple)
self.assertIs(intercept, principal.destinationTuple)
self.assertEqual([5,7,3,4], list(principal.destinationTuple[:]))
def test_readonly_destination_setitem(self):
principal = Principal(tuple(), tuple(), ReadOnly([1,2]))
intercept = principal.destinationReadOnly
underlaying = principal.destination.destinationReadOnly
self.assertIsInstance(underlaying, ReadOnly)
self.assertEqual([1,2], principal.destinationReadOnly[:])
principal.destinationReadOnly[1] = 3
#
# In all cases, the underlying property must change, because it is
# immutable, hence assertIsNot.
self.assertIsNot(underlaying, principal.destination.destinationReadOnly)
underlaying = principal.destination.destinationReadOnly
self.assertIsInstance(underlaying, ReadOnly)
self.assertIs(intercept, principal.destinationReadOnly)
self.assertEqual([1,3], principal.destinationReadOnly[:])
principal.destinationReadOnly[2:2] = (4,)
self.assertIsNot(underlaying, principal.destination.destinationReadOnly)
underlaying = principal.destination.destinationReadOnly
self.assertIsInstance(underlaying, ReadOnly)
self.assertIs(intercept, principal.destinationReadOnly)
self.assertEqual([1,3,4], principal.destinationReadOnly[:])
principal.destinationReadOnly[0:1] = (5, 6, 7)
self.assertIsNot(underlaying, principal.destination.destinationReadOnly)
underlaying = principal.destination.destinationReadOnly
self.assertIsInstance(underlaying, ReadOnly)
self.assertIs(intercept, principal.destinationReadOnly)
self.assertEqual([5,6,7,3,4], principal.destinationReadOnly[:])
del principal.destinationReadOnly[1]
self.assertIsNot(underlaying, principal.destination.destinationReadOnly)
underlaying = principal.destination.destinationReadOnly
self.assertIsInstance(underlaying, ReadOnly)
self.assertIs(intercept, principal.destinationReadOnly)
self.assertEqual([5,7,3,4], principal.destinationReadOnly[:])
def test_list_destination_setitem(self):
list_ = [1,2]
principal = Principal(tuple(), list_, tuple())
intercept = principal.destinationList
underlaying = principal.destination.destinationList
self.assertEqual(list_, intercept[:])
self.assertIs(list_, underlaying)
principal.destinationList[1] = 3
#
# The underlying property shouldn't change, because it is mutable.
self.assertIs(underlaying, principal.destination.destinationList)
self.assertIs(intercept, principal.destinationList)
self.assertIs(list_, underlaying)
self.assertEqual([1,3], list_)
principal.destinationList[2:2] = [4]
self.assertIs(underlaying, principal.destination.destinationList)
self.assertIs(intercept, principal.destinationList)
self.assertIs(list_, underlaying)
self.assertEqual([1,3,4], list_)
principal.destinationList[0:1] = (5, 6, 7)
self.assertIs(underlaying, principal.destination.destinationList)
self.assertIs(intercept, principal.destinationList)
self.assertIs(list_, underlaying)
self.assertEqual([5,6,7,3,4], list_)
del principal.destinationList[1]
self.assertIs(underlaying, principal.destination.destinationList)
self.assertIs(intercept, principal.destinationList)
self.assertIs(list_, underlaying)
self.assertEqual([5,7,3,4], list_)
def test_list_destination_getitem(self):
listItem1 = [None]
list_ = [1, listItem1]
principal = Principal(tuple(), list_, tuple())
self.assertEqual(1, principal.destinationList[0])
self.assertIs(listItem1, principal.destinationList[1])
def test_destination_property_method(self):
principal = Principal([1,2], tuple(), tuple())
intercept = principal.destinationTuple
self.assertEqual(2, len(intercept))
self.assertEqual(1, intercept.count(2))
def test_base_class(self):
tuple_ = (0,)
list_ = [1]
readonly = ReadOnly([2])
str_ = "three"
base = Base(tuple_, list_, readonly, str_)
self.assertIs(base.destinationTuple, tuple_)
self.assertIs(base.destinationList, list_)
self.assertIs(base.destinationReadOnly, readonly)
self.assertIs(base.destinationStr, str_)
def test_intercept_super_class(self):
tuple_ = (0,)
list_ = [1]
readonly = ReadOnly([2])
intercept = InterceptSuper(tuple_, list_, readonly, "three")
self.assertIs(intercept.baseTuple, tuple_)
self.assertIs(intercept.baseList, list_)
self.assertIs(intercept.baseReadOnly, readonly)
def test_super_setter(self):
tuple0 = (1,2)
list0 = [3,4]
intercept = InterceptSuper(tuple0, list0, tuple(), "three")
self.assertIsNot(intercept.destinationTuple, tuple0)
self.assertEqual(intercept.destinationTuple[:], tuple0)
self.assertIs(intercept.baseTuple, tuple0)
self.assertIsNot(intercept.destinationList.__class__, list0.__class__)
self.assertEqual(intercept.destinationList[:], list0)
tuple1 = (5,6)
propertyInstance = intercept.destinationTuple
intercept.destinationTuple = tuple1
#
# Check that the Holder object persists through the set.
self.assertIs(intercept.destinationTuple, propertyInstance)
self.assertEqual(intercept.destinationTuple[:], tuple1)
self.assertIs(intercept.baseTuple, tuple1)
self.assertIsNot(intercept.destinationTuple, tuple0)
list1 = [7,8]
propertyInstance = intercept.destinationList
intercept.destinationList = list1
self.assertIs(intercept.destinationList, propertyInstance)
self.assertEqual(intercept.destinationList[:], list1)
self.assertIs(intercept.baseList, list1)
self.assertIsNot(intercept.destinationList, list0)
propertyInstance = intercept.destinationList
intercept.destinationList = tuple1
# Getting intercept.destinationList returns the holder, which doesn't
# change.
self.assertIs(intercept.destinationList, propertyInstance)
self.assertEqual(intercept.destinationList[:], list(tuple1))
self.assertIsInstance(tuple1, tuple)
self.assertIsInstance(intercept.baseList, list)
def test_tuple_super_setitem(self):
intercept = InterceptSuper([1,2], tuple(), tuple(), "three")
propertyInstance = intercept.destinationTuple
underlaying = intercept.baseTuple
self.assertIsInstance(underlaying, tuple)
self.assertEqual((1,2), intercept.destinationTuple[:])
intercept.destinationTuple[1] = 3
#
# In all cases, the underlying property must change, because it is a
# tuple and therefore immutable, hence assertIsNot.
self.assertIsNot(underlaying, intercept.baseTuple)
underlaying = intercept.baseTuple
self.assertIsInstance(underlaying, tuple)
#
# The propertyInstance variable is a reference to the holder, so it doesn't
# change in any case, hence assertIs.
self.assertIs(propertyInstance, intercept.destinationTuple)
self.assertEqual((1,3), intercept.destinationTuple[:])
intercept.destinationTuple[2:2] = (4,)
self.assertIsNot(underlaying, intercept.baseTuple)
underlaying = intercept.baseTuple
self.assertIsInstance(underlaying, tuple)
self.assertIs(propertyInstance, intercept.destinationTuple)
self.assertEqual((1,3,4), intercept.destinationTuple[:])
intercept.destinationTuple[0:1] = (5, 6, 7)
self.assertIsNot(underlaying, intercept.baseTuple)
underlaying = intercept.baseTuple
self.assertIsInstance(underlaying, tuple)
self.assertIs(propertyInstance, intercept.destinationTuple)
self.assertEqual((5,6,7,3,4), intercept.destinationTuple[:])
del intercept.destinationTuple[1]
self.assertIsNot(underlaying, intercept.baseTuple)
underlaying = intercept.baseTuple
self.assertIsInstance(underlaying, tuple)
self.assertIs(propertyInstance, intercept.destinationTuple)
self.assertEqual((5,7,3,4), intercept.destinationTuple[:])
def test_readonly_super_setitem(self):
intercept = InterceptSuper(tuple(), tuple(), ReadOnly([1,2]), "three")
propertyInstance = intercept.destinationReadOnly
underlaying = intercept.baseReadOnly
self.assertIsInstance(underlaying, ReadOnly)
self.assertEqual([1,2], intercept.destinationReadOnly[:])
intercept.destinationReadOnly[1] = 3
#
# In all cases, the underlying property must change, because it is
# immutable, hence assertIsNot.
self.assertIsNot(underlaying, intercept.baseReadOnly)
underlaying = intercept.baseReadOnly
self.assertIsInstance(underlaying, ReadOnly)
#
# The propertyInstance variable is a reference to the holder, so it doesn't
# change in any case, hence assertIs.
self.assertIs(propertyInstance, intercept.destinationReadOnly)
self.assertEqual([1,3], intercept.destinationReadOnly[:])
intercept.destinationReadOnly[2:2] = (4,)
self.assertIsNot(underlaying, intercept.baseReadOnly)
underlaying = intercept.baseReadOnly
self.assertIsInstance(underlaying, ReadOnly)
self.assertIs(propertyInstance, intercept.destinationReadOnly)
self.assertEqual([1,3,4], intercept.destinationReadOnly[:])
intercept.destinationReadOnly[0:1] = (5, 6, 7)
self.assertIsNot(underlaying, intercept.baseReadOnly)
underlaying = intercept.baseReadOnly
self.assertIsInstance(underlaying, ReadOnly)
self.assertIs(propertyInstance, intercept.destinationReadOnly)
self.assertEqual([5,6,7,3,4], intercept.destinationReadOnly[:])
del intercept.destinationReadOnly[1]
self.assertIsNot(underlaying, intercept.baseReadOnly)
underlaying = intercept.baseReadOnly
self.assertIsInstance(underlaying, ReadOnly)
self.assertIs(propertyInstance, intercept.destinationReadOnly)
self.assertEqual([5,7,3,4], intercept.destinationReadOnly[:])
def test_list_super_setitem(self):
list_ = [1,2]
intercept = InterceptSuper(tuple(), list_, tuple(), "three")
propertyInstance = intercept.destinationList
underlaying = intercept.baseList
self.assertEqual(list_, propertyInstance[:])
self.assertIs(list_, underlaying)
intercept.destinationList[1] = 3
#
# The underlying property shouldn't change, because it is mutable.
self.assertIs(underlaying, intercept.baseList)
self.assertIs(propertyInstance, intercept.destinationList)
self.assertIs(list_, underlaying)
self.assertEqual([1,3], list_)
intercept.destinationList[2:2] = [4]
self.assertIs(underlaying, intercept.baseList)
self.assertIs(propertyInstance, intercept.destinationList)
self.assertIs(list_, underlaying)
self.assertEqual([1,3,4], list_)
intercept.destinationList[0:1] = (5, 6, 7)
self.assertIs(underlaying, intercept.baseList)
self.assertIs(propertyInstance, intercept.destinationList)
self.assertIs(list_, underlaying)
self.assertEqual([5,6,7,3,4], list_)
del intercept.destinationList[1]
self.assertIs(underlaying, intercept.baseList)
self.assertIs(propertyInstance, intercept.destinationList)
self.assertIs(list_, underlaying)
self.assertEqual([5,7,3,4], list_)
def test_list_super_getitem(self):
listItem1 = [None]
list_ = [1, listItem1]
intercept = InterceptSuper(tuple(), list_, tuple(), "three")
self.assertEqual(1, intercept.destinationList[0])
self.assertIs(listItem1, intercept.destinationList[1])
self.assertNotIsInstance(intercept.destinationList, list)
def test_super_method(self):
intercept = InterceptSuper([1,2], tuple(), tuple(), "three")
self.assertEqual(2, len(intercept.destinationTuple))
self.assertEqual(1, intercept.destinationTuple.count(2))
def test_intercept_alternative_class(self):
tuple_ = (0,)
list_ = [1]
readonly = ReadOnly([2])
intercept = InterceptAlternative(tuple_, list_, readonly, "three")
self.assertIs(intercept.destinationTuple, tuple_)
self.assertIs(intercept.destinationList, list_)
self.assertIs(intercept.destinationReadOnly, readonly)
self.assertEqual(intercept.destinationTupleItems[:], tuple_)
self.assertEqual(intercept.destinationListItems[:], list_)
self.assertEqual(intercept.destinationReadOnlyItems[:], readonly)
def test_alternative_setter(self):
tuple0 = (1,2)
list0 = [3,4]
intercept = InterceptAlternative(tuple0, list0, tuple(), "three")
self.assertIs(intercept.destinationTuple, tuple0)
self.assertIsNot(intercept.destinationTupleItems, tuple0)
self.assertEqual(intercept.destinationTupleItems[:], tuple0)
self.assertIsNot(
intercept.destinationTupleItems.__class__, tuple0.__class__)
self.assertIs(intercept.destinationList, list0)
self.assertIsNot(intercept.destinationListItems, list0)
self.assertEqual(intercept.destinationListItems[:], list0)
self.assertIsNot(
intercept.destinationListItems.__class__, list0.__class__)
tuple1 = (5,6)
propertyInstance = intercept.destinationTupleItems
intercept.destinationTupleItems = tuple1
self.assertIs(intercept.destinationTuple, tuple1)
self.assertIs(intercept.destinationTupleItems, propertyInstance)
self.assertIsNot(intercept.destinationTuple, tuple0)
self.assertEqual(intercept.destinationTupleItems[:], tuple1)
list1 = [7,8]
propertyInstance = intercept.destinationListItems
intercept.destinationListItems = list1
self.assertIs(intercept.destinationListItems, propertyInstance)
self.assertEqual(intercept.destinationListItems[:], list1)
self.assertIs(intercept.destinationList, list1)
self.assertIsNot(intercept.destinationList, list0)
self.assertIsNot(
intercept.destinationList, intercept.destinationListItems)
propertyInstance = intercept.destinationListItems
intercept.destinationListItems = tuple1
# Getting intercept.destinationList returns the holder, which doesn't
# change.
self.assertIs(intercept.destinationListItems, propertyInstance)
self.assertEqual(intercept.destinationList[:], list(tuple1))
self.assertEqual(intercept.destinationListItems[:], list(tuple1))
self.assertIsInstance(tuple1, tuple)
self.assertIsInstance(intercept.destinationList, list)
def test_alternative_bypass_setter(self):
tuple0 = (1,2)
list0 = [3,4]
intercept = InterceptAlternative(tuple0, list0, tuple(), "three")
self.assertIs(intercept.destinationTuple, tuple0)
self.assertIs(intercept.destinationList, list0)
tuple1 = (5,6)
propertyInstance = intercept.destinationTupleItems
bypassInstance = intercept.destinationTuple
intercept.destinationTuple = tuple1
self.assertIs(intercept.destinationTuple, tuple1)
self.assertIsNot(intercept.destinationTuple, bypassInstance)
self.assertIs(intercept.destinationTupleItems, propertyInstance)
self.assertEqual(intercept.destinationTupleItems[:], tuple1)
list1 = [7,8]
propertyInstance = intercept.destinationListItems
bypassInstance = intercept.destinationList
intercept.destinationList = list1
self.assertIs(intercept.destinationList, list1)
self.assertIsNot(intercept.destinationList, list0)
self.assertIsNot(intercept.destinationList, bypassInstance)
self.assertIs(intercept.destinationListItems, propertyInstance)
self.assertEqual(intercept.destinationListItems[:], list1)
propertyInstance = intercept.destinationListItems
bypassInstance = intercept.destinationList
intercept.destinationList = tuple1
self.assertIs(intercept.destinationList, tuple1)
self.assertIs(intercept.destinationListItems, propertyInstance)
self.assertIsNot(intercept.destinationList, bypassInstance)
self.assertIsInstance(tuple1, tuple)
self.assertEqual(intercept.destinationListItems[:], tuple1)
def test_tuple_alternative_setitem(self):
intercept = InterceptAlternative([1,2], tuple(), tuple(), "three")
propertyInstance = intercept.destinationTupleItems
underlaying = intercept.destinationTuple
self.assertIsInstance(underlaying, tuple)
self.assertEqual((1,2), intercept.destinationTuple[:])
tuple_ = tuple((9,10))
expected = None
try:
tuple_[0] = 11
except TypeError as error:
expected = error
with self.assertRaises(TypeError) as context:
intercept.destinationTuple[1] = 3
self.assertEqual(str(context.exception), str(expected))
intercept.destinationTupleItems[1] = 3
#
# In all cases, the underlying property must change, because it is a
# tuple and therefore immutable, hence assertIsNot.
self.assertIsNot(underlaying, intercept.destinationTuple)
underlaying = intercept.destinationTuple
self.assertIsInstance(underlaying, tuple)
#
# The propertyInstance variable is a reference to the holder, so it doesn't
# change in any case, hence assertIs.
self.assertIs(propertyInstance, intercept.destinationTupleItems)
self.assertEqual((1,3), intercept.destinationTuple)
self.assertEqual(
intercept.destinationTuple, intercept.destinationTupleItems[:])
intercept.destinationTupleItems[2:2] = (4,)
self.assertIsNot(underlaying, intercept.destinationTuple)
underlaying = intercept.destinationTuple
self.assertIsInstance(underlaying, tuple)
self.assertIs(propertyInstance, intercept.destinationTupleItems)
self.assertEqual((1,3,4), intercept.destinationTuple)
self.assertEqual(
intercept.destinationTuple, intercept.destinationTupleItems[:])
intercept.destinationTupleItems[0:1] = (5, 6, 7)
self.assertIsNot(underlaying, intercept.destinationTuple)
underlaying = intercept.destinationTuple
self.assertIsInstance(underlaying, tuple)
self.assertIs(propertyInstance, intercept.destinationTupleItems)
self.assertEqual((5,6,7,3,4), intercept.destinationTuple)
self.assertEqual(
intercept.destinationTuple, intercept.destinationTupleItems[:])
del intercept.destinationTupleItems[1]
self.assertIsNot(underlaying, intercept.destinationTuple)
underlaying = intercept.destinationTuple
self.assertIsInstance(underlaying, tuple)
self.assertIs(propertyInstance, intercept.destinationTupleItems)
self.assertEqual((5,7,3,4), intercept.destinationTuple)
self.assertEqual(
intercept.destinationTuple, intercept.destinationTupleItems[:])
def test_readonly_alternative_setitem(self):
intercept = InterceptAlternative(
tuple(), tuple(), ReadOnly([1,2]), "three")
propertyInstance = intercept.destinationReadOnlyItems
underlaying = intercept.destinationReadOnly
self.assertIsInstance(underlaying, ReadOnly)
self.assertEqual([1,2], intercept.destinationReadOnly[:])
intercept.destinationReadOnlyItems[1] = 3
#
        # In all cases, the underlying property must change, because it is
        # immutable, hence assertIsNot.
self.assertIsNot(underlaying, intercept.destinationReadOnly)
underlaying = intercept.destinationReadOnly
self.assertIsInstance(underlaying, ReadOnly)
#
# The propertyInstance variable is a reference to the holder, so it doesn't
# change in any case, hence assertIs.
self.assertIs(propertyInstance, intercept.destinationReadOnlyItems)
self.assertEqual([1,3], intercept.destinationReadOnly)
self.assertEqual(
intercept.destinationReadOnly, intercept.destinationReadOnlyItems[:])
intercept.destinationReadOnlyItems[2:2] = (4,)
self.assertIsNot(underlaying, intercept.destinationReadOnly)
underlaying = intercept.destinationReadOnly
self.assertIsInstance(underlaying, ReadOnly)
self.assertIs(propertyInstance, intercept.destinationReadOnlyItems)
self.assertEqual([1,3,4], intercept.destinationReadOnly)
self.assertEqual(
intercept.destinationReadOnly, intercept.destinationReadOnlyItems[:])
intercept.destinationReadOnlyItems[0:1] = (5, 6, 7)
self.assertIsNot(underlaying, intercept.destinationReadOnly)
underlaying = intercept.destinationReadOnly
self.assertIsInstance(underlaying, ReadOnly)
self.assertIs(propertyInstance, intercept.destinationReadOnlyItems)
self.assertEqual([5,6,7,3,4], intercept.destinationReadOnly)
self.assertEqual(
intercept.destinationReadOnly, intercept.destinationReadOnlyItems[:])
del intercept.destinationReadOnlyItems[1]
self.assertIsNot(underlaying, intercept.destinationReadOnly)
underlaying = intercept.destinationReadOnly
self.assertIsInstance(underlaying, ReadOnly)
self.assertIs(propertyInstance, intercept.destinationReadOnlyItems)
self.assertEqual([5,7,3,4], intercept.destinationReadOnly)
self.assertEqual(
intercept.destinationReadOnly, intercept.destinationReadOnlyItems[:])
def test_list_alternative_setitem(self):
list_ = [1,2]
intercept = InterceptAlternative(tuple(), list_, tuple(), "three")
propertyInstance = intercept.destinationListItems
underlaying = intercept.destinationList
self.assertEqual(list_, propertyInstance[:])
self.assertIs(list_, underlaying)
intercept.destinationListItems[1] = 3
#
# The underlying property shouldn't change, because it is mutable.
self.assertIs(underlaying, intercept.destinationList)
self.assertIs(propertyInstance, intercept.destinationListItems)
self.assertIs(list_, underlaying)
self.assertEqual([1,3], list_)
self.assertEqual(list_, intercept.destinationListItems[:])
intercept.destinationListItems[2:2] = [4]
self.assertIs(underlaying, intercept.destinationList)
self.assertIs(propertyInstance, intercept.destinationListItems)
self.assertIs(list_, underlaying)
self.assertEqual([1,3,4], list_)
self.assertEqual(list_, intercept.destinationListItems[:])
intercept.destinationListItems[0:1] = (5, 6, 7)
self.assertIs(underlaying, intercept.destinationList)
self.assertIs(propertyInstance, intercept.destinationListItems)
self.assertIs(list_, underlaying)
self.assertEqual([5,6,7,3,4], list_)
self.assertEqual(list_, intercept.destinationListItems[:])
del intercept.destinationListItems[1]
self.assertIs(underlaying, intercept.destinationList)
self.assertIs(propertyInstance, intercept.destinationListItems)
self.assertIs(list_, underlaying)
self.assertEqual([5,7,3,4], list_)
self.assertEqual(list_, intercept.destinationListItems[:])
def test_list_alternative_getitem(self):
listItem1 = [None]
list_ = [1, listItem1]
intercept = InterceptAlternative(tuple(), list_, tuple(), "three")
self.assertEqual(1, intercept.destinationListItems[0])
self.assertIs(listItem1, intercept.destinationListItems[1])
self.assertNotIsInstance(intercept.destinationListItems, list)
def test_alternative_method(self):
intercept = InterceptAlternative([1,2], tuple(), tuple(), "three")
self.assertEqual(2, len(intercept.destinationTupleItems))
self.assertEqual(1, intercept.destinationTupleItems.count(2))
# ToDo:
# Test that the attribute changes, for example from list to tuple, if cast=NONE
# Test ISDIFFERENTNOW vs ISDIFFERENTTHEN
# Test two subclass instances.
# - Test setting the intercept property as a whole: bypass_setter.
# - Test like bypass_setitem.
|
python
|
from unittest.case import TestCase
from responsebot.handlers.base import BaseTweetHandler
from responsebot.handlers.event import BaseEventHandler
try:
from mock import MagicMock, patch
except ImportError:
    from unittest.mock import MagicMock, patch
class Handler(BaseTweetHandler):
class ValidEventHandler(BaseEventHandler):
pass
def __init__(self, *args, **kwargs):
self.event_handler_class = self.ValidEventHandler
super(Handler, self).__init__(*args, **kwargs)
class HandlerWithInvalidEventHandler(BaseTweetHandler):
class InvalidEventHandler(object):
pass
def __init__(self, *args, **kwargs):
self.event_handler_class = self.InvalidEventHandler
super(HandlerWithInvalidEventHandler, self).__init__(*args, **kwargs)
class HandlerWithErrorneousEventHandler(BaseTweetHandler):
class ErrorneousEventHandler(BaseEventHandler):
def __init__(self, client):
raise Exception
def __init__(self, *args, **kwargs):
self.event_handler_class = self.ErrorneousEventHandler
super(HandlerWithErrorneousEventHandler, self).__init__(*args, **kwargs)
class BaseTweetHandlerTestCase(TestCase):
def test_register_event_handler_on_user_stream(self):
client = MagicMock(config={'user_stream': True})
handler = Handler(client)
self.assertTrue(isinstance(handler.event_handler, Handler.ValidEventHandler))
def test_not_register_event_handler_on_public_stream(self):
client = MagicMock(config={'user_stream': False})
handler = Handler(client)
self.assertIsNone(handler.event_handler)
def test_only_register_valid_event_handler(self):
client = MagicMock(config={'user_stream': True})
handler = HandlerWithInvalidEventHandler(client)
self.assertIsNone(handler.event_handler)
def test_call_event_handler_handle(self):
client = MagicMock()
handler = Handler(client)
handler.event_handler = MagicMock()
event = MagicMock()
handler.on_event(event)
handler.event_handler.handle.assert_called_once_with(event)
|
python
|
# coding: utf-8
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from cmsplugin_bootstrap_grid.forms import ColumnPluginForm
from cmsplugin_bootstrap_grid.models import Row, Column
from django.utils.translation import ugettext_lazy as _
class BootstrapRowPlugin(CMSPluginBase):
model = Row
name = _('Row')
module = _('Bootstrap')
render_template = 'cmsplugin_bootstrap_grid/row.html'
allow_children = True
def render(self, context, instance, placeholder):
context.update({'row': instance, 'placeholder': placeholder})
return context
class BootstrapColumnPlugin(CMSPluginBase):
model = Column
name = _('Column')
module = _('Bootstrap')
render_template = 'cmsplugin_bootstrap_grid/column.html'
allow_children = True
form = ColumnPluginForm
def render(self, context, instance, placeholder):
context.update({'column': instance, 'placeholder': placeholder})
return context
# register plugins
plugin_pool.register_plugin(BootstrapRowPlugin)
plugin_pool.register_plugin(BootstrapColumnPlugin)
|
python
|
import datajoint as dj
import numpy as np
import tqdm
from scipy import spatial
from . import design
from .design import *
from .fields import *
schema = dj.schema('photixxx')
@schema
class Tissue(dj.Computed):
definition = """
-> design.Geometry
---
density : float # points per mm^3
margin : float # (um) margin to include on boundaries
min_distance : float # (um)
points : longblob # cell xyz
npoints : int # total number of points in volume
inner_count : int # number of points inside the probe boundaries
volume : float # (mm^3), hull volume including outer points
"""
def make(self, key):
density = 110000 # per cubic mm
xyz = np.stack((design.Geometry.EPixel() & key).fetch('e_loc'))
margin = 75
bounds_min = xyz.min(axis=0) - margin
bounds_max = xyz.max(axis=0) + margin
volume = (bounds_max - bounds_min).prod() * 1e-9
npoints = int(volume * density + 0.5)
# generate random points that aren't too close
        min_distance = 10.0  # cells aren't allowed any closer
points = np.empty((npoints, 3), dtype='float32')
replace = np.r_[:npoints]
while replace.size:
points[replace, :] = np.random.rand(replace.size, 3) * (bounds_max - bounds_min) + bounds_min
replace = spatial.cKDTree(points).query_pairs(min_distance, output_type='ndarray')[:, 0]
# eliminate points that are too distant
inner = (spatial.Delaunay(xyz).find_simplex(points)) != -1
d, _ = spatial.cKDTree(points[inner, :]).query(points[~inner, :], distance_upper_bound=margin)
points = np.vstack((points[inner, :], points[~inner, :][d < margin, :]))
self.insert1(dict(
key, margin=margin,
density=density,
npoints=points.shape[0], min_distance=min_distance,
points=points,
volume=spatial.ConvexHull(points).volume * 1e-9,
inner_count=inner.sum()))
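# Illustrative sketch (not used by the pipeline above; the helper name is an assumption):
# the rejection-sampling idea from Tissue.make as a standalone function. Uniform points
# are drawn inside a box, and any point lying within `min_distance` of another point is
# redrawn, using a k-d tree to find the offending pairs.
def _sample_min_distance_points(bounds_min, bounds_max, npoints, min_distance):
    points = np.empty((npoints, 3), dtype='float32')
    replace = np.r_[:npoints]  # indices of points that still need (re)drawing
    while replace.size:
        points[replace, :] = np.random.rand(replace.size, 3) * (bounds_max - bounds_min) + bounds_min
        # query_pairs returns index pairs closer than min_distance; redraw one point of each pair
        replace = spatial.cKDTree(points).query_pairs(min_distance, output_type='ndarray')[:, 0]
    return points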
@schema
class Fluorescence(dj.Computed):
definition = """
-> Tissue
"""
class EField(dj.Part):
definition = """
# Fluorescence produced by cells per Joule of illumination
-> master
-> Geometry.EField
---
nphotons : int # number of simulated photons for the volume
emit_probabilities : longblob # photons emitted from cells per joule of illumination
mean_probability : float # mean probability per cell
"""
def make(self, key):
neuron_cross_section = 0.1 # um^2
points = (Tissue & key).fetch1('points')
self.insert1(key)
for esim_key in (ESim() & (Geometry.EField & key)).fetch("KEY"):
pitch, *dims = (ESim & esim_key).fetch1(
'pitch', 'volume_dimx', 'volume_dimy', 'volume_dimz')
dims = np.array(dims)
space = (ESim & esim_key).make_volume(hops=100_000)
for k in tqdm.tqdm((Geometry.EField & key & esim_key).fetch('KEY')):
# cell positions in volume coordinates
e_xyz, basis_z = (Geometry.EPixel & k).fetch1('e_loc', 'e_norm')
basis_y = np.array([0, 0, 1])
basis_z = np.append(basis_z, 0)
basis = np.stack((np.cross(basis_y, basis_z), basis_y, basis_z)).T
assert np.allclose(basis.T @ basis, np.eye(3)), "incorrect epixel orientation"
vxyz = np.int16(np.round((points - e_xyz) @ basis / pitch + dims / 2))
# probabilities
v = neuron_cross_section * np.array([
space.volume[q[0], q[1], q[2]] if
0 <= q[0] < dims[0] and
0 <= q[1] < dims[1] and
0 <= q[2] < dims[2] else 0 for q in vxyz])
self.EField().insert1(
dict(k, **esim_key,
nphotons=space.total_count,
emit_probabilities=np.float32(v),
mean_probability=v.mean()))
@schema
class Detection(dj.Computed):
definition = """
-> Tissue
"""
class DField(dj.Part):
definition = """
# Fraction of photons detected from each cell per detector
-> master
-> Geometry.DField
---
nphotons : int # number of simulated photons for the volume
detect_probabilities : longblob # fraction of photons detected from each neuron
mean_probability : float # mean probability of detection across all neurons
"""
def make(self, key):
points = (Tissue & key).fetch1('points')
self.insert1(key)
for dsim_key in (DSim & (Geometry.DField & key)).fetch("KEY"):
pitch, *dims = (DSim & dsim_key).fetch1(
'pitch', 'volume_dimx', 'volume_dimy', 'volume_dimz')
space = (DSim & dsim_key).make_volume(hops=100_000)
dims = np.array(dims)
for k in tqdm.tqdm((Geometry.DField & key & dsim_key).fetch('KEY')):
# cell positions in volume coordinates
d_xyz, basis_z = (Geometry.DPixel & k).fetch1('d_loc', 'd_norm')
basis_y = np.array([0, 0, 1])
basis_z = np.append(basis_z, 0)
basis = np.stack((np.cross(basis_y, basis_z), basis_y, basis_z)).T
assert np.allclose(basis.T @ basis, np.eye(3)), "incorrect dpixel orientation"
vxyz = np.int16(np.round((points - d_xyz) @ basis / pitch + dims / 2))
# sample DSim volume
v = np.array([
space.volume[q[0], q[1], q[2]] if
0 <= q[0] < dims[0] and
0 <= q[1] < dims[1] and
0 <= q[2] < dims[2] else 0 for q in vxyz])
self.DField().insert1(
dict(k, nphotons=space.total_count,
detect_probabilities=np.float32(v),
mean_probability=v.mean()))
|
python
|
# Copyright 2017 The KaiJIN Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CONTEXT for all issues"""
import time
import numpy as np
import tensorflow as tf
import functools
from gate.env import env
from gate.utils import filesystem
from gate.utils import string
from gate.utils import variable
from gate.utils.logger import logger
from gate.solver import snapshot
from gate.solver import summary
class Context():
""" A common class offers context managers, tasks should inherit this class.
"""
def __init__(self, config):
""" the self.phase and self.data used for store for switching task
like 'train' to 'test'.
"""
self.config = config
self.phase = None
self.data = None
""" initialize auxiliary information """
self.hooks = []
self.summary = summary.Summary(self.config)
self.snapshot = snapshot.Snapshot(self.config)
# print config information
string.print_members(config)
def _enter_(self, phase):
self.prephase = self.phase
self.phase = phase
self.config.set_phase(phase)
self.data = self.config.data
def _exit_(self):
if self.prephase is None:
return
self.phase = self.prephase
self.config.set_phase(self.phase)
self.data = self.config.data
def add_hook(self, hook):
self.hooks.append(hook)
@property
def is_training(self):
return True if self.phase == 'train' else False
@property
def batchsize(self):
return self.config.data.batchsize
@property
def total_num(self):
return self.config.data.total_num
@property
def iter_per_epoch(self):
return int(self.config.data.total_num / self.config.data.batchsize)
@property
def num_batch(self):
return int(self.data.total_num / self.data.batchsize)
class Running_Hook(tf.train.SessionRunHook):
""" Running Hooks for training showing information """
def __init__(self, config, step, keys, values,
func_val=None, func_test=None, func_end=None):
""" Running session for common application.
Default values[0] is iteration
config: config.log
"""
self.duration = 0
self.values = values
self.mean_values = np.zeros(len(self.values) + 1)
self.keys = keys + ['time']
self.step = step
self.config = config
# call
self.func_val = func_val
self.func_test = func_test
self.func_end = func_end
def begin(self):
# display variables
variable.print_trainable_list()
variable.print_global_list()
# pass
def before_run(self, run_context):
# feed monitor values
self.start_time = time.time()
return tf.train.SessionRunArgs([self.step] + self.values)
def after_run(self, run_context, run_values):
cur_iter = run_values.results[0]
self.mean_values[:-1] += run_values.results[1:]
self.mean_values[-1] += (time.time() - self.start_time) * 1000
if cur_iter == 0:
return
if cur_iter % self.config.print_invl == 0:
self.mean_values /= self.config.print_invl
logger.train(logger.iters(cur_iter, self.keys, self.mean_values))
      self.mean_values = np.zeros_like(self.mean_values)
if cur_iter % self.config.val_invl == 0:
if self.func_val is not None:
self.func_val()
if cur_iter % self.config.test_invl == 0:
if self.func_test is not None:
self.func_test()
if cur_iter == self.config.max_iter:
if self.func_end is not None:
self.func_end()
logger.sys('Achieved the maximum iterations, the system will terminate.')
exit(0)
class QueueContext():
"""For managing the data reader queue."""
def __init__(self, sess):
self.sess = sess
def __enter__(self):
self.coord = tf.train.Coordinator()
self.threads = []
for queuerunner in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
self.threads.extend(queuerunner.create_threads(
self.sess, coord=self.coord, daemon=True, start=True))
def __exit__(self, *unused):
self.coord.request_stop()
self.coord.join(self.threads, stop_grace_period_secs=10)
class DefaultSession():
"""Default session for custom"""
def __init__(self, hooks=None):
self.hooks = hooks
self.sess = None
def __enter__(self):
""" there, we set all issue to configure gpu memory with auto growth
however, when train + test, the memory will increase.
----
test presents that the performance has no benefits.
"""
tf_config = tf.ConfigProto(allow_soft_placement=False)
tf_config.gpu_options.allow_growth = True
if self.hooks is not None:
self.sess = tf.train.MonitoredTrainingSession(
hooks=self.hooks,
save_checkpoint_secs=None,
save_summaries_steps=None,
config=tf_config)
return self.sess
else:
self.sess = tf.Session()
return self.sess
def __exit__(self, *unused):
self.sess.close()
def graph_phase_wrapper():
# we use the func.__name__ as default phase value
# so, the function name should be defined in dataset.config
# e.g. 'train', 'val', 'test'
def decorator_wrapper(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
self._enter_(func.__name__)
with tf.Graph().as_default():
results = func(self, *args, **kwargs)
self._exit_()
return results
return wrapper
return decorator_wrapper
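# Illustrative sketch (hypothetical class, not part of gate): a task is expected to
# subclass Context and decorate its phase methods with graph_phase_wrapper, so that
# calling task.train() switches the config to the 'train' phase and builds a fresh
# tf.Graph for that call:
#
#   class ExampleTask(Context):
#     @graph_phase_wrapper()
#     def train(self):
#       # build the model here, then run it with the helper context managers, e.g.
#       # with DefaultSession(hooks) as sess, QueueContext(sess): sess.run(...)
#       pass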
|
python
|
import pandas as pd
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import modules.load_data_from_database as ldd
from db import connect_db
# connection with database
rdb = connect_db()
def day_figure_update(df, bar):
""" Update day figure depends on drop downs """
df_bar = df[df['type'] == bar]
df = df[df['type'] == 'Heart Rate']
if not df_bar.empty:
df_bar = df_bar.resample('5Min', on='Date').sum().reset_index()
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.add_trace(go.Scatter(x=df['Date'], y=df["Value"], name="Heart Rate"), secondary_y=False)
fig.add_trace(go.Bar(x=df_bar['Date'], y=df_bar["Value"], name='{}'.format(bar)), secondary_y=True)
fig.update_layout(
height=400,
template='plotly_white',
xaxis_title="Time",
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
    fig.update_yaxes(title_text="Heart Rate", secondary_y=False)
    fig.update_yaxes(title_text='{}'.format(bar), secondary_y=True)
return fig
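# Illustrative usage sketch (column layout inferred from the accesses above): `df` is
# expected to hold rows with columns 'type', 'Date' and 'Value', mixing 'Heart Rate'
# samples with another type (e.g. 'Steps') that is resampled and drawn as bars:
#   fig = day_figure_update(df, 'Steps')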
|
python
|
import threading
import json
from socketIO_client import SocketIO, LoggingNamespace
class Cliente():
def __init__(self, ip):
self.socketIO = SocketIO(ip, 8000)
self.errors = ''
#thread_rcv= threading.Thread ()
#thread_rcv.daemon=True
#thread_rcv.start()
def register_errors(self, message):
self.errors = message
def registrarse(self, data):
self.socketIO.emit('Registrarse', data, self.register_errors)
self.socketIO.wait(seconds=1)
return self.errors
def startsession(self, data):
self.socketIO.emit('startsession', data, self.register_errors)
self.socketIO.wait(seconds=1)
return self.errors
def send_message(self, data):
self.socketIO.emit('mensaje', data)
def crearsala(self, data):
self.socketIO.emit('crearsala', data, self.register_errors)
self.socketIO.wait(seconds=1)
return self.errors
def entrarsala(self, data):
self.socketIO.emit('entrarsala', data)
def salirsala(self):
self.socketIO.emit('salir')
def msgprivado(self, data):
self.socketIO.emit('private', data, self.register_errors)
self.socketIO.wait(seconds=1)
return self.errors
def exit(self):
self.socketIO.emit('desconectar')
def showusers(self):
'''
        self.register_errors is used here to obtain
        the list of users, not to register errors.
'''
self.socketIO.emit('show_users', self.register_errors)
self.socketIO.wait(seconds=1)
return self.errors
def listarsalas(self):
'''
        This function returns the available rooms with the help of register_errors.
        It does not return errors.
'''
self.socketIO.emit('listarsalas', self.register_errors)
self.socketIO.wait(seconds=1)
return self.errors
def eliminarsala(self):
self.socketIO.emit('eliminarsala', self.register_errors)
self.socketIO.wait(seconds=1)
return self.errors
def mensajesprivados(self):
self.socketIO.emit('mensajesprivados', self.register_errors)
self.socketIO.wait(seconds=1)
return self.errors
def leerprivado(self, data):
'''
        This function tells the server that a private message has been read.
'''
self.socketIO.emit('read_message', data)
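# Illustrative usage sketch (the payload shapes are assumptions; the real event
# payloads are defined by the server this client talks to):
#   cliente = Cliente('127.0.0.1')
#   error = cliente.registrarse({'usuario': 'alice', 'password': '...'})
#   salas = cliente.listarsalas()
#   cliente.exit()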
|
python
|
import ctypes
import ctypes.util
import os
import warnings
from ctypes import (
byref, c_byte, c_int, c_uint, c_char_p, c_size_t, c_void_p, create_string_buffer, CFUNCTYPE, POINTER
)
from pycoin.encoding.bytes32 import from_bytes_32, to_bytes_32
from pycoin.intbytes import iterbytes
SECP256K1_FLAGS_TYPE_MASK = ((1 << 8) - 1)
SECP256K1_FLAGS_TYPE_CONTEXT = (1 << 0)
SECP256K1_FLAGS_TYPE_COMPRESSION = (1 << 1)
# /** The higher bits contain the actual data. Do not use directly. */
SECP256K1_FLAGS_BIT_CONTEXT_VERIFY = (1 << 8)
SECP256K1_FLAGS_BIT_CONTEXT_SIGN = (1 << 9)
SECP256K1_FLAGS_BIT_COMPRESSION = (1 << 8)
# /** Flags to pass to secp256k1_context_create. */
SECP256K1_CONTEXT_VERIFY = (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_VERIFY)
SECP256K1_CONTEXT_SIGN = (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_SIGN)
SECP256K1_CONTEXT_NONE = (SECP256K1_FLAGS_TYPE_CONTEXT)
SECP256K1_FLAGS_BIT_COMPRESSION = (1 << 8)
SECP256K1_EC_COMPRESSED = (SECP256K1_FLAGS_TYPE_COMPRESSION | SECP256K1_FLAGS_BIT_COMPRESSION)
SECP256K1_EC_UNCOMPRESSED = (SECP256K1_FLAGS_TYPE_COMPRESSION)
def load_library():
try:
PYCOIN_LIBSECP256K1_PATH = os.getenv("PYCOIN_LIBSECP256K1_PATH")
        library_path = PYCOIN_LIBSECP256K1_PATH or ctypes.util.find_library('secp256k1')
secp256k1 = ctypes.cdll.LoadLibrary(library_path)
secp256k1.secp256k1_context_create.argtypes = [c_uint]
secp256k1.secp256k1_context_create.restype = c_void_p
secp256k1.secp256k1_context_randomize.argtypes = [c_void_p, c_char_p]
secp256k1.secp256k1_context_randomize.restype = c_int
secp256k1.secp256k1_ec_pubkey_create.argtypes = [c_void_p, c_void_p, c_char_p]
secp256k1.secp256k1_ec_pubkey_create.restype = c_int
secp256k1.secp256k1_ecdsa_sign.argtypes = [c_void_p, c_char_p, c_char_p, c_char_p, c_void_p, c_void_p]
secp256k1.secp256k1_ecdsa_sign.restype = c_int
secp256k1.secp256k1_ecdsa_verify.argtypes = [c_void_p, c_char_p, c_char_p, c_char_p]
secp256k1.secp256k1_ecdsa_verify.restype = c_int
secp256k1.secp256k1_ec_pubkey_parse.argtypes = [c_void_p, c_char_p, c_char_p, c_int]
secp256k1.secp256k1_ec_pubkey_parse.restype = c_int
secp256k1.secp256k1_ec_pubkey_serialize.argtypes = [c_void_p, c_char_p, c_void_p, c_char_p, c_uint]
secp256k1.secp256k1_ec_pubkey_serialize.restype = c_int
secp256k1.secp256k1_ecdsa_signature_parse_compact.argtypes = [c_void_p, c_char_p, c_char_p]
secp256k1.secp256k1_ecdsa_signature_parse_compact.restype = c_int
secp256k1.secp256k1_ecdsa_signature_serialize_compact.argtypes = [c_void_p, c_char_p, c_char_p]
secp256k1.secp256k1_ecdsa_signature_serialize_compact.restype = c_int
secp256k1.secp256k1_ec_pubkey_tweak_mul.argtypes = [c_void_p, c_char_p, c_char_p]
secp256k1.secp256k1_ec_pubkey_tweak_mul.restype = c_int
secp256k1.ctx = secp256k1.secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)
r = secp256k1.secp256k1_context_randomize(secp256k1.ctx, os.urandom(32))
if r:
return secp256k1
except (OSError, AttributeError):
if PYCOIN_LIBSECP256K1_PATH:
warnings.warn("PYCOIN_LIBSECP256K1_PATH set but libsecp256k1 optimizations not loaded")
return None
libsecp256k1 = load_library()
class Optimizations:
def __mul__(self, e):
e %= self.order()
if e == 0:
return self._infinity
pubkey = create_string_buffer(65)
libsecp256k1.secp256k1_ec_pubkey_create(libsecp256k1.ctx, pubkey, c_char_p(to_bytes_32(e)))
pubkey_size = c_size_t(65)
pubkey_serialized = create_string_buffer(65)
libsecp256k1.secp256k1_ec_pubkey_serialize(
libsecp256k1.ctx, pubkey_serialized, byref(pubkey_size), pubkey, SECP256K1_EC_UNCOMPRESSED)
x = from_bytes_32(pubkey_serialized[1:33])
y = from_bytes_32(pubkey_serialized[33:])
return self.Point(x, y)
def sign(self, secret_exponent, val, gen_k=None):
nonce_function = None
if gen_k is not None:
k_as_bytes = to_bytes_32(gen_k(self.order(), secret_exponent, val))
def adaptor(nonce32_p, msg32_p, key32_p, algo16_p, data, attempt):
nonce32_p.contents[:] = list(iterbytes(k_as_bytes))
return 1
p_b32 = POINTER(c_byte*32)
nonce_function = CFUNCTYPE(c_int, p_b32, p_b32, p_b32, POINTER(c_byte*16), c_void_p, c_uint)(adaptor)
sig = create_string_buffer(64)
sig_hash_bytes = to_bytes_32(val)
libsecp256k1.secp256k1_ecdsa_sign(
libsecp256k1.ctx, sig, sig_hash_bytes, to_bytes_32(secret_exponent), nonce_function, None)
compact_signature = create_string_buffer(64)
libsecp256k1.secp256k1_ecdsa_signature_serialize_compact(libsecp256k1.ctx, compact_signature, sig)
r = from_bytes_32(compact_signature[:32])
s = from_bytes_32(compact_signature[32:])
return (r, s)
def verify(self, public_pair, val, signature_pair):
sig = create_string_buffer(64)
input64 = to_bytes_32(signature_pair[0]) + to_bytes_32(signature_pair[1])
r = libsecp256k1.secp256k1_ecdsa_signature_parse_compact(libsecp256k1.ctx, sig, input64)
if not r:
return False
r = libsecp256k1.secp256k1_ecdsa_signature_normalize(libsecp256k1.ctx, sig, sig)
public_pair_bytes = b'\4' + to_bytes_32(public_pair[0]) + to_bytes_32(public_pair[1])
pubkey = create_string_buffer(64)
r = libsecp256k1.secp256k1_ec_pubkey_parse(
libsecp256k1.ctx, pubkey, public_pair_bytes, len(public_pair_bytes))
if not r:
return False
return 1 == libsecp256k1.secp256k1_ecdsa_verify(libsecp256k1.ctx, sig, to_bytes_32(val), pubkey)
def multiply(self, p, e):
"""Multiply a point by an integer."""
e %= self.order()
if p == self._infinity or e == 0:
return self._infinity
pubkey = create_string_buffer(64)
public_pair_bytes = b'\4' + to_bytes_32(p[0]) + to_bytes_32(p[1])
r = libsecp256k1.secp256k1_ec_pubkey_parse(
libsecp256k1.ctx, pubkey, public_pair_bytes, len(public_pair_bytes))
if not r:
return False
r = libsecp256k1.secp256k1_ec_pubkey_tweak_mul(libsecp256k1.ctx, pubkey, to_bytes_32(e))
if not r:
return self._infinity
pubkey_serialized = create_string_buffer(65)
pubkey_size = c_size_t(65)
libsecp256k1.secp256k1_ec_pubkey_serialize(
libsecp256k1.ctx, pubkey_serialized, byref(pubkey_size), pubkey, SECP256K1_EC_UNCOMPRESSED)
x = from_bytes_32(pubkey_serialized[1:33])
y = from_bytes_32(pubkey_serialized[33:])
return self.Point(x, y)
def create_LibSECP256K1Optimizations():
class noop:
pass
native = os.getenv("PYCOIN_NATIVE")
if native and native.lower() != "secp256k1":
return noop
if not libsecp256k1:
return noop
return Optimizations
LibSECP256K1Optimizations = create_LibSECP256K1Optimizations()
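# Illustrative sketch (hypothetical class names): the Optimizations mix-in is meant to
# be placed ahead of a pure-Python curve/generator implementation in the MRO, so that
# its libsecp256k1-backed __mul__, sign, verify and multiply override the slower
# versions whenever the shared library is available:
#   class FastGenerator(LibSECP256K1Optimizations, PurePythonGenerator):
#       pass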
|
python
|
from smach_based_introspection_framework.offline_part.model_training import train_anomaly_classifier
from smach_based_introspection_framework._constant import (
anomaly_classification_feature_selection_folder,
)
from smach_based_introspection_framework.configurables import model_type, model_config, score_metric
import glob
import os,ipdb
import pandas as pd
import pprint
import coloredlogs, logging
import sys, traceback
from sklearn.externals import joblib
import json
coloredlogs.install()
pp = pprint.PrettyPrinter(indent=4)
def run():
logger = logging.getLogger('GenClassificationModels')
folders = glob.glob(os.path.join(
anomaly_classification_feature_selection_folder,
'No.* filtering scheme',
'anomalies_grouped_by_type',
'anomaly_type_(*)',
))
for folder in folders:
logger.info(folder)
path_postfix = os.path.relpath(folder, anomaly_classification_feature_selection_folder).replace("anomalies_grouped_by_type"+os.sep, "")
output_dir = os.path.join(
anomaly_classification_feature_selection_folder,
'classifier_models',
path_postfix,
)
model_file = os.path.join(output_dir, 'classifier_model')
if os.path.isfile(model_file):
logger.info("Model already exists. Gonna skip.")
continue
csvs = glob.glob(os.path.join(
folder,
'*', '*.csv',
))
list_of_mat = []
for j in csvs:
df = pd.read_csv(j, sep=',')
# Exclude 1st column which is time index
list_of_mat.append(df.values[:, 1:])
try:
result = train_anomaly_classifier.run(list_of_mat, model_type, model_config, score_metric)
logger.info("Successfully trained classification model")
except Exception as e:
traceback.print_exc(file=sys.stdout)
logger.error("Failed to train_anomaly_classifier: %s"%e)
continue
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
joblib.dump(
result['model'],
model_file,
)
model_info = {
'model_type': model_type,
'find_best_model_in_this_config': model_config,
'score_metric': score_metric,
}
        model_info.update(result['model_info'])
json.dump(
model_info,
open(os.path.join(output_dir, 'classifier_model_info'), 'w'),
indent=4,
)
if __name__ == '__main__':
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.DEBUG)
logger.addHandler(consoleHandler)
run()
|
python
|
from collections import deque
from time import perf_counter
def breadthFirstSearch(initialState, goalState, timeout=60):
# Initialize iterations counter.
iterations = 0
    # Initialize the visited vertexes as a set, because checking whether an
    # item exists is O(1) on average; the items here are hashable state objects.
    # A list has O(n) average complexity when searching for an item.
    #
    # Initialize the search queue as a double-ended queue, which has O(1)
    # average complexity when popping an item from its left.
    # A list has O(n) average complexity when popping from the left,
    # so a deque improves performance for accesses at both ends.
#
# source : https://wiki.python.org/moin/TimeComplexity
visited, queue = set(), deque([initialState])
# Initialize timeout counter.
t1 = perf_counter()
# While there are elements to search for...
while queue:
        # Record the elapsed time at the start of each iteration.
        t2 = perf_counter()
        # If the allowed time has been exceeded, return nothing prematurely.
if t2 - t1 > timeout:
return None, iterations
iterations += 1
vertex = queue.popleft()
if vertex == goalState:
return vertex._tracePath(), iterations
for neighbour in vertex._generateStateChildren():
if neighbour not in visited:
visited.add(neighbour)
queue.append(neighbour)
def depthFirstSearch(initialState, goalState, timeout=60):
# Initialize iterations counter.
iterations = 0
    # Initialize the visited vertexes as a set, because checking whether an
    # item exists is O(1) on average; the items here are hashable state objects.
    # A list has O(n) average complexity when searching for an item.
    #
    # Initialize the search stack as a double-ended queue, which has O(1)
    # average complexity when popping an item from its right.
    # A list also has O(1) on average when popping from the right, so a deque
    # offers no advantage here, but we keep it for consistency with BFS.
#
# source : https://wiki.python.org/moin/TimeComplexity
visited, stack = set(), deque([initialState])
# Initialize timeout counter.
t1 = perf_counter()
# While there are elements to search for...
while stack:
        # Record the elapsed time at the start of each iteration.
        t2 = perf_counter()
        # If the allowed time has been exceeded, return nothing prematurely.
if t2 - t1 > timeout:
return None, iterations
iterations += 1
vertex = stack.pop() # right
if vertex == goalState:
return vertex._tracePath(), iterations
if vertex in visited:
continue
for neighbour in vertex._generateStateChildren():
stack.append(neighbour)
visited.add(vertex)
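# Illustrative sketch (hypothetical state type; the real state classes live elsewhere):
# a state object must be hashable, support ==, and provide the _generateStateChildren()
# and _tracePath() methods used above, e.g.:
#   class CountState:
#       def __init__(self, value, parent=None):
#           self.value, self.parent = value, parent
#       def __eq__(self, other):
#           return self.value == other.value
#       def __hash__(self):
#           return hash(self.value)
#       def _generateStateChildren(self):
#           return [CountState(self.value + 1, self), CountState(self.value * 2, self)]
#       def _tracePath(self):
#           node, path = self, []
#           while node is not None:
#               path.append(node.value)
#               node = node.parent
#           return path[::-1]
#   path, iterations = breadthFirstSearch(CountState(1), CountState(10))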
|
python
|
import FWCore.ParameterSet.Config as cms
def useTMTT(process):
from L1Trigger.TrackerDTC.Producer_Defaults_cfi import TrackerDTCProducer_params
from L1Trigger.TrackerDTC.Format_TMTT_cfi import TrackerDTCFormat_params
from L1Trigger.TrackerDTC.Analyzer_Defaults_cfi import TrackerDTCAnalyzer_params
TrackerDTCProducer_params.ParamsED.DataFormat = "TMTT"
TrackerDTCAnalyzer_params.ParamsTP.MinPt = cms.double( 3. )
process.TrackerDTCAnalyzer = cms.EDAnalyzer('trackerDTC::Analyzer', TrackerDTCAnalyzer_params, TrackerDTCProducer_params, TrackerDTCFormat_params)
return process
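# Illustrative usage sketch (assumed to run inside a cmsRun configuration where the
# TrackerDTC producer sequence has already been set up):
#   process = useTMTT(process)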
|
python
|
from datetime import timedelta
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
from matplotlib import rcParams
__author__ = 'Ben Dichter'
class BrokenAxes:
def __init__(self, xlims=None, ylims=None, d=.015, tilt=45,
subplot_spec=None, fig=None, despine=True,
xscale=None, yscale=None, diag_color='k',
height_ratios=None, width_ratios=None,
*args, **kwargs):
"""Creates a grid of axes that act like a single broken axes
Parameters
----------
xlims, ylims: (optional) None or tuple of tuples, len 2
Define the ranges over which to plot. If `None`, the axis is left
unsplit.
d: (optional) double
Length of diagonal split mark used to indicate broken axes
tilt: (optional) double
Angle of diagonal split mark
subplot_spec: (optional) None or Gridspec.subplot_spec
Defines a subplot
fig: (optional) None or Figure
If no figure is defined, `plt.gcf()` is used
despine: (optional) bool
Get rid of right and top spines. Default: True
        wspace, hspace: (optional) float
Change the size of the horizontal or vertical gaps
xscale, yscale: (optional) None | str
None: linear axis (default), 'log': log axis
diag_color: (optional)
color of diagonal lines
{width, height}_ratios: (optional) | list of int
The width/height ratios of the axes, passed to gridspec.GridSpec.
By default, adapt the axes for a 1:1 scale given the ylims/xlims.
hspace: float
Height space between axes (NOTE: not horizontal space)
wspace: float
            Width space between axes
args, kwargs: (optional)
Passed to gridspec.GridSpec
Notes
-----
The broken axes effect is achieved by creating a number of smaller axes
and setting their position and data ranges. A "big_ax" is used for
methods that need the position of the entire broken axes object, e.g.
`set_xlabel`.
"""
self.diag_color = diag_color
self.despine = despine
self.d = d
self.tilt = tilt
if fig is None:
self.fig = plt.gcf()
else:
self.fig = fig
if width_ratios is None:
if xlims:
# Check if the user has asked for a log scale on x axis
if xscale == 'log':
width_ratios = [np.log(i[1]) - np.log(i[0]) for i in xlims]
else:
width_ratios = [i[1] - i[0] for i in xlims]
else:
width_ratios = [1]
# handle datetime xlims
if type(width_ratios[0]) == timedelta:
width_ratios = [tt.total_seconds() for tt in width_ratios]
if height_ratios is None:
if ylims:
# Check if the user has asked for a log scale on y axis
if yscale == 'log':
height_ratios = [np.log(i[1]) - np.log(i[0])
for i in ylims[::-1]]
else:
height_ratios = [i[1] - i[0] for i in ylims[::-1]]
else:
height_ratios = [1]
# handle datetime ylims
if type(height_ratios[0]) == timedelta:
                height_ratios = [tt.total_seconds() for tt in height_ratios]
ncols, nrows = len(width_ratios), len(height_ratios)
kwargs.update(ncols=ncols, nrows=nrows, height_ratios=height_ratios,
width_ratios=width_ratios)
if subplot_spec:
gs = gridspec.GridSpecFromSubplotSpec(subplot_spec=subplot_spec,
*args, **kwargs)
self.big_ax = plt.Subplot(self.fig, subplot_spec)
else:
gs = gridspec.GridSpec(*args, **kwargs)
self.big_ax = plt.Subplot(self.fig, gridspec.GridSpec(1, 1)[0])
[sp.set_visible(False) for sp in self.big_ax.spines.values()]
self.big_ax.set_xticks([])
self.big_ax.set_yticks([])
self.big_ax.patch.set_facecolor('none')
self.axs = []
for igs in gs:
ax = plt.Subplot(self.fig, igs)
self.fig.add_subplot(ax)
self.axs.append(ax)
self.fig.add_subplot(self.big_ax)
# get last axs row and first col
self.last_row = []
self.first_col = []
for ax in self.axs:
if ax.is_last_row():
self.last_row.append(ax)
if ax.is_first_col():
self.first_col.append(ax)
# Set common x/y lim for ax in the same col/row
# and share x and y between them
for i, ax in enumerate(self.axs):
if ylims is not None:
ax.set_ylim(ylims[::-1][i//ncols])
ax.get_shared_y_axes().join(ax, self.first_col[i // ncols])
if xlims is not None:
ax.set_xlim(xlims[i % ncols])
ax.get_shared_x_axes().join(ax, self.last_row[i % ncols])
self.standardize_ticks()
if d:
self.draw_diags()
if despine:
self.set_spines()
@staticmethod
def draw_diag(ax, xpos, xlen, ypos, ylen, **kwargs):
return ax.plot((xpos - xlen, xpos + xlen), (ypos - ylen, ypos + ylen),
**kwargs)
def draw_diags(self):
"""
Parameters
----------
d: float
Length of diagonal split mark used to indicate broken axes
tilt: float
Angle of diagonal split mark
"""
size = self.fig.get_size_inches()
ylen = self.d * np.sin(self.tilt * np.pi / 180) * size[0] / size[1]
xlen = self.d * np.cos(self.tilt * np.pi / 180)
d_kwargs = dict(transform=self.fig.transFigure, color=self.diag_color,
clip_on=False, lw=rcParams['axes.linewidth'])
ds = []
for ax in self.axs:
bounds = ax.get_position().bounds
if ax.is_last_row():
ypos = bounds[1]
if not ax.is_last_col():
xpos = bounds[0] + bounds[2]
ds += self.draw_diag(ax, xpos, xlen, ypos, ylen,
**d_kwargs)
if not ax.is_first_col():
xpos = bounds[0]
ds += self.draw_diag(ax, xpos, xlen, ypos, ylen,
**d_kwargs)
if ax.is_first_col():
xpos = bounds[0]
if not ax.is_first_row():
ypos = bounds[1] + bounds[3]
ds += self.draw_diag(ax, xpos, xlen, ypos, ylen, **d_kwargs)
if not ax.is_last_row():
ypos = bounds[1]
ds += self.draw_diag(ax, xpos, xlen, ypos, ylen, **d_kwargs)
if not self.despine:
if ax.is_first_row():
ypos = bounds[1] + bounds[3]
if not ax.is_last_col():
xpos = bounds[0] + bounds[2]
ds += self.draw_diag(ax, xpos, xlen, ypos, ylen,
**d_kwargs)
if not ax.is_first_col():
xpos = bounds[0]
ds += self.draw_diag(ax, xpos, xlen, ypos, ylen,
**d_kwargs)
if ax.is_last_col():
xpos = bounds[0] + bounds[2]
if not ax.is_first_row():
ypos = bounds[1] + bounds[3]
ds += self.draw_diag(ax, xpos, xlen, ypos, ylen,
**d_kwargs)
if not ax.is_last_row():
ypos = bounds[1]
ds += self.draw_diag(ax, xpos, xlen, ypos, ylen,
**d_kwargs)
self.diag_handles = ds
def set_spines(self):
"""Removes the spines of internal axes that are not boarder spines.
"""
for ax in self.axs:
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
if not ax.is_last_row():
ax.spines['bottom'].set_visible(False)
plt.setp(ax.xaxis.get_minorticklabels(), visible=False)
plt.setp(ax.xaxis.get_minorticklines(), visible=False)
plt.setp(ax.xaxis.get_majorticklabels(), visible=False)
plt.setp(ax.xaxis.get_majorticklines(), visible=False)
if self.despine or not ax.is_first_row():
ax.spines['top'].set_visible(False)
if not ax.is_first_col():
ax.spines['left'].set_visible(False)
plt.setp(ax.yaxis.get_minorticklabels(), visible=False)
plt.setp(ax.yaxis.get_minorticklines(), visible=False)
plt.setp(ax.yaxis.get_majorticklabels(), visible=False)
plt.setp(ax.yaxis.get_majorticklines(), visible=False)
if self.despine or not ax.is_last_col():
ax.spines['right'].set_visible(False)
def standardize_ticks(self, xbase=None, ybase=None):
"""Make all of the internal axes share tick bases
Parameters
----------
xbase, ybase: (optional) None or float
If `xbase` or `ybase` is a float, manually set all tick locators to
this base. Otherwise, use the largest base across internal subplots
for that axis.
"""
if xbase is None:
if self.axs[0].xaxis.get_scale() == 'log':
xbase = max(ax.xaxis.get_ticklocs()[1] /
ax.xaxis.get_ticklocs()[0]
for ax in self.axs if ax.is_last_row())
else:
xbase = max(ax.xaxis.get_ticklocs()[1] -
ax.xaxis.get_ticklocs()[0]
for ax in self.axs if ax.is_last_row())
if ybase is None:
if self.axs[0].yaxis.get_scale() == 'log':
ybase = max(ax.yaxis.get_ticklocs()[1] /
ax.yaxis.get_ticklocs()[0]
for ax in self.axs if ax.is_first_col())
else:
ybase = max(ax.yaxis.get_ticklocs()[1] -
ax.yaxis.get_ticklocs()[0]
for ax in self.axs if ax.is_first_col())
for ax in self.axs:
if ax.is_first_col():
if ax.yaxis.get_scale() == 'log':
ax.yaxis.set_major_locator(ticker.LogLocator(ybase))
else:
ax.yaxis.set_major_locator(ticker.MultipleLocator(ybase))
if ax.is_last_row():
if ax.xaxis.get_scale() == 'log':
ax.xaxis.set_major_locator(ticker.LogLocator(xbase))
else:
ax.xaxis.set_major_locator(ticker.MultipleLocator(xbase))
def __getattr__(self, method):
"""Catch all methods that are not defined and pass to internal axes
(e.g. plot, errorbar, etc.).
"""
return CallCurator(method, self)
def subax_call(self, method, args, kwargs, attr=None):
"""Apply method call to all internal axes. Called by CallCurator.
"""
result = []
for ax in self.axs:
if ax.xaxis.get_scale() == 'log':
ax.xaxis.set_major_locator(ticker.LogLocator())
else:
ax.xaxis.set_major_locator(ticker.AutoLocator())
if ax.yaxis.get_scale() == 'log':
ax.yaxis.set_major_locator(ticker.LogLocator())
else:
ax.yaxis.set_major_locator(ticker.AutoLocator())
if attr:
result.append(getattr(getattr(ax, attr), method)(*args, **kwargs))
else:
result.append(getattr(ax, method)(*args, **kwargs))
self.standardize_ticks()
self.set_spines()
return result
def set_xlabel(self, label, labelpad=15, **kwargs):
return self.big_ax.set_xlabel(label, labelpad=labelpad, **kwargs)
def set_ylabel(self, label, labelpad=30, **kwargs):
return self.big_ax.set_ylabel(label, labelpad=labelpad, **kwargs)
def set_title(self, *args, **kwargs):
return self.big_ax.set_title(*args, **kwargs)
def legend(self, *args, **kwargs):
        handles, labels = self.axs[0].get_legend_handles_labels()
        return self.big_ax.legend(handles, labels, *args, **kwargs)
def axis(self, *args, **kwargs):
        return [ax.axis(*args, **kwargs) for ax in self.axs]
class CallCurator:
"""Used by BrokenAxes.__getattr__ to pass methods to internal axes."""
def __init__(self, attr, broken_axes):
self.attr = attr
self.broken_axes = broken_axes
def __call__(self, *args, **kwargs):
return self.broken_axes.subax_call(self.attr, args, kwargs)
def __getattr__(self, name):
return AttributeCurator(self, name)
class AttributeCurator:
def __init__(self, call_curator, attr):
self.call_curator = call_curator
self.attr = attr
def __call__(self, *args, **kwargs):
return self.call_curator.broken_axes.subax_call(self.attr, args, kwargs, self.call_curator.attr)
def brokenaxes(*args, **kwargs):
"""Convenience method for `BrokenAxes` class.
Parameters
----------
args, kwargs: passed to `BrokenAxes()`
Returns
-------
out: `BrokenAxes`
"""
return BrokenAxes(*args, **kwargs)
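# --- Hedged usage sketch (illustrative addition, not part of the original module) ---
# Assuming the BrokenAxes constructor accepts `xlims` as a tuple of (low, high)
# ranges, as in the published brokenaxes package, and that matplotlib.pyplot is
# imported as `plt` at the top of this module (it is already used in
# set_spines()), the snippet below plots a curve across a broken x-axis.
# Calls such as `plot` are forwarded to every internal axes through
# __getattr__/CallCurator/subax_call defined above.
if __name__ == "__main__":
    import numpy as np
    x = np.linspace(0, 12, 500)
    bax = brokenaxes(xlims=((0, 1), (10, 12)))   # assumed constructor keyword
    bax.plot(x, np.sin(x), label="sin(x)")       # dispatched to all internal axes
    bax.set_xlabel("x")
    bax.set_ylabel("sin(x)")
    bax.legend(loc="best")
    plt.show()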
|
python
|
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import logging
import json
import re
from HTMLParser import HTMLParser
from collections import defaultdict, OrderedDict
from distutils.version import StrictVersion
from wlauto import AndroidUiAutoBenchmark, Parameter
from wlauto.utils.types import list_of_strs, numeric
from wlauto.exceptions import WorkloadError
#pylint: disable=no-member
class Vellamo(AndroidUiAutoBenchmark):
name = 'vellamo'
description = """
Android benchmark designed by Qualcomm.
Vellamo began as a mobile web benchmarking tool that today has expanded
to include three primary chapters. The Browser Chapter evaluates mobile
web browser performance, the Multicore chapter measures the synergy of
multiple CPU cores, and the Metal Chapter measures the CPU subsystem
performance of mobile processors. Through click-and-go test suites,
organized by chapter, Vellamo is designed to evaluate: UX, 3D graphics,
and memory read/write and peak bandwidth performance, and much more!
Note: Vellamo v3.0 fails to run on Juno
"""
package = 'com.quicinc.vellamo'
run_timeout = 15 * 60
benchmark_types = {
'2.0.3': ['html5', 'metal'],
'3.0': ['Browser', 'Metal', 'Multi'],
'3.2.4': ['Browser', 'Metal', 'Multi'],
}
valid_versions = benchmark_types.keys()
summary_metrics = None
parameters = [
Parameter('version', kind=str, allowed_values=valid_versions, default=sorted(benchmark_types, reverse=True)[0],
description=('Specify the version of Vellamo to be run. '
'If not specified, the latest available version will be used.')),
Parameter('benchmarks', kind=list_of_strs, allowed_values=benchmark_types['3.0'], default=benchmark_types['3.0'],
                  description=('Specify which benchmark sections of Vellamo are to be run. Only valid on version 3.0 and newer.'
                               '\nNOTE: The Browser benchmark can be problematic and may seem to hang; '
                               'just wait and it will progress after ~5 minutes.')),
Parameter('browser', kind=int, default=1,
description=('Specify which of the installed browsers will be used for the tests. The number refers to '
'the order in which browsers are listed by Vellamo. E.g. ``1`` will select the first browser '
'listed, ``2`` -- the second, etc. Only valid for version ``3.0``.'))
]
def __init__(self, device, **kwargs):
super(Vellamo, self).__init__(device, **kwargs)
if StrictVersion(self.version) >= StrictVersion("3.0.0"):
self.activity = 'com.quicinc.vellamo.main.MainActivity'
if StrictVersion(self.version) == StrictVersion('2.0.3'):
self.activity = 'com.quicinc.vellamo.VellamoActivity'
def setup(self, context):
self.uiauto_params['version'] = self.version
self.uiauto_params['browserToUse'] = self.browser
self.uiauto_params['metal'] = 'Metal' in self.benchmarks
self.uiauto_params['browser'] = 'Browser' in self.benchmarks
self.uiauto_params['multicore'] = 'Multi' in self.benchmarks
super(Vellamo, self).setup(context)
def validate(self):
super(Vellamo, self).validate()
if self.version == '2.0.3' or not self.benchmarks or self.benchmarks == []: # pylint: disable=access-member-before-definition
self.benchmarks = self.benchmark_types[self.version] # pylint: disable=attribute-defined-outside-init
else:
for benchmark in self.benchmarks:
if benchmark not in self.benchmark_types[self.version]:
raise WorkloadError('Version {} does not support {} benchmarks'.format(self.version, benchmark))
def update_result(self, context):
super(Vellamo, self).update_result(context)
# Get total scores from logcat
self.non_root_update_result(context)
if not self.device.is_rooted:
return
        elif self.version == '3.0':
self.update_result_v3(context)
elif self.version == '3.2.4':
self.update_result_v3_2(context)
def update_result_v3(self, context):
for test in self.benchmarks: # Get all scores from HTML files
filename = None
if test == "Browser":
result_folder = self.device.path.join(self.device.package_data_directory, self.package, 'files')
for result_file in self.device.listdir(result_folder, as_root=True):
if result_file.startswith("Browser"):
filename = result_file
else:
filename = '{}_results.html'.format(test)
device_file = self.device.path.join(self.device.package_data_directory, self.package, 'files', filename)
host_file = os.path.join(context.output_directory, filename)
self.device.pull_file(device_file, host_file, as_root=True)
with open(host_file) as fh:
parser = VellamoResultParser()
parser.feed(fh.read())
for benchmark in parser.benchmarks:
benchmark.name = benchmark.name.replace(' ', '_')
context.result.add_metric('{}_Total'.format(benchmark.name), benchmark.score)
for name, score in benchmark.metrics.items():
name = name.replace(' ', '_')
context.result.add_metric('{}_{}'.format(benchmark.name, name), score)
context.add_iteration_artifact('vellamo_output', kind='raw', path=filename)
def update_result_v3_2(self, context):
device_file = self.device.path.join(self.device.package_data_directory,
self.package,
'files',
'chapterscores.json')
host_file = os.path.join(context.output_directory, 'vellamo.json')
self.device.pull_file(device_file, host_file, as_root=True)
context.add_iteration_artifact('vellamo_output', kind='raw', path=host_file)
with open(host_file) as results_file:
data = json.load(results_file)
for chapter in data:
for result in chapter['benchmark_results']:
name = result['id']
score = result['score']
context.result.add_metric(name, score)
def non_root_update_result(self, context):
failed = []
with open(self.logcat_log) as fh:
            iteration_result_regex = re.compile(r"VELLAMO RESULT: (Browser|Metal|Multicore) (\d+)")
for line in fh:
if 'VELLAMO ERROR:' in line:
self.logger.warning("Browser crashed during benchmark, results may not be accurate")
result = iteration_result_regex.findall(line)
if result:
for (metric, score) in result:
if not score:
failed.append(metric)
else:
                            context.result.add_metric(metric, numeric(score))
if failed:
raise WorkloadError("The following benchmark groups failed: {}".format(", ".join(failed)))
class VellamoResult(object):
def __init__(self, name):
self.name = name
self.score = None
self.metrics = {}
def add_metric(self, data):
split_data = data.split(":")
name = split_data[0].strip()
score = split_data[1].strip()
if name in self.metrics:
raise KeyError("A metric of that name is already present")
self.metrics[name] = float(score)
class VellamoResultParser(HTMLParser):
class StopParsingException(Exception):
pass
def __init__(self):
HTMLParser.__init__(self)
self.inside_div = False
self.inside_span = 0
self.inside_li = False
self.got_data = False
self.failed = False
self.benchmarks = []
def feed(self, text):
try:
HTMLParser.feed(self, text)
except self.StopParsingException:
pass
def handle_starttag(self, tag, attrs):
if tag == 'div':
self.inside_div = True
if tag == 'span':
self.inside_span += 1
if tag == 'li':
self.inside_li = True
def handle_endtag(self, tag):
if tag == 'div':
self.inside_div = False
self.inside_span = 0
self.got_data = False
self.failed = False
if tag == 'li':
self.inside_li = False
def handle_data(self, data):
if self.inside_div and not self.failed:
if "Problem" in data:
self.failed = True
elif self.inside_span == 1:
self.benchmarks.append(VellamoResult(data))
elif self.inside_span == 3 and not self.got_data:
self.benchmarks[-1].score = int(data)
self.got_data = True
elif self.inside_li and self.got_data:
if 'failed' not in data:
self.benchmarks[-1].add_metric(data)
else:
self.failed = True
|
python
|
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from sklearn.cluster import DBSCAN
from .MetadataExtractor import Extractor
from .DataCleaner import cleaner, limited_cleaner
from joblib import dump, load
def predictor(extractor, name):
data = cleaner(extractor).drop_duplicates(subset=['name'])
data_to_fit = data.drop(['table', 'name'],axis=1)
kmeans = KMeans(n_clusters=2, random_state=0).fit(data_to_fit)
gauss = GaussianMixture(n_components=2, random_state=0).fit(data_to_fit)
dbscan = DBSCAN(min_samples=5).fit(data_to_fit)
#dump(gauss, f'gauss_{name}.joblib')
kmeans_predict = kmeans.predict(data_to_fit)
gauss_predict = gauss.predict(data_to_fit)
data['kmeans'] = kmeans_predict
data['gauss'] = gauss_predict
data['scan'] = dbscan.labels_
return data
def run_analysis():
pagila_data = Extractor('postgresql', 'postgres', 'Viteco2020', 'pagila')
sportsdb_data = Extractor('postgresql', 'postgres', 'Viteco2020', 'sportsdb')
pagila = predictor(pagila_data, 'pagila')
sports = predictor(sportsdb_data, 'sportsdb')
    pagila_agree = sum(pagila['kmeans'] == pagila['gauss'])/len(pagila)
sportsdb_agree = sum(sports['kmeans'] == sports['gauss'])/len(sports)
print(f'Pagila total of class 1 in K-means: {sum(pagila.kmeans)} and class 0: {len(pagila)-sum(pagila.kmeans)}')
print(f'Pagila total of class 1 in GMM: {sum(pagila.gauss)} and class 0: {len(pagila)-sum(pagila.gauss)}')
print(f'Sportsdb total of class 1 in K-means: {sum(sports.kmeans)} and class 0: {len(sports)-sum(sports.kmeans)}')
print(f'Sportsdb total of class 1 in GMM: {sum(sports.gauss)} and class 0: {len(sports)-sum(sports.gauss)}')
print(pagila_agree, sportsdb_agree)
print(pagila[(pagila.kmeans == 1) & (pagila.gauss == 0)].drop(['name', 'table', 'gauss', 'kmeans', 'length', 'scan'], axis=1).sum(skipna=True))
print(sports[(sports.kmeans == 0) & (sports.gauss == 0)].drop(['name', 'table', 'gauss', 'kmeans', 'length', 'scan'], axis=1).sum(skipna=True))
print(pagila[(pagila.kmeans == 1) & (pagila.gauss == 1)])
print(pagila[(pagila.kmeans == 0) & (pagila.gauss == 0)])
print(sports[(sports.kmeans == 1) & (sports.gauss == 1)])
def load_predictor(name, extractor, tables):
#gauss = load(f'gauss_{name}.joblib')
data = limited_cleaner(extractor, tables)
data_to_fit = data.drop(['table', 'name'],axis=1)
kmeans = KMeans(n_clusters=2, random_state=0).fit(data_to_fit)
gauss = GaussianMixture(n_components=2, random_state=0).fit(data_to_fit)
kmeans_predict = kmeans.predict(data_to_fit)
gauss_predict = gauss.predict(data_to_fit)
data['kmeans'] = kmeans_predict
data['gauss'] = gauss_predict
return data
|
python
|
# Copyright (c) 2021, Manfred Moitzi
# License: MIT License
from typing import (
Iterable,
List,
TYPE_CHECKING,
Tuple,
Iterator,
Sequence,
Dict,
)
import abc
from typing_extensions import Protocol
from ezdxf.math import (
Vec2,
Vec3,
linspace,
NULLVEC,
Vertex,
intersection_line_line_2d,
BoundingBox2d,
intersection_line_line_3d,
BoundingBox,
AbstractBoundingBox,
)
import bisect
if TYPE_CHECKING:
from ezdxf.math import Vertex
__all__ = [
"ConstructionPolyline",
"ApproxParamT",
"intersect_polylines_2d",
"intersect_polylines_3d",
]
REL_TOL = 1e-9
class ConstructionPolyline(Sequence):
"""A polyline construction tool to measure, interpolate and divide anything
that can be approximated or flattened into vertices.
This is an immutable data structure which supports the :class:`Sequence`
interface.
Args:
vertices: iterable of polyline vertices
close: ``True`` to close the polyline (first vertex == last vertex)
rel_tol: relative tolerance for floating point comparisons
Example to measure or divide a SPLINE entity::
import ezdxf
from ezdxf.math import ConstructionPolyline
doc = ezdxf.readfile("your.dxf")
msp = doc.modelspace()
spline = msp.query("SPLINE").first
if spline is not None:
polyline = ConstructionPolyline(spline.flattening(0.01))
print(f"Entity {spline} has an approximated length of {polyline.length}")
# get dividing points with a distance of 1.0 drawing unit to each other
points = list(polyline.divide_by_length(1.0))
.. versionadded:: 0.18
"""
def __init__(
self,
vertices: Iterable[Vertex],
close: bool = False,
rel_tol: float = REL_TOL,
):
self._rel_tol = float(rel_tol)
v3list: List[Vec3] = Vec3.list(vertices)
self._vertices: List[Vec3] = v3list
if close and len(v3list) > 2:
if not v3list[0].isclose(v3list[-1], rel_tol=self._rel_tol):
v3list.append(v3list[0])
self._distances: List[float] = _distances(v3list)
def __len__(self) -> int:
"""len(self)"""
return len(self._vertices)
def __iter__(self) -> Iterator[Vec3]:
"""iter(self)"""
return iter(self._vertices)
def __getitem__(self, item):
"""vertex = self[item]"""
if isinstance(item, int):
return self._vertices[item]
else: # slice
return self.__class__(self._vertices[item], rel_tol=self._rel_tol)
@property
def length(self) -> float:
"""Returns the overall length of the polyline."""
if self._distances:
return self._distances[-1]
return 0.0
@property
def is_closed(self) -> bool:
"""Returns ``True`` if the polyline is closed
(first vertex == last vertex).
"""
if len(self._vertices) > 2:
return self._vertices[0].isclose(
self._vertices[-1], rel_tol=self._rel_tol
)
return False
def data(self, index: int) -> Tuple[float, float, Vec3]:
"""Returns the tuple (distance from start, distance from previous vertex,
vertex). All distances measured along the polyline.
"""
vertices = self._vertices
if not vertices:
raise ValueError("empty polyline")
distances = self._distances
if index == 0:
return 0.0, 0.0, vertices[0]
prev_distance = distances[index - 1]
current_distance = distances[index]
vertex = vertices[index]
return current_distance, current_distance - prev_distance, vertex
def index_at(self, distance: float) -> int:
"""Returns the data index of the exact or next data entry for the given
`distance`. Returns the index of last entry if `distance` > :attr:`length`.
"""
if distance <= 0.0:
return 0
if distance >= self.length:
return max(0, len(self) - 1)
return self._index_at(distance)
def _index_at(self, distance: float) -> int:
# fast method without any checks
return bisect.bisect_left(self._distances, distance)
def vertex_at(self, distance: float) -> Vec3:
"""Returns the interpolated vertex at the given `distance` from the
start of the polyline.
"""
if distance < 0.0 or distance > self.length:
raise ValueError("distance out of range")
if len(self._vertices) < 2:
raise ValueError("not enough vertices for interpolation")
return self._vertex_at(distance)
def _vertex_at(self, distance: float) -> Vec3:
# fast method without any checks
vertices = self._vertices
distances = self._distances
index1 = self._index_at(distance)
if index1 == 0:
return vertices[0]
index0 = index1 - 1
distance1 = distances[index1]
distance0 = distances[index0]
# skip coincident vertices:
while index0 > 0 and distance0 == distance1:
index0 -= 1
distance0 = distances[index0]
if distance0 == distance1:
raise ArithmeticError("internal interpolation error")
factor = (distance - distance0) / (distance1 - distance0)
return vertices[index0].lerp(vertices[index1], factor=factor)
def divide(self, count: int) -> Iterator[Vec3]:
"""Returns `count` interpolated vertices along the polyline.
        Argument `count` has to be at least 2; the start- and end
        vertices are always included.
"""
if count < 2:
raise ValueError(f"invalid count: {count}")
vertex_at = self._vertex_at
for distance in linspace(0.0, self.length, count):
yield vertex_at(distance)
def divide_by_length(
self, length: float, force_last: bool = False
) -> Iterator[Vec3]:
"""Returns interpolated vertices along the polyline. Each vertex has a
fix distance `length` from its predecessor. Yields the last vertex if
argument `force_last` is ``True`` even if the last distance is not equal
to `length`.
"""
if length <= 0.0:
raise ValueError(f"invalid length: {length}")
if len(self._vertices) < 2:
raise ValueError("not enough vertices for interpolation")
total_length: float = self.length
vertex_at = self._vertex_at
distance: float = 0.0
vertex: Vec3 = NULLVEC
while distance <= total_length:
vertex = vertex_at(distance)
yield vertex
distance += length
if force_last and not vertex.isclose(self._vertices[-1]):
yield self._vertices[-1]
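# --- Hedged usage sketch (illustrative addition, not part of the original module) ---
# A closed unit square has a total length of 4.0, so divide_by_length(1.0)
# should yield the four corners plus the closing start vertex, and
# vertex_at(length / 2) returns the corner diagonally opposite the start point.
def _construction_polyline_demo():  # pragma: no cover - illustrative only
    square = ConstructionPolyline([(0, 0), (1, 0), (1, 1), (0, 1)], close=True)
    corners = list(square.divide_by_length(1.0))      # (0,0), (1,0), (1,1), (0,1), (0,0)
    midpoint = square.vertex_at(square.length / 2.0)  # Vec3(1, 1, 0)
    return corners, midpoint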
def _distances(vertices: Iterable[Vec3]) -> List[float]:
# distance from start vertex of the polyline to the vertex
current_station: float = 0.0
distances: List[float] = []
prev_vertex = Vec3()
for vertex in vertices:
if distances:
distant_vec = vertex - prev_vertex
current_station += distant_vec.magnitude
distances.append(current_station)
else:
distances.append(current_station)
prev_vertex = vertex
return distances
class SupportsPointMethod(Protocol):
def point(self, t: float) -> Vertex:
...
class ApproxParamT:
"""Approximation tool for parametrized curves.
- approximate parameter `t` for a given distance from the start of the curve
- approximate the distance for a given parameter `t` from the start of the curve
    These approximations can be applied to all parametrized curves which provide
a :meth:`point` method, like :class:`Bezier4P`, :class:`Bezier3P` and
:class:`BSpline`.
The approximation is based on equally spaced parameters from 0 to `max_t`
for a given segment count.
The :meth:`flattening` method can not be used for the curve approximation,
because the required parameter `t` is not logged by the flattening process.
Args:
curve: curve object, requires a method :meth:`point`
max_t: the max. parameter value
segments: count of approximation segments
.. versionadded:: 0.18
"""
def __init__(
self,
curve: SupportsPointMethod,
*,
max_t: float = 1.0,
segments: int = 100,
):
assert hasattr(curve, "point")
assert segments > 0
self._polyline = ConstructionPolyline(
curve.point(t) for t in linspace(0.0, max_t, segments + 1)
)
self._max_t = max_t
self._step = max_t / segments
@property
def max_t(self) -> float:
return self._max_t
@property
def polyline(self) -> ConstructionPolyline:
return self._polyline
    def param_t(self, distance: float) -> float:
"""Approximate parameter t for the given `distance` from the start of
the curve.
"""
poly = self._polyline
if distance >= poly.length:
return self._max_t
t_step = self._step
i = poly.index_at(distance)
station, d0, _ = poly.data(i)
t = t_step * i # t for station
if d0 > 1e-12:
t -= t_step * (station - distance) / d0
return min(self._max_t, t)
def distance(self, t: float) -> float:
"""Approximate the distance from the start of the curve to the point
`t` on the curve.
"""
if t <= 0.0:
return 0.0
poly = self._polyline
if t >= self._max_t:
return poly.length
step = self._step
index = int(t / step) + 1
station, d0, _ = poly.data(index)
return station - d0 * (step * index - t) / step
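# --- Hedged usage sketch (illustrative addition, not part of the original module) ---
# ApproxParamT maps between arc length and the curve parameter t.  The sketch
# assumes ezdxf's Bezier4P (a cubic Bezier curve exposing a point(t) method);
# any object with a point(t) method can be used instead.
def _approx_param_t_demo():  # pragma: no cover - illustrative only
    from ezdxf.math import Bezier4P
    curve = Bezier4P([(0, 0), (1, 2), (3, 2), (4, 0)])
    approx = ApproxParamT(curve, max_t=1.0, segments=200)
    half_length = approx.polyline.length / 2.0
    t = approx.param_t(half_length)  # parameter at half the curve length
    d = approx.distance(t)           # distance back from t, approximately half_length
    return t, d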
def intersect_polylines_2d(
p1: Sequence[Vec2], p2: Sequence[Vec2], abs_tol=1e-10
) -> List[Vec2]:
"""Returns the intersection points for two polylines as list of :class:`Vec2`
objects, the list is empty if no intersection points exist.
Does not return self intersection points of `p1` or `p2`.
Duplicate intersection points are removed from the result list, but the list
does not have a particular order! You can sort the result list by
:code:`result.sort()` to introduce an order.
Args:
p1: first polyline as sequence of :class:`Vec2` objects
p2: second polyline as sequence of :class:`Vec2` objects
abs_tol: absolute tolerance for comparisons
.. versionadded:: 0.17.2
"""
intersect = _PolylineIntersection2d(p1, p2, abs_tol)
intersect.execute()
return intersect.intersections
def intersect_polylines_3d(
p1: Sequence[Vec3], p2: Sequence[Vec3], abs_tol=1e-10
) -> List[Vec3]:
"""Returns the intersection points for two polylines as list of :class:`Vec3`
objects, the list is empty if no intersection points exist.
Does not return self intersection points of `p1` or `p2`.
Duplicate intersection points are removed from the result list, but the list
does not have a particular order! You can sort the result list by
:code:`result.sort()` to introduce an order.
Args:
p1: first polyline as sequence of :class:`Vec3` objects
p2: second polyline as sequence of :class:`Vec3` objects
abs_tol: absolute tolerance for comparisons
.. versionadded:: 0.17.2
"""
intersect = _PolylineIntersection3d(p1, p2, abs_tol)
intersect.execute()
return intersect.intersections
def divide(a: int, b: int) -> Tuple[int, int, int, int]:
m = (a + b) // 2
return a, m, m, b
TCache = Dict[Tuple[int, int, int], AbstractBoundingBox]
class _PolylineIntersection:
p1: Sequence
p2: Sequence
def __init__(self):
# At each recursion level the bounding box for each half of the
# polyline will be created two times, using a cache is an advantage:
self.bbox_cache: TCache = {}
@abc.abstractmethod
def bbox(self, points: Sequence) -> AbstractBoundingBox:
...
@abc.abstractmethod
def line_intersection(self, s1: int, e1: int, s2: int, e2: int) -> None:
...
def execute(self) -> None:
l1: int = len(self.p1)
l2: int = len(self.p2)
if l1 < 2 or l2 < 2: # polylines with only one vertex
return
self.intersect(0, l1 - 1, 0, l2 - 1)
def overlap(self, s1: int, e1: int, s2: int, e2: int) -> bool:
e1 += 1
e2 += 1
# If one part of the polylines has less than 2 vertices no intersection
# calculation is required:
if e1 - s1 < 2 or e2 - s2 < 2:
return False
cache = self.bbox_cache
key1 = (1, s1, e1)
bbox1 = cache.get(key1)
if bbox1 is None:
bbox1 = self.bbox(self.p1[s1:e1])
cache[key1] = bbox1
key2 = (2, s2, e2)
bbox2 = cache.get(key2)
if bbox2 is None:
bbox2 = self.bbox(self.p2[s2:e2])
cache[key2] = bbox2
return bbox1.has_overlap(bbox2)
def intersect(self, s1: int, e1: int, s2: int, e2: int) -> None:
assert e1 > s1 and e2 > s2
if e1 - s1 == 1 and e2 - s2 == 1:
self.line_intersection(s1, e1, s2, e2)
return
s1_a, e1_b, s1_c, e1_d = divide(s1, e1)
s2_a, e2_b, s2_c, e2_d = divide(s2, e2)
if self.overlap(s1_a, e1_b, s2_a, e2_b):
self.intersect(s1_a, e1_b, s2_a, e2_b)
if self.overlap(s1_a, e1_b, s2_c, e2_d):
self.intersect(s1_a, e1_b, s2_c, e2_d)
if self.overlap(s1_c, e1_d, s2_a, e2_b):
self.intersect(s1_c, e1_d, s2_a, e2_b)
if self.overlap(s1_c, e1_d, s2_c, e2_d):
self.intersect(s1_c, e1_d, s2_c, e2_d)
class _PolylineIntersection2d(_PolylineIntersection):
def __init__(self, p1: Sequence[Vec2], p2: Sequence[Vec2], abs_tol=1e-10):
super().__init__()
self.p1 = p1
self.p2 = p2
self.intersections: List[Vec2] = []
self.abs_tol = abs_tol
def bbox(self, points: Sequence) -> AbstractBoundingBox:
return BoundingBox2d(points)
def line_intersection(self, s1: int, e1: int, s2: int, e2: int) -> None:
line1 = self.p1[s1], self.p1[e1]
line2 = self.p2[s2], self.p2[e2]
p = intersection_line_line_2d(
line1, line2, virtual=False, abs_tol=self.abs_tol
)
if p is not None and not any(
p.isclose(ip, abs_tol=self.abs_tol) for ip in self.intersections
):
self.intersections.append(p)
class _PolylineIntersection3d(_PolylineIntersection):
def __init__(self, p1: Sequence[Vec3], p2: Sequence[Vec3], abs_tol=1e-10):
super().__init__()
self.p1 = p1
self.p2 = p2
self.intersections: List[Vec3] = []
self.abs_tol = abs_tol
def bbox(self, points: Sequence) -> AbstractBoundingBox:
return BoundingBox(points)
def line_intersection(self, s1: int, e1: int, s2: int, e2: int) -> None:
line1 = self.p1[s1], self.p1[e1]
line2 = self.p2[s2], self.p2[e2]
p = intersection_line_line_3d(
line1, line2, virtual=False, abs_tol=self.abs_tol
)
if p is not None and not any(
p.isclose(ip, abs_tol=self.abs_tol) for ip in self.intersections
):
self.intersections.append(p)
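# --- Hedged usage sketch (illustrative addition, not part of the original module) ---
# A horizontal segment crossing a U-shaped polyline twice; the expected
# intersection points are (1, 1) and (3, 1).  The result list has no
# particular order.
def _intersect_polylines_2d_demo():  # pragma: no cover - illustrative only
    p1 = [Vec2(0, 1), Vec2(4, 1)]
    p2 = [Vec2(1, 0), Vec2(1, 2), Vec2(3, 2), Vec2(3, 0)]
    return intersect_polylines_2d(p1, p2)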
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/12/30 21:02
# @Author : WIX
# @File : 青蛙跳.py
"""
青蛙跳台阶, 每次可以跳1级或2级,求青蛙跳上一个n级的台阶一共有多少种跳法?
当n > 2时,第一次跳就有两种选择:
1. 第一次跳1级,后面的跳法相当于剩下n-1级的跳法,即f(n-1)
2. 第一次跳2级,后面的跳法相当于剩下n-2级的跳法,即f(n-2)
即f(n) = f(n-1) + f(n-2)
"""
class Solution(object):
def jumpfrog(self, n):
        if not isinstance(n, int) or n < 1:
return
result = [1, 2]
if n <= 2:
return result[n - 1]
for i in range(n - 2):
result[i % 2] = result[0] + result[1]
return result[n % 2 - 1]
s = Solution()
print(s.jumpfrog(4))
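# --- Hedged cross-check (illustrative addition, not part of the original) ---
# Direct implementation of the recurrence f(n) = f(n-1) + f(n-2) from the
# module docstring, used to sanity-check the constant-space version above.
def jumpfrog_recursive(n):
    if n <= 2:
        return n
    return jumpfrog_recursive(n - 1) + jumpfrog_recursive(n - 2)
assert all(s.jumpfrog(i) == jumpfrog_recursive(i) for i in range(1, 10))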
|