content stringlengths 0 to 894k | type stringclasses 2 values |
---|---|
# -*- coding: utf-8 -*-
"""
celery cli services module.
"""
from pyrin.application.services import get_component
from pyrin.task_queues.celery.cli import CeleryCLIPackage
def register_cli_handler(instance, **options):
"""
registers a new celery cli handler, or replaces the existing one
if `replace=True` is provided. otherwise, it raises an error
on adding a cli handler which is already registered.
:param CeleryCLIHandlerBase instance: celery cli handler to be registered.
it must be an instance of
CeleryCLIHandlerBase.
:keyword bool replace: specifies that if there is another registered
cli handler with the same name, replace it
with the new one, otherwise raise an error.
defaults to False.
:raises InvalidCLIHandlerTypeError: invalid cli handler type error.
:raises DuplicatedCLIHandlerError: duplicated cli handler error.
"""
get_component(CeleryCLIPackage.COMPONENT_NAME).register_cli_handler(instance, **options)
def execute(handler_name, **options):
"""
executes the handler with the given name and the given inputs.
:param str handler_name: handler name to be executed.
:raises CLIHandlerNotFoundError: cli handler not found error.
"""
return get_component(CeleryCLIPackage.COMPONENT_NAME).execute(handler_name, **options)
def get_package_class():
"""
gets the package class of celery cli manager.
:raises PackageClassIsNotSetError: package class is not set error.
:returns: type[CeleryCLIPackage]
"""
return get_component(CeleryCLIPackage.COMPONENT_NAME).get_package_class()
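# A hypothetical usage sketch (not part of this module): `MyCeleryCLIHandler`
# is an assumed subclass of CeleryCLIHandlerBase defined elsewhere.
#
#     register_cli_handler(MyCeleryCLIHandler(), replace=True)
#     execute('my_handler_name', arg='value')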
| python |
# -*- coding: utf-8 -*-
#
# This file is part of the pyFDA project hosted at https://github.com/chipmuenk/pyfda
#
# Copyright © pyFDA Project Contributors
# Licensed under the terms of the MIT License
# (see file LICENSE in root directory for details)
"""
Create a tabbed widget for all plot subwidgets in the list ``fb.plot_widgets_list``.
This list is compiled at startup in :class:`pyfda.tree_builder.Tree_Builder` and is
kept as a module variable in :mod:`pyfda.filterbroker`.
"""
import logging
logger = logging.getLogger(__name__)
import importlib
from pyfda.libs.compat import QTabWidget, QVBoxLayout, QEvent, QtCore, pyqtSignal
from pyfda.libs.pyfda_lib import pprint_log
from pyfda.pyfda_rc import params
import pyfda.filterbroker as fb
#------------------------------------------------------------------------------
class PlotTabWidgets(QTabWidget):
# incoming, connected to input_tab_widget.sig_tx in pyfdax
sig_rx = pyqtSignal(object)
# outgoing: emitted by process_sig_rx
sig_tx = pyqtSignal(object)
def __init__(self, parent):
super(PlotTabWidgets, self).__init__(parent)
self._construct_UI()
#--------------------------------------------------------------------------------
def _construct_UI(self):
"""
Initialize the UI with tabbed subwidgets: dynamically instantiate each widget
from the dict `fb.plot_classes` and try to
- set the TabToolTip from the instance attribute `tool_tip`
- set the tab label from the instance attribute `tab_label`
for each widget.
- connect the available signals of all subwidgets (not all widgets have
both `sig_rx` and `sig_tx` signals).
- `self.sig_rx` is distributed to all `inst.sig_rx` signals
- all `inst.sig_tx` signals are collected in `self.sig_tx`
- `self.sig_tx.connect(self.sig_rx)` distributes incoming signals (via
pyfdax or coming from the input widgets) among all input widgets.
In order to prevent infinite loops, every widget needs to block
incoming signals with its own name!
"""
tabWidget = QTabWidget(self)
tabWidget.setObjectName("plot_tabs")
n_wdg = 0 # number and ...
inst_wdg_str = "" # ... full names of successfully instantiated plot widgets
#
for plot_class in fb.plot_classes:
try:
mod_fq_name = fb.plot_classes[plot_class]['mod'] # fully qualified module name
mod = importlib.import_module(mod_fq_name)
wdg_class = getattr(mod, plot_class)
# and instantiate it
inst = wdg_class(self)
except ImportError as e:
logger.warning('Class "{0}" could not be imported from {1}:\n{2}.'\
.format(plot_class, mod_fq_name, e))
continue # unsuccessful, try next widget
if hasattr(inst, 'tab_label'):
tabWidget.addTab(inst, inst.tab_label)
else:
tabWidget.addTab(inst, "not set")
if hasattr(inst, 'tool_tip'):
tabWidget.setTabToolTip(n_wdg, inst.tool_tip)
if hasattr(inst, 'sig_tx'):
inst.sig_tx.connect(self.sig_tx)
if hasattr(inst, 'sig_rx'):
self.sig_rx.connect(inst.sig_rx)
n_wdg += 1 # successfully instantiated one more widget
inst_wdg_str += '\t' + mod_fq_name + "." + plot_class + '\n'
if len(inst_wdg_str) == 0:
logger.warning("No plotting widgets found!")
else:
logger.debug("Imported {0:d} plotting classes:\n{1}".format(n_wdg, inst_wdg_str))
#----------------------------------------------------------------------
layVMain = QVBoxLayout()
layVMain.addWidget(tabWidget)
layVMain.setContentsMargins(*params['wdg_margins'])#(left, top, right, bottom)
self.setLayout(layVMain)
#----------------------------------------------------------------------
# GLOBAL SIGNALS & SLOTs
#----------------------------------------------------------------------
# self.sig_rx.connect(inst.sig_rx) # this happens in _construct_UI()
#----------------------------------------------------------------------
# LOCAL SIGNALS & SLOTs
#----------------------------------------------------------------------
self.timer_id = QtCore.QTimer()
self.timer_id.setSingleShot(True)
# redraw current widget at timeout (timer was triggered by resize event):
self.timer_id.timeout.connect(self.current_tab_redraw)
self.sig_tx.connect(self.sig_rx) # loop back to local inputs
# self.sig_rx.connect(self.log_rx) # enable for debugging
# When user has selected a different tab, trigger a redraw of current tab
tabWidget.currentChanged.connect(self.current_tab_changed)
# The following does not work: maybe current scope must be left?
# tabWidget.currentChanged.connect(tabWidget.currentWidget().redraw)
tabWidget.installEventFilter(self)
"""
https://stackoverflow.com/questions/29128936/qtabwidget-size-depending-on-current-tab
The QTabWidget won't select the biggest widget's height as its own height
unless you use layout on the QTabWidget. Therefore, if you want to change
the size of QTabWidget manually, remove the layout and call QTabWidget::resize
according to the currentChanged signal.
You can set the size policy of the widget that is displayed to QSizePolicy::Preferred
and the other ones to QSizePolicy::Ignored. After that call adjustSize to update the sizes.
void MainWindow::updateSizes(int index)
{
for(int i=0;i<ui->tabWidget->count();i++)
if(i!=index)
ui->tabWidget->widget(i)->setSizePolicy(QSizePolicy::Ignored, QSizePolicy::Ignored);
ui->tabWidget->widget(index)->setSizePolicy(QSizePolicy::Preferred, QSizePolicy::Preferred);
ui->tabWidget->widget(index)->resize(ui->tabWidget->widget(index)->minimumSizeHint());
ui->tabWidget->widget(index)->adjustSize();
resize(minimumSizeHint());
adjustSize();
}
adjustSize(): The last two lines resize the main window itself. You might want to avoid it,
depending on your application. For example, if you set the rest of the widgets
to expand into the space just made available, it's not so nice if the window
resizes itself instead.
"""
#------------------------------------------------------------------------------
def log_rx(self, dict_sig=None):
"""
Enable `self.sig_rx.connect(self.log_rx)` above for debugging.
"""
if type(dict_sig) == dict:
logger.warning("SIG_RX\n{0}"\
.format(pprint_log(dict_sig)))
else:
logger.warning("empty dict")
#------------------------------------------------------------------------------
def current_tab_changed(self):
self.sig_tx.emit({'sender':__name__, 'ui_changed':'tab'})
#------------------------------------------------------------------------------
def current_tab_redraw(self):
self.sig_tx.emit({'sender':__name__, 'ui_changed':'resized'})
#------------------------------------------------------------------------------
def eventFilter(self, source, event):
"""
Filter all events generated by the QTabWidget. Source and type of all
events generated by monitored objects are passed to this eventFilter,
evaluated and passed on to the next hierarchy level.
This filter stops and restarts a one-shot timer for every resize event.
When the timer generates a timeout after 500 ms, ``current_tab_redraw()`` is
called by the timer.
"""
if isinstance(source, QTabWidget):
if event.type() == QEvent.Resize:
self.timer_id.stop()
self.timer_id.start(500)
# Call base class method to continue normal event processing:
return super(PlotTabWidgets, self).eventFilter(source, event)
#------------------------------------------------------------------------
def main():
import sys
from pyfda import pyfda_rc as rc
from pyfda.libs.compat import QApplication
app = QApplication(sys.argv)
app.setStyleSheet(rc.qss_rc)
mainw = PlotTabWidgets(None)
mainw.resize(300,400)
app.setActiveWindow(mainw)
mainw.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
# test with: python -m pyfda.plot_widgets.plot_tab_widgets | python |
'''
* Chapter 05: Preprocessing
5.2 Color Histograms
> Histogram equalization
'''
import cv2
from matplotlib import pyplot as grafico
imagemOriginal = cv2.imread("maquina.jpg", 0)
imagemEqualizada = cv2.equalizeHist(imagemOriginal)
cv2.imshow("Imagem Original", imagemOriginal)
cv2.imshow("Imagem Equalizada", imagemEqualizada)
grafico.hist(imagemOriginal.ravel(), 256, [0,256])
grafico.figure();
grafico.hist(imagemEqualizada.ravel(), 256, [0,256])
grafico.show()
# The cv2.imshow windows need an event-loop call to render and stay
# responsive; wait for a key press, then close them.
cv2.waitKey(0)
cv2.destroyAllWindows()
# The equalizeHist function equalizes an image's histogram.
# Running this script produces:
# the original image;
# the image with its histogram equalized;
# plots of the histograms of both images. | python |
__author__ = 'lucabasa'
__version__ = '1.1.0'
__status__ = 'development'
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import VarianceThreshold
from sklearn.decomposition import PCA
pd.set_option('max_columns', 200)
import utility as ut
def train_svc(df_train, df_test, n_splits=25, pca=False):
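    """
    Fits one SVC per value of 'wheezy-copper-turtle-magic' (512 sub-datasets).
    Features are reduced either with PCA (``pca=True``) or a variance threshold,
    then scaled and fed to a degree-4 polynomial-kernel SVC inside a stratified
    K-fold loop. Returns out-of-fold train predictions and fold-averaged test
    predictions.
    """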
train = df_train.copy()
test = df_test.copy()
oof = np.zeros(len(train))
preds = np.zeros(len(test))
cols = [c for c in train.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']]
for i in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==i].copy()
test2 = test[test['wheezy-copper-turtle-magic']==i].copy()
idx1 = train2.index
idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
if pca:
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
#data2 = StandardScaler().fit_transform(PCA(n_components=40, random_state=51).fit_transform(data[cols]))
data2 = StandardScaler().fit_transform(PCA(svd_solver='full',n_components='mle').fit_transform(data[cols]))
train3 = data2[:train2.shape[0]]
test3 = data2[train2.shape[0]:]
else:
sel = VarianceThreshold(threshold=1.5).fit(train2[cols])
train3 = sel.transform(train2[cols])
test3 = sel.transform(test2[cols])
skf = StratifiedKFold(n_splits=n_splits, random_state=15)
for train_index, test_index in skf.split(train3, train2['target']):
clf = Pipeline([('scaler', StandardScaler()),
('svn', SVC(probability=True,kernel='poly',degree=4,gamma='auto'))])
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
oof[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1]
preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
ut.report_oof(df_train, oof)
return oof, preds
def main():
df_train = pd.read_csv('data/train.csv')
df_test = pd.read_csv('data/test.csv')
oof_svc, preds_svc = train_svc(df_train, df_test)
ut.plot_results(oof_svc, preds_svc, df_train, 'svc')
if __name__ == '__main__':
main()
| python |
"""
Simulates the initial state discrimination experiment using different
methods, to compare the resulting error rates.
"""
import torch
from perm_hmm.util import num_to_data
from perm_hmm.postprocessing import ExactPostprocessor, EmpiricalPostprocessor
from perm_hmm.classifiers.perm_classifier import PermClassifier
class HMMSimulator(object):
"""
Runs an experiment where data is generated by an HMM, then classified by
a classifier.
Instances of this class have the following attributes:
``phmm``:
The :py:class:`~perm_hmm.models.hmms.PermutedDiscreteHMM` whose
misclassification rates will be computed.
"""
def __init__(self, phmm):
"""
Initializes the experiment.
:param perm_hmm.models.hmms.PermutedDiscreteHMM phmm:
the model whose
misclassification rate will be computed.
"""
self.phmm = phmm
""":py:class:`PermutedDiscreteHMM`
The model whose misclassification rates we wish to analyze.
"""
def all_classifications(self, num_steps, classifier=None, perm_policy=None, verbosity=0):
"""
Computes the data required to compute the exact misclassification rate for the given classifier.
This method always calls ``perm_policy.reset()`` if ``perm_policy`` is
not ``None``.
:param num_steps: Number of steps, int.
:param classifier: Defaults to
:py:class:`~perm_hmm.classifiers.perm_classifier.PermClassifier`,
initialized with the hmm ``self.phmm``.
:param perm_policy: Defaults to None. If specified, will call
``perm_policy.get_perms`` to compute the permutations.
:param verbosity: If ``verbosity == 0``, only the
:py:class:`~perm_hmm.postprocessing.ExactPostprocessor` needed to
compute the misclassification rates is returned.
If ``verbosity == 1``, this method returns a tuple, with the
postprocessor as the first element, and a dictionary with keys:
b"posterior_log_initial_state_dist":
The posterior log initial state distribution used to compute
the classifications.
b"perms":
Only present if ``perm_policy`` is not ``None``. The
permutations computed from ``perm_policy.get_perms()``.
If ``verbosity > 1``, this method returns a tuple, with the
postprocessor as the first element, and a dictionary with keys as in
the case with ``verbosity == 1`` and in addition,
b"history":
Whatever is stored in ``perm_policy.calc_history`` after
calling ``perm_policy.get_perms``.
Note that if ``verbosity > 1``, the simulator calls
``perm_policy.reset(save_history=True)`` before calling
``perm_policy.get_perms()``.
"""
base = len(self.phmm.observation_dist.enumerate_support())
data = torch.stack(
[num_to_data(num, num_steps, base) for num in range(base**num_steps)]
).float()
if verbosity > 1:
save_history = True
else:
save_history = False
if classifier is None:
classifier = PermClassifier(self.phmm)
if perm_policy is not None:
perm_policy.reset(save_history=save_history)
perms = perm_policy.get_perms(data)
if save_history:
history = perm_policy.calc_history
classi_result = classifier.classify(data, perms=perms, verbosity=verbosity)
else:
perms = None
classi_result = classifier.classify(data, verbosity=verbosity)
if verbosity:
classifications, classi_dict = classi_result
if perm_policy is not None:
classi_dict[b"perms"] = perms
if save_history:
classi_dict[b"history"] = history
else:
classifications = classi_result
lp = self.phmm.log_prob(data, perms)
dist = self.phmm.posterior_log_initial_state_dist(data, perms)
log_joint = dist.T + lp
ep = ExactPostprocessor(
log_joint,
classifications,
)
if verbosity:
return ep, classi_dict
return ep
def simulate(self, num_steps, num_samples, classifier=None, perm_policy=None, verbosity=0):
"""
Computes the data required to compute the misclassification rates
of the given classifier.
This method always calls ``perm_policy.reset()`` if ``perm_policy`` is
not ``None``.
:param num_steps: Number of steps, int.
:param num_samples: number of samples to draw from the hmm, int
:param classifier: Defaults to
:py:class:`~perm_hmm.classifiers.perm_classifier.PermClassifier`,
initialized with the hmm ``self.phmm``.
:param perm_policy: Defaults to None. If specified, will call
``self.phmm.sample(perm_policy=perm_policy)``.
:param verbosity: If ``verbosity == 0``, only the
:py:class:`~perm_hmm.postprocessing.EmpiricalPostprocessor` needed
to compute the misclassification rates is returned.
If ``verbosity == 1``, this method returns a tuple, with the
postprocessor as the first element, and a dictionary with keys:
b"posterior_log_initial_state_dist":
The posterior log initial state distribution used to compute
the classifications.
b"perms":
Only present if ``perm_policy`` is not ``None``. The
permutations computed from ``perm_policy.get_perms()``.
If ``verbosity > 1``, this method returns a tuple, with the
postprocessor as the first element, and a dictionary with keys as in
the case with ``verbosity == 1`` and in addition,
b"history":
Whatever is stored in ``perm_policy.calc_history`` after
calling ``perm_policy.get_perms``.
Note that if ``verbosity > 1``, the simulator calls
``perm_policy.reset(save_history=True)`` before calling
``perm_policy.get_perms()``.
"""
if verbosity > 1:
save_history = True
else:
save_history = False
if perm_policy is not None:
perm_policy.reset(save_history=save_history)
output = self.phmm.sample((num_samples, num_steps), perm_policy=perm_policy)
if perm_policy is not None:
perms = perm_policy.perm_history
else:
perms = None
history = None
if save_history:
if perm_policy is not None:
history = perm_policy.calc_history
data = output.observations
if classifier is None:
classifier = PermClassifier(self.phmm)
if perms is not None:
classi_result = classifier.classify(data, perms=perms, verbosity=verbosity)
else:
classi_result = classifier.classify(data, verbosity=verbosity)
if verbosity:
classifications, classi_dict = classi_result
classi_dict[b"data"] = data
if perm_policy is not None:
classi_dict[b"perms"] = perms
if history is not None:
classi_dict[b"history"] = history
else:
classifications = classi_result
ep = EmpiricalPostprocessor(
output.states[..., 0],
classifications,
)
if verbosity:
return ep, classi_dict
return ep
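# A hypothetical usage sketch; `phmm` is assumed to be an already constructed
# perm_hmm.models.hmms.PermutedDiscreteHMM instance (its constructor is not
# shown in this module).
#
#     simulator = HMMSimulator(phmm)
#     exact_pp = simulator.all_classifications(num_steps=5)
#     empirical_pp, extras = simulator.simulate(num_steps=5, num_samples=1000,
#                                               verbosity=1)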
| python |
import json
import pickle
import pandas as pd
from decimal import Decimal
from django.db.models import Avg
from recommender.models import Rating
from scripts.recommenders.base_recommender import BaseRecommender
class SVDRecommender(BaseRecommender):
def __init__(self, save_path='./models/SVD/model/'):
self.save_path = save_path
self.avg = Decimal(list(Rating.objects.all().aggregate(Avg('rating')).values())[0])
self.load_model(self.save_path)
def load_model(self, save_path):
with open(save_path + 'user_bias.data', 'rb') as file:
self.user_bias = pickle.load(file)
with open(save_path + 'item_bias.data', 'rb') as file:
self.item_bias = pickle.load(file)
with open(save_path + 'user_factors.json', 'r') as file:
self.user_factors = pd.DataFrame(json.load(file)).T
with open(save_path + 'item_factors.json', 'r') as file:
self.item_factors = pd.DataFrame(json.load(file)).T
def recommend_items(self, user_id, num=10):
users_items = Rating.objects.filter(user_id=user_id).order_by('-rating')[:100]
return self.recommend_items_by_ratings(user_id, users_items.values(), num)
def recommend_items_by_ratings(self, user_id, users_items, num=10):
rated_movies_dict = {movie['movie_id']: movie['rating'] for movie in users_items}
recs = {}
if str(user_id) in self.user_factors.columns:
user = self.user_factors[str(user_id)]
scores = self.item_factors.T.dot(user)
rating = scores.sort_values(ascending=False)[:num + len(rated_movies_dict)]
user_bias = 0
if user_id in self.user_bias.keys():
user_bias = self.user_bias[user_id]
elif int(user_id) in self.user_bias.keys():
user_bias = self.user_bias[int(user_id)]
rating += float(user_bias + self.avg)
recs = {r[0]: {'prediction': r[1] + float(self.item_bias[r[0]])} for r in zip(rating.index, rating) if r[0] not in rated_movies_dict}
sorted_items = sorted(recs.items(), key=lambda item: -float(item[1]['prediction']))[:num]
return sorted_items
def predict_score(self, user_id, item_id):
if str(user_id) in self.user_factors.columns:
user = self.user_factors[str(user_id)]
scores = self.item_factors.T.dot(user)
user_bias = 0
if user_id in self.user_bias.keys():
user_bias = self.user_bias[user_id]
elif int(user_id) in self.user_bias.keys():
user_bias = self.user_bias[int(user_id)]
rating = float(user_bias + self.avg)
try:
return Decimal(scores[item_id] + rating)
except KeyError:  # item_id not present in the item factor matrix
return Decimal(rating)
return Decimal(0.0)
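# A hypothetical usage sketch, assuming Django is configured and the pickled
# bias files and factor JSON files exist under ./models/SVD/model/:
#
#     recommender = SVDRecommender()
#     top_ten = recommender.recommend_items(user_id=1, num=10)
#     score = recommender.predict_score(user_id=1, item_id='2571')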
| python |
import ast
from PythonVoiceCodingPlugin.library import nearest_node_from_offset,sorted_by_source_region,get_source_region,node_from_range,make_flat
from PythonVoiceCodingPlugin.library.info import *
from PythonVoiceCodingPlugin.library.LCA import LCA
from PythonVoiceCodingPlugin.library.level_info import LevelVisitor
from PythonVoiceCodingPlugin.library.partial import partially_parse, line_partial
from PythonVoiceCodingPlugin.library.traverse import search_upwards,search_upwards_log, find_matching,match_node, find_all_nodes,search_upwards_for_parent
from PythonVoiceCodingPlugin.queries.abstract import SelectionQuery
from PythonVoiceCodingPlugin.queries.tiebreak import tiebreak_on_lca
from PythonVoiceCodingPlugin.queries.strategies import adjective_strategy,decode_abstract_vertical,translate_adjective,obtain_result
class SelectBigRoi(SelectionQuery):
"""docstring for BigRoi"""
def handle_single(self,view_information,query_description,extra = {}):
f = query_description["format"]
possibilities = {
1: self.case_one,2: self.case_two,3: self.case_three,4: self.case_four,
}
return possibilities[f](view_information,query_description, extra)
def preliminary(self,view_information,query_description, extra = {}):
selection = self._get_selection(view_information,extra)
build = self.general_build
if not build or not build[0]:
return None,None,None,None
root,atok,m,r = build
selection = m.forward(selection)
origin = nearest_node_from_offset(root,atok, selection[0]) if selection[0]==selection[1] else node_from_range(root,atok, selection)
definition_node = search_upwards(origin,ast.FunctionDef) # maybe need to change that in the future
# in order to find the outermost function.
if definition_node and definition_node.first_token.startpos > selection[1]:
token = atok.get_token_from_offset(selection[0])
while token.string.isspace():
token = atok.prev_token( token )
s = token.startpos
origin = nearest_node_from_offset(root,atok, s)
definition_node = search_upwards(origin,ast.FunctionDef)
definition_node = (
definition_node
if definition_node and query_description["big_roi"] not in ["import statement"]
else root
)
return build, selection, origin, definition_node
def decode(self,query_description):
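# Maps a spoken "big_roi" phrase to a triple of (target AST node types,
# excluded node types, information-extracting function) via the
# `possibilities` table below; an optional "big_roi_sub_index" wraps the
# extractor so that it returns the requested sub-element of the node.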
standard = lambda x:x
possibilities = {
"return value": ((ast.Return,ast.Yield,ast.YieldFrom),(),get_return_value),
"pass":(ast.Pass,(),standard),
"break":(ast.Break,(),standard),
"continue":(ast.Continue,(),standard),
"if condition":(ast.If,(),get_pure_if_condition),
"else if condition":(ast.If,(),get_elif_condition),
"while condition":(ast.While,(),get_condition),
"if expression":(ast.IfExp,(),standard),
"if expression condition":(ast.IfExp,(),get_condition),
"if expression body":(ast.IfExp,(),get_body),
"comprehension condition":(ast.comprehension,(),get_comprehension_condition),
"assertion message":(ast.Assert,(), get_message),
"assertion condition":(ast.Assert,(), get_condition),
"assignment left":((ast.Assign,ast.AugAssign),(),get_left),
"assignment right":((ast.Assign,ast.AugAssign),(),get_right),
"assignment full":((ast.Assign,ast.AugAssign),(),standard),
"expression statement":(ast.Expr,(),standard),
"iterable":((ast.For,ast.comprehension),(),get_iterable),
"iterator":((ast.For,ast.comprehension),(),get_iterator),
"import statement":((ast.Import,ast.ImportFrom),(),standard),
}
temporary = possibilities[query_description["big_roi"]]
if "big_roi_sub_index" in query_description:
if query_description["big_roi_sub_index"] == 0:
return possibilities[query_description["big_roi"]]
else:
index = query_description["big_roi_sub_index"]
def modified_information(x, information,index):
data = information(x)
return get_sub_index(data,index)
y = lambda x: temporary[2](x)
y.secondary = lambda x: modified_information(x,temporary[2],index-1)
return (temporary[0],temporary[1],y)
def case_one(self,view_information,query_description, extra = {}):
################################################################
# <big_roi>
###############################################################
build, selection, origin, definition_node = self.preliminary(view_information, query_description,extra)
targets, exclusions, information = self.decode(query_description)
information = getattr(information,"secondary",information)
candidates = tiebreak_on_lca(definition_node,origin,find_all_nodes(definition_node, targets, exclusions))
candidates = [information(x) for x in candidates if information(x)]
result, alternatives = obtain_result(None, candidates)
return self._backward_result(result, alternatives,build)
def case_two(self,view_information,query_description, extra = {}):
################################################################
# <adjective> <big_roi>
###############################################################
build, selection, origin, definition_node = self.preliminary(view_information, query_description,extra)
targets, exclusions, information = self.decode(query_description)
temporary_information = lambda x: information(x) if match_node(x,targets,exclusions) else None
additional_parameters = {}
root,atok,m,r = build
if selection[0]!=selection[1]:
additional_parameters["small_root"] = origin
additional_parameters["only_information"] = True
# just looking on the shape of this code you know there's a bug in here somewhere:)
result, alternatives = adjective_strategy(
atok=atok,
root = definition_node,
adjective_word = query_description["adjective"],
level_nodes = find_all_nodes(definition_node, (ast.If,ast.While,ast.For,ast.Try,ast.With,ast.FunctionDef)),
information_nodes = find_matching(definition_node,temporary_information),
**additional_parameters
)
information = getattr(information,"secondary",information)
result = information(result) if result else None
alternatives =[ information(x) for x in alternatives] if alternatives else []
return self._backward_result(result, alternatives,build)
def case_three(self,view_information,query_description, extra = {}):
################################################################
# <vertical_abstract_only_direction> [<ndir>] <big_roi> [<big_roi_sub_index>]
###############################################################
build, selection, origin, definition_node = self.preliminary(view_information, query_description,extra)
targets, exclusions, information = self.decode(query_description)
temporary_information = lambda x: information(x) if match_node(x,targets,exclusions) else None
root,atok,m,r = build
direction = query_description["vertical_abstract_only_direction"]
ndir = query_description["ndir"]
row, column = view_information["rowcol"](m.backward(selection)[0])
# bug fixing
test_result = decode_abstract_vertical(root,atok,targets,row+1, 1,direction,True,
temporary_information,want_alternatives = False)
l = search_upwards_log(origin,ast.stmt)
if test_result in [l[0]] + l[1] and row + 1>=test_result.first_token.start[0]:
ndir = ndir + 1
result,alternatives = decode_abstract_vertical(root,atok,targets,row+1, ndir,direction,True,
temporary_information,want_alternatives = True)
if result:
new_definition_node = search_upwards(result,ast.FunctionDef)
if definition_node is not new_definition_node and new_definition_node is not None:
alternatives = tiebreak_on_lca(new_definition_node,result,find_all_nodes(new_definition_node,targets , exclusions))
result, alternatives = obtain_result(result, alternatives)
information = getattr(information,"secondary",information)
result = information(result) if result else None
alternatives = [information(x) for x in alternatives] if alternatives else []
return self._backward_result(result, alternatives,build)
def case_four(self,view_information,query_description, extra = {}):
################################################################
# [smart] <vertical_abstract_only_direction> [<ndir>] <block> [<adjective>] <big_roi> [<big_roi_sub_index>]
###############################################################
build, selection, origin, definition_node = self.preliminary(view_information, query_description,extra)
targets, exclusions, information = self.decode(query_description)
temporary_information = lambda x: match_node(x,ast.FunctionDef)
root,atok,m,r = build
direction = query_description["vertical_abstract_only_direction"]
ndir = query_description["ndir"]
row = view_information["rowcol"](selection[0])[0] + 1 if definition_node is root else definition_node.first_token.start[0]
bonus = 1 if definition_node.first_token.startpos > selection[1] else 0
t = decode_abstract_vertical(root,atok,targets,row, ndir + bonus,direction,True,temporary_information)
if query_description["adjective"]=="None":
information = getattr(information,"secondary",information)
candidates = tiebreak_on_lca(root,definition_node,find_all_nodes(t, targets, exclusions))
candidates = [information(x) for x in candidates if information(x)]
result, alternatives = obtain_result(None, candidates)
return self._backward_result(result, alternatives,build)
else:
additional_parameters = {}
result, alternatives = adjective_strategy(
atok=atok,
root = t,
adjective_word = query_description["adjective"],
level_nodes = find_all_nodes(t,(ast.If,ast.While,ast.For,ast.Try,ast.With,ast.FunctionDef)),
information_nodes = find_matching(t,lambda x: information(x) if match_node(x,targets,exclusions) else None),
**additional_parameters
)
information = getattr(information,"secondary",information)
result = information(result) if result else None
alternatives =[ information(x) for x in alternatives] if alternatives else []
return self._backward_result(result, alternatives,build)
| python |
def get_current_admin():
def decorator(func):
setattr(func, 'get_current_admin', True)
return func
return decorator | python |
"""D-Bus interface for rauc."""
from enum import Enum
import logging
from typing import Optional
from ..exceptions import DBusError, DBusInterfaceError
from ..utils.gdbus import DBus
from .interface import DBusInterface
from .utils import dbus_connected
_LOGGER: logging.Logger = logging.getLogger(__name__)
DBUS_NAME = "de.pengutronix.rauc"
DBUS_OBJECT = "/"
class RaucState(str, Enum):
"""Rauc slot states."""
GOOD = "good"
BAD = "bad"
ACTIVE = "active"
class Rauc(DBusInterface):
"""Handle D-Bus interface for rauc."""
def __init__(self):
"""Initialize Properties."""
self._operation: Optional[str] = None
self._last_error: Optional[str] = None
self._compatible: Optional[str] = None
self._variant: Optional[str] = None
self._boot_slot: Optional[str] = None
async def connect(self):
"""Connect to D-Bus."""
try:
self.dbus = await DBus.connect(DBUS_NAME, DBUS_OBJECT)
except DBusError:
_LOGGER.warning("Can't connect to rauc")
except DBusInterfaceError:
_LOGGER.warning("Host has no rauc support. OTA updates have been disabled.")
@property
def operation(self) -> Optional[str]:
"""Return the current (global) operation."""
return self._operation
@property
def last_error(self) -> Optional[str]:
"""Return the last message of the last error that occurred."""
return self._last_error
@property
def compatible(self) -> Optional[str]:
"""Return the system compatible string."""
return self._compatible
@property
def variant(self) -> Optional[str]:
"""Return the system variant string."""
return self._variant
@property
def boot_slot(self) -> Optional[str]:
"""Return the used boot slot."""
return self._boot_slot
@dbus_connected
def install(self, raucb_file):
"""Install rauc bundle file.
Return a coroutine.
"""
return self.dbus.Installer.Install(raucb_file)
@dbus_connected
def get_slot_status(self):
"""Get slot status.
Return a coroutine.
"""
return self.dbus.Installer.GetSlotStatus()
@dbus_connected
def signal_completed(self):
"""Return a signal wrapper for completed signal.
Return a coroutine.
"""
return self.dbus.wait_signal(f"{DBUS_NAME}.Installer.Completed")
@dbus_connected
def mark(self, state: RaucState, slot_identifier: str):
"""Get slot status.
Return a coroutine.
"""
return self.dbus.Installer.Mark(state, slot_identifier)
@dbus_connected
async def update(self):
"""Update Properties."""
data = await self.dbus.get_properties(f"{DBUS_NAME}.Installer")
if not data:
_LOGGER.warning("Can't get properties for rauc")
return
self._operation = data.get("Operation")
self._last_error = data.get("LastError")
self._compatible = data.get("Compatible")
self._variant = data.get("Variant")
self._boot_slot = data.get("BootSlot")
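# A hypothetical usage sketch, assuming a running asyncio event loop and a
# host that actually exposes the rauc D-Bus service:
#
#     rauc = Rauc()
#     await rauc.connect()
#     await rauc.update()
#     print(rauc.boot_slot, rauc.operation)
#     await rauc.mark(RaucState.GOOD, "booted")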
| python |
#
# Copyright (c) 2015-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from nfv_common import debug
from nfv_vim.rpc._rpc_defs import RPC_MSG_RESULT
from nfv_vim.rpc._rpc_defs import RPC_MSG_TYPE
from nfv_vim.rpc._rpc_defs import RPC_MSG_VERSION
from nfv_vim.rpc._rpc_message import RPCMessage
DLOG = debug.debug_get_logger('nfv_vim.rpc.instance')
class APIRequestCreateInstance(RPCMessage):
"""
RPC API Request Message - Create Instance
"""
name = None
instance_type_uuid = None
image_uuid = None
vcpus = None
memory_mb = None
disk_gb = None
ephemeral_gb = None
swap_gb = None
network_uuid = None
auto_recovery = None
live_migration_timeout = None
live_migration_max_downtime = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.CREATE_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestCreateInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['name'] = self.name
msg['instance_type_uuid'] = self.instance_type_uuid
msg['image_uuid'] = self.image_uuid
msg['vcpus'] = self.vcpus
msg['memory_mb'] = self.memory_mb
msg['disk_gb'] = self.disk_gb
msg['ephemeral_gb'] = self.ephemeral_gb
msg['swap_gb'] = self.swap_gb
msg['network_uuid'] = self.network_uuid
msg['sw:wrs:auto_recovery'] = self.auto_recovery
msg['hw:wrs:live_migration_timeout'] = self.live_migration_timeout
msg['hw:wrs:live_migration_max_downtime'] \
= self.live_migration_max_downtime
def deserialize_payload(self, msg):
self.name = msg.get('name', None)
self.instance_type_uuid = msg.get('instance_type_uuid', None)
self.image_uuid = msg.get('image_uuid', None)
self.vcpus = msg.get('vcpus', None)
self.memory_mb = msg.get('memory_mb', None)
self.disk_gb = msg.get('disk_gb', None)
self.ephemeral_gb = msg.get('ephemeral_gb', None)
self.swap_gb = msg.get('swap_gb', None)
self.network_uuid = msg.get('network_uuid', None)
self.auto_recovery = msg.get('sw:wrs:auto_recovery', None)
self.live_migration_timeout = msg.get('hw:wrs:live_migration_timeout',
None)
self.live_migration_max_downtime \
= msg.get('hw:wrs:live_migration_max_downtime', None)
def __str__(self):
return "create-instance request: %s" % self.name
class APIResponseCreateInstance(RPCMessage):
"""
RPC API Response Message - Create Instance
"""
uuid = None
name = None
admin_state = None
oper_state = None
avail_status = None
action = None
host_uuid = None
host_name = None
instance_type_original_name = None
image_uuid = None
vcpus = None
memory_mb = None
disk_gb = None
ephemeral_gb = None
swap_gb = None
network_uuid = None
auto_recovery = None
live_migration_timeout = None
live_migration_max_downtime = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.CREATE_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseCreateInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
msg['name'] = self.name
msg['admin_state'] = self.admin_state
msg['oper_state'] = self.oper_state
msg['avail_status'] = self.avail_status
msg['action'] = self.action
msg['host_uuid'] = self.host_uuid
msg['host_name'] = self.host_name
msg['instance_type_original_name'] = self.instance_type_original_name
msg['image_uuid'] = self.image_uuid
msg['vcpus'] = self.vcpus
msg['memory_mb'] = self.memory_mb
msg['disk_gb'] = self.disk_gb
msg['ephemeral_gb'] = self.ephemeral_gb
msg['swap_gb'] = self.swap_gb
msg['network_uuid'] = self.network_uuid
msg['sw:wrs:auto_recovery'] = self.auto_recovery
msg['hw:wrs:live_migration_timeout'] = self.live_migration_timeout
msg['hw:wrs:live_migration_max_downtime'] \
= self.live_migration_max_downtime
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
self.name = msg.get('name', None)
self.admin_state = msg.get('admin_state', None)
self.oper_state = msg.get('oper_state', None)
self.avail_status = msg.get('avail_status', None)
self.action = msg.get('action', None)
self.host_uuid = msg.get('host_uuid', None)
self.host_name = msg.get('host_name', None)
self.instance_type_original_name = msg.get(
'instance_type_original_name', None)
self.image_uuid = msg.get('image_uuid', None)
self.vcpus = msg.get('vcpus', None)
self.memory_mb = msg.get('memory_mb', None)
self.disk_gb = msg.get('disk_gb', None)
self.ephemeral_gb = msg.get('ephemeral_gb', None)
self.swap_gb = msg.get('swap_gb', None)
self.network_uuid = msg.get('network_uuid', None)
self.auto_recovery = msg.get('sw:wrs:auto_recovery', None)
self.live_migration_timeout = msg.get('hw:wrs:live_migration_timeout',
None)
self.live_migration_max_downtime \
= msg.get('hw:wrs:live_migration_max_downtime', None)
def __str__(self):
return "create-instance response: %s" % self.uuid
class APIRequestStartInstance(RPCMessage):
"""
RPC API Request Message - Start Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.START_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestStartInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "start-instance request: %s" % self.uuid
class APIResponseStartInstance(RPCMessage):
"""
RPC API Response Message - Start Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.START_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseStartInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "start-instance response: %s" % self.uuid
class APIRequestStopInstance(RPCMessage):
"""
RPC API Request Message - Stop Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.STOP_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestStopInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "stop-instance request: %s" % self.uuid
class APIResponseStopInstance(RPCMessage):
"""
RPC API Response Message - Stop Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.STOP_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseStopInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "stop-instance response: %s" % self.uuid
class APIRequestPauseInstance(RPCMessage):
"""
RPC API Request Message - Pause Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.PAUSE_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestPauseInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "pause-instance request: %s" % self.uuid
class APIResponsePauseInstance(RPCMessage):
"""
RPC API Response Message - Pause Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.PAUSE_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponsePauseInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "pause-instance response: %s" % self.uuid
class APIRequestUnpauseInstance(RPCMessage):
"""
RPC API Request Message - Unpause Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.UNPAUSE_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestUnpauseInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "unpause-instance request: %s" % self.uuid
class APIResponseUnpauseInstance(RPCMessage):
"""
RPC API Response Message - Unpause Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.UNPAUSE_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseUnpauseInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "unpause-instance response: %s" % self.uuid
class APIRequestSuspendInstance(RPCMessage):
"""
RPC API Request Message - Suspend Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.SUSPEND_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestSuspendInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "suspend-instance request: %s" % self.uuid
class APIResponseSuspendInstance(RPCMessage):
"""
RPC API Response Message - Suspend Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.SUSPEND_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseSuspendInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "suspend-instance response: %s" % self.uuid
class APIRequestResumeInstance(RPCMessage):
"""
RPC API Request Message - Resume Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.RESUME_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestResumeInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "resume-instance request: %s" % self.uuid
class APIResponseResumeInstance(RPCMessage):
"""
RPC API Response Message - Resume Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.RESUME_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseResumeInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "resume-instance response: %s" % self.uuid
class APIRequestRebootInstance(RPCMessage):
"""
RPC API Request Message - Reboot Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.REBOOT_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestRebootInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "reboot-instance request: %s" % self.uuid
class APIResponseRebootInstance(RPCMessage):
"""
RPC API Response Message - Reboot Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.REBOOT_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseRebootInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "reboot-instance response: %s" % self.uuid
class APIRequestLiveMigrateInstance(RPCMessage):
"""
RPC API Request Message - Live Migrate Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.LIVE_MIGRATE_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestLiveMigrateInstance, self).__init__(msg_version,
msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "live-migrate-instance request: %s" % self.uuid
class APIResponseLiveMigrateInstance(RPCMessage):
"""
RPC API Response Message - Live Migrate Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.LIVE_MIGRATE_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseLiveMigrateInstance, self).__init__(msg_version,
msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "live-migrate-instance response: %s" % self.uuid
class APIRequestColdMigrateInstance(RPCMessage):
"""
RPC API Request Message - Cold Migrate Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.COLD_MIGRATE_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestColdMigrateInstance, self).__init__(msg_version,
msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "cold-migrate-instance request: %s" % self.uuid
class APIResponseColdMigrateInstance(RPCMessage):
"""
RPC API Response Message - Cold Migrate Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.COLD_MIGRATE_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseColdMigrateInstance, self).__init__(msg_version,
msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "cold-migrate-instance response: %s" % self.uuid
class APIRequestEvacuateInstance(RPCMessage):
"""
RPC API Request Message - Evacuate Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.EVACUATE_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestEvacuateInstance, self).__init__(msg_version,
msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "evacuate-instance request: %s" % self.uuid
class APIResponseEvacuateInstance(RPCMessage):
"""
RPC API Response Message - Evacuate Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.EVACUATE_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseEvacuateInstance, self).__init__(msg_version,
msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "evacuate-instance response: %s" % self.uuid
class APIRequestDeleteInstance(RPCMessage):
"""
RPC API Request Message - Delete Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.DELETE_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestDeleteInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "delete-instance request: %s" % self.uuid
class APIResponseDeleteInstance(RPCMessage):
"""
RPC API Response Message - Delete Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.DELETE_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseDeleteInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "delete-instance response: %s" % self.uuid
class APIRequestGetInstance(RPCMessage):
"""
RPC API Request Message - Get Instance
"""
get_all = False
filter_by_uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.GET_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestGetInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['get_all'] = self.get_all
msg['filter_by_uuid'] = self.filter_by_uuid
def deserialize_payload(self, msg):
self.get_all = msg.get('get_all', True)
self.filter_by_uuid = msg.get('filter_by_uuid', None)
def __str__(self):
if self.get_all:
return "get-instance request: get-all"
else:
return "get-instance request: %s" % self.filter_by_uuid
class APIResponseGetInstance(RPCMessage):
"""
RPC API Response Message - Get Instance
"""
uuid = None
name = None
admin_state = None
oper_state = None
avail_status = None
action = None
host_uuid = None
host_name = None
instance_type_original_name = None
image_uuid = None
vcpus = None
memory_mb = None
disk_gb = None
ephemeral_gb = None
swap_gb = None
auto_recovery = None
live_migration_timeout = None
live_migration_max_downtime = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.GET_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseGetInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
msg['name'] = self.name
msg['admin_state'] = self.admin_state
msg['oper_state'] = self.oper_state
msg['avail_status'] = self.avail_status
msg['action'] = self.action
msg['host_uuid'] = self.host_uuid
msg['host_name'] = self.host_name
msg['instance_type_original_name'] = self.instance_type_original_name
msg['image_uuid'] = self.image_uuid
msg['vcpus'] = self.vcpus
msg['memory_mb'] = self.memory_mb
msg['disk_gb'] = self.disk_gb
msg['ephemeral_gb'] = self.ephemeral_gb
msg['swap_gb'] = self.swap_gb
msg['sw:wrs:auto_recovery'] = self.auto_recovery
msg['hw:wrs:live_migration_timeout'] = self.live_migration_timeout
msg['hw:wrs:live_migration_max_downtime'] \
= self.live_migration_max_downtime
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
self.name = msg.get('name', None)
self.admin_state = msg.get('admin_state', None)
self.oper_state = msg.get('oper_state', None)
self.avail_status = msg.get('avail_status', None)
self.action = msg.get('action', None)
self.host_uuid = msg.get('host_uuid', None)
self.host_name = msg.get('host_name', None)
self.instance_type_original_name = msg.get(
'instance_type_original_name', None)
self.image_uuid = msg.get('image_uuid', None)
self.vcpus = msg.get('vcpus', None)
self.memory_mb = msg.get('memory_mb', None)
self.disk_gb = msg.get('disk_gb', None)
self.ephemeral_gb = msg.get('ephemeral_gb', None)
self.swap_gb = msg.get('swap_gb', None)
self.auto_recovery = msg.get('sw:wrs:auto_recovery', None)
self.live_migration_timeout = msg.get('hw:wrs:live_migration_timeout',
None)
self.live_migration_max_downtime \
= msg.get('hw:wrs:live_migration_max_downtime', None)
def __str__(self):
return "get-instance response: %s" % self.uuid
| python |
#!/usr/bin/env python
"""Tests for util.py."""
import datetime
import logging
import os
import sys
import unittest
# Fix up paths for running tests.
sys.path.insert(0, "../src/")
from pipeline import util
from google.appengine.api import taskqueue
class JsonSerializationTest(unittest.TestCase):
"""Test custom json encoder and decoder."""
def testE2e(self):
now = datetime.datetime.now()
obj = {"a": 1, "b": [{"c": "d"}], "e": now}
new_obj = util.json.loads(util.json.dumps(
obj, cls=util.JsonEncoder), cls=util.JsonDecoder)
self.assertEqual(obj, new_obj)
class GetTaskTargetTest(unittest.TestCase):
def setUp(self):
super(GetTaskTargetTest, self).setUp()
os.environ["CURRENT_VERSION_ID"] = "v7.1"
os.environ["CURRENT_MODULE_ID"] = "foo-module"
def testGetTaskTarget(self):
self.assertEqual("v7.foo-module", util._get_task_target())
task = taskqueue.Task(url="/relative_url",
target=util._get_task_target())
self.assertEqual("v7.foo-module", task.target)
def testGetTaskTargetDefaultModule(self):
os.environ["CURRENT_MODULE_ID"] = "default"
self.assertEqual("v7.default", util._get_task_target())
task = taskqueue.Task(url="/relative_url",
target=util._get_task_target())
self.assertEqual("v7.default", task.target)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main()
| python |
/home/runner/.cache/pip/pool/88/20/06/e25d76d7065f6488098440d13a701a2dc1acbe52cd8d7322b4405f3996 | python |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2019 "Neo4j,"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neobolt.direct import connect
from neobolt.exceptions import ServiceUnavailable
from neobolt.routing import READ_ACCESS, WRITE_ACCESS, RoutingConnectionPool, RoutingProtocolError
from test.stub.tools import StubCluster, StubTestCase
VALID_ROUTING_RECORD = {
"ttl": 300,
"servers": [
{"role": "ROUTE", "addresses": ["127.0.0.1:9001", "127.0.0.1:9002", "127.0.0.1:9003"]},
{"role": "READ", "addresses": ["127.0.0.1:9004", "127.0.0.1:9005"]},
{"role": "WRITE", "addresses": ["127.0.0.1:9006"]},
],
}
VALID_ROUTING_RECORD_WITH_EXTRA_ROLE = {
"ttl": 300,
"servers": [
{"role": "ROUTE", "addresses": ["127.0.0.1:9001", "127.0.0.1:9002", "127.0.0.1:9003"]},
{"role": "READ", "addresses": ["127.0.0.1:9004", "127.0.0.1:9005"]},
{"role": "WRITE", "addresses": ["127.0.0.1:9006"]},
{"role": "MAGIC", "addresses": ["127.0.0.1:9007"]},
],
}
INVALID_ROUTING_RECORD = {
"X": 1,
}
UNREACHABLE_ADDRESS = ("127.0.0.1", 8080)
RoutingTable = object()
def connector(address, **kwargs):
return connect(address, auth=("neotest", "neotest"), **kwargs)
def RoutingPool(*routers):
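# Test helper: builds a RoutingConnectionPool from the stub connector, an
# unreachable seed address, an empty routing context and the given initial
# routers (compare the call further below that passes an explicit
# routing_context).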
return RoutingConnectionPool(connector, UNREACHABLE_ADDRESS, {}, *routers)
class RoutingConnectionPoolFetchRoutingInfoTestCase(StubTestCase):
def test_should_get_info_from_router(self):
with StubCluster({9001: "v1/router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool() as pool:
result = pool.fetch_routing_info(address)
assert len(result) == 1
record = result[0]
assert record["ttl"] == 300
assert record["servers"] == [
{"role": "ROUTE", "addresses": ["127.0.0.1:9001", "127.0.0.1:9002",
"127.0.0.1:9003"]},
{"role": "READ", "addresses": ["127.0.0.1:9004", "127.0.0.1:9005"]},
{"role": "WRITE", "addresses": ["127.0.0.1:9006"]},
]
def test_should_remove_router_if_cannot_connect(self):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
assert address in pool.routing_table.routers
_ = pool.fetch_routing_info(address)
assert address not in pool.routing_table.routers
def test_should_remove_router_if_connection_drops(self):
with StubCluster({9001: "v1/rude_router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
assert address in pool.routing_table.routers
_ = pool.fetch_routing_info(address)
assert address not in pool.routing_table.routers
def test_should_not_fail_if_cannot_connect_but_router_already_removed(self):
address = ("127.0.0.1", 9001)
with RoutingPool() as pool:
assert address not in pool.routing_table.routers
_ = pool.fetch_routing_info(address)
assert address not in pool.routing_table.routers
def test_should_not_fail_if_connection_drops_but_router_already_removed(self):
with StubCluster({9001: "v1/rude_router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool() as pool:
assert address not in pool.routing_table.routers
_ = pool.fetch_routing_info(address)
assert address not in pool.routing_table.routers
def test_should_return_none_if_cannot_connect(self):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
result = pool.fetch_routing_info(address)
assert result is None
def test_should_return_none_if_connection_drops(self):
with StubCluster({9001: "v1/rude_router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
result = pool.fetch_routing_info(address)
assert result is None
def test_should_fail_for_non_router(self):
with StubCluster({9001: "v1/non_router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
with self.assertRaises(ServiceUnavailable):
_ = pool.fetch_routing_info(address)
def test_should_fail_if_database_error(self):
with StubCluster({9001: "v1/broken_router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
with self.assertRaises(ServiceUnavailable):
_ = pool.fetch_routing_info(address)
def test_should_call_get_routing_tables_with_context(self):
with StubCluster({9001: "v1/get_routing_table_with_context.script"}):
address = ("127.0.0.1", 9001)
routing_context = {"name": "molly", "age": "1"}
with RoutingConnectionPool(connector, UNREACHABLE_ADDRESS, routing_context) as pool:
pool.fetch_routing_info(address)
def test_should_call_get_routing_tables(self):
with StubCluster({9001: "v1/get_routing_table.script"}):
address = ("127.0.0.1", 9001)
with RoutingConnectionPool(connector, UNREACHABLE_ADDRESS, {}) as pool:
pool.fetch_routing_info(address)
class RoutingConnectionPoolFetchRoutingTableTestCase(StubTestCase):
def test_should_get_table_from_router(self):
with StubCluster({9001: "v1/router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool() as pool:
table = pool.fetch_routing_table(address)
assert table.routers == {("127.0.0.1", 9001), ("127.0.0.1", 9002),
("127.0.0.1", 9003)}
assert table.readers == {("127.0.0.1", 9004), ("127.0.0.1", 9005)}
assert table.writers == {("127.0.0.1", 9006)}
assert table.ttl == 300
assert not pool.missing_writer
def test_null_info_should_return_null_table(self):
address = ("127.0.0.1", 9001)
with RoutingPool() as pool:
table = pool.fetch_routing_table(address)
assert table is None
def test_no_routers_should_raise_protocol_error(self):
with StubCluster({9001: "v1/router_no_routers.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool() as pool:
with self.assertRaises(RoutingProtocolError):
_ = pool.fetch_routing_table(address)
def test_no_readers_should_raise_protocol_error(self):
with StubCluster({9001: "v1/router_no_readers.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool() as pool:
with self.assertRaises(RoutingProtocolError):
_ = pool.fetch_routing_table(address)
def test_no_writers_should_return_table_with_no_writer(self):
with StubCluster({9001: "v1/router_no_writers.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool() as pool:
table = pool.fetch_routing_table(address)
assert table.routers == {("127.0.0.1", 9001), ("127.0.0.1", 9002),
("127.0.0.1", 9003)}
assert table.readers == {("127.0.0.1", 9004), ("127.0.0.1", 9005)}
assert not table.writers
assert table.ttl == 300
assert pool.missing_writer
class RoutingConnectionPoolUpdateRoutingTableTestCase(StubTestCase):
scenarios = {
(None,): ServiceUnavailable,
(RoutingTable,): RoutingTable,
(ServiceUnavailable,): ServiceUnavailable,
(None, None): ServiceUnavailable,
(None, RoutingTable): RoutingTable,
(None, ServiceUnavailable): ServiceUnavailable,
(None, None, None): ServiceUnavailable,
(None, None, RoutingTable): RoutingTable,
(None, None, ServiceUnavailable): ServiceUnavailable,
}
def test_roll_back_to_initial_server_if_failed_update_with_existing_routers(self):
with StubCluster({9001: "v1/router.script"}):
initial_address = ("127.0.0.1", 9001) # roll back addresses
routers = [("127.0.0.1", 9002), ("127.0.0.1", 9003)] # not reachable servers
with RoutingConnectionPool(connector, initial_address, {}, *routers) as pool:
pool.update_routing_table()
table = pool.routing_table
assert table.routers == {("127.0.0.1", 9001), ("127.0.0.1", 9002),
("127.0.0.1", 9003)}
assert table.readers == {("127.0.0.1", 9004), ("127.0.0.1", 9005)}
assert table.writers == {("127.0.0.1", 9006)}
assert table.ttl == 300
def test_try_initial_server_first_if_missing_writer(self):
with StubCluster({9001: "v1/router.script"}):
initial_address = ("127.0.0.1", 9001)
with RoutingConnectionPool(connector, initial_address, {}) as pool:
pool.missing_writer = True
pool.update_routing_table()
table = pool.routing_table
assert table.routers == {("127.0.0.1", 9001), ("127.0.0.1", 9002),
("127.0.0.1", 9003)}
assert table.readers == {("127.0.0.1", 9004), ("127.0.0.1", 9005)}
assert table.writers == {("127.0.0.1", 9006)}
assert table.ttl == 300
assert not pool.missing_writer
def test_update_with_no_routers_should_signal_service_unavailable(self):
with RoutingPool() as pool:
with self.assertRaises(ServiceUnavailable):
pool.update_routing_table()
def test_update_scenarios(self):
for server_outcomes, overall_outcome in self.scenarios.items():
self._test_server_outcome(server_outcomes, overall_outcome)
def _test_server_outcome(self, server_outcomes, overall_outcome):
print("%r -> %r" % (server_outcomes, overall_outcome))
servers = {}
routers = []
for port, outcome in enumerate(server_outcomes, 9001):
if outcome is None:
servers[port] = "v1/rude_router.script"
elif outcome is RoutingTable:
servers[port] = "v1/router.script"
elif outcome is ServiceUnavailable:
servers[port] = "v1/non_router.script"
else:
assert False, "Unexpected server outcome %r" % outcome
routers.append(("127.0.0.1", port))
with StubCluster(servers):
with RoutingPool(*routers) as pool:
if overall_outcome is RoutingTable:
pool.update_routing_table()
table = pool.routing_table
assert table.routers == {("127.0.0.1", 9001), ("127.0.0.1", 9002),
("127.0.0.1", 9003)}
assert table.readers == {("127.0.0.1", 9004), ("127.0.0.1", 9005)}
assert table.writers == {("127.0.0.1", 9006)}
assert table.ttl == 300
elif overall_outcome is ServiceUnavailable:
with self.assertRaises(ServiceUnavailable):
pool.update_routing_table()
else:
assert False, "Unexpected overall outcome %r" % overall_outcome
class RoutingConnectionPoolEnsureRoutingTableTestCase(StubTestCase):
def test_should_update_if_stale(self):
with StubCluster({9001: "v1/router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
first_updated_time = pool.routing_table.last_updated_time
pool.routing_table.ttl = 0
pool.ensure_routing_table_is_fresh(WRITE_ACCESS)
second_updated_time = pool.routing_table.last_updated_time
assert second_updated_time != first_updated_time
assert not pool.missing_writer
def test_should_not_update_if_fresh(self):
with StubCluster({9001: "v1/router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
pool.ensure_routing_table_is_fresh(WRITE_ACCESS)
first_updated_time = pool.routing_table.last_updated_time
pool.ensure_routing_table_is_fresh(WRITE_ACCESS)
second_updated_time = pool.routing_table.last_updated_time
assert second_updated_time == first_updated_time
assert not pool.missing_writer
def test_should_flag_reading_without_writer(self):
with StubCluster({9001: "v1/router_no_writers.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
assert not pool.routing_table.is_fresh(READ_ACCESS)
assert not pool.routing_table.is_fresh(WRITE_ACCESS)
pool.ensure_routing_table_is_fresh(READ_ACCESS)
assert pool.missing_writer
# TODO: fix flaky test
# def test_concurrent_refreshes_should_not_block_if_fresh(self):
# address = ("127.0.0.1", 9001)
# table = RoutingTable.parse_routing_info([VALID_ROUTING_RECORD])
#
# with RoutingPool(address) as pool:
# semaphore = Semaphore()
#
# class Refresher(Thread):
#
# refreshed = None
#
# def run(self):
# self.refreshed = pool.refresh_routing_table()
#
# class BlockingRefresher(Refresher):
#
# @classmethod
# def blocking_update(cls):
# pool.routing_table.update(table)
# semaphore.acquire()
# semaphore.release()
# return table
#
# def run(self):
# with patch.object(RoutingConnectionPool, "update_routing_table",
# side_effect=self.blocking_update):
# super(BlockingRefresher, self).run()
#
# first = BlockingRefresher()
# second = Refresher()
#
# assert not pool.routing_table.is_fresh()
#
# semaphore.acquire()
# first.start()
# second.start()
# sleep(1)
# assert not second.is_alive() # second call should return immediately without blocking
# second.join()
# semaphore.release()
# first.join()
#
# assert first.refreshed
# assert not second.refreshed
# assert pool.routing_table.is_fresh()
# TODO: fix flaky test
# def test_concurrent_refreshes_should_block_if_stale(self):
# address = ("127.0.0.1", 9001)
# table = RoutingTable.parse_routing_info([VALID_ROUTING_RECORD])
#
# with RoutingPool(address) as pool:
# semaphore = Semaphore()
#
# class Refresher(Thread):
#
# refreshed = None
#
# def run(self):
# self.refreshed = pool.refresh_routing_table()
#
# class BlockingRefresher(Refresher):
#
# @classmethod
# def blocking_update(cls):
# semaphore.acquire()
# semaphore.release()
# pool.routing_table.update(table)
# return table
#
# def run(self):
# with patch.object(RoutingConnectionPool, "update_routing_table",
# side_effect=self.blocking_update):
# super(BlockingRefresher, self).run()
#
# first = BlockingRefresher()
# second = Refresher()
#
# assert not pool.routing_table.is_fresh()
#
# semaphore.acquire()
# first.start()
# second.start()
# sleep(1)
# assert second.is_alive() # second call should block
# semaphore.release()
# second.join()
# first.join()
#
# assert first.refreshed
# assert not second.refreshed
# assert pool.routing_table.is_fresh()
class RoutingConnectionPoolAcquireForReadTestCase(StubTestCase):
def test_should_refresh(self):
with StubCluster({9001: "v1/router.script", 9004: "v1/empty.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
assert not pool.routing_table.is_fresh(READ_ACCESS)
_ = pool.acquire(access_mode=READ_ACCESS)
assert pool.routing_table.is_fresh(READ_ACCESS)
assert not pool.missing_writer
def test_connected_to_reader(self):
with StubCluster({9001: "v1/router.script", 9004: "v1/empty.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
assert not pool.routing_table.is_fresh(READ_ACCESS)
connection = pool.acquire(access_mode=READ_ACCESS)
assert connection.server.address in pool.routing_table.readers
assert not pool.missing_writer
def test_should_retry_if_first_reader_fails(self):
with StubCluster({9001: "v1/router.script",
9004: "v1/fail_on_init.script",
9005: "v1/empty.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
assert not pool.routing_table.is_fresh(READ_ACCESS)
_ = pool.acquire(access_mode=READ_ACCESS)
assert ("127.0.0.1", 9004) not in pool.routing_table.readers
assert ("127.0.0.1", 9005) in pool.routing_table.readers
def test_should_connect_to_read_in_absent_of_writer(self):
with StubCluster({9001: "v1/router_no_writers.script", 9004: "v1/empty.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
assert not pool.routing_table.is_fresh(READ_ACCESS)
connection = pool.acquire(access_mode=READ_ACCESS)
assert connection.server.address in pool.routing_table.readers
assert not pool.routing_table.is_fresh(WRITE_ACCESS)
assert pool.missing_writer
class RoutingConnectionPoolAcquireForWriteTestCase(StubTestCase):
def test_should_refresh(self):
with StubCluster({9001: "v1/router.script", 9006: "v1/empty.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
assert not pool.routing_table.is_fresh(WRITE_ACCESS)
_ = pool.acquire(access_mode=WRITE_ACCESS)
assert pool.routing_table.is_fresh(WRITE_ACCESS)
assert not pool.missing_writer
def test_connected_to_writer(self):
with StubCluster({9001: "v1/router.script", 9006: "v1/empty.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
assert not pool.routing_table.is_fresh(WRITE_ACCESS)
connection = pool.acquire(access_mode=WRITE_ACCESS)
assert connection.server.address in pool.routing_table.writers
assert not pool.missing_writer
def test_should_retry_if_first_writer_fails(self):
with StubCluster({9001: "v1/router_with_multiple_writers.script",
9006: "v1/fail_on_init.script",
9007: "v1/empty.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
assert not pool.routing_table.is_fresh(WRITE_ACCESS)
_ = pool.acquire(access_mode=WRITE_ACCESS)
assert ("127.0.0.1", 9006) not in pool.routing_table.writers
assert ("127.0.0.1", 9007) in pool.routing_table.writers
def test_should_error_to_writer_in_absent_of_reader(self):
with StubCluster({9001: "v1/router_no_readers.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
assert not pool.routing_table.is_fresh(WRITE_ACCESS)
with self.assertRaises(RoutingProtocolError):
_ = pool.acquire(access_mode=WRITE_ACCESS)
assert not pool.routing_table.is_fresh(READ_ACCESS)
assert not pool.routing_table.is_fresh(WRITE_ACCESS)
assert not pool.missing_writer
class RoutingConnectionPoolDeactivateTestCase(StubTestCase):
def test_should_remove_router_from_routing_table_if_present(self):
with StubCluster({9001: "v1/router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
pool.ensure_routing_table_is_fresh(WRITE_ACCESS)
target = ("127.0.0.1", 9001)
assert target in pool.routing_table.routers
pool.deactivate(target)
assert target not in pool.routing_table.routers
def test_should_remove_reader_from_routing_table_if_present(self):
with StubCluster({9001: "v1/router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
pool.ensure_routing_table_is_fresh(WRITE_ACCESS)
target = ("127.0.0.1", 9004)
assert target in pool.routing_table.readers
pool.deactivate(target)
assert target not in pool.routing_table.readers
def test_should_remove_writer_from_routing_table_if_present(self):
with StubCluster({9001: "v1/router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
pool.ensure_routing_table_is_fresh(WRITE_ACCESS)
target = ("127.0.0.1", 9006)
assert target in pool.routing_table.writers
pool.deactivate(target)
assert target not in pool.routing_table.writers
def test_should_not_fail_if_absent(self):
with StubCluster({9001: "v1/router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
pool.ensure_routing_table_is_fresh(WRITE_ACCESS)
target = ("127.0.0.1", 9007)
pool.deactivate(target)
| python |
import ast
import sys
class EnvVisitor(ast.NodeVisitor):
def __init__(self):
self.optional_environment_variables = set()
self.required_environment_variables = set()
def parse_and_visit(self, body, filename=''):
doc = ast.parse(body, filename=filename)
return self.visit(doc)
def visit_Call(self, call):
is_getenv = False
is_environ_get = False
if isinstance(call.func, ast.Attribute):
if call.func.attr == 'getenv':
is_getenv = True
elif call.func.attr == 'get':
if isinstance(call.func.value, ast.Attribute) and call.func.value.attr == 'environ':
is_environ_get = True
elif isinstance(call.func.value, ast.Name) and call.func.value.id == 'environ':
is_environ_get = True
elif isinstance(call.func, ast.Name):
if call.func.id == 'getenv':
is_getenv = True
if is_getenv:
if len(call.args) >= 1 and isinstance(call.args[0], ast.Str):
self.optional_environment_variables.add(ast.literal_eval(call.args[0]))
elif is_environ_get:
if len(call.args) >= 1 and isinstance(call.args[0], ast.Str):
self.optional_environment_variables.add(ast.literal_eval(call.args[0]))
self.generic_visit(call)
def visit_Subscript(self, what):
is_env_slice = False
if isinstance(what.value, ast.Attribute) and what.value.attr == 'environ':
is_env_slice = True
elif isinstance(what.value, ast.Name) and what.value.id == 'environ':
is_env_slice = True
if is_env_slice:
if isinstance(what.slice, ast.Index) and isinstance(what.slice.value, ast.Str):
self.required_environment_variables.add(ast.literal_eval(what.slice.value))
elif sys.version_info > (3, 9):
# this was added with the new parser in 3.9
if isinstance(what.slice, ast.Constant) and isinstance(what.slice.value, str):
self.required_environment_variables.add(what.slice.value)
self.generic_visit(what)
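# Usage sketch (not part of the original module; the sample source string is
# hypothetical). On Python versions where the ast.Str / ast.Index checks above
# still apply, os.getenv(...) calls land in optional_environment_variables and
# os.environ[...] subscripts land in required_environment_variables.
if __name__ == '__main__':
    sample = (
        "import os\n"
        "token = os.getenv('API_TOKEN')\n"
        "home = os.environ['APP_HOME']\n"
    )
    visitor = EnvVisitor()
    visitor.parse_and_visit(sample, filename='<sample>')
    print('optional:', sorted(visitor.optional_environment_variables))
    print('required:', sorted(visitor.required_environment_variables))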
| python |
#!/usr/bin/python
# __*__ coding: utf8 __*__
oneline = "Read, write and operate with models"
#import os
from model_base import model_base
# --------------------------------------------------------------------
class Free_class:
pass
def bound(x, y):
if x > y/2.: return x-y
if x < -y/2. : return x+y
return x
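# e.g. bound(4.0, 6.0) -> -2.0 and bound(-4.0, 6.0) -> 2.0: minimum-image wrap
# of a coordinate difference x into a periodic box of length y.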
#=============================================================================
class model_ngbr(model_base):
# --------------------------------------------------------------------
def __init__(self,d={}):
model_base.__init__(self,d)
# vc=self.vc
# ix=self.legend.index('x')
# for at in self.atoms:
# at[ix]=at[ix]%vc[0]
# at[ix+1]=at[ix+1]%vc[1]
# at[ix+2]=at[ix+2]%vc[2]
#========= make Verlet ===========================
def make_verlet(self,r=None):
""" Make Verlet for the model """
if r==None:
r=((self.vc[0]*self.vc[1]*self.vc[2]/self.natoms)**0.33333333333)
print "Verlet go. r=",r
ver =Free_class()
vc=self.vc
ver.imax=tuple(( int(x/r)+1 for x in vc ))
ver.dr=tuple(( x/y for x,y in zip(vc, ver.imax) ))
ver.ind={}
for iat,vec in self.at_it('x y z'):
im=tuple( int(x/y)%ii for x,y,ii in zip(vec,ver.dr,ver.imax) )
ver.ind[ im ] =ver.ind.get(im,[])+[iat]
self.verlet=ver
print "Verlet done"
#==============================================================
def make_ngbr_short(self,r=None):
""" makes Short Neighbours table """
if r==None: r=max(self.vc)/3.
print "Short NGBR go. r=",r
if not hasattr(self,'verlet'): self.make_verlet(r/2.5)
ng=Free_class()
ng.r=r
def key_it(pt,im,mmm):
for i in range(pt[0]+1,pt[0]+mmm+1):
for j in range(pt[1]-mmm,pt[1]+mmm+1):
for k in range(pt[2]-mmm,pt[2]+mmm+1):
yield (i%im[0],j%im[1],k%im[2])
i=pt[0]
for j in range(pt[1]+1,pt[1]+mmm+1):
for k in range(pt[2]-mmm,pt[2]+mmm+1):
yield (i,j%im[1],k%im[2])
i=pt[0]
j=pt[1]
for k in range(pt[2]+1,pt[2]+mmm+1):
yield (i,j,k%im[2])
ver=self.verlet
mmm=int(r/min(ver.dr))+1
print 'mmm = ',mmm
ng.index=[[] for i in self.atoms]
for key in ver.ind:
at_list=ver.ind[key]
for i in at_list: ng.index[i] +=at_list
for key1 in key_it(key,ver.imax,mmm):
try:
at_list1=ver.ind[key1]
for i in at_list: ng.index[i] +=at_list1
for i in at_list1: ng.index[i] +=at_list
except:
pass
self.ngbr_short=ng
print "Short NGBR done"
#==============================================================
def read_ngbr_short(self,d={}):
""" read Short Neighbours table """
self.time=d.get('time',0)
if self.box<>[[0],[0],[0]]:
box=d.get('box',[[0],[0],[0]])
self.box=box
if len(box[0])==3: self.vc=[box[0][0],box[1][1],box[2][2]]
elif len(box[0])==2: self.vc=map(lambda x: x[1]-x[0], box)
else: self.vc=[box[0][0],box[1][0],box[2][0]]
dat=d.get('atoms',[])
ng=Free_class()
ind=[]
for i in dat:
s=[int(j) for j in i]
while len(ind)<s[0]:
ind.append([])
ind[s[0]-1] += [j-1 for j in s[2:] if j<>-1]
if self.atoms==[]: self.atoms=[[] for j in ind]
while len(ind)<len(self.atoms):
ind.append([])
ng.index=ind
self.ngbr_short=ng
# print "Short NGBR is read"
#==============================================================
def make_ngbr(self,r=None,part=''):
""" makes Neighbours table with distances """
try:
self.make_ngbr_numpy(r,part)
return
except ImportError:
print 'Numpy is not installed, falling back to standard procedure'
if r==None:
            print 'Warning !!! Make full ngbr list. It could take a lot of time!!!'
r=max(self.vc)/3.
print "NGBR go. r=",r
if not hasattr(self,'ngbr_short'): self.make_ngbr_short(r)
ng=Free_class()
r2=r*r
ng.r=r
ix=self.legend.index('x')
aat=[i[ix:ix+3] for i in self.atoms]
vc=self.vc
ngs=self.ngbr_short.index
ng.index=[{} for i in self.atoms]
for iat,nng in enumerate(ngs):
vec0=aat[iat]
for jat in nng:
if jat<=iat: continue
vec1=aat[jat]
vec= [ ((x-y)+0.5*v)%v-0.5*v for x,y,v in zip(vec1,vec0,vc) ]
dist2=sum(x*x for x in vec)
vec +=[dist2]
if dist2 <= r2:
ng.index[iat][jat]=vec
ng.index[jat][iat]=[-vec[0],-vec[1],-vec[2],vec[3]]
self.ngbr=ng
print "NGBR done"
#==============================================================
def make_ngbr_numpy(self,r=None,part=''):
""" makes Neighbours table with distances """
        import numpy as np
if r==None:
            print 'Warning !!! Make full ngbr list. It could take a lot of time!!!'
r=max(self.vc)/3.
print "NGBR numpy go. r=",r
ng=Free_class()
r2=r*r
ng.r=r
ix=self.legend.index('x')
crd = np.array(self.atoms, order = 'F')[:,ix:ix+3].astype(np.float32)
vc = np.array(self.vc, order = 'F').astype(np.float32)
ng.index=[{} for i in self.atoms]
for iat in range(crd.shape[0]):
d = crd[iat:] - crd[iat]
vn = d - (d/vc).round()*vc
r2n = np.array([np.dot(x,x) for x in vn])
idn = np.nonzero((r2n < r2) & (r2n > 0.))
for inn in idn[0]:
ng.index[iat][iat + inn] = vn[inn].tolist()
ng.index[iat][iat + inn] += [r2n[inn],]
ng.index[iat + inn][iat] = (-vn[inn]).tolist()
ng.index[iat + inn][iat] += [r2n[inn],]
print ng.index[0]
self.ngbr=ng
print "NGBR numpy done"
#==============================================================
#---------------------------------------------------------------
def get_round_it(self,crd,r=None):
""" returns list of atoms near to to the point
"""
def key_it(pt,im,mmm):
for i in range(pt[0]-mmm,pt[0]+mmm+1):
for j in range(pt[1]-mmm,pt[1]+mmm+1):
for k in range(pt[2]-mmm,pt[2]+mmm+1):
yield (i%im[0],j%im[1],k%im[2])
if r==None: r=min(self.vc)/3.
if not hasattr(self,'verlet'): self.make_verlet(r+0.05)
ver=self.verlet
mmm=int(r/min(self.verlet.dr))+1
pt=[int(x/y) for x,y in zip(crd,ver.dr)]
it=(ver.ind.get(k,[]) for k in key_it(pt,ver.imax,mmm))
for val in it:
for iat in val:
yield iat
#======== NGBR ===========================================
def ngbr_it(self,iat,r=None,part=''):
filt={}
filt['gt']=lambda x,y: x>y
filt['ge']=lambda x,y: x>=y
filt['lt']=lambda x,y: x<y
filt['le']=lambda x,y: x<=y
filt['ne']=lambda x,y: x<>y
filt['']=lambda x,y: 1==1
ff=filt[part]
if hasattr(self,'ngbr'):
for k,vec in self.ngbr.index[iat].iteritems():
if ff(k,iat):
yield k,vec
else:
if not hasattr(self,'ngbr_short'): self.make_ngbr_short(r)
for k in self.ngbr_short.index[iat]:
if ff(k,iat):
yield k,[None,None,None,None]
#======== Make NGBR table ===========================================
def make_ngbr_old(self,r=1e10,part=''):
""" makes Neighbours table
"""
print "NGBR go. r=",r
ng=Free_class()
r2=r*r
ng.r2=r2
ng.index = [dict(self.ngbr_it(iat,r,part)) for iat in xrange(len(self.atoms)) ]
self.ngbr=ng
print "NGBR done"
#======== Make GR ===========================================
def make_gr_it(self,r=1e10):
ind=self.ngbr.index
for i in ind:
for j in i:
rr=i[j][3]**0.5
if rr<r:
yield rr
#========================================================================
def ep_it(self,n=1):
from random import random
nn=0
dr=self.verlet.dr
ind=self.verlet.ind
im=self.verlet.imax
while nn<n:
key=tuple( int(i*random()) for i in im )
if ind.has_key(key): continue
yield( ((i+0.5)*j for i,j in zip(key,dr)) )
nn +=1
#************************************************************************
if __name__=='__main__': #run as programm
from model_i import dump_lmp
# from timer import timer
# tm=timer()
dump=dump_lmp('dump.lmp')
mod=model_ngbr(dump())
mod.make_verlet(2)
# print tm
    mod.make_ngbr(5)
# print tm
l=list(mod.make_gr_it(5))
# print tm
| python |
import configparser
import datetime
import os
import time
#import xml.etree.ElementTree as ET
import lxml.etree as ET
from io import StringIO, BytesIO
from shutil import copyfile
import requests
from requests.auth import HTTPDigestAuth
from subprocess import Popen
print("Hikvision alert started")
# CONFIGS START
config = configparser.ConfigParser()
exists = os.path.isfile('/config/config.ini')
if exists:
config.read('/config/config.ini')
else:
copyfile('cfg/config.ini', '/config/config.ini')
config.read('/config/config.ini')
APP_PATH = config['DEFAULT']['APP_PATH']
NVR_URL = config['DEFAULT']['NVR_URL']
NVR_USR = config['DEFAULT']['NVR_USR']
NVR_PASS = config['DEFAULT']['NVR_PASS']
# CONFIGS ENDS
XML_NAMESPACE = 'http://www.hikvision.com/ver20/XMLSchema'
DEFAULT_HEADERS = {
'Content-Type': "application/xml; charset='UTF-8'",
'Accept': "*/*"
}
hik_request = requests.Session()
hik_request.auth = HTTPDigestAuth(NVR_USR, NVR_PASS)
hik_request.headers.update(DEFAULT_HEADERS)
url = NVR_URL + '/ISAPI/Event/notification/alertStream'
parse_string = ''
start_event = False
fail_count = 0
detection_date = datetime.datetime.now()
detection_id = '0'
log_file_name = "log-" + detection_date.strftime("%Y-%m-%d")+".txt"
log_file = open("/config/" + log_file_name, "a+")
while True:
try:
stream = hik_request.get(url, stream=True, timeout=(5, 60), verify=False)
if stream.status_code != requests.codes.ok:
print("Can't connect to the stream!")
raise ValueError('Connection unsuccessful.')
else:
print('Connection successful to: ' + NVR_URL)
fail_count = 0
for line in stream.iter_lines():
# filter out keep-alive new lines
if line:
str_line = line.decode("utf-8")
if str_line.find('<EventNotificationAlert') != -1:
start_event = True
parse_string += str_line
elif str_line.find('</EventNotificationAlert>') != -1:
parse_string += str_line
start_event = False
if parse_string:
#tree = ET.fromstring(parse_string)
# Use lxml instead of xml
parser = ET.XMLParser(recover=True)
tree = ET.parse(StringIO(parse_string), parser=parser)
channelID = tree.find('{%s}%s' % (XML_NAMESPACE, 'channelID'))
if channelID is None:
# Some devices use a different key
channelID = tree.find('{%s}%s' % (XML_NAMESPACE, 'dynChannelID'))
if channelID.text == '0':
# Continue and clear the chunk
parse_string = ""
continue
eventType = tree.find('{%s}%s' % (XML_NAMESPACE, 'eventType'))
eventState = tree.find('{%s}%s' % (XML_NAMESPACE, 'eventState'))
postCount = tree.find('{%s}%s' % (XML_NAMESPACE, 'activePostCount'))
current_date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
log_file.write('%s - count: %s event: %s eventState: %s channel_id: %s\n' % (
current_date, postCount.text, eventType.text, eventState.text, channelID.text))
if eventType.text == 'linedetection':
print("Line decetion triggered!")
                                # Only trigger the event if it has not been repeated within 5 seconds
log_file.write('count: %s (triggered)\n' % postCount.text)
detection_date = datetime.datetime.now()
detection_id = channelID.text
# start the subprocess to process by channelID
p = Popen('python ' + APP_PATH + '/image_process.py ' + channelID.text,
shell=True)
# Clear the chunk
parse_string = ""
else:
if start_event:
parse_string += str_line
except (ValueError, requests.exceptions.ConnectionError, requests.exceptions.ChunkedEncodingError) as err:
fail_count += 1
time.sleep(fail_count * 5)
continue
| python |
from project import app
if __name__ == "__main__":
app.run(debug = True, host = "0.0.0.0") | python |
"""
An emulation of the Window class, for injecting pane data into tests
"""
from tmux_session_utils.tmux_utils import (
inject_pane_data,
WINDOW_ID_VARIABLE,
WINDOW_LAYOUT_VARIABLE,
)
class FakeWindow:
"""
Represents a window in a tmux session, for test injection
"""
def __init__(self, identity: str = None):
"""
Set invalid starting properties for the window
"""
self.identity = identity
self.name = ""
self.session = ""
self.number = None
self.directory = ""
self.layout = ""
def set_session_name(self, session: str) -> "FakeWindow":
"""
Set the session name
Parameters
----------
session : string
The session name to set
Returns
-------
self
This instance
"""
self.session = session
return self
def set_name(self, name: str) -> "FakeWindow":
"""
Set the window name
Parameters
----------
name : string
The window name to set
Returns
-------
self
This instance
"""
self.name = name
return self
def set_number(self, number: int) -> "FakeWindow":
"""
Set the window number
Parameters
----------
number : number
The window number to set
Returns
-------
self
This instance
"""
self.number = number
return self
def set_directory(self, directory: str) -> "FakeWindow":
"""
Set the directory
Parameters
----------
directory : string
The directory to set
Returns
-------
self
This instance
"""
self.directory = directory
return self
def set_layout(self, layout: str) -> "FakeWindow":
"""
Set the layout
Parameters
----------
layout : string
The layout to set
Returns
-------
self
This instance
"""
self.layout = layout
return self
def inject(self):
"""
Inject the attributes for this window into the session
"""
inject_pane_data(
self.session,
self.number,
None,
{WINDOW_ID_VARIABLE: self.identity, WINDOW_LAYOUT_VARIABLE: self.layout},
)
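# Usage sketch (assumption, not part of the original module): the fluent
# setters return self, so a fake window can be configured in one chain and
# then injected into the faked tmux state.
def _example_fake_window():
    window = (
        FakeWindow(identity="@1")
        .set_session_name("demo")
        .set_name("editor")
        .set_number(0)
        .set_directory("/tmp")
        .set_layout("tiled")
    )
    window.inject()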
| python |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import datetime
import unittest
import tempfile
import azext_interactive.azclishell.frequency_heuristic as fh
def _mock_update(_):
return {fh.day_format(datetime.datetime.utcnow()): 1}
def _mock_update2(_):
return {
fh.day_format(datetime.datetime.utcnow()): 2,
fh.day_format(datetime.datetime.utcnow() - datetime.timedelta(days=2)): 1}
def _mock_update3(_):
return {
fh.day_format(datetime.datetime.utcnow()): 19,
fh.day_format(datetime.datetime.utcnow() - datetime.timedelta(days=18)): 5,
fh.day_format(datetime.datetime.utcnow() - datetime.timedelta(days=27)): 2,
fh.day_format(datetime.datetime.utcnow() - datetime.timedelta(days=28)): 2,
fh.day_format(datetime.datetime.utcnow() - datetime.timedelta(days=100)): 1,
fh.day_format(datetime.datetime.utcnow() - datetime.timedelta(days=200)): 1}
class FeedbackTest(unittest.TestCase):
""" tests the frequncy heuristic """
def __init__(self, *args, **kwargs):
super(FeedbackTest, self).__init__(*args, **kwargs)
from azure.cli.core.mock import DummyCli
from azext_interactive.azclishell.app import AzInteractiveShell
self.norm_update = fh.update_frequency
self.shell_ctx = AzInteractiveShell(DummyCli(), None)
def test_heuristic(self):
# test the correct logging of time for frequency
fh.update_frequency = _mock_update
self.assertEqual(1, fh.frequency_measurement(self.shell_ctx))
fh.update_frequency = _mock_update2
self.assertEqual(2, fh.frequency_measurement(self.shell_ctx))
fh.update_frequency = _mock_update3
self.assertEqual(3, fh.frequency_measurement(self.shell_ctx))
def test_update_freq(self):
# tests updating the files for frequency
fh.update_frequency = self.norm_update
now = fh.day_format(datetime.datetime.now())
fd, freq_path = tempfile.mkstemp()
freq_dir, freq_file = freq_path.rsplit(os.path.sep, 1)
def _get_freq():
return freq_file
self.shell_ctx.config.config_dir = freq_dir
self.shell_ctx.config.get_frequency = _get_freq
# with a file
json_freq = fh.update_frequency(self.shell_ctx)
self.assertEqual(json_freq, {now: 1})
json_freq = fh.update_frequency(self.shell_ctx)
self.assertEqual(json_freq, {now: 2})
if os.path.exists(freq_path):
os.close(fd)
os.remove(freq_path)
def test_update_freq_no_file(self):
# tests updating the files for frequency with no file written
fh.update_frequency = self.norm_update
fd, freq_path = tempfile.mkstemp()
freq_dir, freq_file = freq_path.rsplit(os.path.sep, 1)
def _get_freq():
return freq_file
self.shell_ctx.config.config_dir = freq_dir
self.shell_ctx.config.get_frequency = _get_freq
if os.path.exists(freq_path):
os.close(fd)
os.remove(freq_path)
# without a file already written
json_freq = fh.update_frequency(self.shell_ctx)
now = fh.day_format(datetime.datetime.now())
self.assertEqual(json_freq, {now: 1})
if os.path.exists(freq_path):
os.remove(freq_path)
if __name__ == '__main__':
unittest.main()
| python |
from app import app
import sys, getopt, json
def clear_file(file_name):
with open(file_name, 'w') as filep:
json.dump({}, filep)
if __name__ == "__main__":
try:
        opts, args = getopt.getopt(sys.argv[1:], "c", ["clear"])
except getopt.GetoptError:
print('python webapp.py [-c or --clear for clearing memory]')
sys.exit(2)
    for opt, _ in opts:
        if opt in ['-c', '--clear']:
clear_file('tx_history.json')
clear_file('retired_store.json')
clear_file('data_store.json')
clear_file('purchase_request_store.json')
print('Cleared memory')
app.run(debug=True, host="127.0.0.1", port=8090) | python |
import os
import sys
import time
import random
import string
import argparse
from collections import namedtuple
import copy
import torch
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.optim as optim
import torch.utils.data
from torch import autograd
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.utils.data import Dataset
from torch.nn.parallel import DistributedDataParallel as pDDP
from torchsummary import summary
from torchvision.utils import save_image
# import horovod.torch as hvd
import gin
import numpy as np
from tqdm import tqdm, trange
from PIL import Image
from pprint import pprint
import apex
from apex.parallel import DistributedDataParallel as aDDP
from apex.fp16_utils import *
from apex import amp
from apex.multi_tensor_apply import multi_tensor_applier
import wandb
import ds_load
from utils import CTCLabelConverter, Averager, ModelEma, Metric
from cnv_model import OrigamiNet, ginM
from test import validation
parOptions = namedtuple('parOptions', ['DP', 'DDP', 'HVD'])
parOptions.__new__.__defaults__ = (False,) * len(parOptions._fields)
pO = None
OnceExecWorker = None
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def init_bn(model):
if type(model) in [torch.nn.InstanceNorm2d, torch.nn.BatchNorm2d]:
init.ones_(model.weight)
init.zeros_(model.bias)
elif type(model) in [torch.nn.Conv2d]:
init.kaiming_uniform_(model.weight)
def WrkSeeder(_):
return np.random.seed((torch.initial_seed()) % (2 ** 32))
@gin.configurable
def train(opt, AMP, WdB, train_data_path, train_data_list, test_data_path, test_data_list, charset,
experiment_name, train_batch_size, val_batch_size, workers, lr, valInterval, num_iter,
wdbprj, continue_model=''):
os.makedirs(f'./saved_models/{experiment_name}', exist_ok=True)
if OnceExecWorker and WdB:
wandb.init(project=wdbprj, name=experiment_name)
wandb.config.update(opt)
alph = ds_load.get_charset(charset)
train_dataset = ds_load.myLoadDS2(train_data_path, train_data_list, alph=alph)
valid_dataset = ds_load.myLoadDS2(test_data_path, test_data_list, alph=alph)
if OnceExecWorker:
print(pO)
# print('Alphabet :', len(train_dataset.alph), train_dataset.alph)
for d in [train_dataset, valid_dataset]:
print('Dataset Size :', len(d.fns))
print('Max LbW : ', max(list(map(len, d.tlbls))))
print('#Chars : ', sum([len(x) for x in d.tlbls]))
print('Sample label :', d.tlbls[-1])
# print("Dataset :", sorted(list(map(len, d.tlbls))))
print('-' * 80)
if opt.num_gpu > 1:
workers = workers * opt.num_gpu
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=train_batch_size,
shuffle=True,
pin_memory=True,
num_workers=int(workers),
worker_init_fn=WrkSeeder,
collate_fn=ds_load.SameTrCollate
)
valid_loader = torch.utils.data.DataLoader(
valid_dataset,
batch_size=val_batch_size,
pin_memory=True,
num_workers=int(workers),
)
model = OrigamiNet()
model.apply(init_bn)
model.train()
if OnceExecWorker:
for k in sorted(model.lreszs.keys()):
print(k, model.lreszs[k])
biparams = list(dict(filter(lambda kv: 'bias' in kv[0], model.named_parameters())).values())
nonbiparams = list(dict(filter(lambda kv: 'bias' not in kv[0], model.named_parameters())).values())
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=lr)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=10 ** (-1 / 90000))
if OnceExecWorker and WdB:
wandb.watch(model, log="all")
'''
if pO.HVD:
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())
# optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters(), compression=hvd.Compression.fp16)
'''
if pO.DDP and opt.rank != 0:
random.seed()
np.random.seed()
if AMP:
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
if pO.DP:
model = torch.nn.DataParallel(model)
model_ema = ModelEma(model)
if continue_model != '':
if OnceExecWorker:
print(f'loading pretrained model from {continue_model}')
checkpoint = torch.load(continue_model)
model.load_state_dict(checkpoint['model'], strict=True)
optimizer.load_state_dict(checkpoint['optimizer'])
model_ema._load_checkpoint(continue_model)
criterion = torch.nn.CTCLoss(reduction='none', zero_infinity=True).to(device)
converter = CTCLabelConverter(train_dataset.ralph.values())
if OnceExecWorker:
with open(f'./saved_models/{experiment_name}/opt.txt', 'a') as opt_file:
opt_log = '------------ Options -------------\n'
args = vars(opt)
for k, v in args.items():
opt_log += f'{str(k)}: {str(v)}\n'
opt_log += '---------------------------------------\n'
opt_log += gin.operative_config_str()
opt_file.write(opt_log)
if WdB:
wandb.config.gin_str = gin.operative_config_str().splitlines()
print(optimizer)
print(opt_log)
start_time = time.time()
best_accuracy = -1
best_norm_ED = 1e+6
best_CER = 1e+6
i = 0
gAcc = 1
epoch = 1
btReplay = False and AMP
max_batch_replays = 3
titer = iter(train_loader)
while True:
start_time = time.time()
model.zero_grad()
train_loss = Metric(pO, 'train_loss')
train_loss.to(device)
for j in trange(valInterval, leave=False, desc='Training'):
try:
image_tensors, labels = next(titer)
except StopIteration:
epoch += 1
titer = iter(train_loader)
image_tensors, labels = next(titer)
image = image_tensors.to(device)
text, length = converter.encode(labels)
batch_size = image.size(0)
replay_batch = True
maxR = 3
while replay_batch and maxR > 0:
maxR -= 1
preds = model(image, text).float()
preds_size = torch.IntTensor([preds.size(1)] * batch_size).to(device)
preds = preds.permute(1, 0, 2).log_softmax(2)
if i == 0 and OnceExecWorker:
print('Model inp : ', image.dtype, image.size())
print('CTC inp : ', preds.dtype, preds.size(), preds_size[0])
                # To avoid a ctc_loss issue, disable cudnn for the ctc_loss computation
torch.backends.cudnn.enabled = False
cost = criterion(preds, text.to(device), preds_size, length.to(device)).mean() / gAcc
torch.backends.cudnn.enabled = True
train_loss.update(cost)
optimizer.zero_grad()
default_optimizer_step = optimizer.step # added for batch replay
if not AMP:
cost.backward()
replay_batch = False
else:
with amp.scale_loss(cost, optimizer) as scaled_loss:
scaled_loss.backward()
# if pO.HVD: optimizer.synchronize()
if optimizer.step is default_optimizer_step or not btReplay:
replay_batch = False
elif maxR > 0:
optimizer.step()
if (i + 1) % gAcc == 0:
optimizer.step()
model.zero_grad()
model_ema.update(model, num_updates=i / 2)
if (i + 1) % (gAcc * 2) == 0:
lr_scheduler.step()
i += 1
# validation part
if True:
elapsed_time = time.time() - start_time
start_time = time.time()
model.eval()
with torch.no_grad():
# valid_loss, current_accuracy, current_norm_ED, ted, bleu, preds, labels, infer_time = validation(
# model_ema.ema, criterion, valid_loader, converter, opt, pO)
valid_loss, current_accuracy, current_norm_ED, ted, bleu, preds, labels, infer_time = validation(
model, criterion, valid_loader, converter, opt, pO)
model.train()
v_time = time.time() - start_time
if OnceExecWorker:
if current_norm_ED < best_norm_ED:
best_norm_ED = current_norm_ED
checkpoint = {
'model': model.state_dict(),
'state_dict_ema': model_ema.ema.state_dict(),
'optimizer': optimizer.state_dict(),
}
torch.save(checkpoint, f'./saved_models/{experiment_name}/best_norm_ED.pth')
if ted < best_CER:
best_CER = ted
if current_accuracy > best_accuracy:
best_accuracy = current_accuracy
out = f'[{i}] Loss: {train_loss.avg:0.5f} time: ({elapsed_time:0.1f},{v_time:0.1f})'
out += f' vloss: {valid_loss:0.3f}'
out += f' CER: {ted:0.4f} NER: {current_norm_ED:0.4f} lr: {lr_scheduler.get_last_lr()[0]:0.5f}'
out += f' bAcc: {best_accuracy:0.1f}, bNER: {best_norm_ED:0.4f}, bCER: {best_CER:0.4f}, B: {bleu * 100:0.2f}'
print(out)
with open(f'./saved_models/{experiment_name}/log_train.txt', 'a') as log:
log.write(out + '\n')
if WdB:
wandb.log({'lr': lr_scheduler.get_last_lr()[0], 'It': i, 'nED': current_norm_ED, 'B': bleu * 100,
'tloss': train_loss.avg, 'AnED': best_norm_ED, 'CER': ted, 'bestCER': best_CER,
'vloss': valid_loss})
if i == num_iter:
print('end the training')
sys.exit()
def gInit(opt):
global pO, OnceExecWorker
gin.parse_config_file(opt.gin)
pO = parOptions(**{ginM('dist'): True})
OnceExecWorker = pO.DP
cudnn.benchmark = True
def rSeed(sd):
random.seed(sd)
np.random.seed(sd)
torch.manual_seed(sd)
torch.cuda.manual_seed(sd)
def launch_fn(rank, opt):
global OnceExecWorker
gInit(opt)
OnceExecWorker = OnceExecWorker or (pO.DDP and rank == 0)
mp.set_start_method('fork', force=True)
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = str(opt.port)
dist.init_process_group("nccl", rank=rank, world_size=opt.num_gpu)
# to ensure identical init parameters
rSeed(opt.manualSeed)
torch.cuda.set_device(rank)
opt.world_size = opt.num_gpu
opt.rank = rank
train(opt)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gin', help='Gin config file')
opt = parser.parse_args()
gInit(opt)
opt.manualSeed = ginM('manualSeed')
opt.port = ginM('port')
if OnceExecWorker:
rSeed(opt.manualSeed)
opt.num_gpu = torch.cuda.device_count()
train(opt)
| python |
import logging
from huobi.connection.impl.websocket_watchdog import WebSocketWatchDog
from huobi.connection.impl.websocket_manage import WebsocketManage
from huobi.connection.impl.websocket_request import WebsocketRequest
from huobi.constant.system import WebSocketDefine, ApiVersion
class SubscribeClient(object):
# static property
subscribe_watch_dog = WebSocketWatchDog()
def __init__(self, **kwargs):
"""
        Create the subscription client to subscribe to updates from the server.
:param kwargs: The option of subscription connection.
api_key: The public key applied from Huobi.
secret_key: The private key applied from Huobi.
url: Set the URI for subscription.
init_log: to init logger
"""
self.__api_key = kwargs.get("api_key", None)
self.__secret_key = kwargs.get("secret_key", None)
self.__uri = kwargs.get("url", WebSocketDefine.Uri)
self.__init_log = kwargs.get("init_log", None)
        if self.__init_log:
logger = logging.getLogger("huobi-client")
# logger.setLevel(level=logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)
self.__websocket_manage_list = list()
def __create_websocket_manage(self, request):
manager = WebsocketManage(self.__api_key, self.__secret_key, self.__uri, request)
self.__websocket_manage_list.append(manager)
manager.connect()
SubscribeClient.subscribe_watch_dog.on_connection_created(manager)
def create_request(self, subscription_handler, parse, callback, error_handler, is_trade, is_mbp_feed=False):
request = WebsocketRequest()
request.subscription_handler = subscription_handler
request.is_trading = is_trade
request.is_mbp_feed = is_mbp_feed
        request.auto_close = False  # subscriptions keep the connection open; one-off websocket requests close it after the call.
request.json_parser = parse
request.update_callback = callback
request.error_handler = error_handler
return request
def create_request_v1(self, subscription_handler, parse, callback, error_handler, is_trade=False):
request = self.create_request(subscription_handler=subscription_handler, parse=parse, callback=callback,
error_handler=error_handler, is_trade=is_trade)
request.api_version = ApiVersion.VERSION_V1
return request
def create_request_v2(self, subscription_handler, parse, callback, error_handler, is_trade=False):
request = self.create_request(subscription_handler=subscription_handler, parse=parse, callback=callback,
error_handler=error_handler, is_trade=is_trade)
request.api_version = ApiVersion.VERSION_V2
return request
def execute_subscribe_v1(self, subscription_handler, parse, callback, error_handler, is_trade=False):
request = self.create_request_v1(subscription_handler, parse, callback, error_handler, is_trade)
self.__create_websocket_manage(request)
def execute_subscribe_v2(self, subscription_handler, parse, callback, error_handler, is_trade=False):
request = self.create_request_v2(subscription_handler, parse, callback, error_handler, is_trade)
self.__create_websocket_manage(request)
def execute_subscribe_mbp(self, subscription_handler, parse, callback, error_handler, is_trade=False,
is_mbp_feed=True):
request = self.create_request(subscription_handler, parse, callback, error_handler, is_trade, is_mbp_feed)
self.__create_websocket_manage(request)
def unsubscribe_all(self):
for websocket_manage in self.__websocket_manage_list:
SubscribeClient.subscribe_watch_dog.on_connection_closed(websocket_manage)
websocket_manage.close()
self.__websocket_manage_list.clear()
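# Usage sketch (assumption; the handler/parser/callback bodies below are
# illustrative only and the topic is hypothetical): wire a subscription
# through the v1 helper, then tear all connections down with unsubscribe_all().
def _example_subscribe():
    def subscription_handler(connection):
        # send the subscribe request on the managed connection here;
        # the exact call depends on WebsocketManage's interface.
        pass
    def parse(dict_data):
        return dict_data
    def callback(event):
        print(event)
    def error_handler(error):
        print("subscription error:", error)
    client = SubscribeClient(api_key="your-key", secret_key="your-secret")
    client.execute_subscribe_v1(subscription_handler, parse, callback, error_handler)
    # ... later, when updates are no longer needed:
    client.unsubscribe_all()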
| python |
import numpy as np
from seisflows.tools import unix
from seisflows.tools.array import loadnpy, savenpy
from seisflows.tools.code import exists
from seisflows.tools.config import SeisflowsParameters, SeisflowsPaths, \
loadclass, ParameterError
PAR = SeisflowsParameters()
PATH = SeisflowsPaths()
import solver
import postprocess
migration = loadclass('workflow','migration')()
class test_postprocess(object):
""" Postprocessing class
"""
def check(self):
""" Checks parameters and paths
"""
migration.check()
if 'INPUT' not in PATH:
setattr(PATH, 'INPUT', None)
def main(self):
""" Writes gradient of objective function
"""
if not PATH.INPUT:
migration.main()
postprocess.process_kernels()
| python |
from jsonobject import JsonObject
from taxjar.data.float_property import TaxJarFloatProperty
class TaxJarBreakdownLineItem(JsonObject):
# NB: can return either string or integer
# `id` is a valid property, but isn't enforced here
# id = StringProperty()
taxable_amount = TaxJarFloatProperty()
tax_collectable = TaxJarFloatProperty()
combined_tax_rate = TaxJarFloatProperty()
state_taxable_amount = TaxJarFloatProperty()
state_sales_tax_rate = TaxJarFloatProperty()
state_amount = TaxJarFloatProperty()
county_taxable_amount = TaxJarFloatProperty()
county_tax_rate = TaxJarFloatProperty()
county_amount = TaxJarFloatProperty()
city_taxable_amount = TaxJarFloatProperty()
city_tax_rate = TaxJarFloatProperty()
city_amount = TaxJarFloatProperty()
special_district_taxable_amount = TaxJarFloatProperty()
special_tax_rate = TaxJarFloatProperty()
special_district_amount = TaxJarFloatProperty()
country_taxable_amount = TaxJarFloatProperty()
country_tax_rate = TaxJarFloatProperty()
country_tax_collectable = TaxJarFloatProperty()
gst_taxable_amount = TaxJarFloatProperty()
gst_tax_rate = TaxJarFloatProperty()
gst = TaxJarFloatProperty()
pst_taxable_amount = TaxJarFloatProperty()
pst_tax_rate = TaxJarFloatProperty()
pst = TaxJarFloatProperty()
qst_taxable_amount = TaxJarFloatProperty()
qst_tax_rate = TaxJarFloatProperty()
qst = TaxJarFloatProperty()
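# Usage sketch (assumption, not part of the original module): jsonobject
# models accept keyword arguments, so a breakdown line item from an API
# response can be wrapped like this (values are illustrative only).
def _example_line_item():
    item = TaxJarBreakdownLineItem(
        taxable_amount=19.99,
        tax_collectable=1.65,
        combined_tax_rate=0.0825,
        state_taxable_amount=19.99,
        state_sales_tax_rate=0.0625,
        state_amount=1.25,
    )
    return item.tax_collectable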
| python |
import abjad
import consort
from abjad.tools import durationtools
from abjad.tools import rhythmmakertools
from abjad.tools import systemtools
from abjad.tools import templatetools
from abjad.tools import timespantools
layer = 1
score_template = templatetools.StringOrchestraScoreTemplate(
violin_count=2,
viola_count=1,
cello_count=1,
contrabass_count=0,
)
segment_timespan = abjad.Timespan(0, 4)
timespan_maker = consort.TaleaTimespanMaker(
playing_talea=rhythmmakertools.Talea(
counts=(1,),
denominator=1,
),
silence_talea=None,
)
timespan_quantization = abjad.Duration(1, 16)
def test_MusicSetting_01():
music_setting = consort.MusicSetting(
timespan_maker=timespan_maker,
viola_bowing_voice=consort.tools.MusicSpecifier(),
)
result = music_setting(
layer=layer,
score_template=score_template,
segment_timespan=segment_timespan,
)
assert format(result) == abjad.String.normalize(
'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(1, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(1, 1),
stop_offset=abjad.Offset(2, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(2, 1),
stop_offset=abjad.Offset(3, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(3, 1),
stop_offset=abjad.Offset(4, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
]
)
''',
), format(result)
def test_MusicSetting_02():
music_setting = consort.MusicSetting(
timespan_maker=timespan_maker,
timespan_identifier=abjad.Timespan(1, 2),
viola_bowing_voice=consort.tools.MusicSpecifier(),
)
result = music_setting(
layer=layer,
score_template=score_template,
segment_timespan=segment_timespan,
)
assert format(result) == abjad.String.normalize(
'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(1, 1),
stop_offset=abjad.Offset(2, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
]
)
''',
), format(result)
def test_MusicSetting_03():
music_setting = consort.MusicSetting(
timespan_maker=timespan_maker,
timespan_identifier=abjad.TimespanList([
abjad.Timespan(0, 1),
abjad.Timespan(2, 4),
]),
viola_bowing_voice=consort.tools.MusicSpecifier(),
)
result = music_setting(
layer=layer,
score_template=score_template,
segment_timespan=segment_timespan,
)
assert format(result) == abjad.String.normalize(
'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(1, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(2, 1),
stop_offset=abjad.Offset(3, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(3, 1),
stop_offset=abjad.Offset(4, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
]
)
''',
), format(result)
def test_MusicSetting_04():
music_setting = consort.MusicSetting(
timespan_maker=timespan_maker,
timespan_identifier=consort.RatioPartsExpression(
ratio=(1, 2, 1),
parts=1,
),
viola_bowing_voice=consort.tools.MusicSpecifier(),
)
result = music_setting(
layer=layer,
score_template=score_template,
segment_timespan=segment_timespan,
)
assert format(result) == abjad.String.normalize(
'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(1, 1),
stop_offset=abjad.Offset(2, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(2, 1),
stop_offset=abjad.Offset(3, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
]
)
''',
), format(result)
def test_MusicSetting_05():
music_setting = consort.MusicSetting(
timespan_maker=timespan_maker,
timespan_identifier=consort.RatioPartsExpression(
ratio=(1, 2, 1),
parts=(0, 2),
),
viola_bowing_voice=consort.tools.MusicSpecifier(),
)
result = music_setting(
layer=layer,
score_template=score_template,
segment_timespan=segment_timespan,
)
assert format(result) == abjad.String.normalize(
'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(1, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(3, 1),
stop_offset=abjad.Offset(4, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
]
)
''',
), format(result)
def test_MusicSetting_06():
music_setting = consort.MusicSetting(
timespan_maker=timespan_maker,
timespan_identifier=consort.RatioPartsExpression(
ratio=(1, 1, 1),
parts=1,
),
viola_bowing_voice=consort.tools.MusicSpecifier(),
)
result = music_setting(
layer=layer,
score_template=score_template,
segment_timespan=segment_timespan,
timespan_quantization=timespan_quantization,
)
assert format(result) == abjad.String.normalize(
'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(21, 16),
stop_offset=abjad.Offset(37, 16),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
]
)
''',
), format(result)
def test_MusicSetting_07():
music_setting = consort.MusicSetting(
timespan_maker=timespan_maker,
timespan_identifier=consort.RatioPartsExpression(
ratio=(1, 1, 1, 2),
parts=(1, 3),
),
viola_bowing_voice=consort.tools.MusicSpecifier(),
)
result = music_setting(
layer=layer,
score_template=score_template,
segment_timespan=segment_timespan,
timespan_quantization=timespan_quantization,
)
assert format(result) == abjad.String.normalize(
'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(19, 8),
stop_offset=abjad.Offset(27, 8),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
]
)
''',
), format(result)
| python |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Error module tests."""
from __future__ import absolute_import, print_function
import json
from invenio_rest import InvenioREST
from invenio_rest.errors import FieldError, InvalidContentType, \
RESTException, RESTValidationError
def test_errors(app):
"""Error handlers view."""
InvenioREST(app)
@app.route('/', methods=['GET'])
def test_rest():
raise RESTException(description='error description')
@app.route('/contenttype', methods=['GET'])
def test_content_type():
raise InvalidContentType(allowed_content_types=['application/json'])
@app.route('/validationerror', methods=['GET'])
def test_validation_error():
raise RESTValidationError(
errors=[FieldError('myfield', 'mymessage', code=10)])
with app.test_client() as client:
res = client.get('/')
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert data['status'] is None
assert data['message'] == 'error description'
res = client.get('/contenttype')
assert res.status_code == 415
data = json.loads(res.get_data(as_text=True))
assert data['status'] == 415
assert 'application/json' in data['message']
res = client.get('/validationerror')
assert res.status_code == 400
data = json.loads(res.get_data(as_text=True))
print(data)
assert data['status'] == 400
assert data['message'] == 'Validation error.'
assert data['errors'] == [
dict(field='myfield', message='mymessage', code=10)
]
| python |
from OwlveyGateway import OwlveyGateway
from datetime import datetime, timedelta
import pandas as pd
import random
import math
if __name__ == "__main__":
client_id = "CF4A9ED44148438A99919FF285D8B48D"
secret_key = "0da45603-282a-4fa6-a20b-2d4c3f2a2127"
owlvey = OwlveyGateway("http://localhost:50001","http://localhost:47002", client_id, secret_key)
customers = owlvey.get_customers_lite()
customer = next(filter(lambda c: c['name'] == "EShopping", customers))
products = owlvey.get_products_lite(customer_id = customer["id"])
product = next(filter(lambda c: c['name'] == "Amazing Product", products))
sources = owlvey.get_sources(product_id=product["id"])
data = list()
for item in sources:
for i in range(365):
start = datetime(2020, 1, 1, 0, 0, 0) + timedelta(days=i)
for j in range(24):
end = start + timedelta( minutes=59, seconds=59)
total = math.floor(random.uniform(100,1000))
if item["name"] in ["LoginController:PreLogin", "LoginController::Login",
"CatalogController::LoadSilders", "CatalogController::LoadBanners",
"CatalogController::LoadProducts", "CatalogController::LoadAwards",
"CatalogController::LoadNotifications", "CatalogController::LoadCategories"]:
ava_prop = random.normalvariate(0.99, 0.01)
exp_prop = random.normalvariate(0.98, 0.01)
lat = round(random.normalvariate(1000, 200), 3)
#random.choices([0.65, 0.95, 0.98, 0.989, 0.99, 0.999], [0.1, 0.1 , 0.2 ,0.2 , 0.2 , 0.2], 24)
else:
ava_prop = random.normalvariate(0.95, 0.4)
exp_prop = random.normalvariate(0.97, 0.4)
lat = round(random.normalvariate(1000, 200), 3)
ava_prop = ava_prop if ava_prop <= 1 else 1
exp_prop = exp_prop if exp_prop <= 1 else 1
good = math.floor(total * ava_prop)
experience = math.floor(total * exp_prop)
experience = experience if experience >= 0 else 0
good = good if good >= 0 else 0
lat = lat if lat >= 0 else 0
data.append("{};{};{};{};{};{};{}\n".format(item["name"], start, end, total,
good, experience, lat))
start = end + timedelta(seconds=1)
with open('data.csv', 'w+') as f:
f.writelines(data)
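    # Descriptive note: each record appended above is a semicolon-separated line with the
    # columns  source name; window start; window end; total; good (availability); experience;
    # latency (ms). The consumer of data.csv is assumed to expect exactly this order.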
| python |
# carve.py
# Wed May 9 14:18:46 IST 2018
from __future__ import print_function
import sys
def main(source, start, end, dest):
# type: (str, int, int, str) -> None
with open(source, 'rb') as sf:
sf.seek(start)
        # read from the start offset up to (but not including) the end offset
        byte_str = sf.read(end - start)
with open(dest, 'wb') as df:
df.write(byte_str)
return
if __name__ == '__main__':
assert len(sys.argv) >= 5, 'too few arguments'
source_file, start, end, dest_file = sys.argv[1:5]
start_offset = int(start)
end_offset = int(end)
main(source_file, start_offset, end_offset, dest_file)
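    # Example invocation (hypothetical file names): carve the 1024 bytes at offsets
    # [512, 1536) out of disk.img into carved.bin:
    #   python carve.py disk.img 512 1536 carved.bin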
| python |
from direct.showbase import PythonUtil
from toontown.toonbase import ToontownGlobals
from toontown.hood import ZoneUtil
from random import choice
latencyTolerance = 10.0
MaxLoadTime = 40.0
rulesDuration = 21
JellybeanTrolleyHolidayScoreMultiplier = 2
DifficultyOverrideMult = int(1 << 16)
def QuantizeDifficultyOverride(diffOverride):
return int(round(diffOverride * DifficultyOverrideMult)) / float(DifficultyOverrideMult)
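# Hedged example (plain arithmetic, no Toontown state needed): the override is snapped onto a
# 16.16 fixed-point grid, so quantizing an already-quantized value is a no-op.
# 0.123456789 * 65536 rounds to 8090, giving 8090 / 65536 = 0.123443603515625.
assert QuantizeDifficultyOverride(0.123456789) == 8090.0 / DifficultyOverrideMult
assert QuantizeDifficultyOverride(QuantizeDifficultyOverride(0.7)) == QuantizeDifficultyOverride(0.7)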
NoDifficultyOverride = 2147483647
NoTrolleyZoneOverride = -1
SafeZones = [ToontownGlobals.ToontownCentral,
ToontownGlobals.DonaldsDock,
ToontownGlobals.DaisyGardens,
ToontownGlobals.MinniesMelodyland,
ToontownGlobals.TheBrrrgh,
ToontownGlobals.DonaldsDreamland]
def getDifficulty(trolleyZone):
hoodZone = getSafezoneId(trolleyZone)
return float(SafeZones.index(hoodZone)) / (len(SafeZones) - 1)
def getSafezoneId(trolleyZone):
return ZoneUtil.getCanonicalHoodId(trolleyZone)
def getScoreMult(trolleyZone):
szId = getSafezoneId(trolleyZone)
multiplier = PythonUtil.lerp(1.0, 1.5, float(SafeZones.index(szId)) / (len(SafeZones) - 1))
return multiplier
| python |
# -*- coding: utf-8 -*-
"""Forms module."""
from django import forms
class UploadFileForm(forms.Form):
file = forms.FileField()
def __init__(self, *args, **kwargs):
super(UploadFileForm, self).__init__(*args, **kwargs)
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'box__file'
            visible.field.widget.attrs['id'] = 'file'
| python |
import logging
import copy
import time
import os
import sys
import numpy as np
import math
import functools
import mxnet as mx
from mxnet import context as ctx
from mxnet.initializer import Uniform
from mxnet.module.base_module import BaseModule
from mxnet.module.module import Module
from mxnet import metric
from mxnet.model import BatchEndParam
from mxnet import io
import mxnet.ndarray as nd
def parall_log_softmax_ce_loss(datas, device_onehot_labels, ctx):
ctx_max_list = list(map(lambda fc7_out : mx.nd.max(fc7_out, axis=1, keepdims=True).as_in_context(ctx), datas))
local_fc7_max = mx.nd.max(nd.concat(*ctx_max_list, dim=1), axis=1, keepdims=True)
z_list = list(map(lambda fc7_out : fc7_out - local_fc7_max.as_in_context(fc7_out.context), datas))
ctx_exp_sum_list = list(map(lambda z: mx.nd.sum(mx.nd.exp(z), axis=1, keepdims=True).as_in_context(ctx), z_list))
log_exp_sum = mx.nd.log(mx.nd.add_n(*ctx_exp_sum_list))
ce_loss_list = [mx.nd.sum((log_exp_sum.as_in_context(z.context) - z) * device_onehot_label)
for z, device_onehot_label in zip(z_list, device_onehot_labels)]
ce_loss = mx.nd.add_n(*[ce.as_in_context(ctx) for ce in ce_loss_list])
return ce_loss
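# Hedged sanity-check sketch (assumes a CPU context and toy shapes; not part of the training
# path): when the per-device class shards are just column slices of one logit matrix, the
# model-parallel cross-entropy above should agree with a plain single-device softmax CE.
def _check_parall_ce_matches_single_device():
    cpu = mx.cpu()
    num_dev, cls_per_dev = 2, 3
    logits = mx.nd.random.normal(shape=(4, num_dev * cls_per_dev), ctx=cpu)
    labels = mx.nd.array([0, 2, 4, 5], ctx=cpu)
    onehot = mx.nd.one_hot(labels, depth=num_dev * cls_per_dev)
    shards = mx.nd.split(logits, num_outputs=num_dev, axis=1)
    onehot_shards = mx.nd.split(onehot, num_outputs=num_dev, axis=1)
    parall_ce = parall_log_softmax_ce_loss(list(shards), list(onehot_shards), cpu)
    single_ce = -mx.nd.sum(mx.nd.log_softmax(logits) * onehot)
    assert abs(parall_ce.asscalar() - single_ce.asscalar()) < 1e-3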
def cls_argmax(datas, ctx):
assert len(datas) == 1
return mx.nd.argmax(datas[0], axis=-1)
def parall_argmax(datas, ctx):
sub_max = mx.nd.concat(*[mx.nd.max(data, axis=-1, keepdims=True).as_in_context(ctx)
for data in datas], dim=1)
sub_arg_max = mx.nd.concat(*[data.shape[1] * i + mx.nd.argmax(data, axis=-1, keepdims=True).as_in_context(ctx)
for i, data in enumerate(datas)], dim=1)
part_arg_max = mx.nd.argmax(sub_max, axis=1)
return mx.nd.pick(sub_arg_max, part_arg_max, axis=1)
def parall_argmin(datas, ctx):
sub_min = mx.nd.concat(*[mx.nd.min(data, axis=-1, keepdims=True).as_in_context(ctx)
for data in datas], dim=1)
sub_arg_min = mx.nd.concat(*[data.shape[1] * i + mx.nd.argmin(data, axis=-1, keepdims=True).as_in_context(ctx)
for i, data in enumerate(datas)], dim=1)
part_arg_min = mx.nd.argmin(sub_min, axis=1)
return mx.nd.pick(sub_arg_min, part_arg_min, axis=1)
def parall_topk_value(datas, ctx, k=2):
top2_values = mx.nd.concat(*[mx.nd.topk(data, axis=-1, k=k, ret_typ='value').as_in_context(ctx)
for data in datas], dim=1)
top2_prob = mx.nd.topk(top2_values, axis=-1, k=k, ret_typ='value')
return top2_prob
def parall_pick_teacher_cos_label(teacher_preds, device_labels, ctx_num_classes, ctx):
onehot_device_labels = [nd.one_hot(label, depth=ctx_num_classes,
on_value = 1.0, off_value = 0.0)
for label in device_labels]
teacher_cos_sim_scores = [mx.nd.sum(teacher_pred * device_onehot_label, axis=1, keepdims=True)
for teacher_pred, device_onehot_label in zip(teacher_preds, onehot_device_labels)]
teacher_cos_sim_scores = mx.nd.concat(*[teacher_sim_score.as_in_context(ctx) for teacher_sim_score in teacher_cos_sim_scores], dim=1)
teacher_cos_sim_scores = mx.nd.sum(teacher_cos_sim_scores, axis=1, keepdims=True)
return teacher_cos_sim_scores
def parall_topk_index(datas, ctx, k=5):
topk_sub_values = mx.nd.concat(*[mx.nd.topk(data, k=k, ret_typ='value').as_in_context(ctx)
for data in datas], dim=1)
topk_sub_index = mx.nd.concat(*[data.shape[1]*i+mx.nd.topk(data, k=k).as_in_context(ctx)
for i, data in enumerate(datas)], dim=1)
topk_all_index = mx.nd.topk(topk_sub_values, k=k)
topk_index = mx.nd.concat(*[mx.nd.pick(topk_sub_index, topk_all_index.T[i], axis=1, keepdims=True) for i in range(k)], dim=1)
return topk_index
def nd_phi_linear(theta):
phi_theta = -(1+2 * np.cos(0.5))/np.pi * theta + np.cos(0.5)
return phi_theta
def nd_phi_linear_plus_n(theta, n):
phi_theta = -(1+2 * np.cos(0.5))/np.pi * theta + n
return phi_theta
def nd_phi_linear_slope_n(theta, n):
phi_theta = -n * theta + 1
return phi_theta
def nd_phi_cos(theta):
cos_theta = mx.nd.cos(theta)
return cos_theta
def nd_arcface_phi(theta):
phi_theta = mx.nd.cos(theta+0.5)
return phi_theta
def nd_linear_smooth(theta):
phi_linear_smooth = -0.7* theta + 0.6
return phi_linear_smooth
def nd_linear_large(theta):
phi_theta = -0.876996 * theta + 0.5
return phi_theta
def penalize_with_cos_psi(fc_pred_datas, onehot_device_labels, phi_fn, loss_s):
phi_out_list = []
for y_hat, onehot_device_label in zip(fc_pred_datas, onehot_device_labels):
onehot_cos_theta = onehot_device_label * y_hat
cos_theta = mx.nd.clip(onehot_cos_theta, -1.0, 1.0)
theta = mx.nd.arccos(cos_theta)
phi_theta = phi_fn(theta)
onehot_phi_theta = onehot_device_label * phi_theta
y_out = loss_s * (y_hat - onehot_cos_theta + onehot_phi_theta)
phi_out_list.append(y_out)
return phi_out_list
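# Descriptive note (assumption: this mirrors the usual large-margin softmax recipe): in
# penalize_with_cos_psi only the target-class entry of each row is replaced, cos(theta_y)
# -> phi(theta_y), every other class keeps its plain cosine score, and the whole row is
# rescaled by loss_s before the softmax cross-entropy is applied.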
def penalize_linear_psi(fc_pred_datas, onehot_device_labels, phi_fn, loss_s, slope, margin):
phi_out_list = []
for y_hat, onehot_device_label in zip(fc_pred_datas, onehot_device_labels):
linear_theta = -slope * y_hat + 1 + margin
onehot_theta = onehot_device_label * linear_theta
phi_theta = -slope * y_hat + 1
onehot_phi_theta = onehot_device_label * phi_theta
y_out = loss_s * (linear_theta - onehot_theta + onehot_phi_theta)
phi_out_list.append(y_out)
return phi_out_list
def cls_log_softmax_ce_loss_fn(datas, device_onehot_labels, ctx):
assert len(datas) == 1
assert len(device_onehot_labels) == 1
fc7_out = datas[0].as_in_context(ctx)
device_onehot_label = device_onehot_labels[0].as_in_context(ctx)
fc_max = mx.nd.max(fc7_out, axis=1, keepdims=True)
z = fc7_out - fc_max
exp_sum = mx.nd.sum(mx.nd.exp(z), axis=1, keepdims=True)
log_exp_sum = mx.nd.log(exp_sum)
ce_loss = (log_exp_sum - z) * device_onehot_label
ce_loss = mx.nd.sum(ce_loss)
return ce_loss
def cls_loss_fun(cls_pred_datas, labels, cls_num, ctx, phi_fn, psi_norm_fn, target_fn, loss_s):
assert len(labels) == 1
onehot_device_labels = [nd.one_hot(label, depth=cls_num,
on_value = 1.0, off_value = 0.0)
for label in labels]
phi_datas = psi_norm_fn(cls_pred_datas, onehot_device_labels, phi_fn, loss_s)
## check phi pred correct
phi_pred = target_fn(phi_datas, ctx)
pred_correct = nd.equal(phi_pred, labels[0])
label_loss = cls_log_softmax_ce_loss_fn(phi_datas, onehot_device_labels, ctx)
cls_loss = label_loss
return cls_loss, pred_correct
def parall_cls_loss(cls_pred_datas, labels, y_label, ctx, ctx_num_classes, phi_fn, psi_norm_fn, parral_target_fn, loss_s):
onehot_device_labels = [nd.one_hot(label, depth=ctx_num_classes,
on_value = 1.0, off_value = 0.0)
for label in labels]
phi_datas = psi_norm_fn(cls_pred_datas, onehot_device_labels, phi_fn, loss_s)
## check phi pred correct
phi_pred = parral_target_fn(phi_datas, ctx)
pred_correct = nd.equal(phi_pred, y_label)
label_loss = parall_log_softmax_ce_loss(phi_datas, onehot_device_labels, ctx)
cls_loss = label_loss
return cls_loss, pred_correct
def constant_diff(restore_img, constant_img_label, restore_scale, batch_size):
diff = restore_img - constant_img_label
diff_loss = 1 - mx.nd.smooth_l1(scalar=3.0, data=diff)
constant_loss = mx.nd.mean(diff_loss)
constant_loss = batch_size * constant_loss
return constant_loss
def l1_gan_loss(restore_img, gan_img_label, restore_scale, batch_size):
restore_error = restore_img - gan_img_label
restore_loss = restore_scale * mx.nd.smooth_l1(scalar=3.0, data=restore_error)
restore_loss = mx.nd.mean(restore_loss)
restore_loss = batch_size * restore_loss
return restore_loss
def dssim_loss(restore_img, gan_image_label, restore_scale, batch_size):
restore_mean = mx.nd.mean(restore_img, axis=(1,2,3), keepdims=True)
label_mean = mx.nd.mean(gan_image_label, axis=(1,2,3), keepdims=True)
restore_var = mx.nd.mean((restore_img - restore_mean)**2, axis=(1,2,3), keepdims=True)
label_var = mx.nd.mean((gan_image_label - label_mean)**2, axis=(1,2,3), keepdims=True)
covariance = mx.nd.mean(restore_img * gan_image_label, axis=(1,2,3), keepdims=True) - (restore_mean * label_mean)
c1 = 0.01**2
c2 = 0.03**2
ssim = (2 * restore_mean * label_mean + c1) * (2 * covariance + c2) / ((restore_mean**2 + label_mean**2 + c1) * (restore_var + label_var + c2))
dssim = (1-ssim)/2
dssim = batch_size * mx.nd.mean(dssim)
return dssim
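# Note on the formula as implemented above: per image, with c1 = 0.01**2 and c2 = 0.03**2,
#   SSIM = (2*mu_x*mu_y + c1) * (2*cov_xy + c2) / ((mu_x**2 + mu_y**2 + c1) * (var_x + var_y + c2))
# computed globally over each whole image rather than over local windows, and
# DSSIM = (1 - SSIM) / 2, averaged over the batch and then rescaled by batch_size.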
def both_dssim_l1_loss(restore_img, gan_image_label, restore_scale, batch_size):
dssim = dssim_loss(restore_img, gan_image_label[0], restore_scale, batch_size)
restore_loss = l1_gan_loss(restore_img, gan_image_label[1], restore_scale, batch_size)
gan_loss = dssim + restore_loss
return gan_loss
def both_ones_constant_l1_loss(restore_img, gan_image_label, restore_scale, batch_size):
constant_loss = constant_diff(restore_img, gan_image_label[0], restore_scale, batch_size)
restore_loss = l1_gan_loss(restore_img, gan_image_label[1], restore_scale, batch_size)
gan_loss = constant_loss + restore_loss
return gan_loss
def parall_total_loss(cls_pred_datas, labels, y_label, ctx, ctx_num_classes,
phi_fn, psi_norm_fn, parral_target_fn, loss_s, restore_img, restore_scale, gan_img_label, gan_loss_fun,
descriminator_cls_pred_list, descriminator_cls_labels, descriminator_cls_num, batch_size):
with mx.autograd.record():
cls_loss = mx.nd.array([0], ctx=ctx)
pred_correct = mx.nd.array([0], ctx=ctx)
## get true label loss
cls_loss, pred_correct = parall_cls_loss(cls_pred_datas, labels, y_label, ctx, ctx_num_classes, phi_fn, psi_norm_fn, parral_target_fn, loss_s)
## get dec label loss
descriminator_cls_loss = mx.nd.array([0], ctx=ctx)
descriminator_correct = mx.nd.array([0], ctx=ctx)
if len(descriminator_cls_pred_list) > 0:
descriminator_cls_loss, descriminator_correct = cls_loss_fun(descriminator_cls_pred_list, descriminator_cls_labels,
descriminator_cls_num, ctx, phi_fn, psi_norm_fn, cls_argmax, loss_s)
## get restore gan loss
restore_loss = mx.nd.array([0], ctx=ctx)
if restore_img is not None:
restore_loss = gan_loss_fun(restore_img, gan_img_label, restore_scale, batch_size)
total_loss = cls_loss + restore_loss + descriminator_cls_loss
return total_loss, pred_correct, restore_loss, cls_loss, descriminator_cls_loss, descriminator_correct
def parall_feat_mom_udpate(batch_fc1, device_labels, device_feats, feat_mom, ctx_num_cls):
zeros_pad_lines = [mx.nd.zeros_like(device_feat[0]).reshape(1,-1) for device_feat in device_feats]
pad_feats = [mx.nd.concat(*[zeros_pad_lines[i], device_feat, zeros_pad_lines[i]], dim=0) for i, device_feat in enumerate(device_feats)]
clip_labels = [mx.nd.clip(label+1, 0, ctx_num_cls+1) for label in device_labels]
for pad_feat, clip_label in zip(pad_feats, clip_labels):
pad_feat[clip_label, :] = feat_mom * pad_feat[clip_label, :] + (1-feat_mom) * batch_fc1.as_in_context(pad_feat.context)
for device_feat, pad_feat in zip(device_feats, pad_feats):
device_feat[:] = mx.nd.L2Normalization(pad_feat[1:-1], mode='instance')
return device_feats
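# Descriptive note: the zero rows padded onto each device's feature table above let
# out-of-shard labels be absorbed harmlessly - labels are shifted by +1 and clipped into
# [0, ctx_num_cls + 1], so anything outside this device's class range lands on the first or
# last dummy row, and only rows 1..ctx_num_cls are kept and re-L2-normalized afterwards.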
class ParallModule(BaseModule):
def __init__(self, symbol, data_names, label_names,
logger=logging, context=ctx.cpu(), asymbol = None, args = None, config=None,
restore_sym=None, restore_scale=1.0, model_teacher = None,
get_descriminator_cls_sym_fn=None, descriminator_embedding=None, **kwargs):
super(ParallModule, self).__init__(logger=logger)
self._symbol = symbol
self._asymbol = asymbol
self._data_names = data_names
self._context = context
self._batch_size = args.batch_size
self._verbose = args.verbose
self._emb_size = config.emb_size
self._loss_s = config.loss_s
if ('plus' in args.phi_name) or ('slope' in args.phi_name):
assert False
phi_name = args.phi_name
suffix_idx = phi_name.rfind('_')
l_n = int(phi_name[suffix_idx+1 : ])
phi_fn = eval(phi_name[: suffix_idx+1]+'n')
self._phi_fn = functools.partial(phi_fn, n=l_n)
self.logger.info("= linear loss {} with {}".format(phi_name, l_n))
else:
self._phi_fn = eval(args.phi_name)
self.logger.info("=== psi fun init {}".format(args.psi_norm_name))
self._psi_norm_fn = eval(args.psi_norm_name)
self._parall_target_fn = parall_argmax
if args.psi_norm_name == 'penalize_linear_psi':
self.logger.info("=== psi linear slope {}, margin {}".format(config.slope, config.margin))
self._psi_norm_fn = functools.partial(self._psi_norm_fn, slope=config.slope, margin=config.margin)
self._parall_target_fn = parall_argmin
self._local_class_start = args.local_class_start
assert self._local_class_start == 0
self._iter = 0
self._num_ctx = len(self._context)
self._ctx_num_classes = args.ctx_num_classes
self._total_cls_num = self._ctx_num_classes * len(self._context)
self._ctx_single_gpu = self._context[-1]
label_name = None
self._backbone_module = Module(self._symbol, self._data_names, label_name, logger=self.logger, context=self._context)
self._phi_parall_cls_modules = []
self._ctx_class_start = []
## parall cls sym
for i in range(len(self._context)):
args._ctxid = i
_module = Module(self._asymbol(args), self._data_names, label_name, logger=self.logger,
context=self._context[i])
self._phi_parall_cls_modules.append(_module)
_c = self._local_class_start + i* self._ctx_num_classes
self._ctx_class_start.append(_c)
## restore error analysis
self._restore_scale = restore_scale
self._add_gan_loss = False
self._gan_both_loss = True if 'both' in args.gan_loss_fun else False
self._gan_loss_fun = eval(args.gan_loss_fun)
if restore_sym is not None:
self._add_gan_loss = True
self.logger.info("==== add gan loss fun {} with scale {} both {} for generative loss ======".format(args.gan_loss_fun, restore_scale, self._gan_both_loss))
self._restore_img_sym = restore_sym
self._restore_module = Module(self._restore_img_sym, ['data'], [],
logger=self.logger, context=self._context)
## decode embedding and cls layer
self._add_descriminator = False
self._descriminator_cls_num = 2
if descriminator_embedding is not None:
assert self._add_gan_loss ## descriminator available only when AE generate image from decoder
self._add_descriminator = True
self._add_input2descriminator = True
self._descriminator_cls_modules = []
self.logger.info("=== add descriminator layer ======================")
self._descriminator_batch_mul = 2
self._descriminator_embedding = descriminator_embedding
self._descriminator_embedding_module = Module(self._descriminator_embedding,
['data'], [],
logger=self.logger, context=self._context)
self.logger.info("==== decode cls mul {} because add_input to dec set {}".format(self._descriminator_batch_mul, self._add_input2descriminator))
args._ctxid = 0
descriminator_cls_mod = Module(get_descriminator_cls_sym_fn(args), self._data_names, label_name,
logger=self.logger, context=self._ctx_single_gpu)
self._descriminator_cls_modules.append(descriminator_cls_mod)
self._teacher_correct_cnt = 0
self._teacher_batch_cnt = 0
self._frequent = args.frequent
self._model_teacher = model_teacher
self._teacher_topk = args.teacher_topk
if self._model_teacher is not None:
self.logger.info("==== add teacher model with topk setting {}".format(self._teacher_topk))
self._teacher_backbone_module = Module(self._model_teacher.backbone_sym, self._data_names,
label_name, context=self._context)
self._teacher_fc_modules = []
for i in range(len(self._context)):
args._ctxid = i
_teacher_cls_part_mod = Module(self._model_teacher.get_arcface_fun(args), self._data_names, label_name, logger=self.logger,
context=self._context[i])
self._teacher_fc_modules.append(_teacher_cls_part_mod)
self.logger.info("==== init with scale {} ".format(self._loss_s))
def _reset_bind(self):
self.binded = False
self._backbone_module = None
@property
def data_names(self):
return self._data_names
@property
def output_names(self):
return self._symbol.list_outputs()
@property
def data_shapes(self):
assert self.binded
return self._backbone_module.data_shapes
@property
def label_shapes(self):
assert self.binded
return self._backbone_module.label_shapes
@property
def output_shapes(self):
assert self.binded
return self._backbone_module.output_shapes
def get_export_params(self):
assert self.binded and self.params_initialized
_g, _x = self._backbone_module.get_params()
g = _g.copy()
x = _x.copy()
return g, x
def _get_dec_cls_params(self):
_dec_dis_em_params, _dec_dis_em_x = self._descriminator_embedding_module.get_params()
g = _dec_dis_em_params.copy()
x = _dec_dis_em_x.copy()
for _module in self._descriminator_cls_modules:
_g, _x = _module.get_params()
ag = _g.copy()
ax = _x.copy()
g.update(ag)
x.update(ax)
return g,x
def _get_enc_clsnet_params(self):
_g, _x = self._backbone_module.get_params()
g = _g.copy()
x = _x.copy()
for _module in self._phi_parall_cls_modules:
_g, _x = _module.get_params()
ag = _g.copy()
ax = _x.copy()
g.update(ag)
x.update(ax)
return g, x
def get_params(self):
assert self.binded and self.params_initialized
_enc_g, _enc_x = self._get_enc_clsnet_params()
g = _enc_g.copy()
x = _enc_x.copy()
if self._add_gan_loss:
_k_g, _k_x = self._restore_module.get_params()
kg = _k_g.copy()
kx = _k_x.copy()
g.update(kg)
x.update(kx)
if self._add_descriminator:
_dec_cls_g, _dec_cls_x = self._get_dec_cls_params()
dec_g = _dec_cls_g.copy()
dec_x = _dec_cls_x.copy()
g.update(dec_g)
x.update(dec_x)
return g, x
def set_params(self, arg_params, aux_params, allow_missing=False, force_init=True,
allow_extra=False):
## encode cls net
for _enc_cls_module in self._phi_parall_cls_modules:
_enc_cls_module.set_params(arg_params, aux_params, allow_missing=allow_missing, allow_extra=allow_extra)
self._backbone_module.set_params(arg_params, aux_params, allow_missing=allow_missing, allow_extra=allow_extra)
## decode restore net
if self._add_gan_loss:
self._restore_module.set_params(arg_params, aux_params, allow_missing=allow_missing, allow_extra=allow_extra)
## decode discriminative net
if self._add_descriminator:
for _descriminator_cls_mod in self._descriminator_cls_modules:
_descriminator_cls_mod.set_params(arg_params, aux_params, allow_missing=allow_missing, allow_extra=allow_extra)
self._descriminator_embedding_module.set_params(arg_params, aux_params, allow_missing=allow_missing, allow_extra=allow_extra)
def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
allow_missing=False, force_init=False, allow_extra=False):
if self.params_initialized and not force_init:
return
assert self.binded, 'call bind before initializing the parameters'
#TODO init the same weights with all work nodes
self._backbone_module.init_params(initializer=initializer,arg_params=arg_params,
aux_params=aux_params, allow_missing=False,
force_init=force_init, allow_extra=allow_extra)
for _module in self._phi_parall_cls_modules:
#_initializer = initializer
_initializer = mx.init.Normal(0.01)
_module.init_params(initializer=_initializer, arg_params=arg_params,
aux_params=aux_params, allow_missing=allow_missing,
force_init=force_init, allow_extra=allow_extra)
if self._add_gan_loss:
self._restore_module.init_params(initializer=initializer, arg_params=arg_params,
aux_params=aux_params, allow_missing=allow_missing,
force_init=force_init, allow_extra=allow_extra)
if self._add_descriminator:
self._descriminator_embedding_module.init_params(initializer=initializer, arg_params=arg_params,
aux_params=aux_params, allow_missing=allow_missing,
force_init=force_init, allow_extra=allow_extra)
for _module in self._descriminator_cls_modules:
_initializer = mx.init.Normal(0.01)
_module.init_params(initializer=_initializer, arg_params=arg_params,
                                    aux_params=aux_params, allow_missing=allow_missing,
force_init=force_init, allow_extra=allow_extra)
if self._model_teacher:
self._teacher_backbone_module.init_params(initializer=initializer, arg_params=self._model_teacher.backbone_arg_params,
aux_params=self._model_teacher.backbone_aux_params, allow_missing=False,
force_init=force_init, allow_extra=False)
for i, _module in enumerate(self._teacher_fc_modules):
_initializer = mx.init.Normal(0.01)
arg_params = {}
arg_params['fc7_%d_weight' % (i)] = self._model_teacher.fc_arg_params['fc7_%d_weight' % (i)]
_module.init_params(initializer=_initializer, arg_params=arg_params,
aux_params=None, allow_missing=False,
force_init=force_init, allow_extra=False)
self.params_initialized = True
def bind(self, data_shapes, label_shapes=None, for_training=True,
inputs_need_grad=False, force_rebind=False, shared_module=None):
print('in_bind', self.params_initialized, data_shapes, label_shapes)
        self.logger.info('in_bind {} {} {}'.format(self.params_initialized, data_shapes, label_shapes))
if self.params_initialized:
arg_params, aux_params = self.get_params()
# force rebinding is typically used when one want to switch from
# training to prediction phase.
if force_rebind:
self._reset_bind()
if self.binded:
self.logger.warning('Already binded, ignoring bind()')
return
assert shared_module is None, 'shared_module for MutableModule is not supported'
self.for_training = for_training
self.inputs_need_grad = inputs_need_grad
self.binded = True
label_shapes = None
self.logger.info('bind backbone data_shape {}, label shape {}'.format( data_shapes, label_shapes))
self._backbone_module.bind(data_shapes, label_shapes, for_training, inputs_need_grad,
force_rebind=False, shared_module=None)
batch_size = data_shapes[0][1][0]
## bind parall cls layer
for i, _module in enumerate(self._phi_parall_cls_modules):
_module.bind([('data', (batch_size, self._emb_size))],
label_shapes, for_training, True,
force_rebind=False, shared_module=None)
## bind restore generative net layer
if self._add_gan_loss:
self._restore_module.bind([('data', (batch_size, self._emb_size))],
label_shapes, for_training, True, force_rebind=False, shared_module=None)
## bind decode cls layer
if self._add_descriminator:
img_shape = data_shapes[0][1][1:]
descriminator_batch_size = self._descriminator_batch_mul * batch_size
self._descriminator_embedding_module.bind([('data', (descriminator_batch_size, *img_shape))], label_shapes, for_training, True, force_rebind=False, shared_module=None)
for i, _descriminator_cls_modules in enumerate(self._descriminator_cls_modules):
_descriminator_cls_modules.bind([('data', (descriminator_batch_size, self._emb_size))],
label_shapes, for_training, True,
force_rebind=False, shared_module=None)
## bind teacher with data
if self._model_teacher is not None:
self._teacher_backbone_module.bind(data_shapes, label_shapes, for_training=False, inputs_need_grad=False,
force_rebind=False, shared_module=None)
for i, _module in enumerate(self._teacher_fc_modules):
_module.bind([('data', (batch_size, self._emb_size))],
label_shapes, for_training=False, inputs_need_grad=False,
force_rebind=False, shared_module=None)
if self.params_initialized:
            # restore the parameters saved before rebinding; they match exactly, so nothing may
            # be missing and nothing extra is expected (assumed values - bind() itself takes no
            # allow_missing/allow_extra arguments)
            self.set_params(arg_params, aux_params, allow_missing=False, allow_extra=False)
def init_optimizer(self, kvstore='local', optimizer='sgd',
optimizer_params=(('learning_rate', 0.01),), force_init=False):
assert self.binded and self.params_initialized
if self.optimizer_initialized and not force_init:
self.logger.warning('optimizer already initialized, ignoring.')
return
self._backbone_module.init_optimizer(kvstore, optimizer, optimizer_params,
force_init=force_init)
for _module in self._phi_parall_cls_modules:
_module.init_optimizer(kvstore, optimizer, optimizer_params,
force_init=force_init)
if self._add_gan_loss:
self._restore_module.init_optimizer(kvstore, optimizer, optimizer_params,
force_init=force_init)
if self._add_descriminator:
self._descriminator_embedding_module.init_optimizer(kvstore, optimizer, optimizer_params,
force_init=force_init)
for _module in self._descriminator_cls_modules:
_module.init_optimizer(kvstore, optimizer, optimizer_params,
force_init=force_init)
self.optimizer_initialized = True
#forward backbone fc1 and other parts
def forward(self, data_batch, is_train=None):
assert self.binded and self.params_initialized
label = data_batch.label
input_data = data_batch.data
self._backbone_module.forward(data_batch, is_train=is_train)
backbone_pred = self._backbone_module.get_outputs(merge_multi_context=True)
if is_train:
label_len = 2 if self._add_gan_loss else 1
assert len(label) == label_len
assert len(backbone_pred) == 1
self._iter += 1
self.global_fc1 = backbone_pred[0]
self.global_label = label[0].as_in_context(self._ctx_single_gpu)
self.restore_img_buff = None
self.gan_image_label = None
if self._add_gan_loss:
if self._gan_both_loss:
assert False
### 0 is dssim , and 1 for l1 regression
self.gan_image_label = [(input_data[0]/255.0).as_in_context(self._ctx_single_gpu), label[1].as_in_context(self._ctx_single_gpu)]
#self.gan_image_label = [label[1].as_in_context(self._ctx_single_gpu), label[1].as_in_context(self._ctx_single_gpu)]
### 0 is ones constant , and 1 for l1 regression
#self.gan_image_label = [mx.nd.ones_like(input_data[0]).as_in_context(self._ctx_single_gpu), label[1].as_in_context(self._ctx_single_gpu)]
else:
self.gan_image_label = label[1].as_in_context(self._ctx_single_gpu)
db_restore_batch = io.DataBatch([backbone_pred[0]], [])
self._restore_module.forward(db_restore_batch)
                restore_mod_output = self._restore_module.get_outputs(merge_multi_context=True)
                assert len(restore_mod_output) == 1
                self.restore_img_buff = restore_mod_output[0].as_in_context(self._ctx_single_gpu)
if self._add_descriminator:
descriminator_databatch = io.DataBatch([mx.nd.concat(self.restore_img_buff, input_data[0].as_in_context(self._ctx_single_gpu), dim=0)], [])
self._descriminator_embedding_module.forward(descriminator_databatch)
descriminator_embedding_pred = self._descriminator_embedding_module.get_outputs(merge_multi_context=True)
assert len(descriminator_embedding_pred) == 1
for i, _module in enumerate(self._descriminator_cls_modules):
descriminator_cls_batch = io.DataBatch(descriminator_embedding_pred, [])
_module.forward(descriminator_cls_batch)
# teacher module forward
if self._model_teacher is not None:
self._teacher_backbone_module.forward(data_batch, is_train=False)
teacher_backbone_pred = self._teacher_backbone_module.get_outputs(merge_multi_context=True)
assert len(teacher_backbone_pred) == 1
for i, _module in enumerate(self._teacher_fc_modules):
teacher_fc1_databatch = io.DataBatch([teacher_backbone_pred[0]], [])
_module.forward(teacher_fc1_databatch, is_train=False)
for i, _module in enumerate(self._phi_parall_cls_modules):
db_global_fc1 = io.DataBatch([backbone_pred[0]], [])
_module.forward(db_global_fc1) #fc7 matrix multiple
def backward(self, out_grads=None):
assert self.binded and self.params_initialized
## ============= backward classifier layer ===========
self._fc_cls_buff_list = []
for i, _module in enumerate(self._phi_parall_cls_modules):
mod_output_list = _module.get_outputs(merge_multi_context=True)
assert len(mod_output_list) == 1
mod_output_list[0].attach_grad()
self._fc_cls_buff_list.append(mod_output_list[0])
## ============= compute verbose train accuracy and loss ===========
local_label = self.global_label
device_labels = [(local_label.as_in_context(device) - self._ctx_class_start[i]) for i, device in enumerate(self._context)]
descriminator_cls_labels = []
descriminator_cls_global_label = 0*local_label
if self._add_descriminator:
descriminator_cls_global_label = mx.nd.concat(descriminator_cls_global_label, descriminator_cls_global_label+1, dim=0)
descriminator_cls_labels = [descriminator_cls_global_label.as_in_context(self._ctx_single_gpu)]
if self._add_gan_loss:
self.restore_img_buff.attach_grad()
self._descriminator_cls_buff_list = []
if self._add_descriminator:
for i, _module in enumerate(self._descriminator_cls_modules):
mod_output_list = _module.get_outputs(merge_multi_context=True)
assert len(mod_output_list) == 1
mod_output_list[0].attach_grad()
self._descriminator_cls_buff_list.append(mod_output_list[0])
loss, pred_correct, restore_loss, cls_loss, descriminator_cls_loss, descriminator_correct = \
parall_total_loss(self._fc_cls_buff_list, device_labels, local_label,
self._ctx_single_gpu, self._ctx_num_classes, self._phi_fn, self._psi_norm_fn, self._parall_target_fn, self._loss_s,
self.restore_img_buff, self._restore_scale, self.gan_image_label, self._gan_loss_fun,
self._descriminator_cls_buff_list, descriminator_cls_labels,
self._descriminator_cls_num, self._batch_size)
assert not math.isnan(loss.asscalar())
assert not math.isnan(restore_loss.asscalar())
assert not math.isnan(cls_loss.asscalar())
assert not math.isnan(descriminator_cls_loss.asscalar())
if self._iter % self._verbose == 0:
acc = nd.mean(pred_correct).asnumpy()
dec_acc = nd.mean(descriminator_correct).asnumpy()
self.logger.info('[Iter {}] train phi acc : {}, dec acc : {}, total loss : {}\n--- restore loss : {}, restore scale : {}, cls loss : {} decode dis loss : {}'.format(
self._iter, acc, dec_acc, loss.asscalar()/ self._batch_size,
restore_loss.asscalar()/self._batch_size, self._restore_scale,
cls_loss.asscalar()/self._batch_size, descriminator_cls_loss.asscalar()/self._batch_size))
        ##============ calculate teacher mask ===============
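        ## Descriptive note on the mask computed below: with a teacher model, student gradients
        ## are masked per sample by how the teacher does on it. teacher_topk == 10000 acts as a
        ## sentinel meaning "weight by the teacher's cosine score at the true class" rather than
        ## a hard 0/1 mask; topk == 1 masks on teacher top-1 correctness; any other k masks on
        ## the label appearing in the teacher's top-k. Without a teacher the mask is all ones.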
if self._model_teacher is not None:
self._teacher_fc_cls_list = []
for i, _module in enumerate(self._teacher_fc_modules):
mod_output_list = _module.get_outputs(merge_multi_context=True)
assert len(mod_output_list) == 1
self._teacher_fc_cls_list.append(mod_output_list[0])
if self._teacher_topk == 10000: # compute teacher pred cos sim as teacher mask
teacher_pred_correct_mask = parall_pick_teacher_cos_label(self._teacher_fc_cls_list, device_labels, self._ctx_num_classes, self._ctx_single_gpu)
teacher_pred_correct_mask = mx.nd.reshape(teacher_pred_correct_mask, (self._batch_size, 1))
else:
if self._teacher_topk == 1:
module_teacher_pred = self._parall_target_fn(self._teacher_fc_cls_list, self._ctx_single_gpu)
teacher_pred_correct_mask = mx.nd.reshape(mx.nd.equal(module_teacher_pred, local_label), (self._batch_size, 1))
else:
local_label = mx.nd.reshape(local_label, (self._batch_size, 1))
module_teacher_pred_topk = parall_topk_index(self._teacher_fc_cls_list, self._ctx_single_gpu, self._teacher_topk)
teacher_pred_correct_mask = mx.nd.sum(mx.nd.broadcast_equal(module_teacher_pred_topk, local_label), axis=1, keepdims=True)
pred_correct_nums = mx.nd.sum(teacher_pred_correct_mask).asnumpy().astype('int32')
self._teacher_correct_cnt += pred_correct_nums[0]
self._teacher_batch_cnt += 1
else:
teacher_pred_correct_mask = mx.nd.ones((self._batch_size, 1), ctx=self._ctx_single_gpu)
## ============= backward large weight classifier layer with gradient ===========
loss.backward()
local_fc1_grad = mx.nd.zeros((self._batch_size, self._emb_size), ctx=self._ctx_single_gpu)
## =========== backward parall cls layer ================
for i, _module in enumerate(self._phi_parall_cls_modules):
phi_cls_grad_with_mask = mx.nd.broadcast_mul(self._fc_cls_buff_list[i].grad, teacher_pred_correct_mask.as_in_context(self._context[i]))
_module.backward(out_grads=[phi_cls_grad_with_mask])
local_fc1_grad += _module.get_input_grads()[0].as_in_context(self._ctx_single_gpu)
## =========== backward decode net cls model ======
if self._add_descriminator:
descriminator_cls_grad_4_descriminator_embedding = mx.nd.zeros((self._descriminator_batch_mul * self._batch_size, self._emb_size), ctx=self._ctx_single_gpu)
for i, _module in enumerate(self._descriminator_cls_modules):
_module.backward(out_grads=[self._descriminator_cls_buff_list[i].grad])
dec_cls_grad = _module.get_input_grads()[0].as_in_context(self._ctx_single_gpu)
descriminator_cls_grad_4_descriminator_embedding += dec_cls_grad
self._descriminator_embedding_module.backward(out_grads=[descriminator_cls_grad_4_descriminator_embedding])
dec_cls_net_input_grads = self._descriminator_embedding_module.get_input_grads()
assert len(dec_cls_net_input_grads) == 1
dec_cls_net_grad_4_gan_image = mx.nd.split(dec_cls_net_input_grads[0].as_in_context(self._ctx_single_gpu), num_outputs=2, axis=0)[0]
## =========== backward restore layer ============
if self._add_gan_loss:
restore_grad = self.restore_img_buff.grad
if self._add_descriminator:
restore_grad = restore_grad + dec_cls_net_grad_4_gan_image
##restore_grad = mx.nd.broadcast_mul(restore_grad, teacher_pred_correct_mask.reshape((self._batch_size, 1, 1, 1)).as_in_context(restore_grad.context))
self._restore_module.backward(out_grads = [restore_grad])
restore_fc1_grad = self._restore_module.get_input_grads()[0].as_in_context(self._ctx_single_gpu)
restore_fc1_grad = mx.nd.broadcast_mul(restore_fc1_grad, teacher_pred_correct_mask.as_in_context(self._ctx_single_gpu))
local_fc1_grad = local_fc1_grad + restore_fc1_grad
## ============= backward backbone ===============
self._backbone_module.backward(out_grads = [local_fc1_grad])
def update(self):
assert self.binded and self.params_initialized and self.optimizer_initialized
self._backbone_module.update()
for i, _module in enumerate(self._phi_parall_cls_modules):
_module.update()
if self._add_gan_loss:
self._restore_module.update()
if self._add_descriminator:
self._descriminator_embedding_module.update()
for _dec_mod in self._descriminator_cls_modules:
_dec_mod.update()
mx.nd.waitall()
def get_outputs(self, merge_multi_context=True):
assert self.binded and self.params_initialized
return self._backbone_module.get_outputs(merge_multi_context=merge_multi_context)
def get_class_output(self, merge_multi_context=True):
part_pred_list = [m.get_outputs(merge_multi_context=merge_multi_context)[0]
for m in self._phi_parall_cls_modules]
fc7_pred_label = self._parall_target_fn(part_pred_list, self._ctx_single_gpu)
return [fc7_pred_label]
def reset_teacher_metric(self):
self._teacher_correct_cnt = 0
self._teacher_batch_cnt = 0
def get_input_grads(self, merge_multi_context=True):
assert False
assert self.binded and self.params_initialized and self.inputs_need_grad
return self._backbone_module.get_input_grads(merge_multi_context=merge_multi_context)
def update_metric(self, eval_metric, labels):
assert self.binded and self.params_initialized
preds = self.get_class_output(merge_multi_context=True)
label_len = 2 if self._add_gan_loss else 1
#assert len(labels) == label_len, 'label out len'
assert len(preds) == 1, 'pred cls out len'
eval_metric.update(labels=[labels[0]], preds=preds)
def install_monitor(self, mon):
""" Install monitor on all executors """
assert self.binded
self._backbone_module.install_monitor(mon)
for enc_cls_mod in self._phi_parall_cls_modules:
enc_cls_mod.install_monitor(mon)
if self._add_gan_loss:
self._restore_module.install_monitor(mon)
if self._add_descriminator:
self._descriminator_embedding_module.install_monitor(mon)
for dec_cls_mod in self._descriminator_cls_modules:
dec_cls_mod.install_monitor(mon)
def forward_backward(self, data_batch):
"""A convenient function that calls both ``forward`` and ``backward``."""
self.forward(data_batch, is_train=True) # forward net
self.backward()
def fit(self, train_data, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local',
optimizer='sgd', optimizer_params=(('learning_rate', 0.01),),
eval_end_callback=None,
eval_batch_end_callback=None, initializer=Uniform(0.01),
arg_params=None, aux_params=None, allow_missing=False,
force_rebind=False, force_init=False, begin_epoch=0, num_epoch=None,
validation_metric=None, monitor=None, sparse_row_id_fn=None):
"""Trains the module parameters.
        Check out `Module Tutorial <http://mxnet.io/tutorials/basic/module.html>`_ to see
        an end-to-end use-case.
Parameters
----------
train_data : DataIter
Train DataIter.
eval_data : DataIter
If not ``None``, will be used as validation set and the performance
after each epoch will be evaluated.
eval_metric : str or EvalMetric
Defaults to 'accuracy'. The performance measure used to display during training.
Other possible predefined metrics are:
'ce' (CrossEntropy), 'f1', 'mae', 'mse', 'rmse', 'top_k_accuracy'.
epoch_end_callback : function or list of functions
Each callback will be called with the current `epoch`, `symbol`, `arg_params`
and `aux_params`.
batch_end_callback : function or list of function
Each callback will be called with a `BatchEndParam`.
kvstore : str or KVStore
Defaults to 'local'.
optimizer : str or Optimizer
Defaults to 'sgd'.
optimizer_params : dict
Defaults to ``(('learning_rate', 0.01),)``. The parameters for
the optimizer constructor.
The default value is not a dict, just to avoid pylint warning on dangerous
default values.
eval_end_callback : function or list of function
These will be called at the end of each full evaluation, with the metrics over
the entire evaluation set.
eval_batch_end_callback : function or list of function
These will be called at the end of each mini-batch during evaluation.
initializer : Initializer
The initializer is called to initialize the module parameters when they are
not already initialized.
arg_params : dict
Defaults to ``None``, if not ``None``, should be existing parameters from a trained
model or loaded from a checkpoint (previously saved model). In this case,
the value here will be used to initialize the module parameters, unless they
are already initialized by the user via a call to `init_params` or `fit`.
`arg_params` has a higher priority than `initializer`.
aux_params : dict
Defaults to ``None``. Similar to `arg_params`, except for auxiliary states.
allow_missing : bool
Defaults to ``False``. Indicates whether to allow missing parameters when `arg_params`
and `aux_params` are not ``None``. If this is ``True``, then the missing parameters
will be initialized via the `initializer`.
force_rebind : bool
Defaults to ``False``. Whether to force rebinding the executors if already bound.
force_init : bool
Defaults to ``False``. Indicates whether to force initialization even if the
parameters are already initialized.
begin_epoch : int
Defaults to 0. Indicates the starting epoch. Usually, if resumed from a
checkpoint saved at a previous training phase at epoch N, then this value should be
N+1.
num_epoch : int
Number of epochs for training.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
Examples
--------
>>> # An example of using fit for training.
>>> # Assume training dataIter and validation dataIter are ready
>>> # Assume loading a previously checkpointed model
>>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, 3)
>>> mod.fit(train_data=train_dataiter, eval_data=val_dataiter, optimizer='sgd',
... optimizer_params={'learning_rate':0.01, 'momentum': 0.9},
... arg_params=arg_params, aux_params=aux_params,
... eval_metric='acc', num_epoch=10, begin_epoch=3)
"""
assert num_epoch is not None, 'please specify number of epochs'
#assert arg_params is None and aux_params is None
self.bind(data_shapes=train_data.provide_data, label_shapes=train_data.provide_label,
for_training=True, force_rebind=force_rebind)
if monitor is not None:
self.install_monitor(monitor)
self.init_params(initializer=initializer, arg_params=arg_params, aux_params=aux_params,
allow_missing=True, force_init=force_init, allow_extra=True)
self.init_optimizer(kvstore=kvstore, optimizer=optimizer,
optimizer_params=optimizer_params)
if validation_metric is None:
validation_metric = eval_metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
        print("=== init eval metric {}, {}".format(eval_metric, type(eval_metric)))
################################################################################
# training loop
################################################################################
for epoch in range(begin_epoch, num_epoch):
tic = time.time()
eval_metric.reset()
nbatch = 0
data_iter = iter(train_data)
end_of_batch = False
next_data_batch = next(data_iter)
while not end_of_batch:
data_batch = next_data_batch
if monitor is not None:
monitor.tic()
self.forward_backward(data_batch)
self.update()
assert not isinstance(data_batch, list)
if isinstance(data_batch, list):
assert False
db_cls_label = mx.nd.concat(*[db.label[0] for db in data_batch], dim=0)
self.update_metric(eval_metric,
[db_cls_label],
pre_sliced=True)
else:
self.update_metric(eval_metric, data_batch.label)
try:
# pre fetch next batch
next_data_batch = next(data_iter)
self.prepare(next_data_batch, sparse_row_id_fn=sparse_row_id_fn)
except StopIteration:
end_of_batch = True
if monitor is not None:
monitor.toc_print()
if end_of_batch:
eval_name_vals = eval_metric.get_name_value()
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch, nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
batch_end_callback(batch_end_params)
if self._model_teacher and self._teacher_topk != 10000 and (self._teacher_batch_cnt % self._frequent == 0):
acc = self._teacher_correct_cnt / (self._teacher_batch_cnt * self._batch_size)
self.logger.info('TeacherModule-Accuracy=%f', acc)
self.reset_teacher_metric()
nbatch += 1
# one epoch of training is finished
for name, val in eval_name_vals:
self.logger.info('Epoch[%d] Train-%s=%f', epoch, name, val)
toc = time.time()
self.logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc-tic))
# sync aux params across devices
arg_params, aux_params = self.get_params()
self.set_params(arg_params, aux_params, allow_missing=False, allow_extra=True)
#----------------------------------------
# evaluation on validation set
if eval_data:
res = self.score(eval_data, validation_metric,
score_end_callback=eval_end_callback,
batch_end_callback=eval_batch_end_callback, epoch=epoch)
#TODO: pull this into default
for name, val in res:
self.logger.info('Epoch[%d] Validation-%s=%f', epoch, name, val)
# end of 1 epoch, reset the data-iter for another epoch
train_data.reset()
if epoch_end_callback is not None:
epoch_end_callback(epoch)
| python |
import functools
import json
from flask import request, session, url_for
from flask_restplus import Namespace, Resource
from CTFd.models import Users, db
from CTFd.plugins import bypass_csrf_protection
from CTFd.utils import validators, config, email, get_app_config, get_config, user as current_user
from CTFd.utils.config.visibility import registration_visible
from CTFd.utils.crypto import verify_password
from CTFd.utils.decorators import ratelimit
from CTFd.utils.security.auth import login_user, logout_user
def load(app):
def ret_json(func):
@functools.wraps(func)
def _ret_json(*args, **kwargs):
return json.dumps(func(*args, **kwargs))
return _ret_json
@app.route('/api/v1/login', methods=['POST'])
@ratelimit(method="POST", limit=10, interval=5)
@bypass_csrf_protection
@ret_json
def login(): # login
req = request.json
if 'name' not in req or 'password' not in req:
return {"success": False, "data": None}
name = req['name']
if validators.validate_email(name) is True:
user = Users.query.filter_by(email=name).first()
else:
user = Users.query.filter_by(name=name).first()
if user and verify_password(request.json["password"], user.password):
session.regenerate()
login_user(user)
db.session.close()
return {
"success": True, "data": {
"nonce": session["nonce"],
}}
else:
db.session.close()
return {"success": False, "data": "Your username or password is incorrect"}
@app.route('/api/v1/logout')
@ratelimit(method="GET", limit=10, interval=5)
@ret_json
def logout():
if current_user.authed():
logout_user()
return {"success": True, "data": None}
@app.route('/api/v1/register', methods=['POST'])
@ratelimit(method="POST", limit=10, interval=5)
@bypass_csrf_protection
@ret_json
def register(): # register
def error(msg):
return {"success": False, "data": msg}
name = request.json.get("name", "").strip()
email_address = request.json.get("email", "").strip().lower()
password = request.json.get("password", "").strip()
name_len = len(name) == 0
names = Users.query.add_columns(
"name", "id").filter_by(name=name).first()
emails = (
Users.query.add_columns("email", "id")
.filter_by(email=email_address)
.first()
)
pass_short = len(password) == 0
pass_long = len(password) > 128
valid_email = validators.validate_email(email_address)
team_name_email_check = validators.validate_email(name)
if not valid_email:
return error("Please enter a valid email address")
if email.check_email_is_whitelisted(email_address) is False:
return error("Only email addresses under {domains} may register".format(
domains=get_config("domain_whitelist")
))
if names:
return error("That user name is already taken")
if team_name_email_check is True:
return error("Your user name cannot be an email address")
if emails:
return error("That email has already been used")
if pass_short:
return error("Pick a longer password")
if pass_long:
return error("Pick a shorter password")
if name_len:
return error("Pick a longer user name")
with app.app_context():
user = Users(name=name, email=email_address, password=password)
db.session.add(user)
db.session.commit()
db.session.flush()
login_user(user)
if config.can_send_mail() and get_config(
"verify_emails"
):
email.verify_email_address(user.email)
db.session.close()
return {"success": True, "data": url_for("auth.confirm")}
else:
if (config.can_send_mail()):
email.successful_registration_notification(user.email)
db.session.close()
                return {"success": True, "data": None}
| python |
from typing import Dict
class Song:
def __init__(self, lyrics: str, artist: str, yt_link: str):
self._lyrics = lyrics
self._artist = artist
self._yt_link = yt_link
@staticmethod
def new(data: Dict):
return Song(lyrics=data['lyrics'], artist=data['artist'], yt_link=data['yt_link'])
@property
def lyrics(self):
return self._lyrics
@property
def artist(self):
return self._artist
@property
def yt_link(self):
return self._yt_link
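# Minimal usage sketch (hypothetical payload - the real dict comes from whatever API feeds this class):
if __name__ == "__main__":
    demo = Song.new({"lyrics": "la la la", "artist": "Example Artist", "yt_link": "https://youtu.be/xxxxxxxxxxx"})
    print(demo.artist, demo.yt_link)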
| python |
"""
Generate NSR and NSW compounds for all methods across all cell lines
Generate a list of NSR, NSW, NSR but not NSW and NSW but not NSR targets
hitting all cell lines, and detected using all analysis methods - as given in
Sup. tables 2, 4, 5, and 6.
HERE FOR COMPLETENESS - NO OUTPUT AS NOTHING MEETS THE CRITERIA.
"""
import json
from pathlib import Path
from nss_std_functions import get_cleaned_datasets
lookup_plateid_to_htargetname=json.load(open(Path("dat-plateid-to-hithumantargetnames.json")))
s2,s4,s5,s6 =get_cleaned_datasets()
# Boilerplate to obtain compounds
s2_NSR_compounds_PC3=set([c.replace("Plate","") for c in s2.query('Hit_PC3_6h == "-" or Hit_PC3_36h =="-" ')['Compound'].values])
s4_NSR_compounds_PC3=set([c.replace("Plate","") for c in s4.query('Hit_PC3_6h == "-" or Hit_PC3_36h =="-" ')['Compound'].values])
s5_NSR_compounds_PC3=set([c.replace("Plate","") for c in s5.query('Hit_PC3_6h == "-" or Hit_PC3_36h =="-" ')['Compound'].values])
s6_NSR_compounds_PC3=set([c.replace("Plate","") for c in s6.query('Hit_PC3_6h == "-" or Hit_PC3_36h =="-" ')['Compound'].values])
s2_NSW_compounds_PC3=set([c.replace("Plate","") for c in s2.query('Hit_PC3_6h == "+" or Hit_PC3_36h =="+" ')['Compound'].values])
s4_NSW_compounds_PC3=set([c.replace("Plate","") for c in s4.query('Hit_PC3_6h == "+" or Hit_PC3_36h =="+" ')['Compound'].values])
s5_NSW_compounds_PC3=set([c.replace("Plate","") for c in s5.query('Hit_PC3_6h == "+" or Hit_PC3_36h =="+" ')['Compound'].values])
s6_NSW_compounds_PC3=set([c.replace("Plate","") for c in s6.query('Hit_PC3_6h == "+" or Hit_PC3_36h =="+" ')['Compound'].values])
s2_NSR_compounds_HCT116=set([c.replace("Plate","") for c in s2.query('Hit_HCT116_6h == "-" or Hit_HCT116_36h =="-" ')['Compound'].values])
s4_NSR_compounds_HCT116=set([c.replace("Plate","") for c in s4.query('Hit_HCT116_6h == "-" or Hit_HCT116_36h =="-" ')['Compound'].values])
s5_NSR_compounds_HCT116=set([c.replace("Plate","") for c in s5.query('Hit_HCT116_6h == "-" or Hit_HCT116_36h =="-" ')['Compound'].values])
s6_NSR_compounds_HCT116=set([c.replace("Plate","") for c in s6.query('Hit_HCT116_6h == "-" or Hit_HCT116_36h =="-" ')['Compound'].values])
s2_NSW_compounds_HCT116=set([c.replace("Plate","") for c in s2.query('Hit_HCT116_6h == "+" or Hit_HCT116_36h =="+" ')['Compound'].values])
s4_NSW_compounds_HCT116=set([c.replace("Plate","") for c in s4.query('Hit_HCT116_6h == "+" or Hit_HCT116_36h =="+" ')['Compound'].values])
s5_NSW_compounds_HCT116=set([c.replace("Plate","") for c in s5.query('Hit_HCT116_6h == "+" or Hit_HCT116_36h =="+" ')['Compound'].values])
s6_NSW_compounds_HCT116=set([c.replace("Plate","") for c in s6.query('Hit_HCT116_6h == "+" or Hit_HCT116_36h =="+" ')['Compound'].values])
s2_NSR_compounds_H1299=set([c.replace("Plate","") for c in s2.query('Hit_H1299_6h == "+" or Hit_H1299_36h =="+" ')['Compound'].values])
s4_NSR_compounds_H1299=set([c.replace("Plate","") for c in s4.query('Hit_H1299_6h == "+" or Hit_H1299_36h =="+" ')['Compound'].values])
s5_NSR_compounds_H1299=set([c.replace("Plate","") for c in s5.query('Hit_H1299_6h == "+" or Hit_H1299_36h =="+" ')['Compound'].values])
s6_NSR_compounds_H1299=set([c.replace("Plate","") for c in s6.query('Hit_H1299_6h == "+" or Hit_H1299_36h =="+" ')['Compound'].values])
s2_NSW_compounds_H1299=set([c.replace("Plate","") for c in s2.query('Hit_H1299_6h == "-" or Hit_H1299_36h =="-" ')['Compound'].values])
s4_NSW_compounds_H1299=set([c.replace("Plate","") for c in s4.query('Hit_H1299_6h == "-" or Hit_H1299_36h =="-" ')['Compound'].values])
s5_NSW_compounds_H1299=set([c.replace("Plate","") for c in s5.query('Hit_H1299_6h == "-" or Hit_H1299_36h =="-" ')['Compound'].values])
s6_NSW_compounds_H1299=set([c.replace("Plate","") for c in s6.query('Hit_H1299_6h == "-" or Hit_H1299_36h =="-" ')['Compound'].values])
NSR_cpds=set(s2_NSR_compounds_PC3).intersection(
s4_NSR_compounds_PC3,s5_NSR_compounds_PC3,s6_NSR_compounds_PC3,
s2_NSR_compounds_HCT116,s4_NSR_compounds_HCT116,s5_NSR_compounds_HCT116,s6_NSR_compounds_HCT116,
s2_NSR_compounds_H1299,s4_NSR_compounds_H1299,s5_NSR_compounds_H1299,s6_NSR_compounds_H1299
)
NSW_cpds=set(s2_NSW_compounds_PC3).intersection(
s4_NSW_compounds_PC3,s5_NSW_compounds_PC3,s6_NSW_compounds_PC3,
s2_NSW_compounds_HCT116,s4_NSW_compounds_HCT116,s5_NSW_compounds_HCT116,s6_NSW_compounds_HCT116,
s2_NSW_compounds_H1299,s4_NSW_compounds_H1299,s5_NSW_compounds_H1299,s6_NSW_compounds_H1299
)
NSR_targets_list=[]
NSW_targets_list=[]
[NSR_targets_list.extend(lookup_plateid_to_htargetname[pid]) for pid in NSR_cpds if pid in lookup_plateid_to_htargetname.keys()]
[NSW_targets_list.extend(lookup_plateid_to_htargetname[pid]) for pid in NSW_cpds if pid in lookup_plateid_to_htargetname.keys()]
counts_of_NSR_targets=sorted([(NSR_targets_list.count(prot),prot) for prot in set(NSR_targets_list)], reverse=True)
counts_of_NSW_targets=sorted([(NSW_targets_list.count(prot),prot) for prot in set(NSW_targets_list)], reverse=True)
print("NSR")
for item in counts_of_NSR_targets:
print(f"{item[0]}\t{item[1]}")
# Perform the output
print("NSW")
for item in counts_of_NSW_targets:
print(f"{item[0]}\t{item[1]}")
print("NSR but not in NSW")
for item in counts_of_NSR_targets:
if item[1] not in set(NSW_targets_list):
print(f"{item[0]}\t{item[1]}")
print("NSW but not in NSR")
for item in counts_of_NSW_targets:
    if item[1] not in set(NSR_targets_list):
print(f"{item[0]}\t{item[1]}")
| python |
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Charles Vanwynsberghe
# Pyworld2 is a Python implementation of the World2 model designed by Jay W.
# Forrester, and thouroughly described in the book World Dynamics (1971). It
# is written for educational and research purposes.
# Pyworld2 is forked from the Software Rworld2 held by Arnaud Mignan (2020).
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import json
import os
import numpy as np
from scipy.interpolate import interp1d
from .utils import Clipper, plot_world_state
class World2:
"""
    World2 class contains helpers to configure and run a simulation. Default
    parameters lead to a standard run.
Examples
--------
>>> w2 = World2() # possibly modify time limits and step
>>> w2.set_state_variables() # possibly modify the model constants
>>> w2.set_initial_state() # possibly modify the condition constants
>>> w2.set_table_functions() # possibly do your own tables in a json file
>>> w2.set_switch_functions() # possibly choose switches in a json file
>>> w2.run() # run the simulation
Attributes
----------
year_min : int
starting year of the simulation.
year_max : int
end year of the simulation.
dt : float
time step of the numerical integration [year].
time : numpy.ndarray
time from year_min to year_max sampled every dt on n points [year].
n : int
number of time steps of the numerical integration.
p : numpy.ndarray
P - Population [people]. It is a state variable.
br : numpy.ndarray
BR - Birth Rate [people/year].
dr : numpy.ndarray
DR - Death Rate [people/year].
cr : numpy.ndarray
CR - Crowding Ratio [].
la : float
LA - Land Area [square kilometers].
pdn : float
PDN - Population Density Normal [people/square kilometer].
nr : numpy.ndarray
NR - Natural Resources [natural resource units]. It is a state variable.
nrur : numpy.ndarray
NRUR - Natural-Resource-Usage Rate [natural resource units/year].
nrfr : numpy.ndarray
NRFR - Natural-Resource Fraction Remaining [].
ci : numpy.ndarray
CI - Capital Investment [capital units]. It is a state variable.
cir : numpy.ndarray
CIR - Capital-Investment Ratio [capital units/person].
cig : numpy.ndarray
CIG - Capital-Investment Generation [capital units/year].
cid : numpy.ndarray
CID - Capital-Investment Discard [capital units/year].
cira : numpy.ndarray
CIRA - Capital-Investment Ratio in Agriculture [capital units/person].
ciafn : float
CIAFN - Capital-Investment-Ratio-in-Agriculture Fraction Normal [].
msl : numpy.ndarray
MSL - Material Standard of Living [].
ecir : numpy.ndarray
ECIR - Effective-Capital-Investment Ratio [capital units/person].
ecirn : float
ECIRN - Effective-Capital-Investment Ratio Normal [capital units/person].
ciaf : numpy.ndarray
CIAF - Capital-Investment-in-Agriculture Fraction [].
ciaft : float
CIAFT - Capital-Investment-in-Agriculture-Fraction Adjustment Time
[years].
fr : numpy.ndarray
FR - Food Ratio [].
fn : float
FN - Food Normal [food units/person/year].
pol : numpy.ndarray
POL - Pollution [pollution units].
polr : numpy.ndarray
POLR - Pollution Ratio [].
polg : numpy.ndarray
POLG - Pollution Generation [pollution units/year].
pola : numpy.ndarray
POLA - Pollution Absorption [pollution units/year].
pols : float
POLS - Pollution Standard [pollution units].
ql : numpy.ndarray
QL - Quality of Life [satisfaction units].
qls : float
QLS - Quality-of-Life Standard [satisfaction units].
pi : float
PI - Population, Initial [people].
nri : float
NRI - Natural Resources, Initial [natural resources units].
cii : float
CII - Capital Investment, Initial [capital units].
poli : float
POLI - Pollution, Initial [pollution units].
ciafi : float
CIAFI - Capital-Investment-in-Agriculture Fraction, Initial [].
brcm : interp1d
BRCM - Birth-Rate-From-Crowding Multiplier [].
brfm : interp1d
BRFM - Birth-Rate-From-Food Multiplier [].
brmm : interp1d
BRMM - Birth-Rate-From-Material Multiplier [].
brpm : interp1d
BRPM - Birth-Rate-From-Pollution Multiplier [].
drcm : interp1d
DRCM - Death-Rate-From-Crowding Multiplier [].
drfm : interp1d
DRFM - Death-Rate-From-Food Multiplier [].
drmm : interp1d
DRMM - Death-Rate-From-Material Multiplier [].
drpm : interp1d
DRPM - Death-Rate-From-Pollution Multiplier [].
cfifr : interp1d
CFIFR - Capital Fraction Indicated by Food Ratio [].
cim : interp1d
CIM - Capital-Investment Multiplier [].
ciqr : interp1d
CIQR - Capital-Investment-From-Quality Ratio [].
fcm : interp1d
FCM - Food-From-Crowding Multiplier [].
fpci : interp1d
FPCI - Food Potential From Capital Investment [food units/person/year].
fpm : interp1d
FPM - Food-From-Pollution Multiplier [].
nrem : interp1d
NREM - Natural-Resource-Extraction Multiplier [].
nrmm : interp1d
NRMM - Natural-Resource-From-Material Multiplier [].
polat : interp1d
POLAT - Pollution-Absorption Time [years].
polcm : interp1d
POLCM - Pollution-From-Capital Multiplier [].
qlc : interp1d
QLC - Quality of Life from Crowding [].
qlf : interp1d
QLF - Quality of Life from Food [].
qlm : interp1d
QLM - Quality of Life from Material [].
qlp : interp1d
QLP - Quality of Life from Pollution [].
brn : Clipper
BRN - Birth Rate Normal [fraction/year].
drn : Clipper
DRN - Death Rate Normal [fraction/year].
cidn : Clipper
CIDN - Capital-Investment Discard Normal [fraction/year].
cign : Clipper
CIGN - Capital-Investment Generation Normal [fraction/year].
fc : Clipper
FC - Food Coefficient [].
nrun : Clipper
NRUN - Natural-Resource Usage Normal
[natural resource units/person/year].
poln : Clipper
POLN - Pollution Normal [pollution units/person/year].
"""
def __init__(self, year_min=1900, year_max=2100, dt=0.2):
"""
__init__ of class World2.
Parameters
----------
year_min : int, optional
starting year of the simulation. The default is 1900.
year_max : int, optional
end year of the simulation. The default is 2100.
dt : float, optional
time step of the numerical integration [year]. The default is 0.2.
"""
self.year_min = year_min
self.year_max = year_max
self.dt = dt
self.time = np.arange(self.year_min, self.year_max + self.dt, self.dt)
self.n = self.time.size
def set_state_variables(self, la=135e6, pdn=26.5, ciafn=0.3, ecirn=1,
ciaft=15, pols=3.6e9, fn=1, qls=1):
"""
Sets constant variables and initializes model vectors.
Parameters
----------
la : float, optional
LA - Land Area [square kilometers]. The default is 135e6.
pdn : float, optional
PDN - Population Density Normal [people/square kilometer]. The
default is 26.5.
ciafn : float, optional
CIAFN - Capital Investment Ratio in Agriculture Fraction Normal [].
The default is 0.3.
ecirn : float, optional
ECIRN - Effective-Capital-Investment Ratio Normal
[capital units/person]. The default is 1.
ciaft : float, optional
CIAFT - Capital-Investment-in-Agriculture Fraction Adjustment Time
[years]. The default is 15.
pols : float, optional
POLS - Pollution Standard [pollution units]. The default is 3.6e9.
fn : float, optional
FN - Food Normal [food units/person/year]. The default is 1.
qls : float, optional
QLS - Quality-of-Life Standard [satisfaction units]. The default
is 1.
"""
# Variables & constants related to Population
self.p = np.zeros((self.n,))
self.br = np.zeros((self.n,))
self.dr = np.zeros((self.n,))
self.cr = np.zeros((self.n,))
self.la = la
self.pdn = pdn
# Variables & constants related to Natural Resources
self.nr = np.zeros((self.n,))
self.nrur = np.zeros((self.n,))
self.nrfr = np.zeros((self.n,))
# Variables & constants related to Capital investment
self.ci = np.zeros((self.n,))
self.cir = np.zeros((self.n,))
self.cig = np.zeros((self.n,))
self.cid = np.zeros((self.n,))
self.cira = np.zeros((self.n,))
self.ciafn = ciafn
self.msl = np.zeros((self.n,))
self.ecir = np.zeros((self.n,))
self.ecirn = ecirn
# Variables & constants related to Agriculture & Food
self.ciaf = np.zeros((self.n,))
self.ciaft = ciaft
self.fr = np.zeros((self.n,))
self.fn = fn
# Variables & constants related to Pollution
self.pol = np.zeros((self.n,))
self.polr = np.zeros((self.n,))
self.polg = np.zeros((self.n,))
self.pola = np.zeros((self.n,))
self.pols = pols
# Variables & constants related to Quality of Life
self.ql = np.zeros((self.n,))
self.qls = qls
def set_initial_state(self, pi=1.65e9, nri=900e9,
cii=0.4e9, poli=0.2e9, ciafi=0.2):
"""
Sets initial conditions of the state variables.
Parameters
----------
pi : float, optional
PI - Population, Initial [people]. The default is 1.65e9.
nri : float, optional
NRI - Natural Resources, Initial [natural resources units]. The
default is 900e9.
cii : float, optional
CII - Capital Investment, Initial [capital units]. The default is
0.4e9.
poli : float, optional
POLI - Pollution, Initial [pollution units]. The default is 0.2e9.
ciafi : float, optional
CIAFI - Capital-Investment-in-Agriculture Fraction, Initial []. The
default is 0.2.
"""
self.pi = pi
self.nri = nri
self.cii = cii
self.poli = poli
self.ciafi = ciafi
def set_switch_functions(self, json_file=None):
"""
Sets all time-dependent variables switched at some threshold year.
These variables are useful for simulating control policies.
Parameters
----------
json_file : str, optional
path to a json configuration file, keeping the same structure as
"functions_switch_default.json" in the pyworld2 library. If None, the
default json file is loaded.
"""
if json_file is None:
json_file = "functions_switch_default.json"
json_file = os.path.join(os.path.dirname(__file__), json_file)
with open(json_file) as fjson:
tables = json.load(fjson)
func_names = ["BRN", "DRN", "CIDN", "CIGN", "FC", "NRUN", "POLN"]
for func_name in func_names:
for table in tables:
if func_name in table:
func = Clipper(table[func_name], table[f"{func_name}1"],
table["trigger.value"])
setattr(self, func_name.lower(), func)
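# Illustrative sketch (added for clarity; the numbers are placeholders, not the
# shipped defaults) of the entry structure this method expects in the json
# file, inferred from the parsing code above:
#
#   [
#     {"BRN": 0.04, "BRN1": 0.04, "trigger.value": 1970},
#     {"NRUN": 1.0, "NRUN1": 1.0, "trigger.value": 1970},
#     ...
#   ]
#
# The resulting Clipper switches from one of the two values to the other at the
# trigger year (see utils.Clipper for the exact order of the two values).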
def set_table_functions(self, json_file=None):
"""
Sets all variables dependent on non-linear functions. Output values are
a linear interpolation of tables.
Parameters
----------
json_file : str, optional
path to a json configuration file, keeping the same structure as
"functions_table_default.json" in the pyworld2 library. If None, the
default json file is loaded.
"""
if json_file is None:
json_file = "functions_table_default.json"
json_file = os.path.join(os.path.dirname(__file__), json_file)
with open(json_file) as fjson:
tables = json.load(fjson)
func_names = ["BRCM", "BRFM", "BRMM", "BRPM",
"DRCM", "DRFM", "DRMM", "DRPM",
"CFIFR", "CIM", "CIQR", "FCM", "FPCI", "FPM",
"NREM", "NRMM", "POLAT", "POLCM", "POLR",
"QLC", "QLF", "QLM", "QLP"]
for func_name in func_names:
for table in tables:
if table["y.name"] == func_name:
func = interp1d(table["x.values"], table["y.values"],
bounds_error=False,
fill_value=(table["y.values"][0],
table["y.values"][-1]))
setattr(self, func_name.lower(), func)
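# Illustrative sketch (added for clarity; the values are placeholders) of the
# entry structure this method expects in the json file, inferred from the
# parsing code above. Each table is linearly interpolated and clamped to its
# end values outside the tabulated range:
#
#   [
#     {"y.name": "BRCM",
#      "x.values": [0.0, 1.0, 2.0, 3.0, 4.0, 5.0],
#      "y.values": [1.05, 1.0, 0.9, 0.7, 0.6, 0.55]},
#     ...
#   ]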
def set_all_standard(self):
"""
Helper to set everything for a standard run.
"""
self.set_state_variables()
self.set_initial_state()
self.set_table_functions()
self.set_switch_functions()
def run(self):
"""
Runs the simulation.
"""
self.step_init()
for k in range(1, self.n):
self.step(k)
def step_init(self):
"""
Runs the simulation at the first time step.
"""
# initialize population
self.p[0] = self.pi
self.br[0] = np.nan
self.dr[0] = np.nan
# initialize natural resources
self.nr[0] = self.nri
self.nrfr[0] = self.nri / self.nri
# initialize capital investment
self.ci[0] = self.cii
self.cr[0] = self.pi / (self.la * self.pdn)
self.cir[0] = self.cii / self.pi
# initialize pollution
self.pol[0] = self.poli
self.polg[0] = (self.pi * self.poln(self.time[0]) *
self.polcm(self.cir[0]))
self.polr[0] = self.poli / self.pols
self.pola[0] = self.poli / self.polat(self.polr[0])
# initialize capital investment in agriculture fraction
self.ciaf[0] = self.ciafi
self.cid[0] = np.nan
self.cig[0] = np.nan
# initialize other intermediary variables
self.cira[0] = self.cir[0] * self.ciafi / self.ciafn
self.fr[0] = (self.fpci(self.cira[0]) * self.fcm(self.cr[0]) *
self.fpm(self.polr[0]) * self.fc(self.time[0])) / self.fn
self.ecir[0] = (self.cir[0] * (1 - self.ciaf[0]) *
self.nrem(self.nrfr[0])) / (1 - self.ciafn)
self.msl[0] = self.ecir[0] / self.ecirn
self.ql[0] = np.nan
def step(self, k):
"""
Runs the simulation at the k-th time step.
"""
j = k - 1
# update population state variable
self.br[k] = (self.p[j] * self.brn(self.time[j]) *
self.brmm(self.msl[j]) * self.brcm(self.cr[j]) *
self.brfm(self.fr[j]) * self.brpm(self.polr[j]))
self.dr[k] = (self.p[j] * self.drn(self.time[j]) *
self.drmm(self.msl[j]) * self.drpm(self.polr[j]) *
self.drfm(self.fr[j]) * self.drcm(self.cr[j]))
self.p[k] = self.p[j] + (self.br[k] - self.dr[k]) * self.dt
# update natural resources state variable
self.nrur[k] = (self.p[j] * self.nrun(self.time[j]) *
self.nrmm(self.msl[j]))
self.nr[k] = self.nr[j] - self.nrur[k] * self.dt
self.nrfr[k] = self.nr[k] / self.nri
# update capital investment state variable
self.cid[k] = self.ci[j] * self.cidn(self.time[j])
self.cig[k] = (self.p[j] * self.cim(self.msl[j]) *
self.cign(self.time[j]))
# (24):
self.ci[k] = self.ci[j] + self.dt * (self.cig[k] - self.cid[k])
self.cr[k] = self.p[k] / (self.la * self.pdn)
self.cir[k] = self.ci[k] / self.p[k]
# update pollution state variable
self.polg[k] = (self.p[j] * self.poln(self.time[j]) *
self.polcm(self.cir[j]))
self.pola[k] = self.pol[j] / self.polat(self.polr[j])
self.pol[k] = self.pol[j] + (self.polg[k] - self.pola[k]) * self.dt
self.polr[k] = self.pol[k] / self.pols
# update capital investment in agriculture fraction state variable
self.ciaf[k] = (self.ciaf[j] +
(self.cfifr(self.fr[j]) *
self.ciqr(self.qlm(self.msl[j]) /
self.qlf(self.fr[j])) -
self.ciaf[j]) *
(self.dt / self.ciaft))
# update other intermediary variables
self.cira[k] = self.cir[k] * self.ciaf[k] / self.ciafn
self.fr[k] = (self.fcm(self.cr[k]) *
self.fpci(self.cira[k]) *
self.fpm(self.polr[k]) *
self.fc(self.time[k])) / self.fn
self.ecir[k] = (self.cir[k] *
(1 - self.ciaf[k]) *
self.nrem(self.nrfr[k])) / (1 - self.ciafn)
self.msl[k] = self.ecir[k] / self.ecirn
self.ql[k] = (self.qls * self.qlm(self.msl[k]) *
self.qlc(self.cr[k]) * self.qlf(self.fr[k]) *
self.qlp(self.polr[k]))
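# Note added for clarity: every state update in step() follows the same
# explicit (forward) Euler scheme, e.g. for population
#   P(k) = P(k-1) + dt * (BR(k) - DR(k))
# and analogously for NR, CI, POL and CIAF above.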
def hello_world2():
"""
This example runs and plots the 2 scenarios from the book World Dynamics
by Jay W. Forrester:
- standard run (Business as usual)
- reduced usage of Natural Resources.
"""
# scenario: standard run
w2_std = World2()
w2_std.set_state_variables()
w2_std.set_initial_state()
w2_std.set_table_functions()
w2_std.set_switch_functions()
w2_std.run()
# scenario: Reduced Usage of Natural Resources
w2_nr = World2()
w2_nr.set_state_variables()
w2_nr.set_initial_state()
w2_nr.set_table_functions()
fname_nr = "./functions_switch_scenario_nr.json"
json_file = os.path.join(os.path.dirname(__file__), fname_nr)
w2_nr.set_switch_functions(json_file)
w2_nr.run()
# plotting
title_std = "World2 scenario - standard run"
plot_world_state(w2_std, title=title_std)
title_nr = "World2 scenario - reduced usage of Natural Resources"
plot_world_state(w2_nr, title=title_nr)
if __name__ == "__main__":
hello_world2()
| python |
# coding:utf-8
#!/usr/bin/python
#
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
# -------------------------------------------------------------------------
# DCCsi\\Tools\\DCC\\Maya\\constants.py
"""DccScriptingInterface (DCCsi)
This module contains constants for the O3DE Maya DCCsi interface
"""
| python |
#
# Name: CountBookmarks.py
#
# Purpose: To count the bookmarks in each folder and subfolder of a bookmarks file exported by a web browser. The output file that
# this program generates can be imported into a spreadsheet and sorted to show the relative size of all your bookmark folders.
#
# Inputs: This program requires a command line argument specifying the fully qualified name of a bookmarks file in HTML format and, optionally,
# a command line argument (-d) indicating that debugging output is to be included in the log file.
#
# Outputs: For each folder of the bookmarks file, the folder's name, the number of bookmarks local to that folder, and the total number
# of bookmarks in that folder and all of its subfolders are written to file CountBookmarks.csv, in the current working directory. To allow
# for commas in bookmark folder names, this output file uses semicolons for field separators instead of commas. Select semicolon as the
# field separator when importing this file into a spreadsheet. This program also generates a log file, CountBookmarks.log, in the current
# working directory.
#
# Command Syntax: python CountBookmarks.py [-d] File
# Command Options: -d: Include debugging output in the log file.
# Command Example: python CountBookmarks.py "/home/yourname/Downloads/your bookmarks.html"
#
# Compatible Browsers: This program is compatible with the Google Chrome, Mozilla Firefox, Microsoft Edge, and Microsoft Internet Explorer browsers.
# It may also work with non-Google, Chromium-based browsers and Apple Safari.
#
# Development and Test Environments:
#
# Browsers (Version)
# - Google Chrome (80.0.3987.132 (Official Build) (64-bit))
# - Mozilla Firefox (74.0 (64-bit))
# - Microsoft Edge (44.18362.449.0)
# - Microsoft Internet Explorer (11.719.18362.0)
#
# Operating Systems (Version)
# - Windows 10 Home (1909)
# - Ubuntu Linux (18.04 LTS)
#
# Programming Languages (Version)
# - Python (3.8.2)
# - Python (3.6.9)
#
# Python Environment on Ubuntu Linux
# - See https://www.digitalocean.com/community/tutorials/how-to-install-python-3-and-set-up-a-local-programming-environment-on-ubuntu-18-04
#
# Updated: 04-07-20
#
#
# Copyright 2020 David Boyd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Process Overview: This program looks for DT (definition term) start tags that are immediately followed by H3 tags (for folder names) or A tags (for bookmark links),
# and for DL (definition list) start and end tags which represent, potentially nested, bookmark folders.
#
# ------------------------------------------------------------------------------------------- IMPORTS
import sys
import logging
import functools
from html.parser import HTMLParser
from collections import deque
# ------------------------------------------------------------------------------------------- GLOBAL DECLARATIONS
tag_stack = deque()
folder_name_stack = deque()
local_bookmarks_count_stack = deque()
offspring_bookmarks_count_stack = deque()
nesting_level_counter = -1 # nesting level 0 is the top level of the folder name hierarchy (i.e. while parsing within a highest level DL tag, the nesting level should be 0)
localandchild_bookmarks_counter = 0
log_file = "CountBookmarks.log"
output_file = "CountBookmarks.csv"
output_buffer = "Folder Name;Local Bookmarks;Total Bookmarks\n"
print = functools.partial(print, flush=True) # suppress print buffering
logging.basicConfig(filemode="w", filename=log_file, format="%(asctime)s %(message)s", datefmt="%m/%d/%Y %I:%M:%S %p")
logger = logging.getLogger('__name__')
# ------------------------------------------------------------------------------------------- FUNCTION
def BuildBookmarkFolderName(NestingLevel):
global folder_name_stack
temp_folder_name_stack = deque()
logger.debug("BuildBookmarkFolderName: NestingLevel: " + str(NestingLevel))
logger.debug("BuildBookmarkFolderName: folder_name_stack: " + str(folder_name_stack))
foldername = ""
# assemble the hierarchical folder name
i = 0
while i <= NestingLevel:
if len(folder_name_stack) <= 0: # the folder name stack is empty
print("Critical error. See file \"", log_file, "\" for more information.", sep="") # padding suppressed
logger.critical("BuildBookmarkFolderName: Critical error: len(folder_name_stack) <= 0")
print("Exiting CountBookmarks...")
logger.critical("BuildBookmarkFolderName: Exiting CountBookmarks to avoid popping an empty folder_name_stack...")
raise SystemExit() # abort this program
temp_foldername = folder_name_stack.pop()
foldername = temp_foldername + foldername
temp_folder_name_stack.append(temp_foldername)
i += 1
# restore folder_name_stack
i = 0
while i <= NestingLevel:
if len(temp_folder_name_stack) <= 0: # the temp_folder_name_stack is empty
print("Critical error. See file \"", log_file, "\" for more information.", sep="") # padding suppressed
logger.critical("BuildBookmarkFolderName: Critical error: len(temp_folder_name_stack) <= 0")
print("Exiting CountBookmarks...")
logger.critical("BuildBookmarkFolderName: Exiting CountBookmarks to avoid popping an empty temp_folder_name_stack...")
raise SystemExit() # abort this program
folder_name_stack.append(temp_folder_name_stack.pop())
i += 1
return foldername
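# Illustrative example (added for clarity): with
# folder_name_stack == deque(["/Bookmarks bar", "/Dev"]) and NestingLevel == 1,
# BuildBookmarkFolderName returns "/Bookmarks bar/Dev" and leaves the stack
# unchanged.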
# ------------------------------------------------------------------------------------------- BEGIN CLASS BookmarksHTMLParser
class BookmarksHTMLParser(HTMLParser):
# ------------------------------------------------------------------------------------------- METHOD OF CLASS BookmarksHTMLParser
def handle_starttag(self, tag, attrs):
global tag_stack
global offspring_bookmarks_count_stack
global nesting_level_counter
global local_bookmarks_count_stack
logger.debug("handle_starttag: Encountered a start tag: " + tag)
if tag == "meta": # no append/push
logger.debug("handle_starttag: tag == meta")
elif tag == "title":
logger.debug("handle_starttag: tag == title")
tag_stack.append(tag)
elif tag == "dl": # begin new folder
logger.debug("handle_starttag: tag == dl")
nesting_level_counter += 1
logger.debug("handle_starttag: updated nesting_level_counter: " + str(nesting_level_counter))
tag_stack.append(tag)
local_bookmarks_count_stack.append(0) # create and initialize the local bookmarks counter for the current folder
offspring_bookmarks_count_stack.append(0) # create and initialize the offspring bookmarks counter for the current folder
logger.debug("handle_starttag: offspring_bookmarks_count_stack: " + str(offspring_bookmarks_count_stack))
elif tag == "dt":
logger.debug("handle_starttag: tag == dt")
elif tag == "p": # no append/push
logger.debug("handle_starttag: tag == p")
elif tag == "h1":
logger.debug("handle_starttag: tag == h1")
tag_stack.append(tag)
elif tag == "h3":
logger.debug("handle_starttag: tag == h3")
tag_stack.append(tag)
elif tag == "a": # begin bookmark/link
logger.debug("handle_starttag: tag == a")
local_bookmarks_count_stack[-1] += 1 # d[-1] is the top element of deque d
tag_stack.append(tag)
else: # parser encountered unexpected tag, so don't append/push
logger.debug("handle_starttag: unexpected tag: " + tag)
# ------------------------------------------------------------------------------------------- METHOD OF CLASS BookmarksHTMLParser
def handle_endtag(self, tag):
global folder_name_stack
global tag_stack
global offspring_bookmarks_count_stack
global nesting_level_counter
global local_bookmarks_count_stack
global localandchild_bookmarks_counter
global output_buffer
logger.debug("handle_endtag: Encountered an end tag: " + tag)
if tag == "title":
logger.debug("handle_endtag: tag == title")
elif tag == "h1":
logger.debug("handle_endtag: tag == h1")
elif tag == "dl": # end of folder
logger.debug("handle_endtag: tag == dl")
logger.debug("handle_endtag: updated nesting_level_counter before decrementing it: " + str(nesting_level_counter))
logger.debug("handle_endtag: folder_name_stack before popping top element off of it: " + str(folder_name_stack))
current_folder_name = BuildBookmarkFolderName(nesting_level_counter)
logger.debug("handle_endtag: folder " + current_folder_name + " has " + str(local_bookmarks_count_stack[-1]) + " local bookmarks") # d[-1] is the top element of deque d
# Note 1: len(offspring_bookmarks_count_stack) will be 1 less than len(folder_name_stack), because while lowest level folders have a name, they, by definition, have no offspring.
# Note 2: Bookmarks are encountered and counted from the lowest level folders toward their ancestor folders.
# Note 3: Each offspring folder needs to add its total (local + offspring) bookmark count to the offspring bookmark count of its parent.
# The running bookmark count for its parent will be on top of the offspring_bookmarks_count_stack.
logger.debug("handle_endtag: offspring_bookmarks_count_stack before popping it: " + str(offspring_bookmarks_count_stack))
if len(offspring_bookmarks_count_stack) <= 0: # the offspring bookmarks count stack is empty
print("Critical error. See file \"", log_file, "\" for more information.", sep="") # padding suppressed
logger.critical("handle_endtag: Critical error: tag == dl and len(offspring_bookmarks_count_stack) <= 0")
print("Exiting CountBookmarks...")
logger.critical("handle_endtag: Exiting CountBookmarks to avoid popping an empty offspring_bookmarks_count_stack...")
raise SystemExit() # abort this program
offspring_bookmarks_count = offspring_bookmarks_count_stack.pop()
logger.debug("handle_endtag: offspring_bookmarks_count_stack after popping it: " + str(offspring_bookmarks_count_stack))
logger.debug("handle_endtag: offspring_bookmarks_count: " + str(offspring_bookmarks_count))
localandchild_bookmarks_counter = offspring_bookmarks_count + local_bookmarks_count_stack[-1] # TOS value + local_bookmarks_counter
logger.debug("handle_endtag: folder " + current_folder_name + " has " + str(localandchild_bookmarks_counter) + " total bookmarks (local + offspring)")
if len(folder_name_stack) <= 0: # the folder name stack is empty
print("Critical error. See file \"", log_file, "\" for more information.", sep="") # padding suppressed
logger.critical("handle_endtag: Critical error: tag == dl and len(folder_name_stack) <= 0")
print("Exiting CountBookmarks...")
logger.critical("handle_endtag: Exiting CountBookmarks to avoid popping an empty folder_name_stack...")
raise SystemExit() # abort this program
folder_name_stack.pop()
logger.debug("handle_endtag: folder_name_stack after popping top element off of it: " + str(folder_name_stack))
nesting_level_counter -= 1
logger.debug("handle_endtag: updated nesting_level_counter after decrementing it: " + str(nesting_level_counter))
if nesting_level_counter > -1: # nesting level 0 is the top level of the folder name hierarchy (i.e. while parsing within a highest level DL tag, the nesting level should be 0)
if len(offspring_bookmarks_count_stack) <= 0: # the offspring bookmarks count stack is empty
print("Critical error. See file \"", log_file, "\" for more information.", sep="") # padding suppressed
logger.critical("handle_endtag: Critical error: nesting_level_counter > -1 and len(offspring_bookmarks_count_stack) <= 0")
print("Exiting CountBookmarks...")
logger.critical("handle_endtag: Exiting CountBookmarks to avoid popping an empty offspring_bookmarks_count_stack...")
raise SystemExit() # abort this program
offspring_bookmarks_count_stack.append(offspring_bookmarks_count_stack.pop() + localandchild_bookmarks_counter) # propagate this folder's bookmarks total up to the level of its parent folder
logger.debug("handle_endtag: offspring_bookmarks_count_stack after propagating this folder's bookmarks total up to the level of its parent folder: " + str(offspring_bookmarks_count_stack))
logger.debug("handle_endtag: folder " + current_folder_name + " has " + str(local_bookmarks_count_stack[-1]) + " local bookmarks and " + str(localandchild_bookmarks_counter) + " total bookmarks (local + offspring)")
output_buffer = output_buffer + current_folder_name + ";" + str(local_bookmarks_count_stack[-1]) + ";" + str(localandchild_bookmarks_counter) + "\n" # add next line to buffer string for output file
if len(local_bookmarks_count_stack) <= 0: # the local bookmarks count stack is empty
print("Critical error. See file \"", log_file, "\" for more information.", sep="") # padding suppressed
logger.critical("handle_endtag: Critical error: len(local_bookmarks_count_stack) <= 0")
print("Exiting CountBookmarks...")
logger.critical("handle_endtag: Exiting CountBookmarks to avoid popping an empty local_bookmarks_count_stack...")
raise SystemExit() # abort this program
local_bookmarks_count_stack.pop()
localandchild_bookmarks_counter = 0 # reset counter
print(".", end="") # progress indicator, newline suppressed
elif tag == "h3":
logger.debug("handle_endtag: tag == h3")
elif tag == "a":
logger.debug("handle_endtag: tag == a")
else:
logger.debug("handle_endtag: unexpected tag: " + tag)
if len(tag_stack) <= 0: # the tag stack is empty
print("Critical error. See file \"", log_file, "\" for more information.", sep="") # padding suppressed
logger.critical("handle_endtag: Critical error: len(tag_stack) <= 0")
print("Exiting CountBookmarks...")
logger.critical("handle_endtag: Exiting CountBookmarks to avoid popping an empty tag_stack...")
raise SystemExit() # abort this program
tag_stack.pop()
# ------------------------------------------------------------------------------------------- METHOD OF CLASS BookmarksHTMLParser
def handle_data(self, data):
global tag_stack
global folder_name_stack
global nesting_level_counter
logger.debug("handle_data: nesting_level_counter: " + str(nesting_level_counter))
logger.debug("handle_data: Encountered some data: " + data)
logger.debug("handle_data: tag_stack: " + str(tag_stack))
if len(tag_stack) == 0:
logger.debug("handle_data: tag_stack is empty")
if len(tag_stack) > 0 and (tag_stack[-1] == "h1" or tag_stack[-1] == "h3"): # d[-1] is the top element of deque d
if data[0] != "\n":
folder_name_stack.append("/" + data)
logger.debug("handle_data: current foldername: " + BuildBookmarkFolderName(nesting_level_counter))
logger.debug("handle_data: folder_name_stack: " + str(folder_name_stack))
# ------------------------------------------------------------------------------------------- END CLASS BookmarksHTMLParser
# ------------------------------------------------------------------------------------------- MAIN
# sys.argv[0]: CountBookmarks.py
# sys.argv[1]: -d or filename
# sys.argv[2]: <NULL> or filename
logger.setLevel(logging.INFO)
logger.info("main: The command line arguments to the Python interpreter are: " + str(sys.argv))
numPythonArgs = len(sys.argv)
numProgramArgs = numPythonArgs - 1 # number of arguments to CountBookmarks
logger.info("main: The number of command line arguments to CountBookmarks is: " + str(numProgramArgs))
if numProgramArgs == 0 or numProgramArgs > 2:
print("Invalid command. The correct command syntax is: python CountBookmarks.py [-d] File")
logger.critical("main: Invalid command. The correct command syntax is: python CountBookmarks.py [-d] File")
print("Exiting CountBookmarks...")
logger.critical("main: Exiting CountBookmarks...")
raise SystemExit() # abort this program
if numProgramArgs == 1:
bookmarks_file = sys.argv[1]
if numProgramArgs == 2:
if sys.argv[1] == "-d":
logger.setLevel(logging.DEBUG)
logger.debug("main: sys.argv[1] == " + str(sys.argv[1]))
bookmarks_file = sys.argv[2]
else:
print(sys.argv[1], " is an invalid command option.", sep="") # padding suppressed
logger.critical("main: " + str(sys.argv[1]) + " is an invalid command option.")
print("The correct command syntax is: python CountBookmarks.py [-d] File")
logger.critical("main: The correct command syntax is: python CountBookmarks.py [-d] File")
print("Exiting CountBookmarks...")
logger.critical("main: Exiting CountBookmarks...")
raise SystemExit() # abort this program
logger.debug("main: bookmarks_file name just before opening and reading it: " + str(bookmarks_file))
with open(bookmarks_file) as fin: # open the bookmarks file
read_data = fin.read() # read the bookmarks file
fin.closed
print("Counting the bookmarks in file \"", bookmarks_file, "\"", sep="", end="") # padding and newline suppressed
logger.info("main: Counting the bookmarks in file \"" + str(bookmarks_file) + "\"")
parser = BookmarksHTMLParser()
parser.feed(read_data) # parse the bookmarks file and count its bookmarks
logger.debug("main: tag_stack after parsing file: " + str(tag_stack))
logger.debug("main: folder_name_stack after parsing file: " + str(folder_name_stack))
logger.debug("main: local_bookmarks_count_stack after parsing file: " + str(local_bookmarks_count_stack))
logger.debug("main: offspring_bookmarks_count_stack after parsing file: " + str(offspring_bookmarks_count_stack))
print("\nWriting the results to file \"", output_file, "\"...", sep="") # padding suppressed
logger.info("main: Writing the results to file \"" + output_file + "\"...")
with open(output_file, "w") as fout:
fout.write(output_buffer) # write the results to output_file
fout.closed
print("The bookmarks in file \"", bookmarks_file, "\" were successfully counted.", sep="") # padding suppressed
logger.info("main: The bookmarks in file \"" + str(bookmarks_file) + "\" were successfully counted.")
print("The results may be found in file \"", output_file, "\", and a log may be found in file \"", log_file, "\", in the working directory.", sep="") # padding suppressed
logger.info("main: The results may be found in file \"" + output_file + "\", and a log may be found in file \"" + log_file + "\", in the working directory.")
| python |
# -*- coding:utf8 -*-
# File : neural_stype_opr.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 2/27/17
#
# This file is part of TensorArtist.
import numpy as np
from tartist.nn import opr as O
def get_content_loss(p, x):
c = p.shape[3]
n = p.shape[1] * p.shape[2]
loss = (1. / (2. * n ** 0.5 * c ** 0.5)) * ((x - p) ** 2.).sum()
return O.as_varnode(loss)
def get_style_loss(a, x):
c = a.shape[3]
n = x.shape[1] * x.shape[2]
a = a.reshape(-1, c)
x = x.reshape(-1, c)
ga = np.dot(a.T, a)
gx = O.matmul(x.dimshuffle(1, 0), x)
a = 1. / ((4. * a.shape[0] * c ** 2.) * O.cast(c, 'float32'))
loss = a * O.reduce_sum((gx - ga) ** 2)
return O.as_varnode(loss)
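# Illustrative NumPy-only sketch (not part of the original module): the style
# loss above matches Gram matrices of flattened NHWC feature maps. The shape
# below is hypothetical and the __main__ guard keeps imports side-effect free.
if __name__ == '__main__':
    feat = np.random.rand(1, 8, 8, 16).astype('float32')  # NHWC feature map
    flat = feat.reshape(-1, feat.shape[3])                 # (H*W, C)
    gram = np.dot(flat.T, flat)                            # (C, C) Gram matrix
    print('gram matrix shape:', gram.shape)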
| python |
# Simulate an ATM with bills of 50, 20, 10 and 1
# Banco CEV
# Ask for the amount the user wants to withdraw
# Report the total of {} bills of 50, {} bills of 20, {} bills of 10 and {} bills of 1
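# Worked example (added for illustration): for a withdrawal of 186 the loop
# below hands out 3 bills of 50 (=150), 1 bill of 20 (=170), 1 bill of 10 (=180)
# and 6 bills of 1 (=186).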
print('='*20)
print('Banco cev')
print('='*20)
valor = int(input('Quanto você quer sacar ?'))
total = valor
céd = 50
totcéd = 0
while True :
if total >= céd :
total -= céd
totcéd += 1
else :
print(f'O total de cédulas de {céd} foi de {totcéd}')
if céd == 50 :
céd = 20
elif céd == 20 :
céd = 10
elif céd == 10 :
céd = 1
totcéd = 0
if total == 0 :
break
| python |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.11.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x23\x70\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x83\x00\x00\x00\x83\x08\x06\x00\x00\x00\xae\x9d\xa8\x66\
\x00\x00\x20\x00\x49\x44\x41\x54\x78\x5e\xed\x7d\x09\x74\x54\xf5\
\xf5\xff\x7d\xdb\xcc\x64\x0f\x21\x2c\x81\x80\x6c\x82\x84\x45\x44\
\x7e\xa0\xf2\xa7\xc2\xaf\xda\xaa\x45\x39\x3d\x2c\x2d\x56\x29\xd8\
\xda\x5a\x0b\x15\x6d\xad\xc7\x43\xa5\x54\xeb\xf1\xd0\x4d\x2c\x94\
\xda\x4d\x29\x8a\xb6\x2e\xa7\x05\x29\xda\x62\x7f\xe0\x02\x0a\xa5\
\x80\xac\x21\xb2\x05\x92\xb0\x05\xb2\x67\x66\xde\x76\xff\xe7\x7e\
\xdf\xbc\xc9\x24\x99\x99\xb7\xcc\x9b\x84\x48\xde\xf9\xe5\xf0\xab\
\xf3\x7d\xdf\xe5\x7e\x3f\xef\xde\xfb\xbd\xdf\xbb\x70\xd0\xfd\x74\
\x53\x20\x42\x01\xae\x9b\x12\xdd\x14\x30\x29\xd0\x0d\x86\x6e\x2c\
\x44\x29\xd0\x0d\x86\x6e\x30\x74\x83\xa1\x1b\x03\xed\x29\xd0\xcd\
\x19\xba\x51\xd1\xcd\x19\xba\x31\xd0\x01\x9c\x61\xdb\xb6\x6d\x38\
\x79\xf2\xe4\x6e\x5a\x7b\x40\x81\x5f\xfe\xf2\x97\xf0\xfd\xef\x7f\
\xbf\xc3\xb8\xb7\xe7\x03\x75\x83\xc1\x03\x14\x44\xba\xf8\xc5\x2f\
\x7e\x01\x3f\xf8\xc1\x0f\x3c\xdf\xa3\x44\x33\xf4\x7c\xa0\x6e\x30\
\x74\x83\x21\x4a\x81\x6e\x30\x74\x83\x21\x4a\x81\xd2\xd2\x52\xbc\
\xe6\x9a\x6b\xbc\xa3\xc8\x15\xdc\x53\x97\x17\x13\xb4\x77\x3f\xfd\
\xe9\x4f\xf1\x47\x3f\xfa\xd1\x15\xbc\x8d\xde\x2c\xfd\x33\x01\x06\
\x22\xc5\xc4\x89\x13\x71\xe7\xce\x9d\xde\x50\xe5\x0a\xed\xe5\x33\
\x03\x86\x0f\x3e\xf8\x00\xa7\x4c\x99\x72\x85\x6e\xa3\x37\xcb\xee\
\xb2\x60\xd0\x65\x05\xb1\x29\x08\x9c\x20\x00\x97\xe1\x07\x4e\x12\
\xb9\x47\x1f\x7d\x14\x7f\xfe\xf3\x9f\x7b\x42\x19\xb2\x5d\x3c\xf0\
\xc0\x03\x9e\xf4\x15\xdb\x09\xcf\xf3\x40\x7f\x1c\xc7\xb1\x7f\xad\
\xfe\x14\x45\x81\xda\xda\x5a\xa8\xa9\xa9\x61\xff\x7e\xf0\xc1\x07\
\xb0\x79\xf3\x66\xd0\x75\xdd\xf3\xb9\x75\x49\x30\xa8\x67\x2e\xa2\
\x7e\xf6\x22\x48\xd9\x59\xa0\xe7\x66\x00\x97\x93\x09\x9c\x4f\x02\
\x4e\x14\xb8\x11\x23\x46\xe0\x91\x23\x47\x3c\x21\xd4\xd6\xad\x5b\
\x61\xea\xd4\xa9\x9e\x1f\x87\x53\x9d\x5c\x28\x14\x42\x02\xc4\x73\
\xcf\x3d\x07\xef\xbe\xfb\x6e\xaa\xdd\x45\xdf\xef\x52\x60\xd0\x9b\
\x43\xa8\x94\x96\x83\x54\xd3\x04\xba\xc0\x81\x96\xe9\x07\xbe\x67\
\x2e\xf0\x3d\x72\x81\xcb\x0a\x00\xef\xf7\x71\xff\xf8\xc7\x3f\xf0\
\x4b\x5f\xfa\x92\x27\x04\x12\x04\x01\x34\x4d\x6b\x07\x06\x54\x55\
\x44\x4d\x07\xd0\x11\x00\xe9\xcf\x93\xe1\x12\x77\x62\xce\x80\xe7\
\x01\x04\x1e\x78\x49\x8c\xce\x69\xd3\xa6\x4d\xf8\xd4\x53\x4f\xc1\
\x47\x1f\x7d\x94\xf2\x24\xba\x0c\x18\xb4\xda\x06\xd4\xf7\x1d\x07\
\x51\xd6\x20\x14\x0e\x01\xf2\x1c\xf0\x24\x1e\xf2\xb3\x41\x28\xc8\
\x63\xff\xb2\xff\x2d\x89\xdc\xb7\xbf\xfd\x6d\xfc\xdd\xef\x7e\x97\
\x32\x71\xa8\x83\x6f\x7d\xeb\x5b\xf0\xfb\xdf\xff\x3e\x4a\x7c\x06\
\x04\x59\x05\x0c\xc9\x80\xb2\x02\x40\xa0\x20\x40\xa4\xf3\xe1\x38\
\x00\x9e\x63\x22\x11\x88\x03\xfa\x44\x00\x51\x00\xde\x27\xb1\x79\
\x85\xc3\x61\x9c\x3d\x7b\x36\x6c\xd8\xb0\x21\xa5\x59\x74\x09\x30\
\x28\xa5\xe5\x08\xe5\xe7\x80\xd3\x11\x64\x55\x31\x16\xcc\x71\xb4\
\xf1\x06\x47\x28\xc8\x05\xbe\x67\x1e\xf0\x39\x99\xc0\x67\x06\xb8\
\xc6\xc6\x46\x1c\x36\x6c\x18\x9c\x3d\x7b\x36\x25\xe2\x98\x2f\xef\
\xd8\xb1\x03\x26\x4d\x9a\xc4\x08\xcf\x74\x95\xe6\x10\x60\x7d\x33\
\xe8\x8d\x41\x03\x10\x69\x90\xdf\xed\x26\x4e\x7a\x86\x4f\x34\xf4\
\x23\xfa\x0b\xf8\x0c\xd1\x48\x34\x88\x70\x8a\xf9\xf3\xe7\xe3\x9a\
\x35\x6b\x5c\xaf\xf9\xb2\x07\x83\x7c\xe8\x04\x4a\x55\x35\x10\x0e\
\x87\xda\x2b\x4d\x02\x0f\x9c\x5f\x02\x3e\x37\x0b\xf8\x9e\xf9\xc0\
\x17\xe4\x00\x97\x19\x60\x5f\xcc\x2b\xaf\xbc\x82\x77\xdf\x7d\xb7\
\x6b\xc2\xc4\xbe\x98\x9d\x9d\x0d\x8d\x8d\x8d\x2d\x60\x68\x0a\x82\
\x5e\xd3\x00\x7a\x6d\x23\x90\x12\x8b\x8a\x16\xe1\x0e\xe9\xe2\x10\
\x11\xce\x20\x8a\xc0\x05\x24\x03\x0c\x99\x19\xc0\x65\xd2\xbf\x01\
\x83\x06\x7e\x1f\x9b\xdf\xe4\xc9\x93\x71\xdb\xb6\x6d\xae\xd6\x6d\
\x82\x81\xc4\x31\x71\x3b\x3e\x2b\x23\xad\xfa\x92\xa3\xce\xe5\xc3\
\x27\x51\xaa\xb8\x68\x88\x85\x78\xac\x98\xb8\x83\x28\x18\x00\x28\
\xc8\x31\x00\x91\x9b\xc5\x88\x43\xca\xe4\x57\xbf\xfa\x55\xfc\xcb\
\x5f\xfe\xe2\x8a\x30\x6d\x5f\x7a\xe8\xa1\x87\x48\x61\xe3\x50\x51\
\x91\x44\x84\x5e\xd7\x08\xfa\xa5\x7a\x03\x10\xc1\x30\xa0\x46\x80\
\xa0\xb7\xd2\x01\x08\x0e\x80\xfe\x8f\x74\x06\x5a\x2f\x71\x04\x02\
\x04\x71\x45\x52\x9e\xe9\x2f\xc3\xcf\x00\xb1\x73\xe7\x4e\x9c\x38\
\x71\xa2\xab\x35\xc7\x82\x01\x6b\x1b\x41\xab\x6b\x04\x71\x48\xbf\
\x28\xd0\x5c\x75\x9a\xe4\x25\xdb\x60\x90\x0f\x97\xa3\x58\x71\x81\
\xe4\x61\x7c\x20\x98\x83\x10\xfb\xa4\x2f\x83\x44\x04\x89\x8a\x82\
\x16\x65\xb2\xaa\xaa\x0a\x87\x0c\x19\x02\xa1\x50\xc8\x93\x75\xec\
\xdf\xbf\x1f\xc6\x8c\x19\xc3\xe9\x61\x19\xb1\x29\x04\x7a\x4d\x3d\
\xe8\x17\xeb\x41\xaf\x6f\x6a\xd1\x1f\x3c\x19\x29\x41\x27\xa4\x3b\
\xd0\x07\x20\xf0\x00\x92\x00\x5c\xc0\x6f\x70\xc5\xfc\x6c\xe0\xf2\
\x5a\x74\xa6\x1f\xfe\xf0\x87\xf8\xb3\x9f\xfd\xcc\xf1\x4c\xa2\x60\
\x68\x0a\xa1\x5e\xdb\x00\xc2\xa5\x06\x50\x43\x32\x70\x03\x0a\x41\
\xec\x5b\x68\x7b\xef\xec\x0e\x6c\xab\x43\xa5\xfc\x0c\x8a\x65\x55\
\x89\x39\x42\xec\x68\x44\x20\xd2\xb0\x13\x28\x93\xcf\x3f\xff\x3c\
\x7a\x65\x2f\xe8\xd5\xab\x17\x5c\xb8\x70\x81\x43\x55\x43\xd2\x15\
\x08\x04\x0c\x0c\x35\xf5\x40\xe0\x40\x45\x4d\xbf\x32\x19\xd1\x97\
\x98\x42\x49\x62\x83\xb8\x43\x0f\xe2\x8a\x2d\x3a\x53\x45\x45\x05\
\x16\x17\x17\xdb\xdd\x93\x68\xbb\x56\x60\xa8\xa9\x07\xed\x7c\x0d\
\x08\x0d\x41\xe0\x15\x0d\xb4\x41\x7d\x40\x1a\x56\x6c\x6b\xff\xec\
\x0e\x6c\xd9\x99\xd6\xd0\x8c\xda\xae\x52\xe0\x82\x32\x28\x9a\x6a\
\xaf\xdf\x58\x65\xb2\x07\x29\x93\xb9\x86\xb8\x88\xd8\x1e\xee\xb8\
\xe3\x0e\xdc\xb4\x69\x93\xbd\xbe\x2c\x5a\x3d\xfe\xf8\xe3\xf0\xcc\
\x33\xcf\x30\x71\xa1\x93\x78\x20\x76\x7a\xa9\x8e\xfd\x4b\xff\xbb\
\x43\x4e\x17\xe6\x1c\x93\xe8\x4c\x0b\x17\x2e\xc4\x55\xab\x56\x39\
\x5a\x73\x3c\x30\x10\xd0\xb9\xb0\x0a\x92\x20\x82\x5e\xdc\x13\x84\
\xc1\xfd\x98\x92\xee\xa8\xe3\x44\x8c\x2e\x59\x27\xa4\xa9\x6b\x1f\
\x1f\x04\xa1\x59\x86\x90\x22\x3b\x1b\x8f\x08\xe3\x33\x95\xc9\xd6\
\xb6\x87\xc3\x87\x0f\xe3\xc8\x91\x23\x9d\xf5\x97\xa4\xf5\xa1\x43\
\x87\xa0\xa4\xa4\xa4\xf3\xc4\x85\x39\xb7\xb6\x3a\x53\xa1\xa1\x33\
\xd1\x66\xb9\xb9\xcd\x4d\x04\x06\xd2\x91\x38\x04\x08\xf8\xfd\xa0\
\x48\x02\x08\xd7\x0f\x07\x21\x27\x2b\x65\x40\x24\xed\x40\x3d\x56\
\x89\xdc\xd1\x4a\x08\xcb\x0e\x81\x60\xb2\xce\x24\xe2\x62\xf9\xf2\
\xe5\xf8\xd8\x63\x8f\x79\x02\x88\xab\xae\xba\x0a\xca\xcb\xcb\x3b\
\x5f\x5c\xd0\x6a\x4c\x9d\x89\x40\x40\x60\x88\x39\x51\xcd\x99\x33\
\x07\x5f\x7b\xed\x35\xdb\x6b\x4e\x06\x06\x66\x60\x03\x80\x80\xcf\
\x07\xb2\xc8\x83\x34\xa9\x04\x84\xec\xcc\x94\x00\x91\xf0\x65\xad\
\xa6\x01\x71\xc7\x61\x50\x54\x25\xb9\xc2\x98\x6c\x69\x16\xe2\x22\
\x95\x63\x57\xdb\x61\x9f\x7c\xf2\x49\x58\xba\x74\x69\xe7\x8b\x8b\
\x24\xdc\xe1\xcd\x37\xdf\xc4\x99\x33\x67\x7a\x0a\x06\x03\x10\x7e\
\x50\x7c\x02\x48\x13\x47\xa6\x74\xfc\x4c\x08\x86\xf0\xc7\x07\x50\
\xac\x6d\x86\xb0\x53\xf1\xd0\x76\xa9\x49\xc4\xc5\x8e\x1d\x3b\x70\
\xd2\xa4\x49\xb6\x89\x63\xd5\x90\xee\x40\x46\x8c\x18\xd1\xf9\xe2\
\xc2\xe4\x0e\x79\x11\xee\x40\x7a\x53\x56\x80\x19\xa3\x0a\x0b\x0b\
\xb1\xba\xba\xda\x6a\x29\xec\x77\x3b\x9c\xc1\xec\x88\x00\xa1\xfa\
\x45\x10\x26\x5c\x03\x42\xb6\x3b\x7b\x44\x5c\x30\xa8\xa7\xcf\x21\
\x1c\x3c\x09\x74\x43\x97\xf2\x29\xdd\xe2\x74\xb1\x64\xc9\x12\x7c\
\xfa\xe9\xa7\x6d\x11\xc7\xaa\x11\x79\x58\x95\x96\x96\x76\xbe\xb8\
\x30\xb9\x03\x9d\x2c\x7a\xe6\x81\x50\x98\x0f\x5c\x6e\x26\xf0\x01\
\x3f\x77\xff\xfd\xf7\xe3\x1f\xfe\xf0\x07\xab\xa5\x38\x06\x83\xc9\
\x21\xb4\x0c\x1f\xf0\xe3\xaf\x76\x25\x32\xe2\x82\x21\xb4\x65\x37\
\x4a\x61\x05\xc2\x4a\xc4\xd4\x6c\x6b\xea\x49\x1a\x59\x88\x8b\x51\
\xa3\x46\xe1\xc1\x83\x07\x53\x1d\x85\xbd\x4f\x57\xe6\x8f\x3e\xfa\
\x68\xe7\x8b\x0b\xe2\x0e\xb4\x31\xf9\x39\x20\xf4\xee\xc1\xfe\xe5\
\xb3\x33\xb8\x8d\x1b\x37\xe2\xf4\xe9\xd3\x6d\xad\xd5\x09\x67\x30\
\x3b\xcc\xf0\x07\x40\xcb\x0e\x00\x3f\xf1\x1a\xe0\xc5\x96\x0b\x34\
\x3b\x03\xb6\x03\x83\x7a\xa6\x1a\x61\xef\x31\x50\x54\x15\x30\x75\
\xbe\xd0\x32\x87\x24\xe2\x62\xf3\xe6\xcd\x78\xeb\xad\xb7\xda\x99\
\xaf\xad\x36\x47\x8f\x1e\x85\x61\xc3\x86\x79\x26\x2e\x04\xb2\x34\
\xd2\x3d\x08\xea\xf6\xef\xc0\xe8\x03\x60\xa7\xa9\x4c\xe0\x7b\xe5\
\x33\x0e\xc1\x65\x65\xb0\x1b\xce\xbc\xbc\x3c\xac\xab\xab\xb3\x5c\
\x8b\x1b\x30\x50\xa7\x7e\x49\x02\x1c\x52\x04\xe2\xf0\x81\x8e\x14\
\xca\x76\x8d\x95\xdd\x47\x50\xb8\x50\x0f\x21\x39\x6c\x39\x59\x47\
\x0d\xe2\x88\x0b\x21\x3f\x1b\x20\x72\xb3\xb9\x68\xd1\x22\x5c\xb9\
\x72\xa5\xa3\x2e\x13\x35\xbe\xee\xba\xeb\x60\xcf\x9e\x3d\xae\xc5\
\x05\xcf\x71\x20\x49\x12\xf0\x74\x96\xd7\x75\x08\xeb\x2a\x33\x3f\
\x8b\xc8\x81\x24\x4a\xa0\x6b\x2a\xc8\x24\x42\x93\xdd\x8e\x92\x71\
\x92\x1c\x7d\x08\x00\x85\xa6\xa8\xc8\x02\x3e\xe0\xe3\xbe\xf2\x95\
\xaf\xe0\x5f\xff\xfa\x57\xcb\xb5\xba\x05\x03\x39\xea\xf8\x44\x09\
\xb4\xa1\x45\xe0\xbb\x7a\x80\x6d\x40\xb4\x6a\xc8\xae\xa5\x77\x96\
\x82\xa6\x28\x69\xf1\xdc\x31\x6f\x36\xfd\xf9\xb9\xa0\xe5\x66\x00\
\xdf\xb7\xc0\x20\x96\x4f\xe2\x64\x59\xc6\x41\x83\x06\x41\x55\x55\
\x95\x25\x91\xec\x34\x20\x03\xcf\xc2\x85\x0b\x1d\x89\x0b\x02\x81\
\x4f\xf2\x41\x48\x95\x81\xef\x53\x00\x7c\x76\x26\x80\x24\x82\x58\
\xdc\x8b\x99\x9d\xb5\x33\x17\x81\x6e\x48\xe9\x76\x54\x3b\x53\x0d\
\x7e\x9f\x0f\x54\x4d\x4b\x4c\x2b\xe2\x86\x64\xa2\x2e\x20\x51\x51\
\xc0\xcc\xd4\x64\x73\x78\xf1\xc5\x17\x71\xc1\x82\x05\x96\xcb\x70\
\x0b\x06\xea\x98\x01\x42\x92\x00\x26\x8e\x04\xa1\x47\x8e\x2d\x40\
\xb4\x06\xc3\xb1\x2a\xe4\x4f\x9c\x85\x60\xb0\xd9\x72\xa2\x6e\x1a\
\x88\x82\x00\xa2\x24\x81\xda\x2b\x0f\x84\xa1\xfd\xda\x19\x4a\xde\
\x78\xe3\x0d\x9c\x35\x6b\x96\x9b\xae\xe3\xbe\x73\xe2\xc4\x09\x18\
\x3c\x78\xb0\x2d\x71\x91\x41\xda\x38\x07\xa0\xf6\xc9\x03\xff\xd5\
\x03\x80\xcf\x4c\xac\x91\xeb\xcc\xfc\x2d\x83\x56\x7e\x0e\xf4\x93\
\x67\x40\x04\xbe\xe5\x2a\x3f\x76\x26\x64\xa2\xf6\x93\xde\x90\x6d\
\x80\x81\x6c\x0e\x01\x3f\x9c\xbd\x70\x1e\x8a\x8a\x8a\x2c\xd7\x99\
\x0a\x18\xa8\x73\xe2\x0e\x6a\x96\x1f\x7c\x74\xe4\x0c\xf8\x2d\x01\
\x11\xe3\x24\xa2\xa1\xbc\x7d\x3f\xf0\x4d\x21\xa6\x2f\x78\xfd\x04\
\x24\x1f\x68\xe4\x04\x52\xdc\x0b\xa4\xe1\x89\x59\xd7\xbc\x79\xf3\
\x70\xed\xda\xb5\x9e\x0c\x7f\xd3\x4d\x37\xc1\xf6\xed\xdb\x2d\xc5\
\x05\x01\x41\xce\x90\x40\x1c\x31\x10\x84\x3e\x05\x96\x44\x8b\x9d\
\x9c\x56\xd7\x88\xf8\xc9\x31\x10\x82\x32\x04\xdb\x8a\x56\xa6\x37\
\x88\xc0\xe5\x66\xb1\x13\x05\xd3\x1b\xb2\x0d\xbd\x61\xc2\x84\x09\
\xb8\x6b\xd7\xae\xa4\xeb\x4c\x15\x0c\xd4\x79\x46\x46\x26\x68\xbd\
\x72\x41\xbc\x76\x98\xe5\xba\x5a\xc0\x20\x2b\xa8\x6c\xdd\x03\xba\
\xa2\x7a\x2e\x22\x08\xa1\x98\xe5\x07\x61\xfc\x08\xa6\x51\x27\xa3\
\x40\x75\x75\x35\x0e\x1c\x38\x10\x9a\x9b\xbd\xe1\x4e\xcf\x3f\xff\
\x3c\x39\xd2\x26\x14\x17\x19\x02\x71\xaa\x5c\x90\xae\x1b\x6e\x49\
\xac\x44\xf3\xd6\x1a\x9b\x11\xf7\x1c\x35\x00\x11\x8e\xb9\x91\x8d\
\xd5\x1b\xe8\xe2\x8a\x14\xc9\xc8\x11\xd3\x8e\xb3\xb0\x17\x60\x20\
\x71\x21\x90\x17\xd6\xb5\x43\x41\x2c\xea\x95\x74\x8d\xd1\x1f\x95\
\xf2\xb3\xa8\x1d\x38\xce\x94\xa2\xa4\x8a\x91\xc3\x6f\x96\x34\x71\
\xc1\xef\x03\xee\xfa\x11\xb6\x65\xd7\x0b\x2f\xbc\x80\xf7\xdd\x77\
\x9f\xc3\x91\x12\x37\x3f\x75\xea\x14\x0c\x1c\x38\xb0\x9d\xb8\x10\
\x83\x32\x00\xcd\x6d\xfc\x70\xe0\x7d\xce\x8e\x61\x6d\x47\xd3\x9b\
\x42\xa8\xfd\xf7\x08\xf0\xcd\xe1\xd6\x86\xba\x04\x47\xcc\xb7\xdf\
\x7e\x1b\x6f\xbf\xfd\x76\x9b\x9c\x21\x88\xe4\xbc\xa3\x9d\xbb\xc4\
\x9c\x78\x30\x2c\x1b\xfe\x9e\x36\x1f\xd2\x1d\xf4\x80\x0f\x84\x1b\
\x4a\x40\x48\x22\x2e\xa2\x60\xd0\x4b\xcb\x91\xab\xa8\x86\x60\x30\
\x68\x73\x08\x7b\xcd\x24\x51\x04\xbc\xaa\x0f\xf8\x4a\x06\x3b\xfa\
\xf2\xee\xbc\xf3\x4e\x7c\xeb\xad\xb7\xec\x0d\x62\xd1\xea\xf3\x9f\
\xff\x3c\xfc\xfb\xdf\xff\x6e\x25\x2e\x30\xe2\x08\x23\x0c\xeb\x0f\
\x62\x6f\x67\xa2\x21\xd1\x70\x7a\x63\x10\xb5\x9d\x87\x01\x43\x61\
\xa6\x58\xb2\x27\x72\xc4\x24\xa3\x93\x21\x2a\xc8\xbf\xc3\x50\x9a\
\xdf\x7f\xff\xfd\x84\x3b\x4a\xa7\x98\x92\x92\x12\xe8\x55\x50\x00\
\x18\x94\x41\xa3\xf9\x9e\xaf\x61\x4e\x3c\x74\x51\x65\xff\x8c\x6b\
\x4c\x83\xc4\x85\x3a\xa0\x27\x48\x23\xae\x4a\xb8\x0f\xd1\x1f\xe4\
\x03\xc7\x51\x3a\x53\x03\xc1\x90\x77\x60\x20\x44\x2a\x01\x09\x32\
\xa6\x8e\x77\x04\x04\x9a\xfc\xb1\x63\xc7\x70\xe8\xd0\xa1\x9e\x80\
\x81\x3a\x21\xab\xdf\xfd\xf7\xdf\x1f\x15\x17\x7c\x43\x10\x74\x44\
\x10\x8a\x93\xb3\x4e\xa7\x13\xd0\xca\x4e\x21\x5f\x7e\x1e\x82\xa6\
\x03\x0f\xad\x5c\x10\xe8\x14\xd1\xca\xcf\x01\xc8\x67\x92\xb8\x86\
\x28\x24\xa4\x0d\x73\xf6\x55\x34\xe6\xca\x47\x5e\x5c\xda\xc5\x3a\
\xc0\x86\x66\x20\x07\x60\xa7\x60\xa0\x78\x10\xe4\x79\xf0\x4d\x1e\
\x03\x42\x6e\xfc\x1b\x4e\x36\x11\x3d\x14\xc6\xf0\x07\x9f\x00\x2f\
\xab\xa0\x79\xe8\x4c\x9a\x11\xc8\x00\x18\x3d\x08\xb8\xa2\x9e\x8e\
\xc1\x40\xf3\x7a\xf6\xd9\x67\xf1\xe1\x87\x1f\x76\xba\x1f\x09\xdb\
\x57\x56\x56\x42\xff\xfe\xfd\x99\xb8\x80\xe6\x10\xd3\xec\xb9\x0c\
\x6b\x2d\xdb\xc9\x04\xf4\x86\x66\x0c\x6f\xdb\x07\xbc\xa6\xb7\xd0\
\xd2\x74\x9e\x25\xef\x2f\xf2\x80\x22\xdf\x0e\xf2\x97\x14\x45\xe6\
\x65\xcd\x8c\x18\xed\x1e\x64\xa2\x80\x44\x82\xde\xd0\x6c\xf8\x78\
\x92\x07\x17\xf3\xd1\x30\x5d\xfa\x9c\xcc\x8c\x24\xa2\x0f\xd4\x82\
\x6c\xf0\xff\x4f\x49\xdc\xfd\x60\xff\x11\x9b\x43\x28\x6f\xdd\x0d\
\x3a\x0d\xee\x91\x9b\x39\xe9\x0a\x74\x54\xf3\x4f\x1e\x0b\x62\x5e\
\xb6\x2b\x30\xd0\xdc\xa6\x4c\x99\x82\x14\xb5\xe4\xc5\x73\xe7\x9d\
\x77\xc2\x5b\x6f\xbd\x65\x88\x0b\x45\x25\x6f\x2c\xd7\xf3\x4a\x36\
\x1f\xed\xbf\x47\x90\xbf\x58\x0f\xc1\x70\xc4\x70\x17\x31\xb8\x31\
\xe7\x1e\xba\xaf\x20\xa7\x59\xfa\x93\x44\xe6\x15\x96\xe8\x61\xb1\
\x20\x04\x06\xf2\xfa\xa6\x3f\x72\xde\x49\xd1\x7b\x8b\x94\x79\x7e\
\xc2\x70\xe0\x7b\xf5\x68\xb7\x76\x83\x33\x34\x05\x31\xb4\x65\x37\
\xc3\xa7\x57\x60\x60\xc7\xb5\xfc\x4c\xf0\x4f\x1a\x95\x12\xc1\xf7\
\xec\xd9\x83\x64\x51\xf4\xea\x79\xe1\x85\x17\xe0\xbe\xfb\xee\x4b\
\x69\x4e\x56\x73\x09\xef\xfb\x14\x45\xf2\x20\x8f\xbd\xf1\x35\x01\
\x11\x71\xa0\x05\x72\x12\xa6\xb8\x0b\xe2\x0c\xf4\x5b\xdb\x87\x3e\
\x4a\xfa\x38\xe9\x5a\x20\xac\x00\x84\x15\x40\x55\x4b\x39\x0c\x80\
\xfc\x1f\xb0\x47\x2e\x08\x13\xaf\xe9\x7a\x60\x20\x1a\x2d\x5b\xb6\
\x0c\x97\x2d\x5b\x66\xb5\x07\xb6\x7f\xa7\xf8\x8d\xbe\x7d\xfb\xa6\
\x0d\x10\x24\x2a\xe4\xf7\xf6\x30\x9d\xa4\xd5\x63\x3a\xd0\x12\x00\
\x88\x23\x44\xee\x3c\x12\x49\x09\x23\x3a\x0c\x81\x71\x08\x0f\x83\
\x83\xe8\xba\x9b\xbb\xb1\x84\xec\x1f\xad\x68\x70\xd9\x73\x06\x93\
\x98\xa3\x47\x8f\xc6\x03\x07\x0e\xd8\xde\xf0\x64\x0d\xe7\xcc\x99\
\x03\xaf\xbd\xf6\x5a\xc7\x83\xc1\x9c\x14\x1b\xd9\x70\xb7\xb7\x7c\
\x4c\x77\x7f\xfb\x27\x49\xcb\x2e\xfd\x92\x0f\xd4\x7e\x05\xe0\x1f\
\xdb\xda\x10\x95\x5e\x30\xf4\xc8\x02\xff\xc4\xf8\xca\x8a\xe5\x8c\
\xdb\x34\x78\xef\xbd\xf7\xf0\xe6\x9b\x6f\x76\xfa\x5a\xc2\xf6\x7f\
\xfe\xf3\x9f\xe1\xeb\x5f\xff\xba\x9d\xed\x70\x3c\x66\x42\xce\xe0\
\xb8\xa7\xf4\xbc\x40\xd7\x02\x14\x01\x26\xdc\x38\xba\x95\xde\x14\
\x51\x20\xc3\xcc\xfa\xa8\x69\x74\x6d\xed\xcd\x43\xf6\x05\x18\x3d\
\x18\xa4\x01\x7d\x3c\x23\xf8\xe2\xc5\x8b\x71\xc5\x8a\x15\xde\x4c\
\x10\x00\xce\x9f\x3f\x0f\xbd\x7b\xf7\xf6\x6c\x7e\xe6\xc4\xb4\xda\
\x46\x54\x3e\xdc\xc7\xae\xbc\x2f\xd7\x27\x23\x33\x0b\xf4\x21\x7d\
\x41\x18\x5c\x14\x5d\xbf\xc1\x19\xc2\x32\xca\xdb\xf6\x03\x1f\x92\
\x5b\x8c\x25\x29\xae\x82\xb4\x56\x61\xf2\x18\xf2\xf0\xf1\x94\xd8\
\xc5\xc5\xc5\x58\x51\x51\x91\xe2\xec\x8c\xd7\xbf\xf6\xb5\xaf\xc1\
\xba\x75\xeb\x3c\x9d\x1f\xf5\x1b\xde\x73\x04\xa5\xf3\x75\x10\x72\
\xe3\x48\xec\xc9\xca\xac\x3b\x21\x1b\x90\x9a\xe1\x83\xc0\xcd\xd7\
\xb5\x06\x03\x03\x44\xe9\xa9\x88\x05\x32\xf5\x3b\x01\x81\x17\x00\
\x02\x12\x48\x37\x8d\xf1\xfc\xf8\xb6\x61\xc3\x06\xbc\xeb\xae\xbb\
\xac\x57\x6b\xb3\xc5\xcb\x2f\xbf\x0c\xf7\xdc\x73\x8f\xa7\x80\x50\
\x3e\x3e\x88\x42\x6d\x93\xf3\xf0\x02\x9b\x73\xf6\xa2\x19\xdd\x59\
\xd0\xe3\x9b\x34\x0a\x84\x5e\xf9\xa6\x16\x63\x74\xad\xd7\x34\xa0\
\xbe\xab\x14\x54\x3a\xd7\xa6\x68\x6b\xa0\x63\xa5\x52\x5c\x08\xbe\
\x51\xce\x4c\xd0\x76\x17\x99\x6a\x74\x73\xec\x38\xe4\xc4\x42\xa7\
\x8b\x9e\x3d\xdd\x19\xc6\xda\xce\x59\xbb\x54\xcf\xbc\xca\xa3\xd1\
\xe9\x76\x17\xd5\x09\xed\x48\x54\xe0\x90\xbe\xc0\x47\x44\x45\xab\
\x2f\x42\xde\x53\x86\xc2\x39\x3a\x1f\xbb\xf7\x7d\x64\xca\x89\x24\
\x81\x30\x65\x8c\xad\x3b\x74\x37\x34\xa8\xaf\xaf\xc7\x7e\xfd\xfa\
\x51\x24\xb6\x9b\xd7\xdb\xbd\x33\x6f\xde\x3c\x58\xbb\x76\xad\x27\
\xdc\x21\xfc\xc9\xa7\x28\x9e\xad\x71\x17\x6b\xe2\xc9\x6a\xec\x77\
\x42\x7b\xa5\xf9\x04\xc8\xb8\x65\x62\x6b\xce\xc0\xb8\x43\x28\x8c\
\xda\xf6\x03\x2c\x7c\xcb\x0d\xb2\x29\xb3\x0a\x19\x51\xf8\x6b\x87\
\x81\x54\xe4\x7d\x60\x68\xec\x32\x5f\x7a\xe9\x25\xbc\xf7\xde\x7b\
\xed\xaf\xdc\xa2\xe5\xab\xaf\xbe\x0a\x73\xe7\xce\x4d\x09\x10\xea\
\xd9\x8b\xc8\xed\x3d\x0a\x32\x19\x8a\x52\xe4\xae\x9e\x2d\x2c\x49\
\x47\x64\x25\xd6\x05\x1e\xfc\x53\xc6\x91\x2b\x7f\x7b\xd3\x97\x7a\
\xbe\x06\xf5\xbd\x9f\x82\x04\x7c\xeb\xbb\x79\x8b\xd9\x31\x8e\x40\
\x46\x94\xd1\x83\x40\x2a\xf6\xee\x04\xd1\x76\x58\x32\x25\x9b\x97\
\x3b\x33\x66\xcc\xc0\xf5\xeb\xd7\x7b\x42\xb7\xdc\xdc\x5c\xa0\xab\
\xee\xfc\x7c\x43\x7e\x3a\x7d\xb4\x86\x26\x54\xff\x53\x0a\x7c\x58\
\x49\x8b\x73\x90\xd3\xf9\xd8\x6d\x4f\xb7\x99\xca\xa0\xde\xe0\x1b\
\x56\x1c\xcf\x0e\x0a\xa0\x5d\xa8\x41\xf5\xc8\x69\xf0\x35\xcb\x2c\
\xa2\x2a\x7a\x1d\x1b\x67\x04\x42\x17\xf9\x0d\xea\x1c\x02\x8e\x1e\
\x0c\x62\x1a\x39\x02\x4b\x5a\x21\xab\x46\x2e\x04\xbf\xc4\x9d\x3e\
\x7d\x1a\x07\x0c\x18\x60\x77\xdd\x96\xed\xe6\xcf\x9f\x0f\x6b\xd6\
\xac\x71\x0c\x06\xad\xbe\x09\xf5\x3d\x65\x20\x84\x94\xcb\xfa\x04\
\x11\x8f\x00\xe4\x5a\xaf\x16\xf7\x04\xa9\x64\x70\x7c\x30\x98\x2f\
\xc9\xfb\x8f\xa2\x76\xe6\x12\x04\x78\x91\x25\xbf\xd0\xf4\xc8\x1d\
\x3d\xb3\x9f\x71\x20\xf8\x7c\xa0\x81\x0e\x5a\x41\x0e\x48\x43\xfb\
\xd3\x6d\x9c\x63\x42\x5a\xee\x50\xa4\x01\x29\x66\x50\x75\x11\x20\
\x3f\x0b\x80\xe2\x18\x23\x5e\xd5\x2b\x57\xae\xc4\x45\x8b\x16\xd9\
\xed\xc6\xb2\x1d\xc5\x42\xce\x99\x33\xc7\xf6\x3a\x88\x93\xe2\xe1\
\x72\x10\x42\x72\x97\x03\x02\x11\x83\x89\x0a\x51\x80\xc0\xff\x5e\
\x6f\x6d\x10\x25\x97\x2e\x68\x34\x12\x61\x68\x55\xd5\xc6\xc5\x0a\
\xd9\xcb\x45\x01\x7c\x43\x8a\x0d\x9f\xbe\x7c\xf7\xb7\x92\x56\xbb\
\xa3\x36\x36\xa3\x72\xac\x12\xa4\xba\x26\xa6\x98\x6a\xe4\x36\xd6\
\x26\xc4\xff\xe6\x9b\x6f\xc6\xf7\xde\x7b\xcf\xaa\x2b\x5b\xbf\xf7\
\xe9\xd3\x07\x28\xee\x22\x27\x27\xb1\x47\xb1\x1e\x56\x50\xab\x6d\
\x00\xed\x68\x05\xf8\x1a\xc9\x91\x45\xf5\xcc\x3e\x63\x6b\x92\x1e\
\x36\x62\x7e\x0e\xa4\x37\xd8\x01\x83\x87\xe3\x26\xed\x4a\x57\x55\
\x04\xba\x95\xa3\x87\xe7\x40\x3b\x57\x03\x78\xb1\x0e\xd4\xca\x6a\
\xc8\x90\x24\x08\xeb\x1a\x03\xa0\x91\x2f\xaa\x75\x88\xff\xc1\x83\
\x07\x71\xd4\xa8\x51\x9e\x4d\xf5\x1b\xdf\xf8\x06\xfc\xe9\x4f\x7f\
\x8a\x18\xe4\x14\x24\xcf\x25\x96\xfc\x83\xe5\x8b\x52\x59\xd2\x0c\
\x72\x99\xa3\xaf\xea\x72\x36\x2c\xd9\x21\x08\xd9\x1b\x28\x58\xca\
\x77\xfd\x08\x6b\xce\x60\xa7\xc3\x54\xda\x68\xe7\x2e\xb1\x14\x35\
\xfa\xf9\x5a\xd0\x1b\x9a\xa2\xd7\xb9\x12\x2f\x00\x19\xaf\x58\x22\
\x31\xd2\xcc\x2d\x62\x36\x9f\x7c\xf2\x49\x5c\xba\x74\x69\x2a\x53\
\x69\xf5\xee\x9b\x6f\xbe\x09\x33\x67\xce\x64\x9e\x51\xd0\x1c\x06\
\xa8\x6d\x04\xa8\x69\x00\x68\x68\x06\xa5\x29\xc8\x62\x4b\xbc\x32\
\xdd\x7b\x36\x69\x97\x1d\x91\x13\x12\x16\x17\x76\x1e\x18\x88\xc8\
\xe1\xff\x1c\x06\xa1\xbe\x19\x44\xf2\xef\x57\x64\xd0\xe8\x9a\x36\
\xf2\x30\xc7\xdc\xb6\xe4\xb6\x88\xd9\x1c\x3b\x76\x2c\xee\xdb\xb7\
\xcf\x25\x49\x5a\xbf\x46\x8a\x29\x45\x75\x67\x66\x66\x72\xa4\xb8\
\xea\x75\x4d\xa0\x57\xd7\x1a\x79\xa3\x9a\x42\xae\xbd\x8d\x3c\x99\
\x9c\xc7\x9d\x10\x18\x94\xa2\x1e\x9d\x03\x06\xca\xfd\xa0\x1f\x3a\
\x01\x62\xb3\xcc\x12\x86\x39\xb2\x78\x26\x89\xd9\xdc\xbe\x7d\x3b\
\x52\xac\x84\x57\x8f\x99\x80\xd4\xc8\x8b\x1d\x02\xfd\x62\x1d\x68\
\x17\x6a\x01\x1b\x28\x81\x98\x73\x3f\x44\xaf\xe6\xe5\x75\x3f\x9d\
\x06\x06\x2d\x14\x66\x21\x7c\x62\x48\x85\x60\xd8\x85\xf3\xad\x85\
\xb8\x78\xe4\x91\x47\xf0\x57\xbf\xfa\x95\x67\xf4\xfa\xfb\xdf\xff\
\x0e\x33\xa6\xdf\xc9\x5c\xce\x98\x53\x6a\x0a\x1e\xca\x9e\x4d\xca\
\xe3\x8e\xc8\x58\xa8\xfb\xc5\x8e\xe7\x0c\xea\xce\xc3\xec\x12\xa7\
\x55\xb0\x89\xd3\xc5\x59\x88\x8b\x81\x03\x07\x22\x19\x90\xbc\x78\
\x28\xfe\xf3\xe4\xc9\x93\x1c\x36\x87\x11\xea\x1a\x01\x2e\xd4\x81\
\x7a\xa9\x0e\xf4\x60\x08\x34\x53\xe1\xf5\x62\xa0\x4e\xec\x83\x34\
\x65\x9e\x8c\x86\x1d\x39\x07\x52\x16\xb5\xdd\x65\x94\x0c\x3c\x75\
\x73\x6d\x12\x71\xe1\x65\xf2\x72\xba\xc8\x52\x14\x85\xd3\x49\x91\
\xa4\xf4\x82\x0d\xcd\xa0\x1e\xaf\x04\x68\x0a\x81\x1f\x62\x14\xdc\
\x8e\x24\xa4\xc7\x63\xb1\x13\x05\x62\x07\x83\x61\xd7\x11\xe4\x2e\
\x7a\x14\xee\x6f\x21\x2e\x16\x2c\x58\x80\x2f\xbe\xf8\x62\xca\x64\
\x33\xc1\xd0\xb6\x23\x0a\x98\x51\x8e\x57\x02\x56\x5c\x00\x81\xe3\
\x99\xa5\xb6\xab\x3e\x1d\x0e\x06\x94\x15\x54\xdf\xdb\x0b\x9a\x07\
\x57\xe4\x51\xa2\x27\x49\x5e\x1e\x0c\x06\x91\x0c\x48\xf5\xf5\xf5\
\x29\xed\x51\x22\x30\x98\x9d\x6a\x4d\x41\x54\x3e\x39\x0a\x52\x7d\
\x33\x28\xc9\xc2\xf3\x53\x9a\x45\x7a\x5f\xee\x70\x30\x90\x37\x55\
\xe8\xff\xfe\x0b\xbc\x8e\xde\x06\xf6\x26\x49\xc4\xb9\x6e\xdd\x3a\
\x24\x6f\xa6\x54\x1e\x51\x14\x41\x55\x29\x02\x24\xf9\xa3\x9e\x3a\
\x87\x78\xf0\x04\x13\x81\x8e\x4e\x47\x56\x1d\x77\xc0\xef\x9f\x1d\
\x30\x24\x49\xb5\x47\x74\xfc\xf2\x97\xbf\x8c\x7f\xfb\xdb\xdf\x5c\
\x93\xd4\x2e\x18\x68\x00\xf5\x44\x15\x0a\xc7\xd2\x97\xdf\xc2\xf5\
\x22\x2c\x5e\xfc\xec\x80\x81\x16\x9a\x24\xd5\xde\xd9\xb3\x67\xb1\
\x6f\xdf\xbe\xae\xe9\x48\x99\xe8\x96\x2c\x59\x62\xc9\x19\x68\x00\
\x2d\x2c\xa3\xba\xab\x14\xc4\x86\x60\x4a\x0e\x42\xae\x27\xeb\xf2\
\xc5\x0e\x07\x43\x5a\x74\x06\x73\xf1\xd1\x54\x7b\x19\x46\xaa\xbd\
\x5e\x79\xc0\xe5\x18\xf9\x93\xa8\xc9\xaa\x55\xab\x70\xe1\xc2\x85\
\x8e\x49\x75\xdb\x6d\xb7\xc1\x3b\xef\xbc\x63\x0b\x08\x66\xe7\xea\
\xa5\x3a\x0c\x7d\xf0\x09\xd0\xd9\xbd\x2b\x38\xb8\xd0\xbc\x3b\x1c\
\x0c\xec\xcb\xd9\x55\x1a\x39\x4d\xb8\x48\x3f\x9c\x6c\x2b\x69\xbb\
\xcc\x48\x67\xba\xc4\xea\xd5\x03\x78\x4a\xc8\x19\x93\x8b\xa0\x57\
\xaf\x5e\x78\xe1\xc2\x05\xdb\x80\xc8\xc9\xc9\x01\x0a\xda\xb9\xea\
\xaa\xc4\x21\xec\x89\x3a\x6b\x17\x6b\x69\x7b\xd4\xce\x69\xd8\x39\
\x60\xa8\xbc\x80\xfa\xbe\x63\xec\xba\xd7\xd3\xaf\xc6\x4c\x97\x93\
\x9d\xc9\xea\x5b\x08\x85\xc4\x19\x32\xa3\x45\x3a\x9e\x7b\xee\x39\
\xa4\x62\x25\x4e\x9e\x75\xeb\xd6\x91\x2b\xbd\x23\xae\x60\xf6\x2f\
\x1f\x29\x67\xb9\xb1\x28\x23\x5c\x57\x78\x3a\x05\x0c\x44\x18\x65\
\xc7\x21\x14\x6a\x1b\xbd\xbb\xfa\x6d\x6b\x6f\xa0\xbc\x49\x31\x85\
\x3f\xdc\x04\xee\x3e\xf8\xe0\x83\xb0\x7a\xf5\x6a\x57\x40\xa0\x35\
\x5e\xee\x11\x55\x6d\x01\xda\x29\x16\x48\x26\x2a\xc8\x63\x69\x77\
\x19\x80\xaa\xa7\x9e\x97\x9a\x3a\x4c\x62\x89\xa4\x9f\x9d\x66\x9f\
\x1d\x3b\x76\x2c\xec\xdb\xb7\xcf\x35\x10\xba\x22\x18\xd8\xdd\x04\
\x25\x22\xeb\x0c\x36\x26\x9f\xac\x42\xfc\xb4\x12\x44\x1d\x59\x7e\
\x6a\xd7\xe7\x72\x3a\x45\x50\x39\x20\x4a\x8b\x43\xd5\x5f\xa8\x6a\
\x5e\x4c\x91\x93\x07\x1e\x78\x00\x29\xc1\x97\x93\x87\xea\x51\xde\
\x78\xe3\x8d\x29\xd1\xa5\xab\x71\x86\x4e\xbb\xb5\x34\x37\x46\x0f\
\x86\x51\xde\x73\x04\x84\x86\x10\x88\x82\x08\x72\x9b\xda\x57\x96\
\x71\x8a\xa6\xf5\x91\x0a\x83\x91\x58\xa0\x1c\x8b\x31\xe2\xc1\x4d\
\xb5\x3c\x33\xef\xb4\x13\xf0\xc4\x6b\xdb\x0d\x06\x97\x14\x64\x9e\
\x4e\x97\xea\x41\x3d\x7d\x1e\x80\xf2\x4f\x46\x2e\x4d\x7c\xe4\xe9\
\x24\xd0\x45\x50\x1c\x7f\x87\x58\x3d\x21\x02\x04\x3e\x37\x9b\xa5\
\xc6\xa1\x8a\x71\x54\x13\x8a\x6e\x1b\x55\x07\xf9\x2c\x67\xcc\x98\
\x01\xeb\xd7\xaf\x4f\x89\x23\x44\x81\x9e\x28\x3f\x83\x4b\x1a\xa5\
\xfb\xb5\x4e\xf7\x74\x6a\xbb\x40\x0a\xe0\x89\xfd\x6f\x5a\x65\x35\
\x73\x22\x89\xfa\x40\xc6\xa6\x30\x8e\xd5\x13\x48\x34\xf4\xc8\x31\
\x44\x45\xc4\xae\xe0\xd4\x41\xb6\x67\xcf\x9e\x70\xfc\xf8\x71\xc8\
\xcb\xcb\xbb\xe2\xc0\xe0\xc8\x07\x52\xaf\x6f\x42\xca\x30\xa6\x9c\
\xac\x62\xa9\x64\xa2\x29\x67\x78\x0e\x44\x2a\x96\x95\x9b\x9d\x76\
\xef\x68\xf5\x68\x05\x88\xd5\xf5\xcc\x19\x95\x2e\x83\xa2\x95\x75\
\xe3\x14\x43\x7b\xe2\x89\x27\x58\x2d\x6a\x27\xcf\x1b\x6f\xbc\x01\
\xb3\x66\xcd\xf2\x04\x08\x5d\x4d\x81\xb4\xe5\x1d\x4d\x5f\xaa\x52\
\x76\x0a\x84\xea\x06\x10\x90\x03\x4d\x96\xdb\xf9\x24\x8a\x94\x02\
\x98\x43\x50\x73\x33\xa8\xac\x1e\x08\x3d\x72\x3d\x23\x68\xdb\xcd\
\xd4\xce\xd7\x20\xec\x3f\x01\xba\xae\x81\x46\x4a\x63\x9c\x9a\xdb\
\xff\xfa\xd7\xbf\xf0\x0b\x5f\xf8\x82\x13\x1c\xc0\xe2\xc5\x8b\x61\
\xc5\x8a\x15\x9e\xce\xbb\x2b\xe9\x0c\x96\x71\x13\x14\x18\xa2\xee\
\x3b\x0a\x7e\x9d\x63\x86\x93\xd8\xe0\x99\xb6\x94\x36\x23\xaa\x34\
\x82\xca\xa8\x41\x20\xf5\xf7\x36\xaf\x62\x2b\xd1\x71\xa1\x06\xf5\
\x23\xa7\x59\x44\x15\xe6\x67\x01\xdf\xa3\xa5\x80\x6a\x28\x14\x62\
\x7a\x82\x93\x7a\xdb\x13\x26\x4c\x80\x5d\xbb\x76\x79\x0a\x84\xae\
\xc6\x19\x28\xa2\x4a\xe9\x5f\x00\xbe\x51\x43\x12\xc7\x5a\x0a\x94\
\x75\xce\x41\x7d\x2a\x02\x05\x63\x39\x43\x8a\xc0\x97\x24\x0b\xa9\
\xa3\xcf\x36\x4e\x63\x56\x06\xe1\x5c\x8d\x51\x55\x96\xca\x0c\x47\
\x6a\x65\xce\x9a\x35\x0b\x89\xdd\x3b\x79\xa8\xfa\xcd\xa8\x51\xa9\
\x65\xa3\xeb\xea\xa7\x89\x84\xb1\x96\x44\x68\xed\x3f\xa5\xc0\xa9\
\x9a\xab\xe0\x51\x02\x03\x3d\xe2\xf8\xe1\x69\x8d\xb9\xa4\x4c\xee\
\x6c\x2c\xca\xb2\x2a\x89\xdc\xaf\x7f\xfd\x6b\xfc\xde\xf7\xbe\xe7\
\x04\x07\x54\x47\x1b\x1e\x7a\xe8\x21\xcf\xb9\x42\x57\xe2\x0c\x66\
\x14\xb6\xef\xff\x5d\xcb\x8a\x9c\xb5\xce\xcf\xb0\xbb\x0c\xa5\xea\
\xba\x96\x64\x96\x8e\xc8\x6b\x34\xee\x88\xfc\x0c\x8c\xe0\xb2\x82\
\x94\x7f\xd9\x8d\xb9\xb9\xd3\xb3\xbd\xb9\xa0\x6b\x3a\x5e\x49\x98\
\x9f\x81\x32\xb7\x10\x57\x20\x45\xd1\xb5\x45\x30\x32\xe3\x74\x67\
\x6e\x89\x25\xcc\x98\x31\x63\x90\x0a\xa4\xdb\x7d\x28\xc9\x47\x55\
\x55\x55\x5a\x38\x82\x39\x87\xae\xa2\x40\x26\xcc\xdc\xe2\x6d\x4e\
\x27\xa3\xf4\xb1\x70\xd3\x68\x10\xb2\xdc\xd5\x58\xb4\xb3\xb9\xdf\
\xf9\xce\x77\xf0\xb7\xbf\xfd\xad\x9d\xa6\xd1\x36\xff\xfc\xe7\x3f\
\xe1\x8b\x5f\xfc\xe2\x15\x0f\x86\x84\x39\x9d\xc8\xf1\x24\xf4\xe1\
\x27\x2c\xbf\x40\xb2\x5c\x0c\x4e\xa8\x9e\xae\x6c\x6f\xe6\x1c\xdc\
\x98\x9b\xa9\xdc\xf2\xf2\xe5\xcb\x59\xfc\x24\x5d\xa1\x93\x98\x71\
\xb2\x26\xbb\x6d\xbb\x02\x67\x48\x98\xed\x8d\x02\x44\xd2\x92\x07\
\xb2\x64\x10\x48\x57\x79\x9f\x96\x97\xcc\xcd\x54\x7e\x80\x4c\xd5\
\x76\x9f\xc9\x93\x27\xc3\xb6\x6d\xdb\x58\x12\x71\x50\x8c\x34\x3b\
\x5e\x55\x97\x6f\x3b\x87\xae\x00\x06\x23\x0f\x64\x11\x08\x83\x5b\
\xf6\xc7\x08\x3b\x4f\x53\x22\xf1\x70\x7e\x26\x04\x52\x4c\x24\x1e\
\x6f\xb3\xa7\x4d\x9b\x86\x5b\xb6\x6c\xb1\x8b\x03\xd6\xce\x4c\x00\
\x4a\x5e\xda\x5c\x50\x06\x9d\xd2\x37\xa7\x90\xed\x3e\xd9\xe0\x97\
\x3b\x18\x8c\x0c\xb1\x7e\x10\x6e\x1c\xd5\x3e\x43\x6c\xba\xc0\xe0\
\x45\x56\xf9\xb6\x44\x77\x63\x6e\xfe\xe3\x1f\xff\x08\xdf\xfc\xe6\
\x37\x5b\x0a\x8f\x34\x06\xd9\xfd\xbd\x50\xe8\x2e\x7f\x93\x15\x0a\
\x6d\x81\xc1\xcc\xaf\x66\x47\x50\x79\x9c\x3f\x9a\x72\x47\x6b\xfd\
\x7b\x82\x6f\xcc\xd0\x56\xa3\xa7\x95\x33\x78\x0d\x06\x37\x95\x71\
\xef\xb9\xe7\x1e\x78\xf9\xe5\x97\x5b\x95\x24\x12\xea\x83\xa0\xe5\
\x04\xd2\x66\x0b\x49\x0a\x86\xcb\x20\xb3\x7c\x97\xcf\x2a\x4f\xe6\
\xe6\x61\xc3\x86\x81\x93\x54\xc1\x66\xd0\x2c\x13\x85\x21\x19\x59\
\x79\x9f\x9a\x06\x10\x1b\x43\xa0\x16\xe6\x82\x54\xec\x7d\xde\xe8\
\xa4\x46\xa7\xcb\xa0\xe6\x84\x65\xbd\x09\xaa\x44\x13\xde\xb2\xdb\
\xd3\xca\x75\x64\xdd\xd2\x78\x8e\x55\xa2\x49\x54\x13\xc9\x8a\xdd\
\xc6\xfe\x3e\x7b\xf6\x6c\x7c\xfd\xf5\xd7\x9d\xbc\x02\xbb\x77\xef\
\x86\xf1\xe3\xc7\x1b\x55\xeb\x9a\xc3\xa0\xd7\x37\x82\x7e\xa9\x01\
\x7c\x61\x95\xa5\xd8\x97\x06\x7a\xaf\xdc\xd2\x04\xe5\x4f\x4f\x23\
\x7f\xb4\xaa\x75\x2e\xcd\xcb\xa0\x1a\x0d\x89\x01\x4a\x8c\xc2\x5f\
\x3f\x9c\x8a\xb5\xb7\x13\x50\x86\x98\x08\x86\x31\xfc\xfe\x5e\x66\
\x86\xa6\xaa\x69\x5e\x3d\xcc\x69\x62\xec\x60\x2a\x25\x6c\x47\x32\
\x26\x1c\xd6\x8d\xb9\x99\x8a\x95\x2c\x5b\xb6\xac\xa5\x9e\x65\x9d\
\x01\x04\xaa\xfe\x26\x91\xff\x65\x86\x0f\x32\x6f\x1c\x9d\xd2\xbc\
\x12\x4d\x58\xfd\xef\x11\x14\x62\xcb\x12\x51\xc3\xcb\xa0\x4e\x15\
\xab\x51\xd5\x23\x3b\x61\xd9\x87\x28\x31\xe4\xfd\xc7\x50\x3a\x4b\
\xd5\xeb\x62\x8a\x74\xa6\x88\x0a\xaa\xd6\xae\x64\x05\x20\x30\xe5\
\x5a\xd7\x44\xdf\xbb\x77\x2f\x8e\x1b\x37\xce\xd1\x4c\xa6\x4d\x9b\
\x06\x5b\xb6\x6c\x69\x5d\xe9\x96\x4a\x01\x52\x4d\xc8\xa6\x20\x70\
\xaa\xce\x92\x73\x51\x3d\x4b\x31\xce\x17\xe2\x68\xb0\x36\x8d\x99\
\x32\xfe\xe1\x3e\x10\x54\x4a\x95\x18\xf9\xb0\x62\xe3\x3a\xc8\x57\
\x93\x0a\x9e\xe6\x64\x1a\x77\x2b\x1d\x54\xc1\xce\x76\xf5\x3a\xc6\
\x1d\xd2\x50\xd7\x92\xb1\x25\x8a\x2c\x1a\x5c\x04\xbe\x91\x83\x5c\
\x01\x62\xdc\xb8\x71\xb8\x77\xef\x5e\xdb\xfb\xe3\xf3\xf9\x40\x96\
\x65\x83\xe3\xc5\xe8\x09\xe4\x5a\xc7\xaa\xbf\xc9\x0a\x2b\xf1\x43\
\x26\x73\xad\x30\x17\xc4\xeb\x47\xb8\x9a\x57\xa2\x09\x29\x07\x8e\
\xa3\x78\xe6\x52\xeb\x8f\xca\x8c\xeb\xc8\xc9\xa2\x0c\xee\x46\x6d\
\xcb\xcc\x0c\xe0\xfd\x12\x57\x53\x53\xc3\x0c\x60\x89\x1e\xb2\x14\
\xe6\xe7\xe4\xa4\x5c\xdb\xd2\x51\x5d\x4b\xe5\xe4\x19\xd4\x0f\x9e\
\x60\xf7\x12\x5e\x06\xb8\x10\x18\x84\x8c\x00\xf0\x13\xaf\x21\xd7\
\x34\x47\x84\x7f\xf0\xc1\x07\x71\xf5\xea\xd5\xb6\x81\x40\x0d\x37\
\x6c\xd8\x00\x77\xdd\x75\x57\x3b\x3d\x01\x49\x4c\xb0\x32\x80\x3a\
\xcb\x63\xc9\x80\x2a\x49\xc0\x51\x56\x5b\x8f\x14\x49\xe5\x78\x25\
\x62\xe9\x69\xe6\xff\xd1\x8a\x86\x04\x86\x80\x8f\x39\xee\x0a\xbd\
\x7b\x30\x2f\x6e\xf2\xc9\xa8\xa9\xaf\x83\x82\x82\x02\x3b\xeb\xe3\
\x88\xe3\xb8\xad\x7a\x6b\x56\xbc\x15\x6f\x18\x15\x75\x0d\x8c\x37\
\x68\x74\x73\xc8\x24\x2d\x6f\xdd\xc3\x5c\xcb\xbc\xd4\x1b\x68\x50\
\x9a\x0c\x66\x06\x40\x18\x7f\x35\xf0\xd9\xf6\x8a\x91\xb8\x31\x37\
\x9b\xf9\x1b\xc9\xdc\x4c\x1b\xcf\x00\x10\xd1\x13\xe2\x95\x01\xa4\
\xd4\x82\xbc\x5f\x02\x7e\xdc\xd5\x14\x85\xe5\x08\xa8\x6d\x89\x29\
\x1f\xab\x44\xfe\x68\x05\xa8\xf1\xf4\x2e\x12\x05\x04\x86\x1e\x39\
\x06\x18\x48\x54\x64\x65\x70\x1b\x37\x6e\xc4\xe9\xd3\xa7\xdb\x04\
\x43\x08\x59\x62\x56\xca\x29\x55\x53\x6f\x54\xbd\xb5\x51\x02\xd9\
\x55\x2d\x6c\x4a\x53\x13\xa6\xfb\x89\x20\xdd\x4f\xa8\x76\x26\xe8\
\xa8\x0d\x9d\x6d\xf5\x80\x04\x30\xb4\x9f\xe5\x97\x48\xe6\xe6\x11\
\x23\x46\x40\x53\x53\x93\xed\x31\xa8\xfd\x91\x23\x47\x12\xea\x09\
\x54\x39\x16\xe2\x28\xc7\x92\x28\x51\xd5\x59\xd0\x7a\xe7\x91\xb7\
\x4f\xd2\x0a\xb4\xf1\x26\x43\x74\xd3\x4f\x9d\x03\xee\xd3\x4a\x50\
\xb4\x04\x1f\x12\x39\xf0\x92\x4b\x3f\x95\x10\xec\x43\x71\xa0\xd9\
\xcc\x14\xfe\xf4\xd3\x4f\xe3\x92\x25\x4b\x92\xae\x91\x3c\xc4\x35\
\x4d\xe3\xa8\xd6\xb6\x1b\x30\x90\x78\x60\xe2\x70\x5c\xeb\xe2\x64\
\x49\x39\x03\x93\xb1\x27\xce\x20\x77\xfc\x2c\x04\x9b\xed\x6f\x82\
\xed\xdd\x8a\xe8\x0f\xec\xc8\x59\x98\x07\xc2\xd0\xfe\x09\x0b\xa5\
\xdf\x7a\xeb\xad\xb8\x79\xf3\x66\x27\x5d\xc3\xe9\xd3\xa7\x61\xc0\
\x80\x01\xed\x8a\x9f\xc7\xea\x09\x89\x3a\x64\xae\x7b\x3e\x3f\x84\
\x33\x25\x10\x86\xf4\x03\x91\x42\xf4\x2c\x2e\xb1\x28\xfc\x9e\xea\
\x69\x2b\xa5\xa7\xc0\x2f\xab\x2c\x04\x3f\xae\x78\xa5\x22\x75\x64\
\xfe\x25\xef\x6d\x5a\x77\x61\x3e\x95\x10\x64\xec\x7a\xe6\xcc\x99\
\x48\xc9\x47\x93\x3d\x85\x85\x85\x50\x5d\x5d\xed\x0a\x0c\x74\x59\
\xa8\x66\xf9\xc1\x37\x71\xa4\xad\xda\x1f\xad\x58\x23\x85\xbe\x69\
\xff\x39\x0c\xba\xc7\x47\xcc\x56\x8b\xe5\x38\x08\x64\x65\x82\xea\
\x13\x80\x1b\xd0\xdb\xf8\x5a\x72\x5a\x44\xc7\xd2\xa5\x4b\xf1\xc9\
\x27\x9f\x74\x04\x84\xdf\xfc\xe6\x37\xf0\xdd\xef\x7e\xb7\xe5\x18\
\x59\xdb\x08\xda\xa5\x3a\xc0\xda\xd6\x7a\x82\x55\xa7\xcc\x4c\x8b\
\x3a\xab\xd4\x2b\x14\xf7\x06\x4e\xe0\x41\x28\x2a\x04\x21\x3f\x9b\
\x71\x1c\x3d\x2c\x83\x56\x71\x9e\xb1\x67\xa5\xe2\x3c\x48\x8a\xc6\
\xc2\xd9\x95\x64\xf1\x19\x91\xc2\xe8\x3c\xe5\xbc\x66\xca\x63\x9e\
\x01\x0c\x49\xe4\xfa\xf5\xeb\x87\x55\x55\x55\x49\xa7\x35\x7c\xf8\
\x70\x28\x2b\x2b\x73\x0c\x06\x9a\x17\x89\x67\x98\x38\x32\xe1\x47\
\xd7\x76\xe0\x76\x72\x32\xfc\xf1\x01\x14\x6b\x9b\xd2\x93\x6c\x22\
\x26\xf8\x85\x7c\x18\xc5\xc2\x1e\xc0\xe5\x65\x45\xdd\xd7\xdc\x98\
\x9b\xcd\x1c\x0a\xac\xa4\x31\x65\x63\xab\x6f\x02\xfd\x22\x1d\x23\
\xeb\x8d\x7c\xcf\x0e\xcb\x05\x13\x41\xe8\x18\x26\x49\x7e\xe0\x78\
\x0e\xc2\x9a\x02\x48\xff\x91\xe6\xae\xeb\x10\xe0\x25\x46\x43\x85\
\x32\xda\xea\xba\xb5\xb2\x4d\xfa\x42\x86\x0f\xf8\xfc\x88\xbe\x90\
\x9f\x03\x7c\x76\x06\x67\xd7\x93\xfb\x86\x1b\x6e\x80\x8f\x3f\xfe\
\xd8\x11\x18\x18\x10\x44\x09\xf4\xa1\x45\x20\x5d\x3d\xc0\xb6\x2e\
\xd4\xae\xa1\x5a\x71\x1e\x85\xd2\xd3\x10\x0c\xba\x48\xd8\x69\xf5\
\xe9\x25\x09\x92\x0d\x87\xc3\x48\x5f\x41\x79\x79\xb9\x55\x2f\xd1\
\xdf\xb3\xb3\xb3\xa9\x34\x91\xe5\x31\xd2\x76\x87\x71\x1a\x12\x30\
\xa8\x9c\x02\x3d\x94\xbe\xd8\x91\x72\x1d\x2b\x22\xc8\xb6\x40\x9c\
\x81\x38\x44\xc0\xcf\xd9\x2d\x8d\x70\xc7\x1d\x77\xc0\xa6\x4d\x9b\
\x1c\x81\x81\x38\x9c\x3e\xb8\x0f\x48\x0e\x1d\x93\xdb\x81\x41\x57\
\x14\xd4\xb7\x1f\xa4\x62\xe9\xae\x9c\x62\x13\x12\xde\x22\x48\x76\
\xee\xdc\xb9\x48\xa5\x81\x9c\x3c\x66\x90\x6c\x5b\x73\x73\xdb\x63\
\xa4\x93\x3e\x3d\x6d\x9b\x44\x44\x4c\x9f\x3e\x1d\x37\x6e\xdc\x68\
\x39\x9c\x59\x6e\xd1\xae\x02\x49\xae\xef\x3a\x85\x19\x4e\x1a\xc9\
\x9c\x85\x2d\x07\x88\x69\x10\xb7\xb1\x7a\xe2\x0c\x0a\xc7\xce\x40\
\x30\x98\x7a\x59\x43\x36\x96\x45\x90\xac\xdd\xaf\x24\x76\x61\x8f\
\x3f\xfe\x38\x3c\xf3\xcc\x33\x71\xcd\xcd\x5e\x54\x93\x77\x42\xc4\
\xa4\x1f\x40\x1c\x11\xd1\xd4\xd4\x84\x94\x19\xc6\x0e\x97\xa1\xc2\
\x2a\x2b\x57\xae\xb4\xc5\x19\xe8\xc4\xa6\xd1\x78\xe3\xaf\x06\xc1\
\xe6\x11\x3e\x76\xee\x71\xc1\xc0\x6c\x0e\x3b\x0e\x81\xd0\x18\x72\
\x55\xb8\xac\xad\xc2\x48\x39\x14\x58\xe5\x98\x38\x41\xb2\x6e\xcc\
\xcd\x63\xc6\x8c\x81\xfd\xfb\xf7\x3b\x3e\x46\x7a\xb2\xc1\x76\x3b\
\x89\xe6\x99\x0a\x18\x79\xa6\xd8\x29\xc2\x10\x11\xaf\xbe\xfa\x2a\
\xce\x9d\x3b\xd7\x56\x4f\x3f\xfe\xf1\x8f\xe1\x27\x3f\xf9\x89\x25\
\x18\x08\x08\xaa\x5f\x04\x61\xc2\x35\xcc\xed\xdd\x56\xe7\x6d\x1a\
\x25\x7c\x89\x9d\x2c\x3e\x3a\xc8\xb4\xeb\x94\x2c\x92\x16\x41\xb2\
\x13\x26\x4c\xc0\x5d\xbb\x76\x39\x9a\x7b\x28\x14\x82\x40\x20\xe0\
\xea\x18\xe9\x68\xa0\x54\x1a\x47\xf3\x53\x66\xb3\x23\x25\x9d\x9a\
\xf8\xac\x00\x63\xdd\x4e\x02\x7e\xa8\xdc\xf3\xe2\xc5\x8b\x93\x82\
\x81\x80\xa0\xf8\x04\x90\xe8\x08\xe9\xd0\xca\x6b\xc9\x19\xcc\x06\
\xf2\x27\x9f\xa2\x74\xbe\x1e\x82\x21\x97\xca\xa4\x85\x9e\xb0\x68\
\xd1\x22\x5c\xb9\x72\xa5\x23\x92\x9b\x35\xa4\xa2\x56\x46\x97\xc7\
\x48\x47\x83\x3a\x6d\xdc\x76\xdd\x91\x8b\x29\x32\x34\x9d\x3b\x77\
\x8e\x65\xae\xb5\xfb\xac\x5d\xbb\x16\xe6\xcd\x9b\x97\x10\x0c\xe4\
\x9f\x20\x8b\x3c\x48\x93\x4a\x5c\x89\x06\xdb\x60\xa0\xa2\x1b\x2c\
\xc2\xaa\x99\xc4\x85\x43\xab\xa4\x85\x9e\xe0\x84\x55\x9a\x13\x8e\
\xe7\xb5\x94\xca\x31\xd2\xee\x86\x58\xb6\x63\xfc\x95\x8e\x0e\xf4\
\x0f\xcf\x2c\x9a\xcc\xe2\x48\x19\xe7\x28\x5d\x40\x4c\x12\x91\xc7\
\x1e\x7b\x0c\x97\x2f\x5f\x6e\xd9\xa5\xd9\x80\x94\xcc\xe9\xd3\xa7\
\xb7\x03\x03\x45\xc4\x07\x24\x3f\xb3\xd7\x30\x1d\x21\x27\xf5\x62\
\x71\x96\xb2\x85\x42\xd9\xd4\x8f\x0e\xb2\x0b\x9e\x64\x01\xb8\x4e\
\xf4\x04\x32\x37\x97\x94\x94\x38\xca\xeb\x4c\x39\x14\x2e\x5e\xbc\
\x68\x1c\x23\xc3\x0a\xd2\x69\x87\x2e\x6e\x74\x4a\xf7\x5f\xdf\x0c\
\x10\x96\x01\xa3\xb6\xfa\x8e\x2a\x18\x64\x00\x80\xc5\x20\x08\x3c\
\xf3\x59\x60\x39\xa6\xfc\x3e\xb2\x25\x30\xb3\x33\x59\x1b\xcd\x24\
\x22\x65\x65\x65\xec\xf8\xec\xe4\xd9\xb6\x6d\x1b\x4c\x9e\x3c\xb9\
\x15\x18\xa0\xa6\x91\xa5\x40\x22\x33\xb3\x70\xf5\x00\xd7\x3a\x42\
\xdb\x79\x58\x82\x81\x5e\x50\xca\x4e\xa3\x78\xf2\x1c\x84\xe4\xb0\
\x3d\xfd\xc1\x42\x4f\xb8\xed\xb6\xdb\xf0\x9d\x77\xde\x71\x42\x13\
\x56\x51\x6e\xd8\xb0\x61\x86\xd2\x18\x56\x40\x6f\x6c\x36\xfc\x13\
\xe8\x5a\x9a\x2e\xa5\x58\xed\x87\x8e\x02\x81\x39\x75\x8e\x15\x57\
\x23\x73\x33\x50\x82\x2c\x51\x20\x57\x22\xe3\x52\x8a\xf4\x83\xec\
\x4c\x83\x43\x44\x92\x88\xb8\x39\x3e\x1f\x3a\x74\x08\x4a\x4a\x4a\
\xa2\x60\x80\xea\x3a\xe0\xea\x83\xa0\xf7\xef\x49\xe5\x23\x6d\xed\
\x9f\x5d\x42\xdb\xee\x4c\x3e\x5c\x8e\x62\xc5\x05\x16\xab\x90\x54\
\xa1\xb4\x28\x0c\xe2\xa6\xb0\x18\x77\x57\xfb\x13\x00\x00\x03\x2b\
\x49\x44\x41\x54\x55\x96\x79\xe4\x91\x47\xd8\x5c\x51\x55\x23\x60\
\x08\x02\x36\x36\x1b\x40\xa0\x4b\xa8\xce\x2a\x37\x6c\x7a\x30\x51\
\x34\xb8\x4f\x34\xc0\x20\x89\x54\x84\x15\x80\x6e\x44\xfd\x46\x96\
\xda\x15\x2b\x56\x20\xe5\x82\x70\xfa\x9c\x39\x73\x06\x8a\x8a\x8a\
\xd8\x15\x36\xd4\x35\x01\x95\x50\x84\xfc\x6c\x90\xfa\x79\x9f\xfa\
\xc0\x36\x18\x68\x11\xf2\xe1\x93\x28\x55\x5c\x84\x50\x38\x94\xe0\
\x52\xc6\x60\x97\xec\x18\x19\x27\x99\xc6\xbb\xef\xbe\x8b\xb7\xdc\
\x72\x8b\x23\x7a\x4c\x9d\x3a\x15\xb6\x6e\xdd\xda\x6a\x9e\xcc\x69\
\x25\x2c\xb3\x4c\x32\x8c\x23\x10\x10\x3a\x03\x0c\x24\x1e\x4c\xce\
\x40\x00\x90\xa8\x16\xb8\x21\x2e\xe8\x5e\xc3\x34\xfa\x3c\xfc\xf0\
\xc3\xf8\xec\xb3\xcf\x3a\x5a\xb7\xd9\x98\x4e\x4e\x7e\x51\x62\x57\
\xd6\x54\xf8\x84\x95\x71\x4c\x53\x24\x98\x23\x30\x30\x40\x1c\x3a\
\x81\x52\xe5\x25\x08\x85\x82\xed\x99\xb2\x85\xb9\x99\xf4\x84\x63\
\xc7\x8e\x39\x25\x4a\xbb\x39\x32\x51\x41\x4e\x2a\x74\x25\xcd\x80\
\xe0\xb4\x4b\x8f\xda\x9b\x33\x33\x75\x05\xc3\xb6\x10\x9d\x2f\xf9\
\x64\x90\x73\xce\x87\x1f\x7e\x98\xca\x80\x46\x38\xa0\xae\x47\xb9\
\x4c\x2a\x9d\x25\x7b\xd7\x31\x18\x98\x0e\x71\xf8\x24\x8a\x15\xd5\
\x2c\x87\x63\x34\x36\xd3\xe2\x18\x79\xef\xbd\xf7\xe2\x4b\x2f\xbd\
\xe4\x68\x1d\x14\x35\x35\x6d\xda\x34\x57\x73\x74\x34\x90\x47\x8d\
\xa9\xd8\xea\x9e\x3d\x7b\x80\xfe\xc8\x93\xdb\xa3\x3a\x59\x1d\xb6\
\x7e\xd7\x03\x51\x8e\x25\x2c\x3d\x05\x82\xac\x42\x88\x62\x1e\xa3\
\xc7\xa9\xf6\x39\x19\x57\xaf\x5e\x8d\x94\x82\xd7\x8b\x87\x9c\x3d\
\xe8\xf2\x88\xfe\xd2\xf9\xff\xdb\x99\x2b\xe9\x4f\x35\x35\x35\x50\
\x5b\x5b\xcb\xfe\x75\x92\x6a\xd0\x4e\xff\xb4\x46\x5d\xa7\x40\xc0\
\x8e\x79\x52\x1a\x88\xea\x34\x85\xf7\x96\x81\x4f\xd1\x01\x45\x1e\
\x34\x3a\x4e\x15\xe4\xb0\x0c\x70\xe6\x71\xca\x8d\xb9\xb9\x63\x96\
\x7e\xf9\x8f\x62\x3a\xb6\x74\xd4\x4c\x53\x02\x83\x39\x49\xe5\xd4\
\x59\x84\x8b\xf5\x20\x16\xe4\x82\x4e\x79\x96\x32\x03\xd1\xe3\xd4\
\xa4\x49\x93\x70\xc7\x8e\x1d\x1d\xb5\x9e\xcf\xd4\x38\xa6\x63\x4b\
\x47\x2d\xca\x13\x30\xd0\x64\xc9\x17\x90\x8e\x79\x74\xd6\x36\x43\
\xdd\x53\xd1\xa2\x3b\x8a\x00\x97\xf3\x38\x93\x26\x4d\x82\x1d\x3b\
\x76\x78\xb6\x47\x56\x6b\x4d\xdb\x40\xaf\xbf\xfe\x3a\xce\x9e\x3d\
\xdb\x6a\xfc\xee\xdf\x93\x50\xe0\xf6\xdb\x6f\x87\xb7\xdf\x7e\x3b\
\x6d\x7b\xd4\x76\xe8\xb4\x0c\x54\x59\x59\x89\x74\xcd\x7c\xe9\xd2\
\xa5\xee\xcd\x4e\x81\x02\x77\xdf\x7d\x37\xbc\xf2\xca\x2b\x69\xd9\
\xa3\x78\xd3\x4a\xcb\x40\x76\xbd\x78\x52\xa0\xd3\x15\xf1\x2a\xd5\
\xd5\x5a\xb5\x6a\x55\x5a\xf6\xa8\x43\xc0\x60\x27\x16\xe0\x8a\xd8\
\x49\x0f\x16\xf9\xc4\x13\x4f\xc0\x53\x4f\x3d\xd5\x35\xc1\xb0\x65\
\xcb\x16\xa4\xa0\xd7\xee\xc7\x1b\x0a\xc4\xde\xc9\x78\xd3\x63\xf2\
\x5e\x3c\x43\x1d\x79\x37\x93\x9e\x50\x56\x56\xd6\x11\xf3\xbe\x22\
\xc6\xa0\x5a\xde\x0b\x16\x2c\xf0\x6c\x8f\xac\x88\xe6\xd9\x40\xf3\
\xe7\xcf\xc7\x35\x6b\xd6\x58\x8d\xd7\xfd\xbb\x03\x0a\xac\x5f\xbf\
\x1e\x66\xcc\x98\xe1\xd9\x1e\x59\x0d\xed\xd9\x40\x6e\x42\xe2\xac\
\x26\x77\xa5\xff\xfe\xfe\xfb\xef\xc3\xe7\x3e\xf7\x39\xcf\xf6\xc8\
\x8a\x9e\x9e\x0d\x34\x75\xea\x54\xdc\xba\x75\xab\xd5\x78\xdd\xbf\
\x3b\xa0\x00\xa5\x41\x1e\x33\x66\x8c\x67\x7b\x64\x35\xb4\x67\x03\
\x75\x9b\x9d\xad\x48\xed\xfc\x77\x4a\x66\x56\x5c\x5c\xec\xd9\x1e\
\x59\xcd\xc0\xb3\x81\x9c\x26\xf4\xb6\x9a\x58\xf7\xef\xc0\x52\x12\
\x64\x65\xa5\xee\xe8\x6a\x97\x96\x9e\x81\xa1\xb9\xb9\xb9\xb3\x5c\
\x4c\xec\xae\xb5\xcb\xb5\xcb\xcc\xb4\x97\xd8\xc4\xab\x85\x79\x06\
\x06\xaf\x26\xd4\xdd\x4f\xe7\x51\xe0\xff\x03\x31\x71\xca\xdf\xcd\
\xc6\x57\xf9\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x14\
\x06\x1d\x28\xd3\
\x00\x74\
\x00\x68\x00\x72\x00\x65\x00\x65\x00\x64\x00\x69\x00\x5f\x00\x63\x00\x75\x00\x73\x00\x74\x00\x6f\x00\x6d\x00\x5f\x00\x73\x00\x74\
\x00\x61\x00\x74\x00\x73\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x42\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x42\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x71\x9e\x58\x12\x76\
"
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| python |
from .alembic_current import AlembicCurrent
from .alembic_downgrade import AlembicDowngrade
from .alembic_history import AlembicHistory
from .alembic_init import AlembicInit
from .alembic_migrate import AlembicMigrate
from .alembic_show import AlembicShow
from .alembic_stamp import AlembicStamp
from .alembic_upgrade import AlembicUpgrade
| python |
#from django.db import models
class CreditCard():
def __init__ (self,
full_credit_card_number = '',
major_industry_identifier = 0,
issuer_identification_number = 0,
personal_account_number = 0,
check_digit = 0,
                  issuer = 'Unknown',
):
self.full_credit_card_number = full_credit_card_number
self.major_industry_identifier = major_industry_identifier
self.issuer_identification_number = issuer_identification_number
self.personal_account_number = personal_account_number
self.check_digit = check_digit
self.issuer = issuer
class InvalidCreditCard(CreditCard):
def __init__ (self,full_credit_card_number):
super().__init__(full_credit_card_number)
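# Hedged usage sketch (not part of the original module): shows how these classes
# might be instantiated. The card number below is the well-known Visa test value,
# not a real account.
if __name__ == '__main__':
    card = CreditCard(
        full_credit_card_number='4111111111111111',
        major_industry_identifier=4,
        issuer_identification_number=411111,
        personal_account_number=111111111,
        check_digit=1,
        issuer='Visa',
    )
    print(card.issuer, card.check_digit)
    bad = InvalidCreditCard('1234')
    print(bad.full_credit_card_number, bad.issuer)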
| python |
#!/usr/bin/env python
import io
import os
import re
from setuptools import setup, find_packages
file_dir = os.path.dirname(__file__)
def read(path, encoding='utf-8'):
path = os.path.join(os.path.dirname(__file__), path)
with io.open(path, encoding=encoding) as fp:
return fp.read()
def version(path):
"""Obtain the packge version from a python file e.g. pkg/__init__.py
See <https://packaging.python.org/en/latest/single_source_version.html>.
"""
version_file = read(path)
version_match = re.search(r"""^__version__ = ['"]([^'"]*)['"]""",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
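# Illustrative note (assumption, not from the original file): if
# consecution/__init__.py contains a line such as
#     __version__ = '0.1.0'
# then version('consecution/__init__.py') returns '0.1.0'; otherwise the
# RuntimeError above is raised.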
LONG_DESCRIPTION = """
Consecution is an easy-to-use pipeline abstraction inspired by
Apache Storm topologies.
"""
setup(
name='consecution',
version=version(os.path.join(file_dir, 'consecution', '__init__.py')),
author='Rob deCarvalho',
author_email='unlisted',
description=('Pipeline Abstraction Library'),
license='BSD',
keywords=('pipeline apache storm DAG graph topology ETL'),
url='https://github.com/robdmc/consecution',
packages=find_packages(),
long_description=LONG_DESCRIPTION,
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Topic :: Scientific/Engineering',
],
extras_require={'dev': ['nose', 'coverage', 'mock', 'flake8', 'coveralls']},
install_requires=['graphviz']
)
| python |
#
# PySNMP MIB module HUAWEI-LswMAM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HUAWEI-LswMAM-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:34:27 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint")
lswCommon, = mibBuilder.importSymbols("HUAWEI-3COM-OID-MIB", "lswCommon")
hwdot1qVlanIndex, = mibBuilder.importSymbols("HUAWEI-LswVLAN-MIB", "hwdot1qVlanIndex")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
iso, Integer32, Bits, TimeTicks, Gauge32, Counter64, ModuleIdentity, Unsigned32, NotificationType, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, ObjectIdentity, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "Integer32", "Bits", "TimeTicks", "Gauge32", "Counter64", "ModuleIdentity", "Unsigned32", "NotificationType", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "ObjectIdentity", "Counter32")
TextualConvention, DisplayString, MacAddress = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "MacAddress")
hwLswMacPort = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 3))
hwLswMacPort.setRevisions(('2001-06-29 00:00',))
if mibBuilder.loadTexts: hwLswMacPort.setLastUpdated('200106290000Z')
if mibBuilder.loadTexts: hwLswMacPort.setOrganization(' ')
class InterfaceIndex(TextualConvention, Integer32):
status = 'current'
displayHint = 'd'
class PortList(TextualConvention, OctetString):
status = 'current'
hwdot1qMacSearchTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 3, 1), )
if mibBuilder.loadTexts: hwdot1qMacSearchTable.setStatus('current')
hwdot1qMacSearchEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 3, 1, 1), ).setIndexNames((0, "HUAWEI-LswMAM-MIB", "hwdot1qMacSearchAddress"), (0, "HUAWEI-LswMAM-MIB", "hwdot1qMacSearchVlanID"))
if mibBuilder.loadTexts: hwdot1qMacSearchEntry.setStatus('current')
hwdot1qMacSearchAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 3, 1, 1, 1), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1qMacSearchAddress.setStatus('current')
hwdot1qMacSearchVlanID = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(-1, -1), ValueRangeConstraint(1, 4096), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1qMacSearchVlanID.setStatus('current')
hwdot1qMacSearchPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 3, 1, 1, 3), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1qMacSearchPort.setStatus('current')
hwdot1qMacSearchAgeTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 3, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1qMacSearchAgeTime.setStatus('current')
hwdot1qTpFdbSetTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 3, 2), )
if mibBuilder.loadTexts: hwdot1qTpFdbSetTable.setStatus('current')
hwdot1qTpFdbSetEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 3, 2, 1), ).setIndexNames((0, "HUAWEI-LswVLAN-MIB", "hwdot1qVlanIndex"), (0, "HUAWEI-LswMAM-MIB", "hwdot1qTpFdbSetAddress"))
if mibBuilder.loadTexts: hwdot1qTpFdbSetEntry.setStatus('current')
hwdot1qTpFdbSetAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 3, 2, 1, 1), MacAddress())
if mibBuilder.loadTexts: hwdot1qTpFdbSetAddress.setStatus('current')
hwdot1qTpFdbSetPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 3, 2, 1, 2), InterfaceIndex()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1qTpFdbSetPort.setStatus('current')
hwdot1qTpFdbSetStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 3, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 6, 7, 9, 11))).clone(namedValues=NamedValues(("other", 1), ("learned", 3), ("static", 6), ("dynamic", 7), ("blackhole", 9), ("security", 11)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1qTpFdbSetStatus.setStatus('current')
hwdot1qTpFdbSetOperate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 3, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("add", 1), ("delete", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1qTpFdbSetOperate.setStatus('current')
hwdot1qTpFdbGroupSetTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 3, 3), )
if mibBuilder.loadTexts: hwdot1qTpFdbGroupSetTable.setStatus('current')
hwdot1qTpFdbGroupSetEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 3, 3, 1), ).setIndexNames((0, "HUAWEI-LswVLAN-MIB", "hwdot1qVlanIndex"), (0, "HUAWEI-LswMAM-MIB", "hwdot1qTpFdbGroupSetAddress"))
if mibBuilder.loadTexts: hwdot1qTpFdbGroupSetEntry.setStatus('current')
hwdot1qTpFdbGroupSetAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 3, 3, 1, 1), MacAddress())
if mibBuilder.loadTexts: hwdot1qTpFdbGroupSetAddress.setStatus('current')
hwdot1qTpFdbGroupSetPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 3, 3, 1, 2), PortList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1qTpFdbGroupSetPort.setStatus('current')
hwdot1qTpFdbGroupSetOperate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 3, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("add", 1), ("delete", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1qTpFdbGroupSetOperate.setStatus('current')
mibBuilder.exportSymbols("HUAWEI-LswMAM-MIB", hwLswMacPort=hwLswMacPort, hwdot1qMacSearchVlanID=hwdot1qMacSearchVlanID, hwdot1qTpFdbGroupSetAddress=hwdot1qTpFdbGroupSetAddress, hwdot1qTpFdbSetEntry=hwdot1qTpFdbSetEntry, hwdot1qTpFdbSetAddress=hwdot1qTpFdbSetAddress, hwdot1qMacSearchAgeTime=hwdot1qMacSearchAgeTime, InterfaceIndex=InterfaceIndex, hwdot1qTpFdbSetTable=hwdot1qTpFdbSetTable, PYSNMP_MODULE_ID=hwLswMacPort, hwdot1qTpFdbSetStatus=hwdot1qTpFdbSetStatus, hwdot1qTpFdbGroupSetEntry=hwdot1qTpFdbGroupSetEntry, hwdot1qTpFdbSetOperate=hwdot1qTpFdbSetOperate, PortList=PortList, hwdot1qTpFdbGroupSetPort=hwdot1qTpFdbGroupSetPort, hwdot1qTpFdbGroupSetOperate=hwdot1qTpFdbGroupSetOperate, hwdot1qMacSearchPort=hwdot1qMacSearchPort, hwdot1qTpFdbSetPort=hwdot1qTpFdbSetPort, hwdot1qMacSearchTable=hwdot1qMacSearchTable, hwdot1qMacSearchEntry=hwdot1qMacSearchEntry, hwdot1qTpFdbGroupSetTable=hwdot1qTpFdbGroupSetTable, hwdot1qMacSearchAddress=hwdot1qMacSearchAddress)
| python |
import os
import subprocess
import sys
kolibri_dir = os.path.abspath(os.path.join('src', 'kolibri'))
win_dir = os.path.abspath(os.path.join('dist', 'win', 'Kolibri'))
kolibri_dest_dir = os.path.join(win_dir, 'kolibri')
from .version import get_env_with_version_set
def do_build(args):
if 'android' in args and '--docker' in args:
subprocess.call(['docker', 'build', '-t', 'android_kolibri', '.'])
subprocess.call(['docker/android/rundocker.sh'])
return
elif '--docker' in args:
print("Docker builds not supported for this platform.")
print("Attempting non-docker build...")
try:
print("Building app...")
from . import stdlib
# see function docstring for more info on why we do this.
stdlib.generate_stdlib_imports()
env = get_env_with_version_set(args)
# This is needed to avoid errors when scanning python
# code for dependencies.
if sys.platform.startswith('darwin'):
env['PYTHONPATH'] = os.path.join(kolibri_dir, 'dist')
cmd = ['pew', 'build']
if args and len(args) > 0:
cmd.extend(args)
subprocess.call(cmd, env=env)
if sys.platform.startswith('win'):
stdlib.generate_python_bytecode(kolibri_dest_dir)
except Exception as e:
raise e
| python |
total = totmil = cont = menor = 0
barato = ''
while True:
    produto = str(input('Product name: '))
    preco = float(input('Price: '))
cont += 1
total += preco
if preco > 1000:
totmil += 1
if cont == 1 or preco < menor:
menor = preco
barato = produto
    resposta = ' '
    while resposta not in 'YN':
        resposta = str(input('Do you want to continue [Y/N]? ')).upper().strip()[0]
if resposta == 'N':
break
print('{:-^40}'.format('End of Program'))
print(f'The total purchase was R${total}')
print(f'We have {totmil} products that cost more than R$1,000')
print(f'The cheapest product was {barato}, costing R${menor}')
| python |
import uuid
import os
import traceback
import flask
import urllib, json
import logging
import jsonschema
class FlaskHelper():
def __init__(self, port=None):
self.session = {}
self.server = flask.Flask(__name__)
self.port = port if port else os.environ["PORT"]
def route(self, url_rule, **kwargs):
def wrapper(func):
def method(*default_args, **default_kwargs):
message = ""
status_code = 200
args = flask.request.get_json()
args = {} if not args else args
url_rule = str(flask.request.url_rule)
with open("settings.json", "r", encoding="utf-8") as fp:
settings = json.loads(fp.read())
schema_item = settings["api_schemas"][url_rule]
try:
if schema_item == None:
raise ValueError(
"schema is none. url_rule is %s" % (url_rule))
try:
args = self.get_validated_obj(args, schema_item)
except Exception as e:
status_code = 400
raise ValueError(e)
default_kwargs.update({"args": args})
message = func(*default_args, **default_kwargs)
except ValueError as e:
status_code = 400
exc = traceback.format_exc()
logging.warning("process failed. status code is %s. traceback is %s" % (
status_code, exc))
message = str(e)
except Exception as e:
status_code = 500
exc = traceback.format_exc()
logging.error("process failed. status code is %s. traceback is %s" % (
status_code, exc))
message = str(e)
return flask.jsonify({
"message": message
}), status_code
if "methods" not in kwargs:
kwargs["methods"] = ["POST"]
method.__name__ = func.__name__
self.server.route(url_rule, **kwargs)(method)
return method
return wrapper
def get_validated_obj(self, obj, schema_item):
schema = schema_item.get("schema", {})
properties = schema_item.get("properties", {})
for name in properties:
prop = properties[name]
for key in prop:
if key == "default":
default = prop[key]
if name not in obj:
obj[name] = default
for key in prop:
value = obj[name]
if key == "change_type":
type_name = prop[key]
obj[name] = self.set_type(type_name, value)
try:
jsonschema.validate(obj, schema)
except Exception as e:
raise ValueError(f"validate failed. {e}")
return obj
def set_type(self, type_name, value):
if type_name == "int":
return int(value)
elif type_name == "float":
return float(value)
elif type_name == "string":
return str(value)
elif type_name == "bool":
if value == "true" or value == "True":
return True
elif value == "false" or value == "False":
return False
else:
raise ValueError(f"invalid bool value. value is [{value}]")
else:
raise ValueError("invalid set type name %s" % (type_name))
def listen(self):
self.server.run("0.0.0.0", self.port)
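# Hedged usage sketch (assumption, not part of the original module): wiring up a
# route with the decorator. It presumes a settings.json next to the script with
# an "api_schemas" entry for "/echo", e.g.
#     {"api_schemas": {"/echo": {"schema": {"type": "object"}, "properties": {}}}}
if __name__ == "__main__":
    helper = FlaskHelper(port=8080)

    @helper.route("/echo", methods=["POST"])
    def echo(args):
        # args has already been defaulted, type-coerced and schema-validated
        return {"echo": args}

    helper.listen()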
| python |
#
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import xir
import vart
import numpy as np
import hot_patch_xmodel
def md5(np_array):
hash_md5 = hashlib.md5()
hash_md5.update(np_array)
return hash_md5.hexdigest()
g = xir.Graph.deserialize('/workspace/yolov4-tiny.xmodel')
the_root = g.get_root_subgraph()
the_root.get_name()
hot_patch_xmodel.hot_patch(the_root)
graph_runner = vart.RunnerExt.create_runner(the_root, "run")
inputs = graph_runner.get_inputs()
outputs = graph_runner.get_outputs()
with open('/scratch/models/cache/golden/74/32192dbe8b0cacdf99c2112732324b',
'rb') as f:
f.readinto(inputs[0])
print(md5(inputs[0]))
job = graph_runner.execute_async(inputs, outputs)
graph_runner.wait(job)
print(md5(outputs[0]))
print(md5(outputs[1]))
| python |
#!/usr/bin/env python
# Copyright 2019 Juliane Mai - juliane.mai(at)uwaterloo.ca
#
# License
# This file is part of the EEE code library for "Computationally inexpensive identification
# of noninformative model parameters by sequential screening: Efficient Elementary Effects (EEE)".
#
# The EEE code library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The MVA code library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with The EEE code library.
# If not, see <https://github.com/julemai/EEE/blob/master/LICENSE>.
#
# If you use this method in a publication please cite:
#
# M Cuntz & J Mai et al. (2015).
# Computationally inexpensive identification of noninformative model parameters by sequential screening.
# Water Resources Research, 51, 6417-6441.
# https://doi.org/10.1002/2015WR016907.
#
from __future__ import print_function
"""
Template files for Efficient Elementary Effects sensitivity analysis of RAVEN
History
-------
Written, JM, Jun 2019
"""
RVI = """
#########################################################################
:FileType rvi ASCII Raven rev217 (v2.9)
:WrittenBy James Craig & Juliane Mai
:CreationDate June 2019
#
# RAVEN run of Salmon River near Prince George using HMETS model setup
#------------------------------------------------------------------------
#
:StartDate 1989-01-01 00:00:00 # 1954-01-01 00:00:00
:EndDate 2010-12-31 00:00:00
:EvaluationTime 1991-01-01 00:00:00
# :Duration 20819
:TimeStep 1.0
:Method ORDERED_SERIES
:PotentialMeltMethod POTMELT_HMETS
:RainSnowFraction RAINSNOW_DATA
:SWRadiationMethod SW_RAD_NONE # no radiation is faster
:Evaporation PET_DATA
:CatchmentRoute ROUTE_DUMP
:Routing ROUTE_NONE
:SoilModel SOIL_TWO_LAYER
:Alias DELAYED_RUNOFF CONVOLUTION[1]
:HydrologicProcesses
:Precipitation RAVEN_DEFAULT ATMOS_PRECIP MULTIPLE
:Infiltration INF_HMETS PONDED_WATER MULTIPLE
:Overflow OVERFLOW_RAVEN SOIL[0] DELAYED_RUNOFF
:Baseflow BASE_LINEAR_ANALYTIC SOIL[0] SURFACE_WATER # interflow, really
:Percolation PERC_LINEAR SOIL[0] SOIL[1] # recharge
:Overflow OVERFLOW_RAVEN SOIL[1] DELAYED_RUNOFF
:SoilEvaporation SOILEVAP_ALL SOIL[0] ATMOSPHERE # AET
:Convolve CONVOL_GAMMA CONVOLUTION[0] SURFACE_WATER # 'surface runoff'
:Convolve CONVOL_GAMMA_2 DELAYED_RUNOFF SURFACE_WATER # 'delayed runoff'
:Baseflow BASE_LINEAR_ANALYTIC SOIL[1] SURFACE_WATER
:SnowBalance SNOBAL_HMETS MULTIPLE MULTIPLE
:EndHydrologicProcesses
#:CreateRVPTemplate
#---------------------------------------------------------
# Output Options
#
# :WriteForcingFunctions
# :WriteNetcdfFormat
# Accumulated Infiltration volume
:CustomOutput DAILY AVERAGE Between:PONDED_WATER.And.SOIL[0] BY_BASIN
:EvaluationMetrics NASH_SUTCLIFFE RMSE
:SilentMode
:DontWriteWatershedStorage
#
"""
RVP = """
#########################################################################
:FileType rvp ASCII Raven rev217 (v2.9)
:WrittenBy James Craig & Juliane Mai
:CreationDate June 2019
#
# RAVEN run of Salmon River near Prince George using HMETS model setup
#------------------------------------------------------------------------
#
# tied parameters:
# (it is important for OSTRICH to find every parameter place holder somewhere in this file)
# (without this "par_x06" and "par_x10" and "par_x10" wouldn't be detectable)
# para_sum_x05_x06 = {dpar[sum_x05_x06]} = par_x05 + par_x06 = {par[x05]} + {par[x06]}
# para_sum_x09_x10 = {dpar[sum_x09_x10]} = par_x09 + par_x10 = {par[x09]} + {par[x10]}
#-----------------------------------------------------------------
# Soil Classes
#-----------------------------------------------------------------
:SoilClasses
:Attributes,
:Units,
TOPSOIL,
PHREATIC,
:EndSoilClasses
#-----------------------------------------------------------------
# Land Use Classes
#-----------------------------------------------------------------
:LandUseClasses,
:Attributes, IMPERM, FOREST_COV,
:Units, frac, frac,
FOREST, 0.0, 1.0,
:EndLandUseClasses
#-----------------------------------------------------------------
# Vegetation Classes
#-----------------------------------------------------------------
:VegetationClasses,
:Attributes, MAX_HT, MAX_LAI, MAX_LEAF_COND,
:Units, m, none, mm_per_s,
FOREST, 4, 5, 5,
:EndVegetationClasses
#-----------------------------------------------------------------
# Soil Profiles
#-----------------------------------------------------------------
:SoilProfiles
LAKE, 0
ROCK, 0
DEFAULT_P, 2, TOPSOIL, {par[x20]}, PHREATIC, {par[x21]},
# DEFAULT_P, 2, TOPSOIL, x(20), PHREATIC, x(21),
:EndSoilProfiles
#-----------------------------------------------------------------
# Global Parameters
#-----------------------------------------------------------------
:GlobalParameter SNOW_SWI_MIN {par[x09]} # x(9)
:GlobalParameter SNOW_SWI_MAX {dpar[sum_x09_x10]} # x(9)+x(10)
:GlobalParameter SWI_REDUCT_COEFF {par[x11]} # x(11)
:GlobalParameter SNOW_SWI 0.05 #not sure why/if needed...
#-----------------------------------------------------------------
# Soil Parameters
#-----------------------------------------------------------------
:SoilParameterList
:Parameters, POROSITY, PERC_COEFF, PET_CORRECTION, BASEFLOW_COEFF
:Units, -, 1/d, -, 1/d
TOPSOIL, 1.0, {par[x17]}, {par[x15]}, {par[x18]}
PHREATIC, 1.0, 0.0, 0.0, {par[x19]}
# TOPSOIL, 1.0, x(17), x(15), x(18)
# PHREATIC, 1.0, 0.0, 0.0, x(19)
:EndSoilParameterList
#-----------------------------------------------------------------
# Land Use Parameters
#-----------------------------------------------------------------
:LandUseParameterList
:Parameters, MIN_MELT_FACTOR, MAX_MELT_FACTOR, DD_MELT_TEMP, DD_AGGRADATION, REFREEZE_FACTOR, REFREEZE_EXP, DD_REFREEZE_TEMP, HMETS_RUNOFF_COEFF,
:Units, mm/d/C, mm/d/C, C, 1/mm, mm/d/C, -, C, -,
[DEFAULT], {par[x05]}, {dpar[sum_x05_x06]}, {par[x07]}, {par[x08]}, {par[x13]}, {par[x14]}, {par[x12]}, {par[x16]},
# x(5), x(5)+x(6), x(7), x(8), x(13), x(14), x(12), x(16),
:EndLandUseParameterList
:LandUseParameterList
:Parameters, GAMMA_SHAPE, GAMMA_SCALE, GAMMA_SHAPE2, GAMMA_SCALE2,
:Units, -, -, -, -,
[DEFAULT], {par[x01]}, {par[x02]}, {par[x03]}, {par[x04]},
# x(1), x(2), x(3), x(4),
:EndLandUseParameterList
#-----------------------------------------------------------------
# Vegetation Parameters
#-----------------------------------------------------------------
:VegetationParameterList
:Parameters, RAIN_ICEPT_PCT, SNOW_ICEPT_PCT,
:Units, -, -,
[DEFAULT], 0.0, 0.0,
:EndVegetationParameterList
"""
RVC = """
#########################################################################
:FileType rvc ASCII Raven rev217 (v2.9)
:WrittenBy James Craig & Juliane Mai
:CreationDate June 2019
#
# RAVEN run of Salmon River near Prince George using HMETS model setup
#------------------------------------------------------------------------
#
# tied parameters:
# (it is important for OSTRICH to find every parameter place holder somewhere in this file)
# (without this "par_x20" and "par_x21" wouldn't be detectable)
# para_half_x20 = para_x20 * 1000. / 2. = {par[x20]} / 2. [m] = {dpar[half_x20]} [mm]
# para_half_x21 = para_x21 * 1000. / 2. = {par[x21]} / 2. [m] = {dpar[half_x21]} [mm]
# initialize to 1/2 full
#:UniformInitialConditions SOIL[0] {dpar[half_x20]} # x(20)*1000/2 [mm]
#:UniformInitialConditions SOIL[1] {dpar[half_x21]} # x(21)*1000/2 [mm]
:HRUStateVariableTable (formerly :IntialConditionsTable)
:Attributes SOIL[0] SOIL[1]
:Units mm mm
1 {dpar[half_x20]} {dpar[half_x21]}
:EndHRUStateVariableTable
"""
RVT = """
#########################################################################
:FileType rvt ASCII Raven rev217 (v2.9)
:WrittenBy James Craig & Juliane Mai
:CreationDate June 2019
#
# RAVEN run of Salmon River near Prince George using HMETS model setup
#------------------------------------------------------------------------
# meteorological forcings
:Gauge
:Latitude 54.09639
:Longitude -122.67972
:Elevation 606.0
:RedirectToFile data_obs/Salmon-River-Near-Prince-George_meteo_daily.rvt
:EndGauge
# observed streamflow
:RedirectToFile data_obs/Salmon-River-Near-Prince-George_Qobs_daily.rvt
"""
RVH = """
#########################################################################
:FileType rvh ASCII Raven rev217 (v2.9)
:WrittenBy James Craig & Juliane Mai
:CreationDate June 2019
#
# RAVEN run of Salmon River near Prince George using HMETS model setup
#------------------------------------------------------------------------
#
#
:SubBasins
:Attributes NAME DOWNSTREAM_ID PROFILE REACH_LENGTH GAUGED
:Units none none none km none
1, hmets, -1, NONE, _AUTO, 1
:EndSubBasins
:HRUs
:Attributes AREA ELEVATION LATITUDE LONGITUDE BASIN_ID LAND_USE_CLASS VEG_CLASS SOIL_PROFILE AQUIFER_PROFILE TERRAIN_CLASS SLOPE ASPECT
:Units km2 m deg deg none none none none none none deg deg
1 4230.0, 606.0, 54.09639, -122.67972, 1 FOREST FOREST DEFAULT_P [NONE] [NONE] 0.0 0
:EndHRUs
"""
| python |
from django.contrib import admin
from django.utils.html import mark_safe
# Register your models here.
from .models import Product, Collection, ProductImage
from .forms import RequiredInlineFormSet
class ProductImageAdmin(admin.StackedInline):
model = ProductImage
readonly_fields = ['image_tag']
formset = RequiredInlineFormSet
extra = 0
min_num = 1
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
inlines = [ProductImageAdmin]
list_display = ("name_pt", "description_pt", "collection", "add_date", "image_display")
list_filter = ("collection", "add_date",)
search_fields = ('name_pt', )
def image_display(self, obj):
product_images = ProductImage.objects.filter(product=obj)
if len(product_images) > 0:
display_image = product_images[0].image.url
else:
display_image = "image_not_found.png"
return mark_safe(f'<img src="/{display_image}" width="50" height="50" />')
image_display.allow_tags = True
image_display.__name__ = "Image"
@admin.register(Collection)
class CollectionAdmin(admin.ModelAdmin):
readonly_fields = ['image_tag']
list_display = ("name_pt", "description_pt", "add_date", "image_display",)
list_filter = ("name_pt", "add_date", )
search_fields = ('name_pt', )
def image_display(self, obj):
return mark_safe(f'<img src="/{obj.image.url}" width="50" height="50" />')
image_display.allow_tags = True
image_display.__name__ = "Image"
| python |
import os
import yaml
_dirname = os.path.dirname(os.path.abspath(__file__))
def load_config(filename):
with open(os.path.join(_dirname, filename)) as file:
config = yaml.load(file, Loader=yaml.FullLoader)
return config
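# Hedged usage sketch (assumption): the filename is resolved relative to this
# module's directory, so a sibling "config.yaml" would be loaded with:
#     cfg = load_config('config.yaml')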
| python |
'''keyvault.py - azurerm functions for the Microsoft.Keyvault resource provider'''
import datetime
import json
from .restfns import do_delete, do_get, do_get_next, do_put, do_post
from .subfns import list_tenants
from .settings import get_rm_endpoint, KEYVAULT_API
def create_keyvault(access_token, subscription_id, rgname, vault_name, location,
template_deployment=True, tenant_id=None, object_id=None):
'''Create a new key vault in the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
vault_name (str): Name of the new key vault.
location (str): Azure data center location. E.g. westus2.
template_deployment (boolean): Whether to allow deployment from template.
tenant_id (str): Optionally specify a tenant ID (otherwise picks first response) from
ist_tenants().
object_id (str): Optionally specify an object ID representing user or principal for the
access policy.
Returns:
HTTP response. JSON body of key vault properties.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'/providers/Microsoft.KeyVault/vaults/', vault_name,
'?api-version=', KEYVAULT_API])
# get tenant ID if not specified
if tenant_id is None:
ret = list_tenants(access_token)
tenant_id = ret['value'][0]['tenantId']
# if object_id is None:
access_policies = [{'tenantId': tenant_id, 'objectId': object_id,
'permissions': {
'keys': ['get', 'create', 'delete', 'list', 'update', 'import',
'backup', 'restore', 'recover'],
'secrets': ['get', 'list', 'set', 'delete', 'backup', 'restore',
'recover'],
'certificates': ['get', 'list', 'delete', 'create', 'import', 'update',
'managecontacts', 'getissuers', 'listissuers',
'setissuers', 'deleteissuers', 'manageissuers',
'recover'],
'storage': ['get', 'list', 'delete', 'set', 'update', 'regeneratekey',
'setsas', 'listsas', 'getsas', 'deletesas']
}}]
vault_properties = {'tenantId': tenant_id, 'sku': {'family': 'A', 'name': 'standard'},
'enabledForTemplateDeployment': template_deployment,
'accessPolicies': access_policies}
vault_body = {'location': location, 'properties': vault_properties}
body = json.dumps(vault_body)
return do_put(endpoint, body, access_token)
def delete_keyvault(access_token, subscription_id, rgname, vault_name):
'''Deletes a key vault in the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
vault_name (str): Name of the new key vault.
Returns:
HTTP response. 200 OK.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'/providers/Microsoft.KeyVault/vaults/', vault_name,
'?api-version=', KEYVAULT_API])
return do_delete(endpoint, access_token)
def get_keyvault(access_token, subscription_id, rgname, vault_name):
'''Gets details about the named key vault.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
vault_name (str): Name of the key vault.
Returns:
HTTP response. JSON body of key vault properties.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'/providers/Microsoft.KeyVault/vaults/', vault_name,
'?api-version=', KEYVAULT_API])
return do_get(endpoint, access_token)
def list_keyvaults(access_token, subscription_id, rgname):
'''Lists key vaults in the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response. 200 OK.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'/providers/Microsoft.KeyVault/vaults',
'?api-version=', KEYVAULT_API])
return do_get_next(endpoint, access_token)
def list_keyvaults_sub(access_token, subscription_id):
'''Lists key vaults belonging to this subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. 200 OK.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.KeyVault/vaults',
'?api-version=', KEYVAULT_API])
return do_get_next(endpoint, access_token)
def set_keyvault_secret(access_token, vault_uri, secret_name, secret_value):
'''Adds a secret to a key vault using the key vault URI.
Creates a new version if the secret already exists.
Args:
access_token (str): A valid Azure authentication token.
vault_uri (str): Vault URI e.g. https://myvault.vault.azure.net.
secret_name (str): Name of the secret to add.
secret_value (str): Value of the secret.
Returns:
HTTP response. 200 OK.
'''
endpoint = ''.join([vault_uri,
'/secrets/', secret_name,
'?api-version=', '7.0'])
current_time = datetime.datetime.now().isoformat()
attributes = {'created': current_time,
'enabled': True,
'exp': None,
'nbf': None,
'recoveryLevel': 'Purgeable',
'updated': current_time}
secret_body = {'attributes': attributes,
'contentType': None,
'kid': None,
'managed': None,
'tags': {'file-encoding': 'utf-8'},
'value': secret_value}
body = json.dumps(secret_body)
print(body)
return do_put(endpoint, body, access_token)
def delete_keyvault_secret(access_token, vault_uri, secret_name):
'''Deletes a secret from a key vault using the key vault URI.
Args:
access_token (str): A valid Azure authentication token.
vault_uri (str): Vault URI e.g. https://myvault.azure.net.
secret_name (str): Name of the secret to add.
Returns:
HTTP response. 200 OK.
'''
endpoint = ''.join([vault_uri,
'/secrets/', secret_name,
'?api-version=', '7.0'])
return do_delete(endpoint, access_token)
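# Hedged usage sketch (assumption, not part of the original module): a typical
# call sequence for these helpers. The token, subscription id, resource group,
# vault name and vault URI below are placeholders and will not authenticate
# against Azure.
if __name__ == '__main__':
    token = '<access-token>'
    sub_id = '<subscription-id>'
    create_keyvault(token, sub_id, 'my-rg', 'my-vault', 'westus2')
    print(get_keyvault(token, sub_id, 'my-rg', 'my-vault'))
    set_keyvault_secret(token, 'https://my-vault.vault.azure.net', 'db-password', 's3cr3t')
    delete_keyvault_secret(token, 'https://my-vault.vault.azure.net', 'db-password')
    delete_keyvault(token, sub_id, 'my-rg', 'my-vault')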
| python |
"""
I don't know how much you know already, so I'm assuming you know little
to no Python. This is a multi-line comment, denoted by the three quotation
marks above and below this. Single line and inline comments start with #.
Let's start basic - "print" will send words into the console.
"""
print("Hello! Reddit bot starting up!")
"""
In this next bit here, we're importing praw (Python Reddit API Wrapper).
"Importing" means that you're basically loading another special Python script
called a module. They allow you to do some really fun stuff (like interact
with reddit) without doing a lot of hard work, and they keep your script
looking clean, too.
I have the importing set inside of a try statement, meaning if an error should arise
during this section of code, instead of exiting, it executes the exception
handler instead. This isn't the best example, because my exception is just to exit,
but it will print a much more human-readable error message than it would
otherwise. We'll see try again later.
"""
try:
mod = "praw"
import praw
mod = "time"
import time
except:
exit("Module "+mod+" is required to run this bot. Please install it with pip and run this script again")
# Next up is variables. Normally, I write my bots to prompt the user for
# this information, then save it to a separate file, but for teaching
# purposes we'll put the information right in the file itself.
botRedditUser = "" # Type your bot reddit username in between the quotes. (leave out /u/)
# This is a "string" variable. Basically, it's text.
botRedditPassword = "" # Same deal
botClientID = "" # See below if you don't know what these two are
botSecret = ""
myUsername = ""
mySubreddit = "" # do not put /r/
keyword = "" # You mentioned a keyword in your post for the bot to respond to. Type that here.
sleepTime = 60*5 # This is the number of seconds the bot will wait before
# refreshing. Since it's a number, we can do math!
# (This will make the bot sleep for 5 minutes)
"""
If you don't know what the client ID or secret are, here's what you do:
1) Go to https://www.reddit.com/prefs/apps and sign in with your bot account.
2) Press the 'create app' button, then enter the following:
Name: randomGifBot (or whatever you want)
App type: script
description: (leave this blank or enter whatever you wish)
about url: https://github.com/WolfgangAxel/Random-Projects/randomGifBot/RGB.py
redirect url: http://127.0.0.1:65010/authorize_callback
3) Finally, press the 'create app' button.
"""
reddit = praw.Reddit(client_id = botClientID,
client_secret=botSecret,
password=botRedditPassword,
user_agent="Random GIF bot for /r/"+mySubreddit+", hosted by /u/"+myUsername,
username = botRedditUser)
print("Successfully connected to Reddit!")
"""
This is us initializing our connection with Reddit. It's a function provided
by praw. You can look through what they all are over at http://praw.readthedocs.io
Functions are whatever they're named, followed by a list of arguments
in parentheses. They're basically dislocated sections of code that can
be run multiple times with multiple inputs. They're pretty easy to understand.
In fact, why don't we make our own function right now?
"""
def getRandomGif():
"""
This will be our function to get a new gif from /r/gifs.
It's a pretty simple function, so we won't take any arguments.
"""
while True:
# "while" means that this portion of the code will loop until
# a condition is met. In this case, our condition is "True".
# This basically means that this will loop indefinitely or until
# it is interrupted.
print("Looking for a gif to send")
randomPost = reddit.subreddit('gifs').random() # get a random post from gifs
# Let's check to see if it's a self-post. If we got like a mod announcement
# or something instead of a gif, this wouldn't be quite as cool.
if not randomPost.is_self:
# Another thing- we don't want just any old gif.
# We want a worthwhile gif.
# So, we'll set a minimum score for our gifs.
if randomPost.score >= 250:
# If it's not a self post, and if the score is good,
# then we'll "return" it to the main function.
# This will probably make more sense later
print("Found a gif! "+randomPost.url)
return randomPost.url
"""
And that's it! if the post we get is a self-post, then the "while" loop
makes it start from the top and try again. Here's what it looks like
without my comments:
def getRandomGif():
while True:
randomPost = reddit.subreddit('gifs').random()
if not randomPost.is_self:
return randomPost.url
With that out of the way, let's write the main loop.
"""
while True: # Our good ol' friend
try:
# This will go through and check each comment in the subreddit
for comment in reddit.subreddit( mySubreddit ).comments():
print("looking at /u/"+comment.author.name+"'s comment...")
if keyword not in comment.body:
print("They don't want a gif.")
continue
# "continue" makes python skip the rest of this and start
# at the top of the "for" loop with the next item
# Now this next part is a little weird.
# I found out when making this bot that the comment replies
# function is a little bit buggy. They only show up properly
# if you pull the comment from the submission as opposed to
# just looking at the comment straight. So, we have to do a
# little dance in order to get what we want.
# What this does is get the ID of the comment we were called
# for, then compares it to the recent comments the bot has
# made. If it matches, then the bot will skip it.
thisID = comment.id
repliedComments = [ myComment.parent().id for myComment in reddit.redditor(botRedditUser).comments.new() ]
if thisID in repliedComments:
print("I already gif'd them.")
continue
print("They want a gif!")
randomGifURL = getRandomGif() # We get the URL of a gif
comment.reply("[Here's your GIFt!]("+randomGifURL+")") # and we reply to the comment
time.sleep(sleepTime) # sleep (do nothing) until next time
except Exception as e:
# This means that if there's any Exceptions (errors) from the code above,
# we execute this instead, with the error message as the variable e.
print("There was an error!:\n\n"+str(e.args)) # str() converts a variable into a string.
# We have to do this since we're adding it
# to the other string
time.sleep(60) # Sleep for one minute, then try again.
"""
And there's the bot!!
"""
| python |
class Resource(object):
def __init__(self, sigfox, resource):
self.sigfox = sigfox
self.resource = resource
def retrieve(self, id="", query=""):
""" Retrieve a list of <resources> according to visibility permissions
and request filters or Retrieve information about a given
<resource_id>.
"""
response = self.sigfox.get(
"{}{}".format(self.resource, id),
query
)
return response
def create(self, body):
""" Create a new <resource>. """
response = self.sigfox.post("{}".format(self.resource), body)
return response
def update(self, id, body):
""" Update a given <resource>. """
response = self.sigfox.put(
"{}{}".format(self.resource, id),
body
)
return response
def delete(self, id):
""" Delete a given <resource>. """
        # Note: the original called self.sigfox.get() here, which contradicts
        # the docstring; assuming the injected client exposes a delete() method
        # analogous to get/post/put.
        response = self.sigfox.delete(
            "{}{}".format(self.resource, id)
        )
return response
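# Hedged usage sketch (assumption, not part of the original module): the
# `sigfox` argument is expected to be a small HTTP client exposing
# get/post/put/delete; the DummyClient below only illustrates that contract,
# and the device id is a made-up placeholder.
if __name__ == '__main__':
    class DummyClient:
        def get(self, path, query=""):
            return {'GET': path, 'query': query}
        def post(self, path, body):
            return {'POST': path, 'body': body}
        def put(self, path, body):
            return {'PUT': path, 'body': body}
        def delete(self, path):
            return {'DELETE': path}

    devices = Resource(DummyClient(), 'devices/')
    print(devices.retrieve())                       # list all devices
    print(devices.retrieve('4d3091a05ee16b3cc8'))   # one device by id
    print(devices.create({'name': 'sensor-1'}))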
| python |
import os
import time
DEBUG = False
DEFINE = '#define RADIXJOIN_COUNT (size_t) {}*1024\n'
HEADINGSTIMER = ["Tuples", "CollLeft","PartLeft", "CollRight", "PartRight", "SettPart", "SettRedPart", "BuildKey", "BuildVal", "ProbeKey", "ProbAndBuildTup", "Append", "BuildHT", "ProbeHT", "PerfBuildProb", "Runtime", "Complete", "BucketSearchTime", "ExtractingValueBuild", "WritingDataBuild", "OrderingHashBuild", "GettingHT","gettingDChunk","extractingValProbe","writingDataProbe","orderingHashProbe", "remaining"]
HEADINGSNOTIMER = ["Tuples", "runtime"]
START = 6
END = 21
def power2(ex):
if ex == 0:
return 1
else:
return 2*power2(ex-1)
def modifyFileDuckDB(of):
powerOf2 = power2(of)
with open('../../benchmark/micro/radixjoin.cpp', 'r') as file:
# read a list of lines into data
data = file.readlines()
if DEBUG:
print(data)
for i in range(0, len(data)):
if '#define' in data[i]:
if DEBUG:
print(data[i])
data[i] = DEFINE.format(powerOf2)
if DEBUG:
print(data[i])
if DEBUG:
print(data)
with open('../../benchmark/micro/radixjoin.cpp', 'w') as file:
for e in data:
file.write(e)
file.close()
def modifyFileDuckDBNoTimer():
with open('../../src/include/duckdb/execution/operator/join/physical_radix_join.hpp', 'r') as file:
# read a list of lines into data
data = file.readlines()
if DEBUG:
print(data)
for i in range(0, len(data)):
if '#define TIMER ' in data[i]:
if DEBUG:
print(data[i])
data[i] = '#define TIMER 0\n'
if DEBUG:
print(data[i])
if DEBUG:
print(data)
with open('../../src/include/duckdb/execution/operator/join/physical_radix_join.hpp', 'w') as file:
for e in data:
file.write(e)
file.close()
with open('../../src/include/duckdb/execution/radix_hashtable.hpp', 'r') as file:
# read a list of lines into data
data = file.readlines()
if DEBUG:
print(data)
for i in range(0, len(data)):
if '#define TIMER ' in data[i]:
if DEBUG:
print(data[i])
data[i] = '#define TIMER 0\n'
if DEBUG:
print(data[i])
if DEBUG:
print(data)
with open('../../src/include/duckdb/execution/radix_hashtable.hpp', 'w') as file:
for e in data:
file.write(e)
file.close()
def modifyFileDuckDBTimer():
with open('../../src/include/duckdb/execution/operator/join/physical_radix_join.hpp', 'r') as file:
# read a list of lines into data
data = file.readlines()
if DEBUG:
print(data)
for i in range(0, len(data)):
if '#define TIMER ' in data[i]:
if DEBUG:
print(data[i])
data[i] = '#define TIMER 1\n'
if DEBUG:
print(data[i])
if DEBUG:
print(data)
with open('../../src/include/duckdb/execution/operator/join/physical_radix_join.hpp', 'w') as file:
for e in data:
file.write(e)
file.close()
with open('../../src/include/duckdb/execution/radix_hashtable.hpp', 'r') as file:
# read a list of lines into data
data = file.readlines()
if DEBUG:
print(data)
for i in range(0, len(data)):
if '#define TIMER ' in data[i]:
if DEBUG:
print(data[i])
data[i] = '#define TIMER 1\n'
if DEBUG:
print(data[i])
if DEBUG:
print(data)
with open('../../src/include/duckdb/execution/radix_hashtable.hpp', 'w') as file:
for e in data:
file.write(e)
file.close()
pathStart = b'./plotsBenchmark'
if not os.path.exists(pathStart):
os.makedirs(pathStart)
pathDataRuntime = pathStart + b'/data_runtimeTimer.csv'
fDataRuntime = open(pathDataRuntime, 'a+')
for i in range(0, len(HEADINGSTIMER)):
fDataRuntime.write(HEADINGSTIMER[i])
if i != len(HEADINGSTIMER)-1:
fDataRuntime.write(",")
else:
fDataRuntime.write("\n")
fDataRuntime.close()
modifyFileDuckDBTimer()
for i in range(START,END):
print("Timer Modifying to " + str(i))
modifyFileDuckDB(i)
# Change dir to make the new executable
os.chdir("../../build/release/benchmark")
# Configure and make the new executable
os.system("make -j8")
# Change back to the Desktop
os.chdir("../../../Benchmarks/RadixJoin")
# Wait to cool down
time.sleep(5) # sleep 5 seconds
print("Timer Starting modified " + str(i))
# Execute the benchmarkrunner
os.system("python3 duckdbbenchmarkTimer.py")
# Wait to cool down
time.sleep(5) # sleep 5 seconds
# Change dir to make the new executable
os.chdir("../../build/release/benchmark")
# Configure and make the new executable
os.system("make clean")
# Change back to the Desktop
os.chdir("../../../Benchmarks/RadixJoin")
time.sleep(10)
pathDataRuntime = pathStart + b'/data_runtimeNoTimer.csv'
fDataRuntime = open(pathDataRuntime, 'a+')
for i in range(0, len(HEADINGSNOTIMER)):
fDataRuntime.write(HEADINGSNOTIMER[i])
if i != len(HEADINGSNOTIMER)-1:
fDataRuntime.write(",")
else:
fDataRuntime.write("\n")
fDataRuntime.close()
modifyFileDuckDBNoTimer()
for i in range(START,END):
print("No timer Modifying to " + str(i))
modifyFileDuckDB(i)
# Change dir to make the new executable
os.chdir("../../build/release/benchmark")
# Configure and make the new executable
os.system("make -j8")
# Change back to the Desktop
os.chdir("../../../Benchmarks/RadixJoin")
# Wait to cool down
time.sleep(5) # sleep 5 seconds
print("No timer Starting modified " + str(i))
# Execute the benchmarkrunner
os.system("python3 duckdbbenchmarkNoTimer.py")
# Wait to cool down
time.sleep(5) # sleep 5 seconds
# Change dir to make the new executable
os.chdir("../../build/release/benchmark")
# Configure and make the new executable
os.system("make clean")
# Change back to the Desktop
os.chdir("../../../Benchmarks/RadixJoin")
| python |
# O(nlog(n)) time | O(log(n)) space
def quickSort(array):
quickSortHelper(array, 0, len(array) - 1)
return array
def quickSortHelper(array, startIdx, endIdx):
if startIdx >= endIdx:
return
pivotIdx = startIdx
leftIdx = startIdx + 1
rightIdx = endIdx
while rightIdx >= leftIdx:
if array[leftIdx] > array[pivotIdx] and array[rightIdx] < array[pivotIdx]:
array[leftIdx], array[rightIdx] = array[rightIdx], array[leftIdx]
elif array[leftIdx] <= array[pivotIdx]:
leftIdx += 1
elif array[rightIdx] >= array[pivotIdx]:
rightIdx -= 1
array[pivotIdx], array[rightIdx] = array[rightIdx], array[pivotIdx]
leftSubarrayIsSmaller = rightIdx - 1 - startIdx < endIdx - (rightIdx + 1)
if leftSubarrayIsSmaller:
quickSortHelper(array, startIdx, rightIdx - 1)
quickSortHelper(array, rightIdx + 1, endIdx)
else:
quickSortHelper(array, rightIdx + 1, endIdx)
quickSortHelper(array, startIdx, rightIdx - 1)
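# Minimal usage sketch of the in-place quick sort above; values are illustrative.
if __name__ == "__main__":
    sample = [8, 5, 2, 9, 5, 6, 3]
    print(quickSort(sample))   # [2, 3, 5, 5, 6, 8, 9]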
| python |
from math import factorial
l = []
for i in range(1,100+1):
l.append(1/i)
print('Sum:', sum(l))
print()
print('Minimum value:', min(l))
print()
print('Maximum value:', max(l))
silnia = factorial(1000)
lz = list(str(silnia))
lz2 = []
for i in range(len(lz)):
lz2.append(int(lz[i]))
print()
print('Sum of digits of 1000!:', sum(lz2)) | python |
#!/usr/bin/env python
import argparse
import sys
import numpy as np
import tensorflow as tf
import librosa
import config
import model
from IPython.lib.display import Audio
parser = argparse.ArgumentParser(description='Train song embeddings.')
parser.add_argument('--config', '-c', required=True, help='Config file')
parser.add_argument('--ckpt', required=True, help='TensorFlow checkpoint file')
parser.add_argument('--song_id', required=True, type=int, help='ID of the song to sample')
parser.add_argument('--n_samples', type=int, default=100, help='Number of sequential samples to take')
args = parser.parse_args()
config = config.load(args.config)
input_song_ids = tf.placeholder(tf.int32, [None])
target_feature_sequences = tf.placeholder(
tf.float32,
[None, None, config['num_features']],
)
feature_outputs = model.build(config, 382, input_song_ids, target_feature_sequences)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, args.ckpt)
print('Model restored.')
outputs = [np.zeros((1, 1, config['num_features']))]
# This is super inefficient since it does not use the known hidden states
# and instead recomputes from scratch
for i in range(args.n_samples):
if (i + 1) % 50 == 0:
print(outputs[-1])
sys.stdout.write('.')
sys.stdout.flush()
history = np.concatenate(outputs, axis=1)
feed_dict = {
input_song_ids: [args.song_id],
target_feature_sequences: history,
}
new_outputs = sess.run(feature_outputs, feed_dict=feed_dict)
last_output = np.expand_dims(new_outputs[:, -1, :], axis=1)
outputs.append(last_output)
sys.stdout.write('\n')
def invlogamplitude(S):
"""librosa.logamplitude is actually 10_log10, so invert that."""
    return 10.0**(S/10.0)
# Reconstruct audio:
# https://github.com/librosa/librosa/issues/424
mfccs = np.transpose(np.squeeze(np.concatenate(outputs, axis=1), 0))
n_mfcc = mfccs.shape[0]
n_mel = 128
dctm = librosa.filters.dct(n_mfcc, n_mel)
n_fft = 2048
sr = 22050
mel_basis = librosa.filters.mel(sr, n_fft)
bin_scaling = 1.0/np.maximum(0.0005, np.sum(np.dot(mel_basis.T, mel_basis), axis=0))
recon_stft = bin_scaling[:, np.newaxis] * np.dot(mel_basis.T, invlogamplitude(np.dot(dctm.T, mfccs)))
y_len = int(sr * 2.325)
excitation = np.random.randn(y_len)
E = librosa.stft(excitation)
print(np.shape(recon_stft))
print(np.shape(excitation))
print(np.shape(E))
print(recon_stft)
recon = librosa.istft(E/np.abs(E)*np.sqrt(recon_stft))
Audio(recon, rate=sr)
| python |
# Generated by Django 3.1.1 on 2021-01-12 16:54
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Account",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("name", models.CharField(max_length=100)),
("legalname", models.CharField(max_length=100)),
("vat", models.CharField(max_length=20)),
("address", models.CharField(max_length=200)),
("city", models.CharField(max_length=50)),
("zipcode", models.CharField(max_length=32)),
("country", models.CharField(max_length=50)),
("province", models.CharField(max_length=50)),
("geo", models.CharField(blank=True, max_length=20)),
("phone", models.CharField(max_length=100)),
("email", models.CharField(max_length=200)),
("pec", models.CharField(blank=True, max_length=200)),
("sdi", models.CharField(blank=True, max_length=20)),
(
"type",
models.CharField(
choices=[
("INACTIVE", "Inactive"),
("CUSTOMER", "Customer"),
("PROSPECT", "Prospect"),
("COMPETITOR", "Competitor"),
("LEAD", "Lead"),
],
max_length=10,
),
),
("update_date", models.DateTimeField(auto_now=True)),
("create_date", models.DateTimeField(auto_now_add=True)),
],
options={
"verbose_name": "account",
"verbose_name_plural": "accounts",
"db_table": "account",
},
),
]
| python |
from systems.commands.index import Command
from systems.manage.task import channel_communication_key
from utility.data import normalize_value, dump_json
from utility.time import Time
class Send(Command('send')):
def exec(self):
if not self.check_channel_permission():
self.error("You do not have permission to access the {} channel".format(self.communication_channel))
connection = self.manager.task_connection()
if connection:
data = {
'user': self.active_user.name,
'time': Time().now_string,
'message': normalize_value(self.communication_message, parse_json = True)
}
connection.publish(
channel_communication_key(self.communication_channel),
dump_json(data, indent = 2)
)
self.success("Message sent to channel {}: {}".format(self.communication_channel, self.communication_message))
| python |
import numpy as np
class MeanSquaredError():
def __call__(self, y, y_pred):
self.last_y_pred = y_pred
self.last_y = y
assert y_pred.shape == y.shape
self.last_loss = np.sum(np.square(y-y_pred), axis=0)/y_pred.shape[0]
return self.last_loss
def gradient(self):
self.dL_dy = -2*(self.last_y - self.last_y_pred)/self.last_y.shape[0]
return self.dL_dy
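# Minimal usage sketch (assumes targets shaped (n_samples, n_outputs), matching the axis=0 sum above).
def _mse_example():
    mse = MeanSquaredError()
    y = np.array([[1.0], [2.0], [3.0]])
    y_pred = np.array([[1.5], [2.0], [2.5]])
    print(mse(y, y_pred))    # [0.16666667]: mean squared error per output column
    print(mse.gradient())    # -2 * (y - y_pred) / n, i.e. the gradient w.r.t. y_pred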
class MSE(MeanSquaredError):
def __init__(self):
pass | python |
import unittest
import os
import wikipedia
from programy.services.wikipediaservice import WikipediaService
from programytest.aiml_tests.client import TestClient
class MockWikipediaAPI(object):
DISAMBIGUATIONERROR = 1
PAGEERROR = 2
GENERALEXCEPTION = 3
def __init__(self, response=None, throw_exception=None):
self._response = response
self._throw_exception = throw_exception
def summary(self, title, sentences=0, chars=0, auto_suggest=True, redirect=True):
if self._throw_exception is not None:
if self._throw_exception == MockWikipediaAPI.DISAMBIGUATIONERROR:
raise wikipedia.exceptions.DisambiguationError("Title", "May Refer To")
elif self._throw_exception == MockWikipediaAPI.PAGEERROR:
raise wikipedia.exceptions.PageError(pageid=666)
else:
raise Exception()
else:
return self._response
class WikipediaServiceTests(unittest.TestCase):
def setUp(self):
client = TestClient()
self._client_context = client.create_client_context("testid")
self._client_context.client.license_keys.load_license_key_file(os.path.dirname(__file__)+ os.sep + "test.keys")
def test_ask_question(self):
service = WikipediaService(api=MockWikipediaAPI(response="Test Wikipedia response"))
self.assertIsNotNone(service)
response = service.ask_question(self._client_context, "SUMMARY what is a cat")
self.assertEquals("Test Wikipedia response", response)
def test_ask_question_disambiguous(self):
service = WikipediaService(api=MockWikipediaAPI(response=None, throw_exception=MockWikipediaAPI.DISAMBIGUATIONERROR))
self.assertIsNotNone(service)
response = service.ask_question(self._client_context, "what is a cat")
self.assertEquals("", response)
def test_ask_question_pageerror_exception(self):
service = WikipediaService(api=MockWikipediaAPI(response=None, throw_exception=MockWikipediaAPI.PAGEERROR))
self.assertIsNotNone(service)
response = service.ask_question(self._client_context, "what is a cat")
self.assertEquals("", response)
def test_ask_question_general_exception(self):
service = WikipediaService(api=MockWikipediaAPI(response=None, throw_exception=MockWikipediaAPI.GENERALEXCEPTION))
self.assertIsNotNone(service)
response = service.ask_question(self._client_context, "what is a cat")
self.assertEquals("", response)
| python |
#!/usr/bin/python3
# This file is part of becalm-station
# https://github.com/idatis-org/becalm-station
# Copyright: Copyright (C) 2020 Enrique Melero <[email protected]>
# License: Apache License Version 2.0, January 2004
# The full text of the Apache License is available here
# http://www.apache.org/licenses/
# -*- coding: utf-8 -*-
from flask import Flask, jsonify,send_from_directory, make_response
from flask_cors import CORS
from apscheduler.schedulers.background import BackgroundScheduler
from datetime import datetime
import board
import busio
import adafruit_bmp280
import json
import sqlite3 as sl
import os
# Some configurable variables
dbfile="becalm-station.db"
app = Flask(__name__, static_url_path='')
scheduler = BackgroundScheduler()
scheduler.start()
temperature = -1
pressureh = -1
pressurel = -1
lpressure= -1
lastmeasure = datetime.now()
lbreath= datetime.now()
linspiration=lbreath
rr = -1
ra = -1
tmpPhase=""
rtresh=0.1
def job1():
global linspiration, lpressure, pressureh, pressurel,temperature, rr, lbreath, tmpPhase, ra
tmpLapse=datetime.now()
temperature=bmp280.temperature
tmpPressure=bmp280.pressure
lastBreath=datetime.now()
if pressurel==-1:
pressurel=tmpPressure
if pressureh==-1:
pressureh=tmpPressure
# Have we switched to inspire cycle?
if tmpPressure < (pressureh+pressurel)/2 - rtresh :
# Yes this is below the mid pression range
        # we can measure the breathing pattern (rate)
# and we store the pression range between max and min
if tmpPhase == 'E' :
rr=60 / ( datetime.now() - linspiration ).total_seconds()
lbreath=str(datetime.now()).split(".")[0]
ra=pressureh-pressurel
linspiration=datetime.now()
# We are inspiring
tmpPhase="I"
# Have we switched to expire cycle?
if tmpPressure > (pressureh+pressurel)/2 +rtresh :
# If we were inspiring before
# We measure the breathing rate
# and the respiratory amplitude
if tmpPhase == 'I' :
lbreath=datetime.now()
ra=pressureh-pressurel
tmpPhase="E"
if tmpPhase=="E" :
# measure pressure of expiration
pressureh=tmpPressure
if tmpPhase=="I" :
#
pressurel=tmpPressure
lpressure = tmpPressure
lastmeasure = datetime.now()
    # Initialize database
con = sl.connect(dbfile)
con.execute('''PRAGMA synchronous = OFF''')
sql = 'INSERT INTO measure (type, value ) values(?, ?)'
data = [
('t',temperature),
('p',lpressure),
('a',ra),
('q',pressurel),
('b',rr)
]
with con:
con.executemany(sql, data)
con.commit()
print("Pressure:" + str(lpressure) + " bmp280 read lapse:" + str( ( lastmeasure - tmpLapse).total_seconds() ) )
# Create library object using our Bus I2C port
i2c = busio.I2C(board.SCL, board.SDA)
bmp280 = adafruit_bmp280.Adafruit_BMP280_I2C(i2c, address=0x76)
bmp280.sea_level_pressure = 1013.25
# Initialize database
#con = sl.connect('becalm-station.db')
job = scheduler.add_job(job1, 'interval', seconds=0.3)
@app.route('/', methods=['GET'])
def data():
# ('t', 'Temperature', '°C', 36, 40, 30, 50, 1),
# ('p', 'Pressure in the mask', 'Pa', 100700, 101400, 100500, 101500, 1),
# ('c', 'CO2 concentration', 'ppm', 110, 190, 100, 200, 0),
# ('h', 'Heartbeat rate', 'beats/min', 110, 190, 100, 200, 0),
# ('o', 'Sp02 - Oxygen saturation in blood', '?', 110, 185, 100, 200, 0),
# ('a', 'Breath range', 'Pa', 110, 185, 100, 200, 0),
# ('b', 'Breathing rate', 'respiraciones/minuto', 110, 185, 100, 200, 0),
# ('q', 'PEEP', 'Pa', 110, 185, 100, 200, 0);
output = dict()
output['t'] = round(temperature,2)
output['p'] = round(lpressure,2)
output['a'] = round(ra, 2)
# output['Expire pressure'] = round(pressureh,2)
output['q'] = round(pressurel,2)
output['b'] = round(rr,2)
# output['Last breath'] = str(lbreath)
# output['Breathing phase'] = tmpPhase
return(output)
@app.route('/debug', methods=['GET'])
def debug():
output = dict()
output['Temperature'] = round(temperature,2)
output['Pressure'] = round(lpressure,2)
output['Breath range'] = round(ra, 2)
output['Expire pressure'] = round(pressureh,2)
output['Inspire pressure'] = round(pressurel,2)
output['Breathing rate'] = round(rr,2)
output['Last breath'] = str(lbreath)
output['Breathing phase'] = tmpPhase
response=make_response(output,200)
response.headers["Refresh"]=0.3
return response
@app.route('/db', methods=['GET'])
def db():
return send_from_directory(os.getcwd(),dbfile)
if __name__ == '__main__':
# app.debug = True
cors = CORS(app, resources={r"/*": {"origins": "*"}})
app.run(host='0.0.0.0', port=8888, threaded=False, processes=1 )
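# Example queries (sketch; values are illustrative, keys follow the mapping in the comments above):
#   curl http://<station-ip>:8888/       -> {"t": 21.3, "p": 1001.2, "a": 1.8, "q": 1000.4, "b": 14.7}
#   curl http://<station-ip>:8888/debug  -> the same readings with verbose labels, auto-refreshing
#   curl -O http://<station-ip>:8888/db  -> downloads the sqlite file (becalm-station.db)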
| python |
class Car:
def __init__(self,marka,model,god,speed=0):
self.marka=marka
self.model=model
self.god=god
self.speed=speed
def speed_up(self):
self.speed+=5
def speed_down(self):
self.speed-=5
def speed_stop(self):
self.speed=0
def print_speed(self):
        print(f'Speed: {self.speed}')
def speed_back(self):
self.speed*=-1 | python |
import unittest
from app.models import User,Comment,Blog,Subscriber
class UserModelTest(unittest.TestCase):
def setUp(self):
self.new_user = User(password='blog')
def test_password_setter(self):
self.assertTrue(self.new_user.pass_secure is not None)
def test_no_access_password(self):
with self.assertRaises(AttributeError):
self.new_user.password
def test_password_verification(self):
self.assertTrue(self.new_user.verify_password('blog'))
class CommentModelTest(unittest.TestCase):
def setUp(self):
self.new_comment = Comment(id = 1, comment = 'ha', author = 'me', blog_id = 1)
def test_instance(self):
self.assertTrue(isinstance(self.new_comment,Comment))
def test_variables(self):
self.assertEquals(self.new_comment.id,1)
self.assertEquals(self.new_comment.comment, 'ha')
self.assertEquals(self.new_comment.author, 'me')
self.assertEquals(self.new_comment.blog_id, 1)
def test_get_comment(self):
# self.new_comment.save_comment()
self.get_comments = Comment.get_comment(1)
self.assertEquals(self.get_comments, [] )
class BlogModelTest(unittest.TestCase):
def setUp(self):
self.new_blog = Blog(id = 1, blog = 'ha',user_id = 1)
def test_instance(self):
self.assertTrue(isinstance(self.new_blog,Blog))
def test_variables(self):
self.assertEquals(self.new_blog.id,1)
self.assertEquals(self.new_blog.blog, 'ha')
self.assertEquals(self.new_blog.user_id, 1)
def test_get_blog(self):
self.get_blog = Blog.get_blog(1)
self.assertEquals(self.get_blog, [])
class SubscriberModelTest(unittest.TestCase):
def setUp(self):
self.new_subscriber = Subscriber(id = 1 , name = 'ha', email = 'he')
def test_instance(self):
self.assertTrue(isinstance(self.new_subscriber,Subscriber))
def test_variables(self):
self.assertEquals(self.new_subscriber.id, 1)
self.assertEquals(self.new_subscriber.name, 'ha')
self.assertEquals(self.new_subscriber.email, 'he')
| python |
#!/usr/bin/env python
"""
Example of a 'dynamic' prompt. One that shows the current time in the prompt.
"""
from prompt_toolkit import CommandLineInterface
from prompt_toolkit.layout import Layout
from prompt_toolkit.layout.prompt import Prompt
from pygments.token import Token
import datetime
import time
class ClockPrompt(Prompt):
def tokens(self, cli):
now = datetime.datetime.now()
return [
(Token.Prompt, '%s:%s:%s' % (now.hour, now.minute, now.second)),
(Token.Prompt, ' Enter something: ')
]
def main():
cli = CommandLineInterface(layout=Layout(before_input=ClockPrompt()))
def on_read_start():
"""
This function is called when we start reading at the input.
(Actually the start of the read-input event loop.)
"""
# Following function should be run in the background.
# We do it by using an executor thread from the `CommandLineInterface`
# instance.
def run():
# Send every second a redraw request.
while cli.is_reading_input:
time.sleep(1)
cli.request_redraw()
cli.run_in_executor(run)
cli.onReadInputStart += on_read_start
code_obj = cli.read_input()
print('You said: ' + code_obj.text)
if __name__ == '__main__':
main()
| python |
from django.shortcuts import render
def page_not_found(request, exception):
return render(request, 'error_handling/404.html') | python |
'''
Flask app for Juncture site.
Dependencies: bs4 Flask Flask-Cors html5lib requests
'''
import os, logging
from flask import Flask, request, send_from_directory
from flask_cors import CORS
import requests
logging.getLogger('requests').setLevel(logging.WARNING)
app = Flask(__name__)
CORS(app)
from bs4 import BeautifulSoup
# Prefix for site content
prefix = ''
default_ref = ''
def _add_tag(soup, tag, attrs):
el = soup.new_tag(tag)
el.attrs = attrs
if tag in ('script',):
soup.body.append(el)
else:
soup.head.append(el)
def _remove_tags(soup, tag, attrs):
for el in soup.find_all(tag, attrs): el.decompose()
def _customize_response(html):
'''Perform any post-processing of API-generated HTML.'''
# parse API-generated HTML with BeautifulSoup
# https://beautiful-soup-4.readthedocs.io/en/latest/
soup = BeautifulSoup(html, 'html5lib')
# Custom favicon
_remove_tags(soup, 'link', {'rel':'icon'})
_add_tag(soup, 'link', {'href': '/static/images/favicon.png', 'rel':'icon', 'type':'image/png'})
# Custom stylesheet
#_remove_tags(soup, 'style', {'data-id':'default'})
#_add_tag(soup, 'link', {'href': '/static/css/custom.css', 'rel':'stylesheet'})
return str(soup)
def _get_html(path, base_url, ref=default_ref, **kwargs):
api_endpoint = 'http://localhost:8000/html' if request.host.startswith('localhost') else 'https://api.visual-essays.net/html'
api_url = f'{api_endpoint}{path}?prefix={prefix}&base={base_url}'
if ref: api_url += f'&ref={ref}'
resp = requests.get(api_url)
return resp.status_code, resp.text if resp.status_code == 200 else ''
@app.route('/favicon.ico')
def favicon():
# return send_from_directory(os.path.join(app.root_path, 'static'), 'favicon.ico', mimetype='image/vnd.microsoft.icon')
return send_from_directory(os.path.join(app.root_path, 'static', 'images'), 'favicon.png', mimetype='image/png')
@app.route('/robots.txt')
def robots_txt():
return send_from_directory(os.path.join(app.root_path, 'static'), 'robots.txt', mimetype='text/plain')
@app.route('/sitemap.txt')
def sitemap_txt():
return send_from_directory(os.path.join(app.root_path, 'static'), 'sitemap.txt', mimetype='text/plain')
@app.route('/<path:path>')
@app.route('/')
def render_html(path=None):
base_url = f'/{"/".join(request.base_url.split("/")[3:])}'
if base_url != '/' and not base_url.endswith('/'): base_url += '/'
path = f'/{path}' if path else '/'
status, html = _get_html(path, base_url, **dict(request.args))
if status == 200:
html = _customize_response(html)
return html, status
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=7777)
| python |
from zone_api import platform_encapsulator as pe
from zone_api.core.devices.illuminance_sensor import IlluminanceSensor
from zone_api_test.core.device_test import DeviceTest
class IlluminanceSensorTest(DeviceTest):
""" Unit tests for illuminance_sensor.py. """
def setUp(self):
self.item = pe.create_number_item('IlluminanceSensorName')
self.set_items([self.item])
super(IlluminanceSensorTest, self).setUp()
self.illuminanceSensor = IlluminanceSensor(self.item)
def testGetIlluminanceLevel_noParams_returnsValidValue(self):
self.assertEqual(0, self.illuminanceSensor.get_illuminance_level())
pe.set_number_value(self.item, 50)
self.assertEqual(50, self.illuminanceSensor.get_illuminance_level())
| python |
import itertools
import demistomock as demisto # noqa: F401
import geopy.distance
from CommonServerPython import * # noqa: F401
requests.packages.urllib3.disable_warnings()
def get_distances_list(src_coords_list: list, events_dict: dict):
distance_list = []
for unique_pair in itertools.combinations(src_coords_list, 2):
geo_distance = round(geopy.distance.geodesic(unique_pair[0], unique_pair[1]).miles, 2)
hr = 'Calculated Distance: {} miles.'.format(str(geo_distance))
context = {
"distance": geo_distance,
"src_coords": unique_pair[0],
"dest_coords": unique_pair[1],
"source_ip": events_dict[unique_pair[0]]["ip"],
"source_country": events_dict[unique_pair[0]]["Country"],
"dest_ip": events_dict[unique_pair[1]]["ip"],
"dest_country": events_dict[unique_pair[1]]["Country"],
"timestamp": events_dict[unique_pair[0]]["event_timestamp"],
"identity": events_dict[unique_pair[0]]["identity_display_name"]
}
distance_list.append(CommandResults(readable_output=hr, outputs=context,
outputs_prefix="GeoEvents", outputs_key_field=""))
return distance_list
def verify_coords(args: dict):
"""
Verify the two given coords lists are identical - we receive two lists (and not one) for BC reasons
Args:
args: the script's arguments
"""
if not set(argToList(args['src_coords'])) == set(argToList(args['dest_coords'])):
raise ValueError('The source coordination list and the destination coordination list '
'should be identical.')
def generate_events_dict():
existing = demisto.get(demisto.context(), "ImpossibleTraveler.Events")
return {o['location']: o for o in existing}
def main():
try:
        events_dict = generate_events_dict()
args = demisto.args()
verify_coords(args)
return_results(get_distances_list(argToList(args['src_coords']), events_dict))
except Exception as e:
return_error('Error occurred while parsing output from command. Exception info:\n' + str(e))
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| python |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from .utils import *
class AvgPool1D(paddle.nn.AvgPool1D):
def __init__(self,
kernel_size,
stride=None,
padding=0,
ceil_mode=False,
count_include_pad=True,
divisor_override=None):
super().__init__(
kernel_size=kernel_size,
stride=stride,
padding=padding,
            ceil_mode=ceil_mode,
exclusive=count_include_pad,
divisor_override=divisor_override)
class AvgPool2D(paddle.nn.AvgPool2D):
def __init__(self,
kernel_size,
stride=None,
padding=0,
ceil_mode=False,
count_include_pad=True,
divisor_override=None):
super().__init__(
kernel_size=kernel_size,
stride=stride,
padding=padding,
            ceil_mode=ceil_mode,
exclusive=count_include_pad,
divisor_override=divisor_override)
class AvgPool3D(paddle.nn.AvgPool3D):
def __init__(self,
kernel_size,
stride=None,
padding=0,
ceil_mode=False,
count_include_pad=True,
divisor_override=None):
super().__init__(
kernel_size=kernel_size,
stride=stride,
padding=padding,
            ceil_mode=ceil_mode,
exclusive=count_include_pad,
divisor_override=divisor_override)
class BatchNorm1D(paddle.nn.BatchNorm1D):
def __init__(self,
num_features,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True):
momentum = 1 - momentum
weight_attr = None
bias_attr = None
if not affine:
weight_attr = paddle.ParamAttr(learning_rate=0.0)
bias_attr = paddle.ParamAttr(learning_rate=0.0)
super().__init__(
num_features,
momentum=momentum,
epsilon=eps,
weight_attr=weight_attr,
bias_attr=bias_attr,
use_global_stats=track_running_stats)
class BatchNorm2D(paddle.nn.BatchNorm2D):
def __init__(self,
num_features,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True):
momentum = 1 - momentum
weight_attr = None
bias_attr = None
if not affine:
weight_attr = paddle.ParamAttr(learning_rate=0.0)
bias_attr = paddle.ParamAttr(learning_rate=0.0)
super().__init__(
num_features,
momentum=momentum,
epsilon=eps,
weight_attr=weight_attr,
bias_attr=bias_attr,
use_global_stats=track_running_stats)
class BatchNorm3D(paddle.nn.BatchNorm3D):
def __init__(self,
num_features,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True):
momentum = 1 - momentum
weight_attr = None
bias_attr = None
if not affine:
weight_attr = paddle.ParamAttr(learning_rate=0.0)
bias_attr = paddle.ParamAttr(learning_rate=0.0)
super().__init__(
num_features,
momentum=momentum,
epsilon=eps,
weight_attr=weight_attr,
bias_attr=bias_attr,
use_global_stats=track_running_stats)
class BCEWithLogitsLoss(paddle.nn.BCEWithLogitsLoss):
def __init__(self,
weight=None,
size_average=None,
reduce=None,
reduction='mean',
pos_weight=None):
super().__init__(weight, reduction=reduction, pos_weight=pos_weight)
@property
def in_channels(self):
return self._in_channels
setattr(paddle.nn.layer.conv._ConvNd, "in_channels", in_channels)
@property
def out_channels(self):
return self._out_channels
setattr(paddle.nn.layer.conv._ConvNd, "out_channels", out_channels)
@property
def kernel_size(self):
return self._kernel_size
setattr(paddle.nn.layer.conv._ConvNd, "kernel_size", kernel_size)
@property
def stride(self):
return self._stride
setattr(paddle.nn.layer.conv._ConvNd, "stride", stride)
@property
def padding(self):
return self._padding
setattr(paddle.nn.layer.conv._ConvNd, "padding", padding)
@property
def dilation(self):
return self._dilation
setattr(paddle.nn.layer.conv._ConvNd, "dilation", dilation)
@property
def groups(self):
return self._groups
setattr(paddle.nn.layer.conv._ConvNd, "groups", groups)
class ConstantPad2D(paddle.nn.Pad2D):
def __init__(self, padding, value):
super().__init__(padding, value=value)
class Conv1D(paddle.nn.Conv1D):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros'):
super().__init__(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
padding_mode=padding_mode,
bias_attr=bias if not bias else None)
class Conv2D(paddle.nn.Conv2D):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros'):
super().__init__(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
padding_mode=padding_mode,
bias_attr=bias if not bias else None)
class Conv3D(paddle.nn.Conv3D):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros'):
super().__init__(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
padding_mode=padding_mode,
bias_attr=bias if not bias else None)
class Conv2DTranspose(paddle.nn.Conv2DTranspose):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
dilation=1,
padding_mode='zeros'):
super().__init__(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
output_padding=output_padding,
groups=groups,
dilation=dilation,
bias_attr=bias if not bias else None)
        assert padding_mode == 'zeros', "The padding_mode must be 'zeros' in Conv2DTranspose."
class CrossEntropyLoss(paddle.nn.CrossEntropyLoss):
def __init__(self,
weight=None,
size_average=None,
ignore_index=-100,
reduce=None,
reduction='mean'):
super().__init__(weight, reduction=reduction, ignore_index=ignore_index)
class Dropout(paddle.nn.Dropout):
def __init__(self, p=0.5, inplace=False):
super().__init__(p)
class Embedding(paddle.nn.Embedding):
def __init__(self,
num_embeddings,
embedding_dim,
padding_idx=None,
max_norm=None,
norm_type=2.0,
scale_grad_by_freq=False,
sparse=False,
_weight=None):
super().__init__(
num_embeddings,
embedding_dim,
padding_idx=padding_idx,
sparse=sparse)
assert max_norm is None, "The max_norm must be None in Embedding!"
        assert not scale_grad_by_freq, "The scale_grad_by_freq must be False in Embedding!"
class Identity(paddle.nn.Layer):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, input):
return input
class GroupNorm(paddle.nn.GroupNorm):
    def __init__(self, num_groups, num_channels, eps=1e-05, affine=True):
if not affine:
weight_attr = False
bias_attr = False
else:
weight_attr = None
bias_attr = None
super().__init__(num_groups, num_channels, eps, weight_attr, bias_attr)
class InstanceNorm2D(paddle.nn.InstanceNorm2D):
def __init__(self,
num_features,
eps=1e-05,
momentum=0.1,
affine=False,
track_running_stats=False):
momentum = 1 - momentum
weight_attr = None
bias_attr = None
if not affine:
weight_attr = paddle.ParamAttr(learning_rate=0.0)
bias_attr = paddle.ParamAttr(learning_rate=0.0)
super().__init__(
num_features,
momentum=momentum,
epsilon=eps,
weight_attr=weight_attr,
bias_attr=bias_attr)
class KLDivLoss(paddle.nn.Layer):
def __init__(self,
size_average=None,
reduce=None,
reduction='mean',
log_target=False):
super().__init__()
self.reduction = reduction
self.log_target = log_target
def forward(self, input, target):
if self.log_target:
out = paddle.exp(target) * (target - input)
else:
out_pos = target * (paddle.log(target) - input)
zeros = paddle.zeros_like(out_pos)
out = paddle.where(target > 0, out_pos, zeros)
out_sum = paddle.sum(out)
if self.reduction == "sum":
return out_sum
elif self.reduction == "batchmean":
n = input.shape[0]
return out_sum / n
elif self.reduction == "mean":
return paddle.mean(out)
else:
return out
class LayerNorm(paddle.nn.LayerNorm):
def __init__(self, normalized_shape, eps=1e-05, elementwise_affine=True):
if not elementwise_affine:
weight_attr = False
bias_attr = False
else:
weight_attr = None
bias_attr = None
super().__init__(normalized_shape, eps, weight_attr, bias_attr)
class Linear(paddle.nn.Linear):
def __init__(self, in_features, out_features, bias=True):
super().__init__(
in_features, out_features, bias_attr=bias if not bias else None)
class L1Loss(paddle.nn.L1Loss):
def __init__(self, size_average=None, reduce=None, reduction='mean'):
super().__init__(reduction=reduction)
class MaxPool1D(paddle.nn.MaxPool1D):
def __init__(self,
kernel_size,
stride=None,
padding=0,
dilation=1,
return_indices=False,
ceil_mode=False):
super().__init__(
kernel_size,
stride=stride,
padding=padding,
ceil_mode=ceil_mode,
return_mask=return_indices)
assert dilation == 1, "The dilation must be 1 in MaxPool1D."
class MaxPool2D(paddle.nn.MaxPool2D):
def __init__(self,
kernel_size,
stride=None,
padding=0,
dilation=1,
return_indices=False,
ceil_mode=False):
super().__init__(
kernel_size,
stride=stride,
padding=padding,
ceil_mode=ceil_mode,
return_mask=return_indices)
assert dilation == 1, "The dilation must be 1 in MaxPool2D."
class MaxPool3D(paddle.nn.MaxPool3D):
def __init__(self,
kernel_size,
stride=None,
padding=0,
dilation=1,
return_indices=False,
ceil_mode=False):
super().__init__(
kernel_size,
stride=stride,
padding=padding,
ceil_mode=ceil_mode,
return_mask=return_indices)
assert dilation == 1, "The dilation must be 1 in MaxPool3D."
import paddle
import paddle.nn as nn
TYPE_MAPPER = {"fp16": "float16", "fp32": "float32", "fp64": "float64"}
class MaxUnpool2D(paddle.nn.Layer):
def __init__(self, kernel_size, stride=None, padding=0):
super().__init__()
if isinstance(stride, int):
self.kernel_size = (kernel_size, kernel_size)
else:
self.kernel_size = kernel_size
if stride is None:
self.stride = self.kernel_size
else:
if isinstance(stride, int):
self.stride = (stride, stride)
else:
self.stride = stride
if isinstance(padding, int):
self.padding = (padding, padding)
else:
self.padding = padding
def forward(self, input, indices, output_size=None):
if output_size is None:
n, c, h, w = input.shape
out_h = (
h - 1
) * self.stride[0] - 2 * self.padding[0] + self.kernel_size[0]
out_w = (
w - 1
) * self.stride[1] - 2 * self.padding[1] + self.kernel_size[1]
output_size = (n, c, out_h, out_w)
else:
if len(output_size) == len(self.kernel_size) + 2:
output_size = output_size[2:]
t = str(input.dtype).lower().strip().split(".")[-1]
t = TYPE_MAPPER[t]
out = paddle.zeros(output_size, dtype=t)
flatten_out = paddle.flatten(out)
for i in range(indices.shape[0]):
for j in range(indices.shape[1]):
for k in range(indices.shape[2]):
for m in range(indices.shape[3]):
indices[i, j, k, m] = (out.shape[1] * out.shape[2] * out.shape[3]) * i + \
(out.shape[2] * out.shape[3]) * j + indices[i, j, k, m]
flatten_indices = paddle.flatten(indices)
flatten_input = paddle.flatten(input)
for i in range(flatten_indices.shape[0]):
flatten_out[flatten_indices[i].tolist()] = flatten_input[i].tolist()
out = paddle.reshape(flatten_out, out.shape)
return out
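# Hedged usage sketch (assumptions: a Paddle version whose dtype strings match TYPE_MAPPER above,
# and mask indices from paddle.nn.MaxPool2D(..., return_mask=True) flattened per (H, W) plane,
# which is what the offset arithmetic in forward() expects). Not executed on import.
def _max_unpool_example():
    pool = paddle.nn.MaxPool2D(kernel_size=2, stride=2, return_mask=True)
    unpool = MaxUnpool2D(kernel_size=2, stride=2)
    x = paddle.rand([1, 1, 4, 4])
    pooled, mask = pool(x)
    restored = unpool(pooled, mask)   # zeros everywhere except the original max positions
    print(restored.shape)             # [1, 1, 4, 4]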
class ReflectionPad2D(paddle.nn.Pad2D):
def __init__(self, padding):
super().__init__(padding, mode="reflect")
class ReplicationPad2D(paddle.nn.Pad2D):
def __init__(self, padding):
super().__init__(padding, mode="replicate")
class Softmax(paddle.nn.Softmax):
def __init__(self, dim=None):
super().__init__(axis=dim)
class SyncBatchNorm(paddle.nn.SyncBatchNorm):
def __init__(self,
num_features,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
process_group=None):
momentum = 1 - momentum
weight_attr = None
bias_attr = None
if not affine:
weight_attr = paddle.ParamAttr(learning_rate=0.0)
bias_attr = paddle.ParamAttr(learning_rate=0.0)
super().__init__(
num_features,
momentum=momentum,
epsilon=eps,
weight_attr=weight_attr,
bias_attr=bias_attr,
use_global_stats=track_running_stats)
class ZeroPad2D(paddle.nn.Pad2D):
def __init__(self, padding):
super().__init__(padding)
| python |
from abc import ABC
import numpy
import torch
import torch.distributions as D
import numpy as np
from distributions.BaseDistribution import Plottable2DDistribution
class RotationDistribution(Plottable2DDistribution):
def __init__(self, skewness, n, mean=7):
self.d = 2
self.dimension = 2
self.K = n
self.mean = mean
mix = D.Categorical(torch.ones(n))
theta = torch.tensor([2 * np.pi / n] )
U = torch.tensor([[torch.cos(theta), torch.sin(theta)], [-torch.sin(theta), torch.cos(theta)]])
self.mu = torch.zeros([self.K, self.d])
self.sigma = torch.zeros([self.K, self.d, self.d])
self.mu[0, :] = self.mean * torch.tensor([1., 0.])
self.sigma[0, :, :] = torch.diag(torch.tensor([1., 1. / skewness]))
for i in range(1, n):
self.mu[i, :] = torch.matmul(U, self.mu[i - 1, :])
self.sigma[i, :, :] = torch.matmul(U, np.matmul(self.sigma[i - 1, :, :], U.T))
comp = D.MultivariateNormal(self.mu, self.sigma)
self.target = D.MixtureSameFamily(mix, comp)
def log_prob(self, x):
return self.target.log_prob(x)
def sample(self, n):
return self.target.sample_n(n)
class TwoCircleDistribution(Plottable2DDistribution):
def __init__(self, radius_1 = 4, radius_2 = 8, thickness_1 = 0.5, thickness_2=0.5, mixing = [1., 1.]):
self.r1 = radius_1
self.r2 = radius_2
self.t1 = thickness_1
self.t2 = thickness_2
self.mixing = torch.tensor(mixing)
# Radius distribution
mix = D.Categorical(self.mixing)
comp = D.Normal(torch.FloatTensor([self.r1, self.r2]), torch.FloatTensor([self.t1, self.t2]))
self.radius_d = D.MixtureSameFamily(mix, comp)
# Ring distribution
self.ring = D.Uniform(torch.tensor([-numpy.pi]), torch.tensor([numpy.pi]))
def log_prob(self, x):
r = torch.norm(x, dim=-1)
# print(r)
log_prob = self.radius_d.log_prob(r)
return log_prob
def sample(self, n):
r = self.radius_d.sample_n(n)
u = self.ring.sample_n(n).squeeze()
samples = torch.zeros((n, 2))
samples[:, 0] = r * torch.cos(u)
samples[:, 1] = r * torch.sin(u)
return samples
class OneCircleDistribution(Plottable2DDistribution):
def __init__(self, radius_1 = 4, thickness_1 = 0.5,):
self.r1 = radius_1
self.t1 = thickness_1
# Radius distribution
self.radius_d = D.Normal(torch.FloatTensor([self.r1]), torch.FloatTensor([self.t1]))
# Ring distribution
self.ring = D.Uniform(torch.tensor([-numpy.pi]), torch.tensor([numpy.pi]))
def log_prob(self, x):
r = torch.sqrt((x[:, 0] ** 2) + (x[:, 1] ** 2))
print(r)
log_prob = self.radius_d.log_prob(r)
return log_prob
def sample(self, n):
r = self.radius_d.sample_n(n).squeeze()
u = self.ring.sample_n(n).squeeze()
samples = torch.zeros((n, 2))
samples[:, 0] = r * torch.cos(u)
samples[:, 1] = r * torch.sin(u)
return samples
class TwoSphereDistribution(Plottable2DDistribution):
def __init__(self, radius_1 = 2, radius_2 = 4, thickness_1 = 0.1, thickness_2=0.1, mixing = [1., 1.]):
self.r1 = radius_1
self.r2 = radius_2
self.t1 = thickness_1
self.t2 = thickness_2
self.mixing = torch.tensor(mixing)
# Radius distribution
mix = D.Categorical(self.mixing)
comp = D.Normal(torch.FloatTensor([self.r1, self.r2]), torch.FloatTensor([self.t1, self.t2]))
self.radius_d = D.MixtureSameFamily(mix, comp)
# Ring distribution
self.phi_d = D.Uniform(torch.tensor([0.]), torch.tensor([1.]))
self.theta_d = D.Uniform(torch.tensor([0.]), torch.tensor([2 * np.pi]))
self.ring = D.Uniform(torch.tensor([0., 0]), torch.tensor([1., 2 * np.pi]))
self.r = None
self.u = None
def log_prob(self, x):
r = torch.norm(x, dim=-1)
log_prob = self.radius_d.log_prob(r)
return log_prob
def sample(self, n, store=False):
r = self.radius_d.sample_n(n)
theta = self.theta_d.sample_n(n).squeeze()
phi = self.phi_d.sample_n(n).squeeze()
phi = torch.acos(1 - 2 * phi) # Prevent oversampling on the poles
x = r * torch.sin(phi) * torch.cos(theta)
y = r * torch.sin(phi) * torch.sin(theta)
z = r * torch.cos(phi)
samples = torch.zeros((n, 3))
samples[:, 0] = x
samples[:, 1] = y
samples[:, 2] = z
# samples = torch.cat([xs, ys], dim=1)
if store:
self.theta = theta
self.phi = phi
return samples
| python |
#!/usr/bin/env python
# coding: utf-8
# # QuakeMigrate - Example - Icequake detection
# ## Overview:
# This notebook shows how to run QuakeMigrate for icequake detection, using a 2 minute window of continuous seismic data from Hudson et al (2019). Please refer to this paper for details and justification of the settings used.
#
# Here, we detail how to:
# 1. Create a travel-times lookup table for the example seismometer network
# 2. Run the detect stage to coalesce energy through time
# 3. Run the trigger stage to determine events above a threshold value
# 4. Run the locate stage to refine the earthquake location
#
# We also provide an outline of some of the key outputs
# In[33]:
# Import necessary modules:
import QMigrate.core.model as qmod
import QMigrate.signal.scan as qscan
import QMigrate.io.data as qdata
import QMigrate.io.quakeio as qio
import QMigrate.signal.trigger as qtrigger
# In[34]:
# Set i/o paths:
station_file = "./inputs/stations.txt"
data_in = "./inputs/mSEED"
lut_out = "./outputs/lut/icequake.LUT"
out_path = "./outputs/runs"
run_name = "icequake_example"
# ## 1. Create a travel-times lookup table (LUT)
# In[35]:
# Read in station information
stations = qio.stations(station_file)
# Set the parameters for the travel-times lookup table (LUT)
# Cell count (x,y,z); cell size (x,y,z in metres)
lut = qmod.LUT(stations, cell_count=[20, 20, 140], cell_size=[100, 100, 20])
lut.lonlat_centre(-17.224, 64.328)
# Set the LUT projection (here we use the Lambert Conformal Conic projection)
lut.lcc_standard_parallels = (64.32, 64.335)
lut.projections(grid_proj_type="LCC")
lut.elevation=1400 # Defining the elevation of the top of the grid in m
# Compute for a homogeneous velocity model
v_p_homo_model = 3630
v_s_homo_model = 1833
lut.compute_homogeneous_vmodel(v_p_homo_model, v_s_homo_model)
# Save the LUT
lut.save(lut_out)
# ## 2. Coalesce the seismic energy through time
# In[37]:
# Create a new instance of the MSEED class and set path structure
data = qdata.Archive(station_file=station_file, archive_path=data_in)
data.path_structure(archive_format="YEAR/JD/*_STATION_*")
# Create a new instance of the SeisScan class
scan = qscan.QuakeScan(data, lut_out, output_path=out_path, run_name=run_name)
# In[38]:
# Set detect parameters
scan.sampling_rate = 500 # Sampling rate of data, in Hz
scan.p_bp_filter = [10, 125, 4] # The band-pass filter parameters for the P-phase (10 to 125 Hz, with 4th order corners)
scan.s_bp_filter = [10, 125, 4] # The band-pass filter parameters for the S-phase (10 to 125 Hz, with 4th order corners)
scan.p_onset_win = [0.01, 0.25] # Length of the STA and LTA time windows for the P-phase
scan.s_onset_win = [0.05, 0.5] # Length of the STA and LTA time windows for the S-phase
scan.time_step = 0.75 # The length of the time-step
scan.decimate = [1, 1, 1] # Decimation factors in x,y,z (no decimation here)
scan.n_cores = 12 # Number of cores/processors to use
# Defining the start and end times
starttime = "2014-06-29T18:41:55.0"
endtime = "2014-06-29T18:42:20.0"
# In[39]:
# Run the detect stage to find the coalescence of energy through time:
scan.detect(starttime, endtime)
# ## 3. Run the trigger stage, to detect and output individual icequakes
#
# nb: We can use the same SeisScan object here because we are not using a different decimation. If running trigger and locate on grids with different levels of decimation, a new SeisScan object must be initialised.
# In[41]:
trig = qtrigger.Trigger(out_path, run_name, stations)
trig.normalise_coalescence = True
trig.marginal_window = 2.75
trig.minimum_repeat = 6.
trig.detection_threshold = 1.8
# Run trigger
trig.trigger(starttime, endtime, savefig=True)
# ## 4. Run the locate stage, to relocate triggered events on a less decimated grid
# In[42]:
# Set locate parameters:
scan.marginal_window = 2.75
# Turn on plotting features
scan.plot_coal_video = False
scan.plot_coal_grid = False
scan.plot_coal_picture = True
scan.plot_coal_trace = False
# In[43]:
# Run the locate stage to determine the location of any triggered events
scan.locate(starttime, endtime)
# ## 5. Some of the key outputs
# In[48]:
# Show the .event file, containing event origin time and location:
icequake_event_fname = "./outputs/runs/icequake_example/events/20140629184210330000.event"
with open(icequake_event_fname) as f:
lines = f.readlines()
for line in lines:
print(line)
# In[49]:
# Show the .stn file, containing station time picks:
icequake_stn_fname = "outputs/runs/icequake_example/picks/20140629184210330000.picks"
with open(icequake_stn_fname) as f:
lines = f.readlines()
for line in lines:
print(line)
# In[50]:
# Show the coalescence pdf file, containing event origin time and location:
icequake_coal_image_fname = "outputs/runs/icequake_example/summaries/icequake_example_20140629184210330000_EventSummary.pdf"
from IPython.display import IFrame # For plotting pdf
IFrame(icequake_coal_image_fname, width=800, height=400) # Plot pdf
# References:
#
# Hudson, T.S., Smith, J., Brisbourne, A.M., and White R.S. (2019). Automated detection of basal icequakes and discrimination from surface crevassing. Annals of Glaciology, 79
| python |
class MyClass:
# Class variable
cvar = 'a'
def __init__(self, num=0):
# Instance variable
self.ivar = num
def __repr__(self):
return f'MyClass({self.ivar})'
def method(self):
# Normal class method - requires instance
# Operates within instance namespace
# Class namespace accessible through .__class__
mtype = 'instance'
'''
* From here, if access self.cvar and there's no instance variable "cvar"
then we walk up to the class level and access it
* If we do self.cvar = 'x' - this creates an instance variable which "shadows"
the class variable
* To change the class variable we need to do: self.__class__.cvar = 'x'
'''
return (f'{mtype} method called ({self}, cvar={MyClass.cvar}, ivar={self.ivar})'
f'\n\t(self.__dict__={self.__dict__}'
f'\n\t(self.__class__.__dict__.keys()={tuple(self.__class__.__dict__.keys())})')
@classmethod
def classmethod(cls):
# Works at class level - doesn't have to create an instance (but can - see below)
# Operates within class namespace
mtype = 'class'
return (f'{mtype} method called ({cls}, cvar={MyClass.cvar}, ivar=inaccessible)'
f'\n\t(cls.__dict__.keys()={tuple(cls.__dict__.keys())}')
'''
# Would have to comment out above method to uncomment these two:
@classmethod
def five(cls):
# Alternate constructor/factory function:
return cls(5)
@classmethod
def fifteen(cls):
# Alternate constructor/factory function:
return cls(15)
'''
@staticmethod
def staticmethod():
# Stand alone method - doesn't take instance/class object
# Can be used without an instance
mtype = 'static'
return (f'{mtype} method called ({staticmethod}, cvar={MyClass.cvar}, ivar=inaccessible)'
f'\n\t(staticmethod.__dict__.keys()={tuple(staticmethod.__dict__.keys())}')
if __name__ == '__main__':
c1 = MyClass()
print(f'c1.method(): {c1.method()}')
print(f'\nMyClass.method(c1): {MyClass.method(c1)}')
print('-' * 72)
print(f'c1.classmethod(): {c1.classmethod()}')
print(f'\nMyClass.classmethod(c1): {MyClass.classmethod()}')
print('-' * 72)
print(f'c1.staticmethod(): {c1.staticmethod()}')
print(f'\nMyClass.staticmethod(c1): {MyClass.staticmethod()}')
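    # Hedged follow-up sketch: instance assignment shadows the class variable, while assigning
    # through the class (MyClass.cvar or self.__class__.cvar) changes it for every instance.
    c2 = MyClass()
    c2.cvar = 'shadowed'                 # creates an instance attribute on c2 only
    MyClass.cvar = 'x'                   # rebinds the class attribute
    print(f'c1.cvar={c1.cvar}, c2.cvar={c2.cvar}, MyClass.cvar={MyClass.cvar}')
    # c1.cvar -> 'x' (falls through to the class), c2.cvar -> 'shadowed' (instance shadows class)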
| python |
# -*- coding: utf-8 -*-
# Description: PHP-FPM netdata python.d module
# Author: Pawel Krupa (paulfantom)
from base import UrlService
import json
# default module values (can be overridden per job in `config`)
# update_every = 2
priority = 60000
retries = 60
# default job configuration (overridden by python.d.plugin)
# config = {'local': {
# 'update_every': update_every,
# 'retries': retries,
# 'priority': priority,
# 'url': 'http://localhost/status?full&json'
# }}
# charts order (can be overridden if you want less charts, or different order)
ORDER = ['connections', 'requests', 'performance', 'request_duration', 'request_cpu', 'request_mem']
CHARTS = {
'connections': {
'options': [None, 'PHP-FPM Active Connections', 'connections', 'active connections', 'phpfpm.connections', 'line'],
'lines': [
["active"],
["maxActive", 'max active'],
["idle"]
]},
'requests': {
'options': [None, 'PHP-FPM Requests', 'requests/s', 'requests', 'phpfpm.requests', 'line'],
'lines': [
["requests", None, "incremental"]
]},
'performance': {
'options': [None, 'PHP-FPM Performance', 'status', 'performance', 'phpfpm.performance', 'line'],
'lines': [
["reached", 'max children reached'],
["slow", 'slow requests']
]},
'request_duration': {
'options': [None, 'PHP-FPM Request Duration', 'milliseconds', 'request duration', 'phpfpm.request_duration', 'line'],
'lines': [
["maxReqDur", 'max request duration'],
["avgReqDur", 'average request duration']
]},
'request_cpu': {
'options': [None, 'PHP-FPM Request CPU', 'percent', 'request CPU', 'phpfpm.request_cpu', 'line'],
'lines': [
["maxReqCPU", 'max request cpu'],
["avgReqCPU", 'average request cpu']
]},
'request_mem': {
'options': [None, 'PHP-FPM Request Memory', 'kilobytes', 'request memory', 'phpfpm.request_mem', 'line'],
'lines': [
["maxReqMem", 'max request memory'],
["avgReqMem", 'average request memory']
]}
}
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
if len(self.url) == 0:
self.url = "http://localhost/status?full&json"
self.order = ORDER
self.definitions = CHARTS
self.assignment = {"active processes": 'active',
"max active processes": 'maxActive',
"idle processes": 'idle',
"accepted conn": 'requests',
"max children reached": 'reached',
"slow requests": 'slow'}
self.proc_assignment = {"request duration": 'ReqDur',
"last request cpu": 'ReqCPU',
"last request memory": 'ReqMem'}
def _get_data(self):
"""
Format data received from http request
:return: dict
"""
try:
raw = self._get_raw_data()
except AttributeError:
return None
if '?json' in self.url or '&json' in self.url:
try:
raw_json = json.loads(raw)
except ValueError:
return None
data = {}
for k,v in raw_json.items():
if k in self.assignment:
data[self.assignment[k]] = v
if '&full' in self.url or '?full' in self.url:
c = 0
sum_val = {}
for proc in raw_json['processes']:
if proc['state'] != 'Idle':
continue
c += 1
for k, v in self.proc_assignment.items():
d = proc[k]
if v == 'ReqDur':
d = d/1000
if v == 'ReqMem':
d = d/1024
if 'max' + v not in data or data['max' + v] < d:
data['max' + v] = d
if 'avg' + v not in sum_val:
sum_val['avg' + v] = 0
data['avg' + v] = 0
sum_val['avg' + v] += d
if len(sum_val):
for k, v in sum_val.items():
data[k] = v/c
if len(data) == 0:
return None
return data
raw = raw.split('\n')
data = {}
for row in raw:
tmp = row.split(":")
if str(tmp[0]) in self.assignment:
try:
data[self.assignment[tmp[0]]] = int(tmp[1])
except (IndexError, ValueError):
pass
if len(data) == 0:
return None
return data
| python |
import os
from typing import List, Sequence, Any
import numpy as np
from restools.flow_stats import Ensemble, BadEnsemble
from papers.jfm2020_probabilistic_protocol.data import RPInfo
class DistributionSummary:
def __init__(self):
self.means = []
self.lower_quartiles = []
self.upper_quartiles = []
self.lower_deciles = []
self.upper_deciles = []
def append(self, mean=None, lower_quartile=None, upper_quartile=None, lower_decile=None, upper_decile=None):
self.means.append(mean)
self.lower_quartiles.append(lower_quartile)
self.upper_quartiles.append(upper_quartile)
self.lower_deciles.append(lower_decile)
self.upper_deciles.append(upper_decile)
def find_lam_event_number_by_random_sampling(rps_info: List[List[RPInfo]], sample_number: int, n_per_energy_level: int,
seed: int) -> np.ndarray:
"""
Returns a 2D array of laminarisation event numbers for `sample_number` random samples done with replacement from
the given set of RPs. Note that the seed must be provided from the randomiser to enable reproducibility.
:param rps_info: 2D-list of RPs info
:param n_per_energy_level: number of RPs per energy level in the sample
:param seed: seed used to enable reproducibility
:return: a 2D-array of laminarisation event numbers (first index = sample id, second index = energy level id)
"""
rng = np.random.default_rng(seed) # set the fixed seed for reproducibility (numpy version for checking: 1.17.2)
energy_levels_number = len(rps_info)
n_lam = np.zeros((sample_number, energy_levels_number))
for s_i in range(sample_number):
for e_i in range(energy_levels_number):
for _ in range(n_per_energy_level):
rp_i = rng.integers(0, len(rps_info[e_i]))
n_lam[s_i][e_i] += rps_info[e_i][rp_i].is_laminarised
return n_lam
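# Hedged usage sketch: any object exposing an `is_laminarised` flag (0/1) works here; a real run
# would pass the RPInfo records of the study. Counts and seed below are illustrative only.
def _example_lam_sampling():
    class _FakeRP:
        def __init__(self, lam):
            self.is_laminarised = lam
    rps_info = [[_FakeRP(1), _FakeRP(0), _FakeRP(1)] for _ in range(4)]   # 4 energy levels
    n_lam = find_lam_event_number_by_random_sampling(rps_info, sample_number=200, n_per_energy_level=10, seed=42)
    print(n_lam.shape)   # (200, 4): laminarisation counts per sample and energy level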
def plot_distribution_summary(ax, distr: DistributionSummary, x: Sequence[float], obj_to_rasterize: List[Any],
means_line_style='-', means_kwargs={'linewidth': 2, 'color': 'blue'},
quartiles_kwargs={'color': 'blue', 'alpha': 0.5},
deciles_kwargs={'color': 'blue', 'alpha': 0.2}):
ax.plot(x, distr.means, means_line_style, **means_kwargs)
obj = ax.fill_between(x, distr.lower_quartiles, distr.upper_quartiles, **quartiles_kwargs)
obj_to_rasterize.append(obj)
obj = ax.fill_between(x, distr.lower_deciles, distr.upper_deciles, **deciles_kwargs)
obj_to_rasterize.append(obj)
def turbulent_dissipation_rate(task, a, omega, res, ti_builder):
print('Processing task {}'.format(task))
if task == -1:
return None
task_path = res.get_task_path(task)
tis = [ti_builder.get_timeintegration(os.path.join(task_path, 'data-500'))]
try:
ens = Ensemble(tis, max_ke_eps=0.02)
diss_distr = ens.dissipation_distribution()
print(f'Total number of selected data samples is {len(diss_distr.data_samples)} (about {len(diss_distr.data_samples)/2} time units)')
except BadEnsemble as e:
print('Configuration "A = {}, omega = {} (task {})" is skipped because turbulent trajectories are '
'too short'.format(a, omega, task))
return None
else:
return diss_distr.mean()
def exponential_noise_distribution(e, e_max):
# parameter lambda for the exponential distribution is equal to 6/e_max (=> expectation is e_max/6)
l = 6./e_max
return l / (1. - np.exp(-l*e_max)) * np.exp(-l*e)
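# Quick numerical sanity check (sketch): the truncated exponential density above should integrate
# to ~1 over [0, e_max] and have mean close to e_max/6 (slightly smaller due to the truncation).
def _check_exponential_noise(e_max=1.0, n=100000):
    e = np.linspace(0., e_max, n)
    p = exponential_noise_distribution(e, e_max)
    print(np.trapz(p, e))          # ~1.0 (normalised on the truncated support)
    print(np.trapz(e * p, e))      # ~0.164 * e_max, i.e. just under e_max/6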
| python |
import numpy as np
def generate_features(implementation_version, draw_graphs, raw_data, axes, sampling_freq, scale_axes):
# features is a 1D array, reshape so we have a matrix
raw_data = raw_data.reshape(int(len(raw_data) / len(axes)), len(axes))
features = []
graphs = []
# split out the data from all axes
for ax in range(0, len(axes)):
X = []
for ix in range(0, raw_data.shape[0]):
X.append(float(raw_data[ix][ax]))
# X now contains only the current axis
fx = np.array(X)
# process the signal here
fx = fx * scale_axes
# we need to return a 1D array again, so flatten here again
for f in fx:
features.append(f)
return {
'features': features,
'graphs': graphs,
# if you use FFTs then set the used FFTs here (this helps with memory optimization on MCUs)
'fft_used': [],
'output_config': {
# type can be 'flat', 'image' or 'spectrogram'
'type': 'flat',
'shape': {
# shape should be { width, height, channels } for image, { width, height } for spectrogram
'width': len(features)
}
}
}
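# Hypothetical invocation sketch for the DSP block above: two interleaved samples of a 3-axis
# signal, scaled by 2.0. Axis names and values are illustrative only.
if __name__ == "__main__":
    raw = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    out = generate_features(1, False, raw, ['accX', 'accY', 'accZ'], 62.5, 2.0)
    print(out['features'])   # axis-by-axis: [2.0, 8.0, 4.0, 10.0, 6.0, 12.0]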
| python |
import os.path
import subprocess
from .steps import ImagesStep
from common_utils.exceptions import ZCItoolsValueError
from common_utils.data_types.correlation_matrix import CorrelationMatrix
from common_utils.file_utils import ensure_directory, write_str_in_file, get_settings
_circos_conf = """
<colors>
{colors}
</colors>
# Groups
karyotype = data/karyotype.txt
<ideogram>
<spacing>
default = 0.020r
</spacing>
thickness = 40p
stroke_thickness = 0
stroke_color = vdgrey
fill = yes
fill_color = black
# fractional radius position of chromosome ideogram within image
radius = 0.90r
show_label = yes
label_font = bold
label_radius = dims(image,radius) - 100p
label_size = 50
label_parallel = yes
show_bands = no
</ideogram>
# 1-correlation group parts
<highlights>
z = 0
<highlight>
file = data/tiles.txt
r0 = 0.999r-30p
r1 = 0.999r-5p
stroke_thickness = 0
</highlight>
</highlights>
# Correlations
<links>
<link>
ribbon = yes
flat = yes
file = data/links.txt
bezier_radius = 0.0r
radius = 0.999r-30p
thickness = 10
color = grey
stroke_color = dgrey
stroke_thickness = 1
<rules>
<rule>
condition = var(dist) <= 1.5
bezier_radius = 0.3r
</rule>
</rules>
</link>
</links>
<image>
<<include etc/image.conf>>
</image>
<<include etc/colors_fonts_patterns.conf>>
<<include etc/housekeeping.conf>>
"""
def create_circos_correlation(project, step_data, params):
# Read correlation data
cm = None
if params.input_filename:
cm = CorrelationMatrix.from_file(params.input_filename)
if not cm:
raise ZCItoolsValueError('No correlation input data!')
num_c = cm.num_columns()
if num_c < 2:
raise ZCItoolsValueError('Not much of a matrix!')
step = ImagesStep(project, step_data, remove_data=True)
one_width = params.one_width
gap_correlations = params.gap_correlations
ow_2 = one_width // 2
one_plus_gap = one_width + gap_correlations
# Note: column lowercase names are used as column identifiers
data_dir = step.step_file('data')
etc_dir = step.step_file('etc')
ensure_directory(data_dir)
ensure_directory(etc_dir)
colors = dict((lc, 'green') for lc in cm._columns_lower) # ToDo: some defaults
colors['plus_'] = 'blue'
colors['minus_'] = 'red'
for col_def in params.group_color:
col_fields = col_def.split(',', 1)
if len(col_fields) == 2 and cm.check_column(col_fields[0]):
colors[cm.check_column(col_fields[0])] = col_fields[1]
else:
print(f"Warning: '{col_def}' is not column color definition!")
# data directory
# karyotype.txt: defines groups (as chromosomes)
# chr - <name> <label> <start> <end> <color>
# ...
gl = (num_c - 1) * one_width + (num_c - 2) * gap_correlations # group length
write_str_in_file(os.path.join(data_dir, 'karyotype.txt'),
'\n'.join(f"chr - {lc} {c} 0 {gl} color_{lc}"
for lc, c in zip(cm._columns_lower, cm._columns)))
# tiles.txt: defines abs(correlation) == 1 interval, as tiles
# <name> <start> <end> [options]
with open(os.path.join(data_dir, 'tiles.txt'), 'w') as out:
for idx1, c1 in enumerate(cm._columns_lower):
for idx2, c2 in enumerate(cm._columns_lower):
if idx1 == idx2:
continue
pos = (idx1 - idx2 - 1) if idx1 > idx2 else (idx1 - idx2 + (num_c - 1))
start = pos * one_plus_gap
out.write(f"{c1} {start} {start + one_width} fill_color=color_{c2}\n")
# cells.txt: defines correlations as links
# <cell_idx> <group_1> <start_1> <end_1> color=color_{plus|minus}_,dist={int}
# <cell_idx> <group_2> <start_2> <end_2> color=color_{plus|minus}_,dist={int}
# ...
with open(os.path.join(data_dir, 'links.txt'), 'w') as out:
cell_idx = 0
for idx1, c1 in enumerate(cm._columns_lower):
rest_c = cm._columns_lower[idx1 + 1:]
for idx2, c2 in enumerate(rest_c):
corr = cm.get(c1, c2)
if corr is not None:
w = round(abs(corr) * one_width)
w_1 = w // 2
w_2 = w - w_1 # - 1?
centar = ow_2 + idx2 * one_plus_gap
color = 'plus_' if corr >= 0 else 'minus_'
dist = min(idx2 + 1, idx1 + (len(rest_c) - idx2))
atts = f"color=color_{color},dist={dist}"
out.write(f"cell_{cell_idx} {c1} {gl - centar - w_2} {gl - centar + w_1} {atts}\n")
out.write(f"cell_{cell_idx} {c2} {centar - w_1} {centar + w_2} {atts}\n")
cell_idx += 1
# etc directory
write_str_in_file(os.path.join(etc_dir, 'circos.conf'), _circos_conf.format(
colors='\n'.join(f"color_{lc} = {c}" for lc, c in colors.items())
))
subprocess.run(['circos', '-conf', 'etc/circos.conf'], cwd=step.directory)
# View it
if params.show_image:
image_viewer = get_settings().get('image_viewer')
if image_viewer:
subprocess.Popen([image_viewer, step.step_file('circos.png')])
#
# # step.set_table_data(data, columns)
# step.save()
# return step
| python |
import pyCardDeck
from pyCardDeck.cards import PokerCard
from typing import List
class Gamer:
def __init__(self, name: str):
self.hand = []
self.name = name
def __str__(self):
return self.name
class GamePlace:
def __init__(self, gamers: List[Gamer]):
self.deck = pyCardDeck.Deck(
cards=generate_deck(),
name='Poker deck',
reshuffle=False)
        self.gamers = gamers
        self.table_cards = []
        # Track folded and removed gamers by name so fold()/remove() below have state to work with
        self._gamer_ids = {gamer.name for gamer in gamers}
        self._folder_ids = set()
        self._dead_gamer_ids = set()
        print("Created a table with {} gamers".format(len(self.gamers)))
def Cantrell_Draw(self):
"""
Basic Five card game structure
"""
print("Starting a round of Cantrell Draw")
self.deck.shuffle()
self.deal_cards(5)
        # Imagine the first round of betting happening here, after cards are drawn and visible to the gamers
        self.draw1()
        self.fold()  # gamers who folded their hands after the initial cards were distributed
self.remove()
self.after_the_draw()
# Imagine post-turn, pre-draw1 logic for betting here
self.reset() #to update the gamers with hands
self.fold()
self.remove()
self.after_the_draw()
# Imagine some more betting and winner decision here
self.cleanup()
def deal_cards(self, number: int):
"""
Dealer will go through all available gamers and deal them x number of cards.
:param number: How many cards to deal
:type number: int
"""
for _ in range(0, number):
for gamer in self.gamers:
card = self.deck.draw()
gamer.hand.append(card)
print("Dealt {} to gamer {}".format(card, gamer)
    def draw1(self, number=0):
        """
        After the first round of betting, if more than one gamer remains in the hand,
        a draw occurs where each gamer selects how many of his/her cards to replace.
        """
        if len(self.gamers) > 1:
            number = int(input("How many card/cards do you want to replace? "))
            # Burn the replaced card/cards
            burned = self.deck.draw()
            self.deck.discard(burned)
            print("Burned a card: {}".format(burned))
            for _ in range(0, number):
                card = self.deck.draw()
                self.table_cards.append(card)
                print("New card on the table: {}".format(card))
        else:
            print("Game has ended because only 1 gamer or no gamer exists on the table")
    def fold(self, gamer_id=None):
        # No-op when no gamer id is given (nobody folded this round)
        if gamer_id is None:
            return
        if gamer_id not in self._gamer_ids:
            raise ValueError("Unknown gamer id")
        self._folder_ids.add(gamer_id)
    def remove(self, gamer_id=None):
        if gamer_id is None:
            return
        self.fold(gamer_id)
        self._dead_gamer_ids.add(gamer_id)
    def reset(self):
        self._folder_ids = set(self._dead_gamer_ids)
    def after_the_draw(self):
        """
        A second "after the draw" betting round occurs, beginning with the gamer to the
        dealer's left or else with the gamer who opened the first round (the latter is
        common when antes are used instead of blinds). This is followed by a showdown.
        """
        if len(self.gamers) > 1:
            # Showdown: checking for the highest holding is left unimplemented here
            pass
        else:
            print("only 1 gamer and the winner is declared")
def cleanup(self):
"""
Cleans up the table to gather all the cards back
"""
for gamer in self.gamers:
for card in gamer.hand:
self.deck.discard(card)
for card in self.table_cards:
self.deck.discard(card)
self.deck.shuffle_back()
print("Cleanup done")
def generate_deck() -> List[PokerCard]:
"""
    Function that generates the deck; instead of writing down 52 cards, we use iteration
    to generate the cards for use
    :return: List with all 52 poker playing cards
:rtype: List[PokerCard]
"""
suits = ['Hearts', 'Diamonds', 'Clubs', 'Spades']
ranks = {'A': 'Ace',
'2': 'Two',
'3': 'Three',
'4': 'Four',
'5': 'Five',
'6': 'Six',
'7': 'Seven',
'8': 'Eight',
'9': 'Nine',
'10': 'Ten',
'J': 'Jack',
'Q': 'Queen',
'K': 'King'}
cards = []
for suit in suits:
for rank, name in ranks.items():
cards.append(PokerCard(suit, rank, name))
print('Generated deck of cards for the table')
return cards
if __name__ == '__main__':
table = GamePlace([Gamer("Jack"), Gamer("John"), Gamer("Peter")])
table.Cantrell_Draw()
| python |
#coding:utf-8
#created by Philip_Gao
import tensorflow as tf
from mnv3_layers import *
def mobilenetv3_small(inputs, num_classes, is_train=True):
reduction_ratio = 4
with tf.variable_scope('mobilenetv3_small'):
net = conv2d_block(inputs, 16, 3, 2, is_train, name='conv1_1',h_swish=True) # size/2
net = mnv3_block(net, 3, 16, 16, 2, is_train, name='bneck2_1', h_swish=False, ratio=reduction_ratio, se=True) # size/4
net = mnv3_block(net, 3, 72, 24, 2, is_train, name='bneck3_1', h_swish=False, ratio=reduction_ratio, se=False) # size/8
net = mnv3_block(net, 3, 88, 24, 1, is_train, name='bneck3_2', h_swish=False, ratio=reduction_ratio, se=False)
net = mnv3_block(net, 5, 96, 40, 1, is_train, name='bneck4_1', h_swish=True, ratio=reduction_ratio, se=True) # size/16
net = mnv3_block(net, 5, 240, 40, 1, is_train, name='bneck4_2', h_swish=True, ratio=reduction_ratio, se=True)
net = mnv3_block(net, 5, 240, 40, 1, is_train, name='bneck4_3', h_swish=True, ratio=reduction_ratio, se=True)
net = mnv3_block(net, 5, 120, 48, 1, is_train, name='bneck5_1', h_swish=True, ratio=reduction_ratio, se=True)
net = mnv3_block(net, 5, 144, 48, 1, is_train, name='bneck5_2', h_swish=True, ratio=reduction_ratio, se=True)
net = mnv3_block(net, 5, 288, 96, 2, is_train, name='bneck6_1', h_swish=True, ratio=reduction_ratio, se=True) # size/32
net = mnv3_block(net, 5, 576, 96, 1, is_train, name='bneck6_2', h_swish=True, ratio=reduction_ratio, se=True)
net = mnv3_block(net, 5, 576, 96, 1, is_train, name='bneck6_3', h_swish=True, ratio=reduction_ratio, se=True)
net = conv2d_hs(net, 576, is_train, name='conv7_1',se=True) #SE
net = global_avg(net,7)
net = conv2d_NBN_hs(net, 1280, name='conv2d_NBN', bias=True)
net = conv_1x1(net, num_classes, name='logits',bias=True)
logits = flatten(net)
pred = tf.nn.softmax(logits, name='prob')
return logits, pred
input_test = tf.zeros([1,224,224,3])
n_c = 1000
model = mobilenetv3_small(input_test,n_c)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(model)
print(model)
| python
import math
import torch
from torch import nn
from ..wdtypes import *
class Wide(nn.Module):
r"""Wide component
Linear model implemented via an Embedding layer connected to the output
neuron(s).
Parameters
-----------
wide_dim: int
size of the Embedding layer. `wide_dim` is the summation of all the
individual values for all the features that go through the wide
component. For example, if the wide component receives 2 features with
5 individual values each, `wide_dim = 10`
pred_dim: int
        size of the output tensor containing the predictions
Attributes
-----------
wide_linear: :obj:`nn.Module`
the linear layer that comprises the wide branch of the model
Examples
--------
>>> import torch
>>> from pytorch_widedeep.models import Wide
>>> X = torch.empty(4, 4).random_(6)
>>> wide = Wide(wide_dim=X.unique().size(0), pred_dim=1)
>>> out = wide(X)
"""
def __init__(self, wide_dim: int, pred_dim: int = 1):
super(Wide, self).__init__()
self.wide_linear = nn.Embedding(wide_dim + 1, pred_dim, padding_idx=0)
# (Sum(Embedding) + bias) is equivalent to (OneHotVector + Linear)
self.bias = nn.Parameter(torch.zeros(pred_dim))
self._reset_parameters()
def _reset_parameters(self) -> None:
r"""initialize Embedding and bias like nn.Linear. See `original
implementation
<https://pytorch.org/docs/stable/_modules/torch/nn/modules/linear.html#Linear>`_.
"""
nn.init.kaiming_uniform_(self.wide_linear.weight, a=math.sqrt(5))
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.wide_linear.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, X: Tensor) -> Tensor: # type: ignore
r"""Forward pass. Simply connecting the Embedding layer with the ouput
neuron(s)"""
out = self.wide_linear(X.long()).sum(dim=1) + self.bias
return out
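# Illustrative sketch (an addition, not part of the original module): a quick check that
# summing the embedding rows for the observed indices plus the bias matches an explicit
# one-hot (count) encoding multiplied by the same weight matrix, which is the equivalence
# noted in the comment above. The sizes below are arbitrary.
if __name__ == "__main__":
    torch.manual_seed(0)
    wide = Wide(wide_dim=6, pred_dim=1)
    X = torch.empty(4, 4).random_(1, 7)  # feature indices in 1..6 (0 is the padding index)
    counts = torch.zeros(4, 7).scatter_add_(1, X.long(), torch.ones(4, 4))  # per-row index counts
    manual = counts[:, 1:] @ wide.wide_linear.weight[1:] + wide.bias
    assert torch.allclose(wide(X), manual)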
| python |
from torch import nn
from IAF.layers.utils import accumulate_kl_div
import IAF.layers as layers
import torch
def test_accumulate_kl_div():
class Model(nn.Module):
def __init__(self):
super().__init__()
self.layers = nn.Sequential(
layers.LinearVariational(1, 1, 1), layers.LinearVariational(1, 1, 1)
)
model = Model()
model.layers[0]._kl_divergence_ = 2
model.layers[1]._kl_divergence_ = 2
kl = accumulate_kl_div(model)
assert kl == 4
def test_masked_linear_mask():
"""
See Also: https://www.youtube.com/watch?v=lNW8T0W-xeE
at 11:26
See image:
img/made/made_tests.png
"""
from IAF.layers.autoregressive import set_mask_output_layer
input_size = 4
hidden_size = 5
input_layer = layers.LinearMasked(input_size, hidden_size, input_size, bias=False)
# Example values taken from the first hidden layer.
input_layer.m = torch.tensor([1, 2, 1, 2, 3])
m_input_layer = torch.arange(1, input_size + 1)
# last values is conditional on the previous x-values
# and is the final prediction of the model.
# Should not have any hidden nodes.
m_input_layer[-1] = 1e9
input_layer.set_mask(m_input_layer)
assert torch.all(
input_layer.mask
== torch.tensor(
[
[True, False, False, False],
[True, True, False, False],
[True, False, False, False],
[True, True, False, False],
[True, True, True, False],
]
)
)
# Test the masks of predefined m values.
hidden_layer = layers.LinearMasked(hidden_size, hidden_size, input_size)
hidden_layer.m = torch.tensor([1, 1, 2, 1, 3])
hidden_layer.set_mask(input_layer.m)
assert torch.all(
hidden_layer.mask
== torch.tensor(
[
[True, False, True, False, False],
[True, False, True, False, False],
[True, True, True, True, False],
[True, False, True, False, False],
[True, True, True, True, True],
]
)
)
output_layer = layers.LinearMasked(hidden_size, input_size, input_size)
output_layer = set_mask_output_layer(output_layer, hidden_layer.m)
assert torch.all(
output_layer.mask
== torch.tensor(
[
[False, False, False, False, False],
[True, True, False, True, False],
[True, True, True, True, False],
[True, True, True, True, True],
]
)
)
def test_sequential_masked():
from IAF.layers.autoregressive import SequentialMasked
torch.manual_seed(3)
num_in = 3
a = SequentialMasked(
layers.LinearMasked(num_in, 5, num_in),
nn.ReLU(),
layers.LinearMasked(5, 3, num_in)
)
# Test if the mask is set on all LinearMasked layers.
# At initializing they contain only 1's.
assert torch.any(a[0].mask == 0)
assert torch.any(a[-1].mask == 0)
def test_autoregressive_made():
# Idea from karpathy; https://github.com/karpathy/pytorch-made/blob/master/made.py
# We predict x, and look at the partial derivatives.
# For the autoregressive property to hold, dy/dx
# can only be dependent of x<d. Where d is the current index.
from IAF.models.made import MADE
input_size = 10
x = torch.ones((1, input_size))
x.requires_grad = True
m = MADE(in_features=input_size, hidden_features=20)
for d in range(input_size):
x_hat = m(x)
# loss w.r.t. P(x_d | x_<d)
loss = x_hat[0, d]
loss.backward()
assert torch.all(x.grad[0, :d] != 0)
assert torch.all(x.grad[0, d:] == 0)
| python |
from __future__ import generator_stop
from __future__ import annotations
from .collision.Avoidance import CollisionAvoidance
from .pub_sub.AMQP import PubSubAMQP
__all__ = [
'CollisionAvoidance',
'PubSubAMQP'
]
__version__ = '0.9.0'
| python |
from string import printable
from pylexers.RegularExpressions.BaseRegularExpressions import (
_EmptySet,
_Epsilon,
_Symbol,
_Or,
_Concat,
_Star,
)
from pylexers.RegularExpressions.BaseRegularExpressions import _RegularExpression
"""
Basic Regular Expressions
"""
class EmptySet(_EmptySet):
pass
class Epsilon(_Epsilon):
pass
class Symbol(_Symbol):
pass
def Or(*regular_expressions: _RegularExpression) -> _RegularExpression:
re = regular_expressions[0]
for r in regular_expressions[1:]:
re = _Or(re, r)
return re
def Concat(
regular_expression_1: _RegularExpression, regular_expression_2: _RegularExpression
) -> _RegularExpression:
return _Concat(regular_expression_1, regular_expression_2)
class Star(_Star):
pass
"""
Extended Regular Expressions
"""
def Sigma(alphabet: str = printable, exclude: str = "") -> _RegularExpression:
return Or(*[_Symbol(a) for a in alphabet if a not in exclude])
def String(string: str) -> _RegularExpression:
if len(string) == 1:
return _Symbol(string)
    return Concat(_Symbol(string[0]), String(string[1:]))
def AtLeastOne(regular_expression: _RegularExpression) -> _RegularExpression:
return Concat(regular_expression, _Star(regular_expression))
def Optional(regular_expression: _RegularExpression) -> _RegularExpression:
return _Or(regular_expression, _Epsilon())
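# Illustrative usage only (a sketch, not part of the library): composing the combinators
# above into an identifier-like pattern -- a letter followed by zero or more letters or
# digits. The tiny alphabets here are placeholders; _Star takes the sub-expression as its
# constructor argument, as AtLeastOne above shows.
if __name__ == "__main__":
    letter = Or(*[Symbol(c) for c in "ab"])
    digit = Or(*[Symbol(c) for c in "01"])
    identifier = Concat(letter, Star(Or(letter, digit)))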
| python |
from __future__ import division, print_function
import numpy as np
class OnlineStatistics(object):
def __init__(self, axis=0):
self.axis = axis
self.n = None
self.s = None
self.s2 = None
self.reset()
def reset(self):
self.n = 0
self.s = 0.0
self.s2 = 0.0
def add_data(self, data):
if isinstance(self.axis, (list, tuple)):
self.n += np.prod([data.shape[axis] for axis in self.axis])
else:
self.n += data.shape[self.axis]
self.s += data.sum(axis=self.axis)
self.s2 += (data ** 2).sum(axis=self.axis)
@property
def mean(self):
return self.s / self.n
@property
def std(self):
return np.sqrt((self.s2 - (self.s ** 2) / self.n) / self.n)
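# Illustrative sketch (an addition, not part of the original module): the streaming
# mean/std accumulated chunk by chunk should match numpy's batch computation over the
# same data.
def _online_statistics_example():
    data = np.random.randn(100, 3)
    stats = OnlineStatistics(axis=0)
    for chunk in np.array_split(data, 4):
        stats.add_data(chunk)
    assert np.allclose(stats.mean, data.mean(axis=0))
    assert np.allclose(stats.std, data.std(axis=0))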
def divide_nonzero(a, b):
"""
Return a/b for the nonzero elements of b and return 0 for the zero elements of b.
"""
shape = (a * b).shape
nonzero = b != 0
c = np.zeros(shape)
try:
if a.shape == shape:
a = a[nonzero]
except AttributeError:
pass
try:
if b.shape == shape:
b = b[nonzero]
except AttributeError:
pass
c[nonzero] = a / b
return c
def sample_interval(min_limit, max_limit):
assert min_limit.shape == max_limit.shape
assert min_limit.dtype == max_limit.dtype
if min_limit.dtype == np.int:
return np.array([np.random.random_integers(low, high) for (low, high) in zip(min_limit, max_limit)])
else:
return min_limit + np.random.random_sample(min_limit.shape) * (max_limit - min_limit)
def axis2quat(axis, angle):
    # Normalize the rotation axis to unit length before building the quaternion
    axis = np.asarray(axis)
    axis = 1.0 * axis / np.linalg.norm(axis)
    return np.append(np.cos(angle/2.0), axis * np.sin(angle/2.0))
def quaternion_multiply(*qs):
if len(qs) == 2:
q0, q1 = qs
return np.array([-q1[1]*q0[1] - q1[2]*q0[2] - q1[3]*q0[3] + q1[0]*q0[0],
q1[1]*q0[0] + q1[2]*q0[3] - q1[3]*q0[2] + q1[0]*q0[1],
-q1[1]*q0[3] + q1[2]*q0[0] + q1[3]*q0[1] + q1[0]*q0[2],
q1[1]*q0[2] - q1[2]*q0[1] + q1[3]*q0[0] + q1[0]*q0[3]])
else:
return quaternion_multiply(qs[0], quaternion_multiply(*qs[1:]))
def clip_pos_aa(pos_aa, min_dof_limits, max_dof_limits):
assert 3 <= len(pos_aa) <= 6
assert 3 <= len(min_dof_limits) <= 4
assert 3 <= len(max_dof_limits) <= 4
pos, aa = np.split(pos_aa, [3])
pos = np.clip(pos, min_dof_limits[:3], max_dof_limits[:3])
min_angle = min_dof_limits[3] if len(min_dof_limits) > 3 else float('-inf')
max_angle = max_dof_limits[3] if len(max_dof_limits) > 3 else float('inf')
angle = np.linalg.norm(aa)
axis = aa / angle if angle else np.array([0, 0, 1])
angle = np.clip(angle, min_angle, max_angle)
aa = axis * angle
return np.concatenate([pos, aa])
def pack_image(image, fixed_point_min=0.01, fixed_point_max=100.0):
assert image.ndim == 3 and image.shape[2] == 1
image = image.squeeze()
fixed_point_image = np.clip(image, fixed_point_min, fixed_point_max)
fixed_point_image = (2 ** 24) * (fixed_point_image - fixed_point_min) / (fixed_point_max - fixed_point_min)
fixed_point_image = fixed_point_image.astype(np.uint32)
fixed_point_image = fixed_point_image.view(dtype=np.uint8).reshape(fixed_point_image.shape + (4,))[..., :-1]
return fixed_point_image
def unpack_image(fixed_point_image, fixed_point_min=0.01, fixed_point_max=100.0):
fixed_point_image = np.concatenate([fixed_point_image, np.zeros(fixed_point_image.shape[:-1] + (1,), dtype=np.uint8)], axis=-1)
fixed_point_image = fixed_point_image.view(np.uint32).astype(int).squeeze()
fixed_point_image = fixed_point_min + fixed_point_image * (fixed_point_max - fixed_point_min) / (2 ** 24)
image = fixed_point_image.astype(np.float32)
image = np.expand_dims(image, axis=-1)
return image
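# Illustrative round-trip sketch (an addition, not part of the original module): packing a
# single-channel float image into three uint8 bytes (24-bit fixed point) and unpacking it
# should be nearly lossless within the clipped [0.01, 100.0] range. Like the packing code
# above, this assumes a little-endian platform.
def _pack_unpack_example():
    depth = np.random.uniform(0.01, 99.0, size=(4, 4, 1)).astype(np.float32)
    packed = pack_image(depth)
    restored = unpack_image(packed)
    assert packed.shape == (4, 4, 3) and packed.dtype == np.uint8
    assert np.allclose(restored, depth, atol=1e-3)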
| python |
from PyQt4 import QtGui, QtCore
import sys
sys.path.append('../')
import Code.configuration as cf
import Code.Engine as Engine
# So that the code basically starts looking in the parent directory
Engine.engine_constants['home'] = '../'
import Code.GlobalConstants as GC
import Code.SaveLoad as SaveLoad
import Code.ItemMethods as ItemMethods
import Code.CustomObjects as CustomObjects
import Code.StatusObject as StatusObject
import Code.UnitSprite as UnitSprite
from Code.Dialogue import UnitPortrait
# DATA XML
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
try:
from xml.dom import minidom
PRETTY = True
except ImportError:
PRETTY = False
def prettify(elem):
rough_string = ET.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
# === VIEW AND CONTROLLER METHODS ============================================
class ImageWidget(QtGui.QWidget):
def __init__(self, surface, parent=None, x=0):
super(ImageWidget, self).__init__(parent)
w = surface.get_width()
h = surface.get_height()
self.data = surface.get_buffer().raw
self.x = x
# self.image = QtGui.QImage(self.data, w, h, QtGui.QImage.Format_RGB32)
self.image = QtGui.QImage(self.data, w, h, QtGui.QImage.Format_ARGB32)
self.resize(w, h)
def create_icon(image):
icon = ImageWidget(image)
icon = QtGui.QPixmap(icon.image)
icon = QtGui.QIcon(icon)
return icon
def create_pixmap(image):
icon = ImageWidget(image)
icon = QtGui.QPixmap(icon.image)
return icon
def create_chibi(name):
return Engine.subsurface(GC.UNITDICT[name + 'Portrait'], (96, 16, 32, 32)).convert_alpha()
def stretch(grid):
box_h = QtGui.QHBoxLayout()
box_h.addStretch(1)
box_h.addLayout(grid)
box_h.addStretch(1)
box_v = QtGui.QVBoxLayout()
box_v.addStretch(1)
box_v.addLayout(box_h)
box_v.addStretch(1)
return box_v
# === DATA IMPORTING ===
def build_units(class_dict, portrait_data):
units = []
for unit in GC.UNITDATA.getroot().findall('unit'):
u_i = {}
u_i['id'] = unit.find('id').text
u_i['name'] = unit.get('name')
classes = unit.find('class').text.split(',')
u_i['klass'] = classes[-1]
u_i['gender'] = unit.find('gender').text
u_i['level'] = int(unit.find('level').text)
u_i['faction'] = unit.find('faction').text
stats = SaveLoad.intify_comma_list(unit.find('bases').text)
for n in xrange(len(stats), cf.CONSTANTS['num_stats']):
stats.append(class_dict[u_i['klass']]['bases'][n])
assert len(stats) == cf.CONSTANTS['num_stats'], "bases %s must be exactly %s integers long"%(stats, cf.CONSTANTS['num_stats'])
u_i['stats'] = SaveLoad.build_stat_dict(stats)
# print("%s's stats: %s", u_i['name'], u_i['stats'])
u_i['growths'] = SaveLoad.intify_comma_list(unit.find('growths').text)
u_i['growths'].extend([0] * (cf.CONSTANTS['num_stats'] - len(u_i['growths'])))
assert len(u_i['growths']) == cf.CONSTANTS['num_stats'], "growths %s must be exactly %s integers long"%(stats, cf.CONSTANTS['num_stats'])
u_i['items'] = ItemMethods.itemparser(unit.find('inventory').text)
# Parse wexp
u_i['wexp'] = unit.find('wexp').text.split(',')
for index, wexp in enumerate(u_i['wexp'][:]):
if wexp in CustomObjects.WEAPON_EXP.wexp_dict:
u_i['wexp'][index] = CustomObjects.WEAPON_EXP.wexp_dict[wexp]
u_i['wexp'] = [int(num) for num in u_i['wexp']]
assert len(u_i['wexp']) == len(CustomObjects.WEAPON_TRIANGLE.types), "%s's wexp must have as many slots as there are weapon types."%(u_i['name'])
u_i['desc'] = unit.find('desc').text
# Tags
u_i['tags'] = set(unit.find('tags').text.split(',')) if unit.find('tags') is not None and unit.find('tags').text is not None else set()
# Personal Skills
personal_skills = unit.find('skills').text.split(',') if unit.find('skills') is not None and unit.find('skills').text is not None else []
u_i['skills'] = [StatusObject.statusparser(status) for status in personal_skills]
units.append(Unit(u_i, portrait_data))
return units
def find(data, name):
return next((x for x in data if x.name == name), None)
# === MODEL CLASS ===
class Unit(object):
def __init__(self, info, portrait_data):
if info:
self.id = info['id']
self.name = info['name']
self.level = int(info['level'])
self.gender = int(info['gender'])
self.faction = info['faction']
self.klass = info['klass']
self.tags = info['tags']
self.desc = info['desc']
self.stats = info['stats']
self.growths = info['growths']
self.wexp = info['wexp']
self.items = info['items']
self.skills = info['skills']
self.team = 'player'
try:
self.image = create_chibi(self.name)
except KeyError:
self.image = GC.UNITDICT[self.faction + 'Emblem'].convert_alpha()
else:
self.id = 0
self.name = ''
self.level = 1
self.gender = 0
self.faction = ''
self.klass = 'Citizen'
self.tags = set()
self.desc = ''
current_class = find(class_data, self.klass)
self.stats = SaveLoad.build_stat_dict(current_class.bases)
self.growths = [0 for n in xrange(cf.CONSTANTS['num_stats'])]
self.items = []
self.skills = []
self.wexp = [0 for n in xrange(len(CustomObjects.WEAPON_TRIANGLE.types))]
self.team = 'player'
self.image = None
class Klass(object):
def __init__(self, info):
if info:
self.name = info['name']
self.wexp = info['wexp_gain']
self.promotes_from = info['promotes_from']
self.promotes_to = info['turns_into']
self.movement_group = info['movement_group']
self.tags = info['tags']
self.skills = [s[1] for s in info['skills']]
self.skill_levels = [s[0] for s in info['skills']]
self.growths = info['growths']
self.bases = info['bases']
self.promotion = info['promotion']
self.max = info['max']
self.desc = info['desc']
else:
self.name = ''
self.wexp = [0 for n in xrange(len(CustomObjects.WEAPON_TRIANGLE.types))]
self.promotes_from = ''
self.promotes_to = []
self.movement_group = 0
self.tags = set()
self.skills = []
self.skill_levels = []
self.bases = [0 for n in xrange(cf.CONSTANTS['num_stats'])]
self.growths = [0 for n in xrange(cf.CONSTANTS['num_stats'])]
self.promotion = [0 for n in xrange(cf.CONSTANTS['num_stats'])]
self.max = [40, 15, 15, 15, 15, 20, 15, 15, 20]
self.desc = ''
self.unit = GenericUnit(self.name)
self.images = (self.unit.image1, self.unit.image2, self.unit.image3)
self.image = self.images[0]
# === For use by class object ===
class GenericUnit(object):
def __init__(self, klass, gender=0):
self.gender = gender
self.team = 'player'
self.klass = klass
self.stats = {}
self.stats['HP'] = 1
self.currenthp = 1
self.sprite = UnitSprite.UnitSprite(self)
GC.PASSIVESPRITECOUNTER.count = 0
self.image1 = self.sprite.create_image('passive').subsurface(20, 18, 24, 24).convert_alpha()
GC.PASSIVESPRITECOUNTER.increment()
self.image2 = self.sprite.create_image('passive').subsurface(20, 18, 24, 24).convert_alpha()
GC.PASSIVESPRITECOUNTER.increment()
self.image3 = self.sprite.create_image('passive').subsurface(20, 18, 24, 24).convert_alpha()
    def get_images(self):
        self.images = (self.image1, self.image2, self.image3)
        return self.images
# === Overall View Methods ===
class UnitView(QtGui.QWidget):
def __init__(self, window):
super(UnitView, self).__init__(window)
self.grid = QtGui.QGridLayout()
self.window = window
self.current = None
# === Unit Face Display ===
face_grid = QtGui.QGridLayout()
self.portrait = QtGui.QLabel()
face_grid.addWidget(self.portrait, 0, 0, 4, 4, QtCore.Qt.AlignCenter)
# === Character Data ===
char_grid = QtGui.QGridLayout()
# Name
name_label = QtGui.QLabel('Name:')
char_grid.addWidget(name_label, 0, 0)
self.name = QtGui.QLineEdit()
self.name.setMaxLength(12)
self.name.setStatusTip("Change name")
char_grid.addWidget(self.name, 0, 1, 1, 2)
self.set_name_button = QtGui.QPushButton('Change Name')
self.set_name_button.clicked.connect(self.change_name)
char_grid.addWidget(self.set_name_button, 0, 3)
# Level
level_label = QtGui.QLabel('Level:')
char_grid.addWidget(level_label, 1, 0)
self.level = QtGui.QSpinBox()
self.level.setMinimum(1)
char_grid.addWidget(self.level, 1, 1)
# Gender
gender_label = QtGui.QLabel('Gender:')
char_grid.addWidget(gender_label, 1, 2)
self.gender = QtGui.QSpinBox()
self.gender.setMinimum(0)
self.gender.setMaximum(9)
char_grid.addWidget(self.gender, 1, 3)
# Class
klass_label = QtGui.QLabel('Class:')
char_grid.addWidget(klass_label, 2, 0)
self.klass = QtGui.QComboBox()
self.klass.uniformItemSizes = True
self.klass.setIconSize(QtCore.QSize(48, 32))
for klass in class_data:
self.klass.addItem(create_icon(klass.images[0]), klass.name)
self.klass.currentIndexChanged.connect(self.class_change)
char_grid.addWidget(self.klass, 2, 1, 1, 3)
# Faction
faction_label = QtGui.QLabel('Faction:')
char_grid.addWidget(faction_label, 3, 0)
self.faction = QtGui.QLineEdit()
char_grid.addWidget(self.faction, 3, 1, 1, 3)
# Lordbox
self.lord = QtGui.QCheckBox('Lord?')
char_grid.addWidget(self.lord, 4, 0, 1, 2)
# Boss box
self.boss = QtGui.QCheckBox('Boss?')
char_grid.addWidget(self.boss, 4, 2, 1, 2)
# Description
desc_label = QtGui.QLabel('Desc:')
char_grid.addWidget(desc_label, 5, 0)
self.desc = QtGui.QTextEdit()
self.desc.setFixedHeight(48)
char_grid.addWidget(self.desc, 5, 1, 2, 3)
# === Stats ===
stat_grid = QtGui.QGridLayout()
# Names
stats_label = QtGui.QLabel('Stats:')
stat_grid.addWidget(stats_label, 0, 0)
for index, stat_name in enumerate(cf.CONSTANTS['stat_names']):
stat_label = QtGui.QLabel(stat_name)
stat_grid.addWidget(stat_label, 0, index + 1)
bases_label = QtGui.QLabel('Bases:')
stat_grid.addWidget(bases_label, 1, 0)
growths_label = QtGui.QLabel('Growths:')
stat_grid.addWidget(growths_label, 2, 0)
self.stat_bases = [QtGui.QSpinBox() for stat in cf.CONSTANTS['stat_names']]
self.stat_growths = [QtGui.QSpinBox() for stat in cf.CONSTANTS['stat_names']]
for index, s in enumerate(self.stat_bases):
s.setMinimum(0)
s.setMaximum(cf.CONSTANTS['max_stat'])
stat_grid.addWidget(s, 1, index + 1)
for index, s in enumerate(self.stat_growths):
s.setMinimum(-500)
s.setSingleStep(5)
s.setMaximum(500)
stat_grid.addWidget(s, 2, index + 1)
# === Weapon Exp ===
wexp_grid = QtGui.QGridLayout()
wexp_label = QtGui.QLabel('Wexp:')
wexp_grid.addWidget(wexp_label, 0, 0, 2, 1)
weapon_types = CustomObjects.WEAPON_TRIANGLE.types
for index, wexp_name in enumerate(weapon_types):
name_label = QtGui.QLabel(wexp_name)
icon_label = QtGui.QLabel()
wexp_icon = CustomObjects.WeaponIcon(idx=index)
icon_label.setPixmap(create_pixmap(wexp_icon.image.convert_alpha()))
wexp_grid.addWidget(name_label, 0, (index + 1)*2 + 1)
wexp_grid.addWidget(icon_label, 0, (index + 1)*2)
self.wexp = [QtGui.QSpinBox() for wexp in weapon_types]
for index, s in enumerate(self.wexp):
s.setMinimum(0)
s.setMaximum(CustomObjects.WEAPON_EXP.sorted_list[-1][1])
wexp_grid.addWidget(s, 1, (index + 1)*2, 1, 2)
# Horizontal line
line = QtGui.QFrame()
line.setFrameStyle(QtGui.QFrame.HLine)
line.setLineWidth(0)
wexp_grid.addWidget(line, 2, 0, 1, len(self.wexp)*2 + 2)
# === Items ===
item_grid = QtGui.QGridLayout()
item_label = QtGui.QLabel('Item:')
drop_label = QtGui.QLabel('Drop?')
event_label = QtGui.QLabel('Event?')
self.add_item_button = QtGui.QPushButton('Add Item')
self.add_item_button.clicked.connect(self.add_item)
self.remove_item_button = QtGui.QPushButton('Remove Item')
self.remove_item_button.clicked.connect(self.remove_item)
self.remove_item_button.setEnabled(False)
self.items = []
for num in xrange(cf.CONSTANTS['max_items']):
self.items.append((self.create_item_combo_box(), QtGui.QCheckBox(), QtGui.QCheckBox()))
for index, item in enumerate(self.items):
item_box, drop, event = item
item_grid.addWidget(item_box, index + 1, 0, 1, 2, QtCore.Qt.AlignTop)
item_grid.addWidget(drop, index + 1, 2, QtCore.Qt.AlignTop)
item_grid.addWidget(event, index + 1, 3, QtCore.Qt.AlignTop)
item_grid.addWidget(item_label, 0, 0, 1, 2, QtCore.Qt.AlignTop)
item_grid.addWidget(drop_label, 0, 2, QtCore.Qt.AlignTop)
item_grid.addWidget(event_label, 0, 3, QtCore.Qt.AlignTop)
item_grid.addWidget(self.add_item_button, cf.CONSTANTS['max_items'] + 2, 0, 1, 2, QtCore.Qt.AlignBottom)
item_grid.addWidget(self.remove_item_button, cf.CONSTANTS['max_items'] + 2, 2, 1, 2, QtCore.Qt.AlignBottom)
self.clear_items()
# === Personal Skills ===
skill_grid = QtGui.QGridLayout()
skill_label = QtGui.QLabel('Personal Skill:')
self.add_skill_button = QtGui.QPushButton('Add Skill')
self.add_skill_button.clicked.connect(self.add_skill)
self.remove_skill_button = QtGui.QPushButton('Remove Skill')
self.remove_skill_button.clicked.connect(self.remove_skill)
self.remove_skill_button.setEnabled(False)
self.skills = []
for num in xrange(cf.CONSTANTS['num_skills']):
self.skills.append(self.create_skill_combo_box())
for index, skill_box in enumerate(self.skills):
skill_grid.addWidget(skill_box, index + 1, 0, 1, 2, )
skill_grid.addWidget(skill_label, 0, 0, 1, 2, QtCore.Qt.AlignTop)
skill_grid.addWidget(self.add_skill_button, cf.CONSTANTS['num_skills'] + 2, 0)
skill_grid.addWidget(self.remove_skill_button, cf.CONSTANTS['num_skills'] + 2, 1)
self.clear_skills()
# === Final gridding ===
self.grid.addLayout(face_grid, 0, 0)
self.grid.addLayout(stretch(char_grid), 0, 1)
self.grid.addLayout(stretch(stat_grid), 1, 0, 1, 2)
self.grid.addLayout(stretch(wexp_grid), 2, 0, 1, 2)
self.grid.addLayout(stretch(item_grid), 3, 0)
self.grid.addLayout(stretch(skill_grid), 3, 1)
def change_name(self):
if self.current:
new_name = str(self.name.text())
self.current.name = new_name
try:
self.current.image = create_chibi(new_name)
except KeyError:
# Show pop-up
message_box = QtGui.QMessageBox()
message_box.setText("No png file named %s found in Data/Characters/" % (new_name + 'Portrait.png'))
message_box.exec_()
self.current.image = create_chibi('Generic')
portrait = find(portrait_data, new_name)
if portrait:
self.current.portrait = portrait
self.window.reset()
self.display(self.current)
# Item functions
def clear_items(self):
for index, (item_box, drop, event) in enumerate(self.items):
item_box.hide()
drop.hide()
event.hide()
self.num_items = 0
def add_item(self):
self.num_items += 1
self.remove_item_button.setEnabled(True)
item_box, drop, event = self.items[self.num_items - 1]
item_box.show()
drop.show()
event.show()
if self.num_items >= cf.CONSTANTS['max_items']:
self.add_item_button.setEnabled(False)
def remove_item(self):
self.num_items -= 1
self.add_item_button.setEnabled(True)
item_box, drop, event = self.items[self.num_items]
item_box.hide()
drop.hide()
event.hide()
if self.num_items <= 0:
self.remove_item_button.setEnabled(False)
def create_item_combo_box(self):
item_box = QtGui.QComboBox()
item_box.uniformItemSizes = True
item_box.setIconSize(QtCore.QSize(16, 16))
for item in item_data:
if item.icon:
item_box.addItem(item.icon, item.name)
else:
item_box.addItem(item.name)
return item_box
# Skill functions
def clear_skills(self):
for index, skill_box in enumerate(self.skills):
skill_box.hide()
self.num_skills = 0
def add_skill(self):
self.num_skills += 1
self.remove_skill_button.setEnabled(True)
skill_box = self.skills[self.num_skills - 1]
skill_box.show()
if self.num_skills >= cf.CONSTANTS['num_skills']:
self.add_skill_button.setEnabled(False)
def remove_skill(self):
self.num_skills -= 1
self.add_skill_button.setEnabled(True)
skill_box = self.skills[self.num_skills]
skill_box.hide()
if self.num_skills <= 0:
self.remove_skill_button.setEnabled(False)
def create_skill_combo_box(self):
skill_box = QtGui.QComboBox()
skill_box.uniformItemSizes = True
skill_box.setIconSize(QtCore.QSize(16, 16))
for skill in skill_data:
if skill.icon:
skill_box.addItem(skill.icon, skill.name)
else:
skill_box.addItem(skill.name)
return skill_box
def class_change(self, new):
# Set which wexps are valid
valid_weapons = class_data[new].wexp
for index in xrange(len(self.wexp)):
enable = valid_weapons[index]
self.wexp[index].setEnabled(enable)
if enable:
self.wexp[index].setMinimum(1)
else:
self.wexp[index].setMinimum(0)
self.wexp[index].setValue(0)
# Displaying functions
def display(self, unit):
self.current = unit
# Char data
self.name.setText(unit.name)
# self.team.setCurrentIndex(self.teams.index(unit.team))
self.gender.setValue(unit.gender)
self.level.setValue(unit.level)
self.faction.setText(unit.faction)
self.lord.setChecked('Lord' in unit.tags)
self.boss.setChecked('Boss' in unit.tags)
self.desc.setText(unit.desc)
for idx, klass in enumerate(class_data):
if klass.name == unit.klass:
class_index = idx
break
self.klass.setCurrentIndex(class_index)
self.class_change(class_index)
for index, (stat_name, stat) in enumerate(unit.stats.iteritems()):
self.stat_bases[index].setValue(stat.base_stat)
self.stat_growths[index].setValue(unit.growths[index])
for index, wexp in enumerate(unit.wexp):
self.wexp[index].setValue(wexp)
self.clear_items()
for index, item in enumerate(unit.items):
self.add_item()
item_box, drop_box, event_box = self.items[index]
drop_box.setChecked(item.droppable)
event_box.setChecked(item.event_combat)
item_box.setCurrentIndex([i.name for i in item_data].index(item.name))
self.clear_skills()
for index, skill in enumerate(unit.skills):
self.add_skill()
skill_box = self.skills[index]
skill_box.setCurrentIndex([s.id for s in skill_data].index(skill.id))
portrait = find(portrait_data, unit.name)
if portrait:
portrait.create_image()
pixmap = create_pixmap(Engine.transform_scale(portrait.image.convert_alpha(), (96*2, 80*2)))
self.portrait.setPixmap(pixmap)
else:
self.portrait.clear()
def save_current(self):
if self.current:
# self.current.name = str(self.name.text())
self.current.gender = int(self.gender.value())
self.current.level = int(self.level.value())
self.current.faction = str(self.faction.text())
self.current.tags = set()
if self.lord.isChecked():
self.current.tags.add('Lord')
if self.boss.isChecked():
self.current.tags.add('Boss')
self.current.desc = str(self.desc.toPlainText())
self.current.klass = str(self.klass.currentText())
            for index, (stat_name, stat) in enumerate(self.current.stats.iteritems()):
                stat.base_stat = int(self.stat_bases[index].value())
self.current.growths = [int(s.value()) for s in self.stat_growths]
self.current.wexp = [int(s.value()) for s in self.wexp]
self.current.items = []
for index, (item_box, drop_box, event_box) in enumerate(self.items[:self.num_items]):
item = item_data[item_box.currentIndex()]
item.droppable = drop_box.isChecked()
item.event_combat = event_box.isChecked()
self.current.items.append(item)
self.current.skills = []
for index, skill_box in enumerate(self.skills[:self.num_skills]):
self.current.skills.append(skill_data[skill_box.currentIndex()])
def tick(self, current_time):
if GC.PASSIVESPRITECOUNTER.update(current_time):
for index, klass in enumerate(class_data):
icon = create_icon(klass.images[GC.PASSIVESPRITECOUNTER.count])
self.klass.setItemIcon(index, icon)
class ClassView(QtGui.QWidget):
def __init__(self, window):
super(ClassView, self).__init__(window)
self.grid = QtGui.QGridLayout()
self.window = window
self.current = None
# === Character Data ===
char_grid = QtGui.QGridLayout()
# Name
name_label = QtGui.QLabel('Name:')
char_grid.addWidget(name_label, 0, 0)
self.name = QtGui.QLineEdit()
self.name.setMaxLength(12)
self.name.setStatusTip("Change name")
char_grid.addWidget(self.name, 0, 1, 1, 2)
self.set_name_button = QtGui.QPushButton('Change Name')
self.set_name_button.clicked.connect(self.change_name)
char_grid.addWidget(self.set_name_button, 0, 3)
# Description
desc_label = QtGui.QLabel('Desc:')
char_grid.addWidget(desc_label, 1, 0)
self.desc = QtGui.QTextEdit()
self.desc.setFixedHeight(48)
char_grid.addWidget(self.desc, 1, 1, 1, 3)
# Movement Group
move_label = QtGui.QLabel('Movement Group:')
char_grid.addWidget(move_label, 2, 0)
self.movement_group = QtGui.QSpinBox()
self.movement_group.setMinimum(0)
self.movement_group.setMaximum(10) # Placeholder
char_grid.addWidget(self.movement_group, 2, 1)
# Mounted box
self.mounted = QtGui.QCheckBox('Mounted?')
char_grid.addWidget(self.mounted, 2, 2)
# Flying box
self.flying = QtGui.QCheckBox('Flying?')
char_grid.addWidget(self.flying, 2, 3)
# Class
klass_label = QtGui.QLabel('Promotes From:')
char_grid.addWidget(klass_label, 3, 0)
self.promotes_from = QtGui.QComboBox()
self.promotes_from.uniformItemSizes = True
self.promotes_from.setIconSize(QtCore.QSize(48, 32))
self.promotes_from.addItem('None')
for klass in class_data:
self.promotes_from.addItem(create_icon(klass.images[0]), klass.name)
char_grid.addWidget(self.promotes_from, 3, 1, 1, 3)
# === Weapon Exp ===
wexp_grid = QtGui.QGridLayout()
wexp_label = QtGui.QLabel('Wexp:')
wexp_grid.addWidget(wexp_label, 0, 0, 2, 1)
weapon_types = CustomObjects.WEAPON_TRIANGLE.types
for index, wexp_name in enumerate(weapon_types):
name_label = QtGui.QLabel(wexp_name)
icon_label = QtGui.QLabel()
wexp_icon = CustomObjects.WeaponIcon(idx=index)
icon_label.setPixmap(create_pixmap(wexp_icon.image.convert_alpha()))
wexp_grid.addWidget(name_label, 0, (index + 1)*2 + 1)
wexp_grid.addWidget(icon_label, 0, (index + 1)*2)
self.wexp = [QtGui.QSpinBox() for wexp in weapon_types]
for index, s in enumerate(self.wexp):
s.setMinimum(0)
s.setMaximum(CustomObjects.WEAPON_EXP.sorted_list[-1][1])
wexp_grid.addWidget(s, 1, (index + 1)*2, 1, 2)
# Horizontal line
line = QtGui.QFrame()
line.setFrameStyle(QtGui.QFrame.HLine)
line.setLineWidth(0)
wexp_grid.addWidget(line, 2, 0, 1, len(self.wexp)*2 + 2)
# === Stats ===
stat_grid = QtGui.QGridLayout()
# Names
stats_label = QtGui.QLabel('Stats:')
stat_grid.addWidget(stats_label, 0, 0)
for index, stat_name in enumerate(cf.CONSTANTS['stat_names']):
stat_label = QtGui.QLabel(stat_name)
stat_grid.addWidget(stat_label, 0, index + 1)
bases_label = QtGui.QLabel('Bases:')
stat_grid.addWidget(bases_label, 1, 0)
growths_label = QtGui.QLabel('Growths:')
stat_grid.addWidget(growths_label, 2, 0)
promotion_label = QtGui.QLabel('Promotion:')
stat_grid.addWidget(promotion_label, 3, 0)
max_label = QtGui.QLabel('Max:')
stat_grid.addWidget(max_label, 4, 0)
self.stat_bases = [QtGui.QSpinBox() for stat in cf.CONSTANTS['stat_names']]
self.stat_growths = [QtGui.QSpinBox() for stat in cf.CONSTANTS['stat_names']]
self.stat_promotion = [QtGui.QSpinBox() for stat in cf.CONSTANTS['stat_names']]
self.stat_max = [QtGui.QSpinBox() for stat in cf.CONSTANTS['stat_names']]
for index, s in enumerate(self.stat_bases):
s.setMinimum(0)
s.setMaximum(int(self.stat_max[index].value()))
stat_grid.addWidget(s, 1, index + 1)
for index, s in enumerate(self.stat_growths):
s.setMinimum(-500)
s.setSingleStep(5)
s.setMaximum(500)
stat_grid.addWidget(s, 2, index + 1)
for index, s in enumerate(self.stat_promotion):
s.setMinimum(-10)
s.setMaximum(int(self.stat_max[index].value()))
stat_grid.addWidget(s, 3, index + 1)
for index, s in enumerate(self.stat_max):
s.setMinimum(0)
s.setMaximum(60)
s.valueChanged.connect(self.max_change)
stat_grid.addWidget(s, 4, index + 1)
# === Promotions ===
option_grid = QtGui.QGridLayout()
option_label = QtGui.QLabel('Promotes To:')
self.add_option_button = QtGui.QPushButton('Add Option')
self.add_option_button.clicked.connect(self.add_option)
self.remove_option_button = QtGui.QPushButton('Remove Option')
self.remove_option_button.clicked.connect(self.remove_option)
self.remove_option_button.setEnabled(False)
self.options = []
for num in xrange(cf.CONSTANTS['max_promotions']):
self.options.append(self.create_option_combo_box())
for index, option in enumerate(self.options):
option_grid.addWidget(option, index + 1, 0, 1, 2, QtCore.Qt.AlignTop)
option_grid.addWidget(option_label, 0, 0, 1, 2, QtCore.Qt.AlignTop)
option_grid.addWidget(self.add_option_button, cf.CONSTANTS['max_promotions'] + 2, 0, 1, 1, QtCore.Qt.AlignBottom)
option_grid.addWidget(self.remove_option_button, cf.CONSTANTS['max_promotions'] + 2, 1, 1, 1, QtCore.Qt.AlignBottom)
self.clear_options()
# === Personal Skills ===
skill_grid = QtGui.QGridLayout()
skill_label = QtGui.QLabel('Class Skills:')
level_label = QtGui.QLabel('Level:')
skill_label2 = QtGui.QLabel('Skill:')
self.add_skill_button = QtGui.QPushButton('Add Skill')
self.add_skill_button.clicked.connect(self.add_skill)
self.remove_skill_button = QtGui.QPushButton('Remove Skill')
self.remove_skill_button.clicked.connect(self.remove_skill)
self.remove_skill_button.setEnabled(False)
self.skills, self.skill_levels = [], []
for num in xrange(cf.CONSTANTS['num_skills']):
self.skills.append(self.create_skill_combo_box())
skill_level = QtGui.QSpinBox()
skill_level.setMinimum(1)
skill_level.setMaximum(cf.CONSTANTS['max_level'])
self.skill_levels.append(skill_level)
for index, skill_box in enumerate(self.skills):
skill_grid.addWidget(skill_box, index + 2, 1, 1, 3)
skill_grid.addWidget(self.skill_levels[index], index + 2, 0)
skill_grid.addWidget(skill_label, 0, 0, 1, 4, QtCore.Qt.AlignTop)
skill_grid.addWidget(level_label, 1, 0)
skill_grid.addWidget(skill_label2, 1, 1, 1, 3)
skill_grid.addWidget(self.add_skill_button, cf.CONSTANTS['num_skills'] + 3, 0, 1, 2)
skill_grid.addWidget(self.remove_skill_button, cf.CONSTANTS['num_skills'] + 3, 2, 1, 2)
self.clear_skills()
# === Final gridding ===
self.grid.addLayout(stretch(char_grid), 0, 0)
self.grid.addLayout(stretch(wexp_grid), 1, 0, 1, 3)
self.grid.addLayout(stretch(stat_grid), 2, 0, 1, 3)
self.grid.addLayout(stretch(option_grid), 0, 1)
self.grid.addLayout(stretch(skill_grid), 0, 2)
def change_name(self):
if self.current:
new_name = str(self.name.text())
self.current.name = new_name
self.current.images = GenericUnit(new_name).get_images()
self.window.reset()
self.display(self.current)
def max_change(self):
for index, s in enumerate(self.stat_bases):
s.setMaximum(int(self.stat_max[index].value()))
for index, s in enumerate(self.stat_promotion):
s.setMaximum(int(self.stat_max[index].value()))
# Promotion Option functions
def clear_options(self):
for index, option in enumerate(self.options):
option.hide()
self.num_options = 0
def add_option(self):
self.num_options += 1
self.remove_option_button.setEnabled(True)
option = self.options[self.num_options - 1]
option.show()
if self.num_options >= cf.CONSTANTS['max_promotions']:
self.add_option_button.setEnabled(False)
    def remove_option(self):
        self.num_options -= 1
        self.add_option_button.setEnabled(True)
        option = self.options[self.num_options]
        option.hide()
        if self.num_options <= 0:
            self.remove_option_button.setEnabled(False)
def create_option_combo_box(self):
option = QtGui.QComboBox()
option.uniformItemSizes = True
option.setIconSize(QtCore.QSize(48, 32))
for klass in class_data:
option.addItem(create_icon(klass.images[0]), klass.name)
return option
# Skill functions
def clear_skills(self):
for index, skill_box in enumerate(self.skills):
skill_box.hide()
for index, level_box in enumerate(self.skill_levels):
level_box.hide()
self.num_skills = 0
def add_skill(self):
self.num_skills += 1
self.remove_skill_button.setEnabled(True)
self.skills[self.num_skills - 1].show()
self.skill_levels[self.num_skills - 1].show()
if self.num_skills >= cf.CONSTANTS['num_skills']:
self.add_skill_button.setEnabled(False)
def remove_skill(self):
self.num_skills -= 1
self.add_skill_button.setEnabled(True)
self.skills[self.num_skills].hide()
self.skill_levels[self.num_skills].hide()
if self.num_skills <= 0:
self.remove_skill_button.setEnabled(False)
def create_skill_combo_box(self):
skill_box = QtGui.QComboBox()
skill_box.uniformItemSizes = True
skill_box.setIconSize(QtCore.QSize(16, 16))
for skill in skill_data:
if skill.image:
skill_box.addItem(create_icon(skill.image), skill.name)
else:
skill_box.addItem(skill.name)
return skill_box
# Displaying functions
def display(self, klass):
self.current = klass
# Char data
self.name.setText(klass.name)
self.desc.setText(klass.desc)
self.movement_group.setValue(klass.movement_group)
self.mounted.setChecked('Mounted' in klass.tags)
self.flying.setChecked('Flying' in klass.tags)
class_index = -1
for idx, k in enumerate(class_data):
if k.name == klass.promotes_from:
class_index = idx
break
self.promotes_from.setCurrentIndex(class_index + 1)
for index in xrange(len(cf.CONSTANTS['stat_names'])):
self.stat_max[index].setValue(klass.max[index])
self.stat_bases[index].setValue(klass.bases[index])
self.stat_growths[index].setValue(klass.growths[index])
self.stat_promotion[index].setValue(klass.promotion[index])
for index, wexp in enumerate(klass.wexp):
self.wexp[index].setValue(wexp)
self.clear_options()
class_names = [c.name for c in class_data]
for index, name in enumerate(klass.promotes_to):
self.add_option()
self.options[index].setCurrentIndex(class_names.index(name))
self.clear_skills()
skill_names = [s.id for s in skill_data]
for index, skill in enumerate(klass.skills):
self.add_skill()
self.skills[index].setCurrentIndex(skill_names.index(skill))
self.skill_levels[index].setValue(klass.skill_levels[index])
def save_current(self):
if self.current:
            # self.current.name = str(self.name.text())
self.current.movement_group = int(self.movement_group.value())
self.current.tags = set()
if self.mounted.isChecked():
self.current.tags.add('Mounted')
if self.flying.isChecked():
self.current.tags.add('Flying')
self.current.desc = str(self.desc.toPlainText())
self.current.promotes_from = str(self.promotes_from.currentText())
self.current.bases = [int(s.value()) for s in self.stat_bases]
self.current.growths = [int(s.value()) for s in self.stat_growths]
self.current.promotion = [int(s.value()) for s in self.stat_promotion]
self.current.max = [int(s.value()) for s in self.stat_max]
self.current.wexp = [int(s.value()) for s in self.wexp]
self.current.promotes_to = []
for index, option in enumerate(self.options[:self.num_options]):
klass = class_data[option.currentIndex()]
self.current.promotes_to.append(klass.name)
self.current.skills = []
self.current.skill_levels = []
for index, skill_box in enumerate(self.skills[:self.num_skills]):
self.current.skills.append(skill_data[skill_box.currentIndex()].id)
self.current.skill_levels.append(int(self.skill_levels[index].value()))
def tick(self, current_time):
if GC.PASSIVESPRITECOUNTER.update(current_time):
for index, klass in enumerate(class_data):
icon = create_icon(klass.images[GC.PASSIVESPRITECOUNTER.count])
self.promotes_from.setItemIcon(index + 1, icon)
for option in self.options[:self.num_options]:
option.setItemIcon(index, icon)
class PortraitView(QtGui.QWidget):
def __init__(self, window):
super(PortraitView, self).__init__(window)
self.grid = QtGui.QGridLayout()
self.window = window
# window.setLayout(self.grid)
self.current = None
# === Unit Face Display ===
face_grid = QtGui.QGridLayout()
self.portrait = QtGui.QLabel()
face_grid.addWidget(self.portrait, 0, 0, 4, 4, QtCore.Qt.AlignCenter)
face2_grid = QtGui.QHBoxLayout()
self.blink_button = QtGui.QPushButton('Blink')
self.blink_button.setCheckable(True)
self.blink_button.clicked.connect(self.blink)
self.smile_button = QtGui.QPushButton('Smile')
self.smile_button.setCheckable(True)
self.smile_button.clicked.connect(self.smile)
self.talk_button = QtGui.QPushButton('Talk')
self.talk_button.setCheckable(True)
self.talk_button.clicked.connect(self.talk)
face2_grid.addWidget(self.blink_button)
face2_grid.addWidget(self.smile_button)
face2_grid.addWidget(self.talk_button)
face_grid.addLayout(face2_grid, 4, 0, 1, 4)
blink_label = QtGui.QLabel('Blink Position (x, y)')
mouth_label = QtGui.QLabel('Mouth Position (x, y)')
face_grid.addWidget(blink_label, 5, 0, 1, 2)
face_grid.addWidget(mouth_label, 5, 2, 1, 2)
self.pos_boxes = []
self.portrait_change = True
for num in xrange(4):
box = QtGui.QSpinBox()
box.setMinimum(0)
box.setMaximum(96)
box.valueChanged.connect(self.spin_box_change)
face_grid.addWidget(box, 6, num)
self.pos_boxes.append(box)
# Name
char_grid = QtGui.QGridLayout()
name_label = QtGui.QLabel('Name:')
char_grid.addWidget(name_label, 0, 0)
self.name = QtGui.QLineEdit()
self.name.setMaxLength(12)
self.name.setStatusTip("Change name")
char_grid.addWidget(self.name, 0, 1)
reload_button = QtGui.QPushButton('Find')
reload_button.clicked.connect(self.reload_current)
char_grid.addWidget(reload_button, 0, 2)
self.grid.addLayout(face_grid, 0, 0)
self.grid.addLayout(char_grid, 1, 0)
# For face
def blink(self):
if self.blink_button.isChecked():
self.current.blinking = 1
else:
self.current.blinking = 2
def smile(self):
if self.smile_button.isChecked():
            self.current.expression = 'Smiling'
else:
self.current.expression = 'Normal'
def talk(self):
if self.talk_button.isChecked():
self.current.talk()
else:
self.current.stop_talking()
def reload_current(self):
if self.current:
name = str(self.name.text())
try:
new_portrait = UnitPortrait(name, self.current.blink_position, self.current.mouth_position, (0, 0))
self.window.data[self.window.list.currentRow()] = new_portrait
self.current = new_portrait
except KeyError:
# Show pop-up
message_box = QtGui.QMessageBox()
message_box.setText("No png file named %s found in Data/Characters/" % (name + 'Portrait.png'))
message_box.exec_()
self.window.reset()
def spin_box_change(self):
if self.portrait_change:
self.current.blink_position = self.pos_boxes[0].value(), self.pos_boxes[1].value()
self.current.mouth_position = self.pos_boxes[2].value(), self.pos_boxes[3].value()
# Displaying functions
def display(self, portrait):
self.current = portrait
# Name
self.name.setText(portrait.name)
# Face
self.smile() # Check these
self.talk()
portrait.create_image()
pixmap = create_pixmap(Engine.transform_scale(portrait.image.convert_alpha(), (96*2, 80*2)))
self.portrait.setPixmap(pixmap)
self.portrait_change = False
self.pos_boxes[0].setValue(portrait.blink_position[0])
self.pos_boxes[1].setValue(portrait.blink_position[1])
self.pos_boxes[2].setValue(portrait.mouth_position[0])
self.pos_boxes[3].setValue(portrait.mouth_position[1])
self.portrait_change = True
def save_current(self):
pass
def tick(self, current_time):
if self.current:
self.current.update(current_time)
self.current.create_image()
pixmap = create_pixmap(Engine.transform_scale(self.current.image.convert_alpha(), (96*2, 80*2)))
self.portrait.setPixmap(pixmap)
class GenericMenu(QtGui.QWidget):
def __init__(self, data, kind, view, parent=None):
super(GenericMenu, self).__init__(parent)
self.data = data
self.kind = kind
# Create list
self.list = QtGui.QListWidget(self)
self.list.setMinimumSize(128, 320)
self.list.uniformItemSizes = True
self.list.setDragDropMode(self.list.InternalMove)
self.list.setIconSize(QtCore.QSize(32, 32))
for index, datum in enumerate(data):
icon = create_icon(datum.image.convert_alpha())
item = QtGui.QListWidgetItem(datum.name)
item.setIcon(icon)
self.list.addItem(item)
self.list.currentItemChanged.connect(self.on_item_changed)
self.list.model().rowsMoved.connect(self.on_reorder)
self.add_button = QtGui.QPushButton("Add " + kind)
self.add_button.clicked.connect(self.add)
self.add_button.setStatusTip("Insert a new " + kind.lower())
self.remove_button = QtGui.QPushButton("Remove " + kind)
self.remove_button.clicked.connect(self.remove)
self.remove_button.setStatusTip("Remove selected " + kind.lower() + " data")
self.save_button = QtGui.QPushButton("Save to File")
self.save_button.clicked.connect(self.save)
self.save_button.setStatusTip("Write out current " + kind.lower() + " data to file")
button_grid = QtGui.QGridLayout()
button_grid.addWidget(self.add_button, 0, 0)
button_grid.addWidget(self.remove_button, 1, 0)
button_grid.addWidget(self.save_button, 2, 0)
# Create view
self.view = view(self)
# Create layout
self.grid = QtGui.QGridLayout()
self.setLayout(self.grid)
self.grid.addWidget(self.list, 0, 0)
self.grid.addLayout(button_grid, 1, 0)
self.grid.addLayout(self.view.grid, 0, 1, 2, 1)
def tick(self, current_time):
self.view.tick(current_time)
def on_item_changed(self, curr, prev):
current_idx = self.list.row(curr)
d = self.data[current_idx]
self.view.save_current()
self.view.display(d)
def on_reorder(self, row, old_idx, new_idx):
moved_d = self.data.pop(old_idx)
new_idx = self.list.currentRow()
self.data.insert(new_idx, moved_d)
def remove(self):
idx = self.list.currentRow()
del self.data[idx]
self.list.takeItem(idx)
if idx < len(self.data):
new = self.data[idx]
self.view.display(new)
        elif self.data:
            self.view.display(self.data[-1])
def reset(self):
idx = self.list.currentRow()
item = self.list.currentItem()
item.setText(self.data[idx].name)
if self.data[idx].image:
item.setIcon(create_icon(self.data[idx].image.convert_alpha()))
else:
item.setIcon(QtGui.QIcon())
class UnitMenu(GenericMenu):
def add(self):
unit = Unit(None, portrait_data)
current_idx = self.list.currentRow()
self.data.insert(current_idx + 1, unit)
icon = create_icon(unit.image)
item = QtGui.QListWidgetItem(unit.name)
item.setIcon(icon)
self.list.insertItem(current_idx + 1, item)
def save(self):
root = ET.Element("unit_catalog")
for u in self.data:
unit = ET.SubElement(root, "unit", name=u.name)
ET.SubElement(unit, "id").text = u.name
ET.SubElement(unit, "gender").text = str(u.gender)
ET.SubElement(unit, "wexp").text = ','.join([str(w) for w in u.wexp])
ET.SubElement(unit, "bases").text = ','.join([str(s.base_stat) for s in u.stats.values()])
ET.SubElement(unit, "growths").text = ','.join([str(g) for g in u.growths])
ET.SubElement(unit, "inventory").text = ','.join([i.id for i in u.items])
ET.SubElement(unit, "level").text = str(u.level)
ET.SubElement(unit, "class").text = u.klass
ET.SubElement(unit, "desc").text = u.desc
ET.SubElement(unit, "faction").text = u.faction
ET.SubElement(unit, "tags").text = ','.join(u.tags)
ET.SubElement(unit, "skills").text = ','.join([s.id for s in u.skills])
if PRETTY:
with open("units.xml", 'w') as fp:
fp.write(prettify(root))
else:
tree = ET.ElementTree(root)
tree.write("units.xml")
# Show pop-up
message_box = QtGui.QMessageBox()
message_box.setText("Saved to units.xml")
message_box.exec_()
class ClassMenu(GenericMenu):
def add(self):
klass = Klass()
current_idx = self.list.currentRow()
self.data.insert(current_idx + 1, klass)
icon = create_icon(klass.image)
item = QtGui.QListWidgetItem(klass.name)
item.setIcon(icon)
self.list.insertItem(current_idx + 1, item)
def save(self):
root = ET.Element("class_info")
for u in self.data:
klass = ET.SubElement(root, "class", name=u.name)
ET.SubElement(klass, "wexp").text = ','.join([str(w) for w in u.wexp])
ET.SubElement(klass, "promotes_from").text = u.promotes_from
ET.SubElement(klass, "turns_into").text = ','.join(u.promotes_to)
ET.SubElement(klass, "movement_group").text = str(u.movement_group)
ET.SubElement(klass, "tags").text = ','.join(u.tags)
skills = zip([str(l) for l in u.skill_levels], u.skills)
ET.SubElement(klass, "skills").text = ';'.join([','.join(s) for s in skills])
ET.SubElement(klass, "bases").text = ','.join([str(b) for b in u.bases])
ET.SubElement(klass, "growths").text = ','.join([str(g) for g in u.growths])
ET.SubElement(klass, "promotion").text = ','.join([str(p) for p in u.promotion])
ET.SubElement(klass, "max").text = ','.join([str(m) for m in u.max])
ET.SubElement(klass, "desc").text = u.desc
if PRETTY:
with open("class_info.xml", 'w') as fp:
fp.write(prettify(root))
else:
tree = ET.ElementTree(root)
tree.write("class_info.xml")
# Show pop-up
message_box = QtGui.QMessageBox()
message_box.setText("Saved to class_info.xml")
message_box.exec_()
class PortraitMenu(GenericMenu):
def add(self):
portrait = UnitPortrait('Generic', (0, 0), (0, 0), (0, 0))
current_idx = self.list.currentRow()
self.data.insert(current_idx + 1, portrait)
icon = create_icon(portrait.image.convert_alpha())
item = QtGui.QListWidgetItem(portrait.name)
item.setIcon(icon)
self.list.insertItem(current_idx + 1, item)
def save(self):
root = ET.Element("portrait_info")
for p in self.data:
unit = ET.SubElement(root, "portrait", name=p.name)
ET.SubElement(unit, "blink").text = ','.join([str(pos) for pos in p.blink_position])
ET.SubElement(unit, "mouth").text = ','.join([str(pos) for pos in p.mouth_position])
if PRETTY:
with open("portrait_coords.xml", 'w') as fp:
fp.write(prettify(root))
else:
tree = ET.ElementTree(root)
tree.write("portrait_coords.xml")
# Show pop-up
message_box = QtGui.QMessageBox()
message_box.setText("Saved to portrait_coords.xml")
message_box.exec_()
class MainEditor(QtGui.QMainWindow):
def __init__(self):
super(MainEditor, self).__init__()
self.setWindowTitle('Game Editor')
self.tabs = QtGui.QTabWidget()
self.setCentralWidget(self.tabs)
# Set up status bar
self.status_bar = self.statusBar()
self.status_bar.showMessage('Ready')
# Set up self.tabs
self.tab_names = ["Units", "Classes", "Items", "Skills",
"Lore", "Portraits", "Weapons", "Terrain",
"Movement", "Constants"]
self.tab_directory = {}
self.menu_directory = {}
for name in self.tab_names:
tab = QtGui.QWidget()
self.tabs.addTab(tab, name)
self.tab_directory[name] = tab
self.tabs.currentChanged.connect(self.page_swap)
self.current_idx = 0
# === Timing ===
self.main_timer = QtCore.QTimer()
self.main_timer.timeout.connect(self.tick)
self.main_timer.start(33) # 30 FPS
self.elapsed_timer = QtCore.QElapsedTimer()
self.elapsed_timer.start()
def start(self):
self.load_tab(self.current_idx)
def page_swap(self, new):
# new is index of tab
print('Switching Pages')
print(self.tab_names[new])
self.current_menu.view.save_current()
self.current_idx = new
self.load_tab(new)
if self.current_menu.view.current:
self.current_menu.view.display(self.current_menu.view.current)
def load_tab(self, idx):
if idx == 0:
self.load_unit_tab()
elif idx == 1:
self.load_class_tab()
elif idx == 5:
self.load_portrait_tab()
def load_unit_tab(self):
if "Units" not in self.menu_directory:
self.menu_directory["Units"] = UnitMenu(unit_data, 'Unit', UnitView)
self.tab_directory["Units"].setLayout(self.menu_directory["Units"].grid)
self.current_menu = self.menu_directory["Units"]
def load_class_tab(self):
if "Classes" not in self.menu_directory:
self.menu_directory["Classes"] = ClassMenu(class_data, 'Class', ClassView)
self.tab_directory["Classes"].setLayout(self.menu_directory["Classes"].grid)
self.current_menu = self.menu_directory["Classes"]
def load_portrait_tab(self):
if "Portraits" not in self.menu_directory:
self.menu_directory["Portraits"] = PortraitMenu(portrait_data, 'Portrait', PortraitView)
self.tab_directory["Portraits"].setLayout(self.menu_directory["Portraits"].grid)
self.current_menu = self.menu_directory["Portraits"]
def tick(self):
current_time = self.elapsed_timer.elapsed()
name = self.tab_names[self.current_idx]
menu = self.menu_directory[name]
menu.tick(current_time)
def load_data(window):
item_data = [ItemMethods.itemparser(item)[0] for item in GC.ITEMDATA]
item_data = sorted(item_data, key=lambda item: GC.ITEMDATA[item.id]['num'])
item_data = [item for item in item_data if not item.virtual]
for item in item_data:
if item.image:
item.image = item.image.convert_alpha()
skill_data = [StatusObject.statusparser(skill.find('id').text) for skill in GC.STATUSDATA.getroot().findall('status')]
for skill in skill_data:
if skill.image:
skill.image = skill.image.convert_alpha()
portrait_dict = SaveLoad.create_portrait_dict()
class_dict = SaveLoad.create_class_dict()
class_data = [Klass(v) for v in class_dict.values()]
unit_data = build_units(class_dict, portrait_dict)
# Setting up portrait data
portrait_data = []
for name, portrait in portrait_dict.items():
portrait_data.append(UnitPortrait(name, portrait['blink'], portrait['mouth'], (0, 0)))
for portrait in portrait_data:
portrait.create_image()
portrait.image = portrait.image.convert_alpha()
return unit_data, class_data, item_data, skill_data, portrait_data
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
window = MainEditor()
unit_data, class_data, item_data, skill_data, portrait_data = load_data(window)
window.start()
# Engine.remove_display()
window.show()
app.exec_()
| python |
"""Build V8 extension with Cython."""
from Cython.Build import cythonize
from distutils.command.build import build
from setuptools import setup
from setuptools.extension import Extension
import buildtools
#
# NOTE: You will need to add these to the build_ext command:
#
# --include-dirs "${V8}/include"
# --library-dirs "${V8_OUT}/lib.target:${V8_OUT}/obj.target/src"
#
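# For illustration only, a hypothetical invocation (the V8 and V8_OUT paths
# are placeholders for your local V8 checkout and build output directories):
#
#   python setup.py build_ext \
#       --include-dirs "${V8}/include" \
#       --library-dirs "${V8_OUT}/lib.target:${V8_OUT}/obj.target/src"
#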
setup(
name = 'v8',
license = 'MIT',
cmdclass = {
cmd.__name__: cmd
for cmd in buildtools.register_subcommands(
build,
buildtools.make_copy_files(
filenames=[
'icudtl.dat',
'natives_blob.bin',
'snapshot_blob.bin',
],
dst_dir='v8/data',
),
)
},
packages = ['v8'],
ext_modules = cythonize(Extension(
'v8.v8',
language = 'c++',
sources = ['v8/v8.pyx'],
libraries = [
'icui18n',
'icuuc',
'v8',
'v8_libbase',
'v8_libplatform',
],
extra_compile_args = [
'-std=c++11',
'-fno-exceptions',
'-fno-rtti',
],
)),
package_data = {
'v8': [
'data/icudtl.dat',
'data/natives_blob.bin',
'data/snapshot_blob.bin',
],
},
)
| python |
from django import forms
# from django.core.validators import DecimalValidator
from django.db.models.functions import Concat, Substr, Length, Cast
from django.db.models import Func, CharField, F, Value, IntegerField
from .models import Part, PartClass, Manufacturer, Subpart, Seller
from .validators import decimal, alphanumeric, numeric
class PartInfoForm(forms.Form):
quantity = forms.IntegerField(label='Quantity', min_value=1)
class PartForm(forms.Form):
partclasses = PartClass.objects.all()
number_class = forms.ModelChoiceField(
queryset=partclasses, label='Part Class*')
number_item = forms.CharField(
max_length=4,
label='Part Number',
required=False,
validators=[numeric],
widget=forms.TextInput(attrs={'placeholder': 'Auto-Generated if blank'}))
number_variation = forms.CharField(
max_length=2, label='Part Variation', required=False,
validators=[alphanumeric],
widget=forms.TextInput(attrs={'placeholder': 'Subcategory for each class'}))
description = forms.CharField(max_length=255, label='Description*')
revision = forms.CharField(max_length=2, label='Revision*', initial=1)
manufacturer_part_number = forms.CharField(max_length=128, required=False)
manufacturer = forms.ModelChoiceField(queryset=None, required=False)
new_manufacturer = forms.CharField(
max_length=128,
label='Create New Manufacturer',
required=False)
def __init__(self, *args, **kwargs):
self.organization = kwargs.pop('organization', None)
super(PartForm, self).__init__(*args, **kwargs)
self.fields['manufacturer'].queryset = Manufacturer.objects.filter(
organization=self.organization)
def clean(self):
cleaned_data = super(PartForm, self).clean()
mfg = cleaned_data.get("manufacturer")
new_mfg = cleaned_data.get("new_manufacturer")
if mfg and new_mfg:
raise forms.ValidationError(
('Cannot have a manufacturer and a new manufacturer'),
code='invalid')
elif new_mfg:
obj = Manufacturer(name=new_mfg, organization=self.organization)
obj.save()
cleaned_data['manufacturer'] = obj
elif not mfg and not new_mfg:
obj, c = Manufacturer.objects.get_or_create(name=self.organization.name.upper(), organization=self.organization)
cleaned_data['manufacturer'] = obj
class AddSubpartForm(forms.Form):
assembly_subpart = forms.ModelChoiceField(
queryset=None, required=True, label="Subpart")
count = forms.IntegerField(required=True, label='Quantity')
def __init__(self, *args, **kwargs):
self.organization = kwargs.pop('organization', None)
self.part_id = kwargs.pop('part_id', None)
super(AddSubpartForm, self).__init__(*args, **kwargs)
part = None
unusable_part_ids = []
if self.part_id:
part = Part.objects.get(id=self.part_id)
unusable_part_ids = [p.id for p in part.where_used_full()]
unusable_part_ids.append(part.id)
parts = Part.objects.filter(organization=self.organization).exclude(id__in=unusable_part_ids)
        parts = parts.all().annotate(item_t=Concat(Value('000'), 'number_item', output_field=CharField()))
        parts = parts.all().annotate(item=Substr(F('item_t'), Length('item_t') - 2, 3, output_field=CharField()))
        parts = parts.all().annotate(class_t=Concat(Value('00'), F('number_class')))
        parts = parts.all().annotate(gc=Substr(F('class_t'), Length('class_t') - 1, 2, output_field=CharField()))
        parts = parts.all().annotate(cm_pn=Concat(F('gc'), F('number_variation'), Value('-'), F('item'), Value('_'), F('revision')))
        parts = parts.all().order_by('gc', 'number_variation', 'number_item', 'revision')
self.fields['assembly_subpart'].queryset = parts
self.fields['assembly_subpart'].label_from_instance = \
lambda obj: "%s" % obj.full_part_number(
) + ' ' + obj.description
class AddSellerPartForm(forms.Form):
seller = forms.ModelChoiceField(queryset=None, required=False, label="Seller")
new_seller = forms.CharField(max_length=128, label='Create New Seller', required=False,
widget=forms.TextInput(attrs={'placeholder': 'Leave blank if selecting a seller.'}))
minimum_order_quantity = forms.IntegerField(required=False,
label='MOQ',
validators=[numeric],
widget=forms.TextInput(attrs={'placeholder': 'None'}))
minimum_pack_quantity = forms.IntegerField(required=False,
label='MPQ',
validators=[numeric], widget=forms.TextInput(attrs={'placeholder': 'None'}))
unit_cost = forms.DecimalField(required=True,
label='Unit Cost',
validators=[decimal, ],
widget=forms.TextInput(attrs={'placeholder': '0.00'}))
lead_time_days = forms.IntegerField(required=False,
label='Lead Time (days)',
validators=[numeric],
widget=forms.TextInput(attrs={'placeholder': 'None'}))
nre_cost = forms.DecimalField(required=False,
label='NRE Cost',
validators=[decimal, ],
widget=forms.TextInput(attrs={'placeholder': 'None'}))
ncnr = forms.BooleanField(required=False, label='NCNR')
def __init__(self, *args, **kwargs):
self.organization = kwargs.pop('organization', None)
super(AddSellerPartForm, self).__init__(*args, **kwargs)
self.fields['seller'].queryset = Seller.objects.filter(
organization=self.organization).order_by('name', )
def clean(self):
cleaned_data = super(AddSellerPartForm, self).clean()
seller = cleaned_data.get("seller")
new_seller = cleaned_data.get("new_seller")
if seller and new_seller:
raise forms.ValidationError(
('Cannot have a seller and a new seller.'),
code='invalid')
elif new_seller:
obj = Seller(name=new_seller, organization=self.organization)
obj.save()
cleaned_data['seller'] = obj
elif not seller:
raise forms.ValidationError(
('Must specify a seller.'),
code='invalid')
class FileForm(forms.Form):
file = forms.FileField()
| python |
from fastapi import APIRouter, FastAPI, Request
from ..models import Request as RequestModel
router = APIRouter()
@router.get("/_version")
def get_version(request: Request) -> dict:
return dict(version=request.app.version)
@router.get("/_status")
async def get_status() -> dict:
await RequestModel.query.gino.first()
return dict(status="OK")
def init_app(app: FastAPI) -> None:
app.include_router(router, tags=["System"])
| python |
import os
from flask import Flask, jsonify, request
from flask_restful import Api, Resource
from MailLoader import ImapConnector
import requests
import json
app = Flask(__name__)
api = Api(app)
settings = {
'imap_server': 'imap.gmail.com',
'ProcessorAgent': 'http://procagent.antispam-msu.site/fit-model',
}
@app.route('/', methods=['GET'])
def hello():
    return "Service is working! It's the learning agent. CMC MSU Antispam"
def send_to_processor(email, inbox, spam):
req_data = {
'email': email,
'inbox': inbox,
'spam': spam
}
response = requests.post(settings['ProcessorAgent'], json=req_data)
return response.status_code
class CreateModel(Resource):
@staticmethod
def post():
data = request.get_json()
        # Either a superuser has to be created, or the password has to be entered again
email = data['email']
password = data['password']
inbox_volume = data['inbox_volume']
spam_volume = data['spam_volume']
loader = ImapConnector(settings['imap_server'])
loader.connect(email, password)
inbox = loader.read_folder('INBOX', inbox_volume)
spam = loader.read_folder('Junk', spam_volume)
with open('/home/antispam/agents/LearningAgent/loaded_inbox.txt', 'w') as f:
            # indent=0 for readable output
            # json.dump(inbox, f, indent=0)
for r in inbox:
f.write(r.decode('utf-8'))
with open('/home/antispam/agents/LearningAgent/loaded_spam.txt', 'w') as f:
            # indent=0 for readable output
            # json.dump(spam, f, indent=0)
for r in spam:
f.write(r.decode('utf-8'))
send_to_processor(email, inbox, spam)
api.add_resource(CreateModel, '/create-model')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 1 13:57:09 2019
@author: Tom
"""
import sys
import json
import logging
import configparser
import pprint
from datetime import datetime
from typing import Dict
import requests
import send_gmail
INAT_NODE_API_BASE_URL = "https://api.inaturalist.org/v1/"
INAT_BASE_URL = "https://www.inaturalist.org"
class AuthenticationError(Exception):
''' Exception for an Authentication error '''
class ObservationNotFound(Exception):
''' Exception for an Observation not found error '''
def get_access_token(username: str,
password: str,
app_id: str,
app_secret: str) -> str:
"""
Get an access token using the user's iNaturalist username and password.
(you still need an iNaturalist app to do this)
:param username:
:param password:
:param app_id:
:param app_secret:
:return: the access token, example use:
headers = {"Authorization": "Bearer %s" % access_token}
"""
payload = {
'client_id': app_id,
'client_secret': app_secret,
'grant_type': "password",
'username': username,
'password': password
}
response = requests.post("{base_url}/oauth/token".\
format(base_url=INAT_BASE_URL), payload)
try:
#LOGGER.info("Access token: '%s'" % response.json()["access_token"])
return response.json()["access_token"]
except KeyError as an_error:
raise AuthenticationError("Authentication error, "
" please check credentials.") from an_error
def get_place_name(place_id):
''' Get Place name from ID '''
LOGGER.info("Looking up place: %s", place_id)
place_name = None
place = requests.get("https://api.inaturalist.org/v1/places/%s" \
% place_id)
if place.status_code == 200:
response_data = json.loads(place.text)
try:
place_name = response_data['results'][0]['display_name']
except KeyError:
LOGGER.error("place_id '%s' not found", place_id)
else:
LOGGER.error("response status = %d", place.status_code)
return place_name
def get_project_id(project_slug):
''' Get Project ID from slug (short name) '''
project_id = None
project = requests.get("https://api.inaturalist.org/v1/projects/%s" \
% project_slug)
if project.status_code == 200:
response_data = json.loads(project.text)
try:
project_id = response_data['results'][0]['id']
except KeyError:
LOGGER.error("Project ID not found")
else:
LOGGER.error("Project %s not found", project_slug)
return project_id
# pylint: disable=too-many-locals,too-many-statements
def get_project(project_id, config):
''' retrieve project information, return a list of species IDs '''
project_species = []
project = requests.get(\
'https://api.inaturalist.org/v1/projects/%s?rule_details=true' % \
project_id)
#LOGGER.info("Project Request Status: %d" % project.status_code)
if project.status_code == 200:
response_data = json.loads(project.text)
if int(response_data['total_results']) > 0:
result = response_data['results'][0]
LOGGER.info("----------------------------------")
LOGGER.info("Title: %s", result['title'])
LOGGER.info("Description: %s", result['description'])
place = result['place']
LOGGER.info(" Place: %s (%s)", place['display_name'],
place['id'])
LOGGER.debug("Number of rules: %d",
len(result['project_observation_rules']))
LOGGER.info("Taxon Rules:")
for a_rule in result['project_observation_rules']:
if a_rule['operand_type'] == 'Taxon':
taxon = a_rule['taxon']
LOGGER.info(" Taxon: %s", taxon['name'])
LOGGER.info("----------------------------------")
else:
return project_species
prev_observation_count = config.getint('last run', 'observation_count', fallback=0)
get_url = '%sobservations?project_id=%s' % (INAT_NODE_API_BASE_URL, project_id)
get_req = requests.get(get_url)
#LOGGER.info("GET project request status code: %d", get_req.status_code)
#LOGGER.info("GET project request response: '%s'", get_req.text)
if get_req.status_code == 200:
response_data = json.loads(get_req.text)
observation_count = int(response_data['total_results'])
LOGGER.debug(pprint.pformat(response_data))
LOGGER.info("Project %s observation count: %d, previously: %d",
project_id, observation_count, prev_observation_count)
else:
LOGGER.info("GET failed, status = %d", get_req.status_code)
prev_species_count = config.getint('last run', 'species_count', fallback=0)
LOGGER.info("\nGet project stats for %s", project_id)
get_stats_url = '%sobservations/species_counts' \
'?project_id=%s&place_id=any' \
'&verifiable=any&captive=any' % \
(INAT_NODE_API_BASE_URL, project_id)
get_stats_req = requests.get(get_stats_url)
if get_stats_req.status_code == 200:
response_data = json.loads(get_stats_req.text)
LOGGER.debug(pprint.pformat(response_data))
species_count = int(response_data['total_results'])
LOGGER.info("\nTotal species: %d, previous: %d\n------------",
species_count, prev_species_count)
results = response_data['results']
for a_result in results:
try:
rank = a_result['taxon']['rank']
except KeyError:
rank = '<none>'
taxon = a_result['taxon']['iconic_taxon_name']
if config.getboolean('inaturalist.org', 'showspecies'):
LOGGER.info("Name: %s\n"
"Common name: %s\n"
"Taxon ID: %s\n"
"Rank: %s\n"
"Taxon: %s\n"
"Count: %s\n",
a_result['taxon']['name'],
a_result['taxon']['preferred_common_name'],
a_result['taxon']['id'],
rank,
taxon,
a_result['count'])
project_species.append(a_result['taxon']['id'])
else:
LOGGER.error("Stats request '%s' failed: %d", get_stats_url,
get_stats_req.status_code)
# Save counts to config file
config['last run']['species_count'] = str(species_count)
config['last run']['observation_count'] = str(observation_count)
return project_species
# THIS DIDN'T WORK
def add_ob_2_proj_v1(observation_id, project_id, access_token):
''' Use V1 API to add an observation to a project '''
payload = {"observation_id": observation_id}
post_url = 'https://api.inaturalist.org/v1/projects/%s/add' % project_id
post_req = requests.post(post_url,
data=json.dumps(payload),
headers=_build_auth_header(access_token))
#LOGGER.info("POST request status code: %d", post_req.status_code)
#LOGGER.info("POST request response: '%s'", post_req.text)
if post_req.status_code == 200:
LOGGER.debug("add_ob_2_proj_v1 POST successful")
return True
return False
def add_ob_2_proj(observation_id, project_id, access_token):
''' Use V1 API to add an observation to a project '''
data = {'project_observation[observation_id]': observation_id,
'project_observation[project_id]': project_id}
post_url = '%s/project_observations' % INAT_BASE_URL
post_req = requests.post(post_url,
data=data,
headers=_build_auth_header(access_token))
if post_req.status_code == 200:
LOGGER.debug("add_ob_2_proj POST successful")
return True
LOGGER.error("POST request status code: %d", post_req.status_code)
try:
response_data = json.loads(post_req.text)
for error in response_data['errors']:
LOGGER.error("POST request response: '%s'", error)
except json.JSONDecodeError:
LOGGER.error("Failed to decode post response:\n%s", post_req.text)
return False
def _build_auth_header(access_token: str) -> Dict[str, str]:
''' This function takes the access_token and creates the Authorization
header needed by the non-V1 interface'''
return {"Authorization": "Bearer %s" % access_token}
LOG_FILE_NAME = "/tmp/results.log"
with open(LOG_FILE_NAME, "w"):
pass
LOG_FORMATTER = logging.Formatter("%(asctime)s [%(threadName)-12.12s]"
" [%(levelname)-5.5s] %(message)s")
FILE_LOG_FORMATTER = logging.Formatter("%(message)s")
LOGGER = logging.getLogger()
FILE_HANDLER = logging.FileHandler("{0}".format(LOG_FILE_NAME))
FILE_HANDLER.setFormatter(FILE_LOG_FORMATTER)
LOGGER.addHandler(FILE_HANDLER)
LOG_FORMATTER = logging.Formatter("%(message)s")
CONSOLE_HANDLER = logging.StreamHandler()
CONSOLE_HANDLER.setFormatter(LOG_FORMATTER)
LOGGER.addHandler(CONSOLE_HANDLER)
def print_obs(result):
''' print observations '''
obs_id = result['id']
taxon_id = result['taxon']['id']
# Print some information about observation
LOGGER.info("Observation ID: %s", obs_id)
LOGGER.info("Taxon ID: %s", taxon_id)
LOGGER.info("Name: %s",
result['taxon']['name'])
LOGGER.info("Preferred common name: %s",
result['taxon']['preferred_common_name'])
#LOGGER.info("Rank: %s", rank)
#LOGGER.info("Taxon: %s", taxon)
LOGGER.info("Grade: %s",
result['quality_grade'])
LOGGER.info("Observed at: %s",
result['time_observed_at'])
LOGGER.info("Created at: %s",
result['created_at'])
LOGGER.info("User Name: %s",
result['user']['name'])
#LOGGER.info("User ID: %s",
# result['user']['login'])
#LOGGER.info("Place IDs: %s",
# ",".join(str(x) for x in result['place_ids'][:5]))
#LOGGER.info("Project IDs: %s",
# ",".join(str(x) for x in result['project_ids']))
#LOGGER.info("\n")
# pylint: disable=too-many-branches
def search_new_obs(config, project_id, project_species):
''' Search for new observations for project '''
place_id = config['inaturalist.org']['place_id']
place_name = get_place_name(place_id)
if place_name is None:
LOGGER.error("Failed to find place id: '%s'", place_id)
sys.exit(6)
taxon_list = [x.strip() for x in config['inaturalist.org']['taxon_list'].split(',')]
taxon_response_count = {}
# As we find new species, put in this list
new_species = []
new_species_count = 0
new_species_add = 0
observations_added = 0
observations_add_failures = 0
# Get token information to access iNaturalist.org from config file
try:
access_token = get_access_token(config['inaturalist.org']['username'],
config['inaturalist.org']['password'],
config['inaturalist.org']['app_id'],
config['inaturalist.org']['app_secret'])
except KeyError:
config_filename = config.get('DEFAULT', 'config_filename')
LOGGER.warning("Need to define username, password, app_id, and "
"app_secret in [inaturalist.org] section of "
"configuration file: %s",
config_filename)
sys.exit(7)
excluded_observations = [x.strip() for x in \
config['last run']['excluded_observations'].split(',')]
add_obs_flag = config.getboolean('inaturalist.org',
'addobservations')
# Loop for each taxon in list
# pylint: disable=too-many-nested-blocks
for a_taxon in taxon_list:
LOGGER.info("\nQuery for research grade %s in %s "
"not in project: %s", a_taxon,
config['inaturalist.org']['project_slug'],
place_name)
# Start with page 1
page = 1
done = False
page_size = 100
while not done:
LOGGER.info("Page %d, page size: %d", page, page_size)
# Query all observations in place ID, with matching Taxon ID,
# not already in project, is research grade, on desired page
req_resp = requests.get(\
'https://api.inaturalist.org/v1/observations'
'?place_id=%s'
'&iconic_taxa=%s'
                '&not_in_project=%s'
'&quality_grade=research'
'&page=%d'
'&per_page=%s'
'&order=desc'
'&order_by=created_at' % \
(config['inaturalist.org']['place_id'],
a_taxon, project_id,
page, page_size))
LOGGER.info("Observation Request Status: %d", req_resp.status_code)
# 200 means success
if req_resp.status_code == 200:
# convert JSON response to a python dictionary
response_data = json.loads(req_resp.text)
#LOGGER.info("----------------------------------")
if page == 1:
LOGGER.info("Total responses: %d",
response_data['total_results'])
taxon_response_count[a_taxon] = \
response_data['total_results']
# If we get back no results, we are done
# pylint: disable=len-as-condition
if len(response_data['results']) == 0:
done = True
for result in response_data['results']:
if str(result['id']) in excluded_observations:
continue
new_species_flag = True
# Try to add observation to project using access_token for
# authentication
if add_obs_flag:
if add_ob_2_proj(result['id'],
project_id,
access_token):
observations_added += 1
else:
observations_add_failures += 1
excluded_observations.append(str(result['id']))
continue
# If taxon ID is not in list of species already in
# project and not is list of new species we have
# already found
# print banner, increment counter, and set flag
new_species_flag = False
taxon_id = result['taxon']['id']
if taxon_id not in project_species and \
taxon_id not in new_species:
new_species.append(taxon_id)
LOGGER.info("=== NEW SPECIES FOR PROJECT, %d ===", taxon_id)
new_species_add += 1
print_obs(result)
else:
print_obs(result)
page += 1
else:
done = True
LOGGER.info("Observation response: %s", req_resp.text)
for a_taxon in taxon_response_count:
LOGGER.info("Taxon: %s, total results: %d",
a_taxon, taxon_response_count[a_taxon])
if add_obs_flag:
# Get some project information and a list of current species
project_species = get_project(project_id, config)
LOGGER.info("\nNew Species: %d", new_species_count)
LOGGER.info("New Species Added: %d", new_species_add)
LOGGER.info("Observations Added: %d", observations_added)
LOGGER.info("Observations Add Failures: %d", observations_add_failures)
# Save excluded observations for next time
config['last run']['excluded_observations'] = ",".join(excluded_observations)
return new_species
############################################
# Main program #
############################################
# pylint: disable=too-many-statements,too-many-branches,too-many-locals
def main():
''' Main function '''
config = configparser.ConfigParser()
config['DEFAULT'] = {'loggingLevel': 'INFO'}
config['inaturalist.org'] = {'addobservations': True}
config['inaturalist.org'] = {'showspecies': True}
config['inaturalist.org'] = {'searchnew': True}
config['gmail.com'] = {'send_email': False}
config['last run'] = {'excluded_observations': ''}
if len(sys.argv) > 1:
config_filename = sys.argv[1]
else:
config_filename = 'inat_add_obs2project.ini'
try:
dummy_h = open(config_filename, 'r')
dummy_h.close()
except FileNotFoundError:
LOGGER.warning("File: '%s' not found, creating", config_filename)
# Read config file
config.read(config_filename)
config['DEFAULT']['config_filename'] = config_filename
LOGGER.setLevel(config['DEFAULT']['loggingLevel'])
LOGGER.info("Adding observations: %s",
str(config.getboolean('inaturalist.org', 'addobservations')))
LOGGER.info("Show species: %s",
str(config.getboolean('inaturalist.org', 'showspecies')))
now = datetime.utcnow()
try:
last_run = config['last run']['timestamp']
LOGGER.info("This configuration file last run at: '%s'", last_run)
except KeyError:
LOGGER.info("This configuration file has not been used before")
# Update timestamp
config['last run']['timestamp'] = str(now)
# Get project_id from slug name
try:
project_id = get_project_id(config['inaturalist.org']['project_slug'])
except KeyError:
LOGGER.error("Need to define project_slug "
"in [inaturalist.org] section of "
"configuration file: %s",
config_filename)
return 3
if project_id is None:
LOGGER.error("Need to define project_slug "
"in [inaturalist.org] section of "
"configuration file: %s",
config_filename)
return 3
# Get some project information and a list of current species
project_species = get_project(project_id, config)
if project_species is None:
LOGGER.warning("Failed to get species list ")
return 4
# These are some variables used for counting things and keeping track
# of states
search_new = config.getboolean('inaturalist.org',
'searchnew')
if search_new:
new_species = search_new_obs(config, project_id, project_species)
# Read results file into a buffer
with open(LOG_FILE_NAME, "r") as results_file:
results_buffer = results_file.read()
# Send results to the following email addresses
if config.getboolean('gmail.com',
'send_email'):
try:
dummy_gmail_config = config['gmail.com']
if send_gmail.send_email(config, LOGGER, results_buffer,
subject="inat_add_obs2project results"):
LOGGER.info("Email sent")
else:
LOGGER.error("Failed to send email")
except KeyError:
LOGGER.warning("gmail.com configuration not defined")
# Write possibly update to configuration file
config_filename = config.get('DEFAULT', 'config_filename')
try:
with open(config_filename, 'w') as config_file:
config.write(config_file)
except OSError:
LOGGER.error("Failed to write config file, '%s'", config_filename)
return 0
if __name__ == "__main__":
sys.exit(main())
| python |
# SPDX-License-Identifier: MIT
# Copyright (c) 2021 The Pybricks Authors
"""Resource files.
These resources are intended to be used with the standard ``importlib.resources``
module.
"""
UDEV_RULES = "99-pybricksdev.rules"
"""Linux udev rules file name."""
DFU_UTIL_EXE = "dfu-util.exe"
"""Windows version of dfu-util executable."""
| python |
from django.conf.urls import include, url
from rest_framework import routers
# from django.conf import settings
from . import views
router = routers.DefaultRouter()
# router.register(r'gamesession', views.GameSessionViewSet)
router.register(r"event", views.EventViewSet)
router.register(r"players", views.PlayerViewSet)
urlpatterns = [
# url(r'^api/gamesession/?$', views.GameSessionViewSet),
# url(r'^api/event/?$', views.EventViewSet),
url(r"^api/", include(router.urls)),
url(r"^eventcsv/", views.streaming_event_csv),
]
| python |
from app.core.crud import CrudView
class ProjectView(CrudView):
pass
| python |
#
# Copyright 2011, Kristofer Hallin ([email protected])
#
# Mermaid, IRC bot written by Kristofer Hallin
# [email protected]
#
import socket
import select
import urlparse
import urllib
import os
import sys
import ConfigParser
import bot
import log
import listener
import notifier
import threading
from os.path import exists
# Fork to background
def daemonize():
# Fork to background
pid = os.fork()
if pid > 0:
sys.exit(0)
# Create objects and sockets
def launch(nickname, server, port, channel, listenerport, paths):
# Create Bot instance
mermaid = bot.Bot(nickname, server, port, channel)
ircsocket = mermaid.create()
# Create listener instance
talker = listener.Listener(listenerport, ircsocket, channel)
lsocket = talker.create()
# Notify support
notify = notifier.Notifier(ircsocket, channel, paths)
return talker, mermaid, notify
# Create threads and start the bot
def create_threads(talker, mermaid, notify):
# Create listener thread
listener_thread = threading.Thread(target = talker.start)
# Creat bot thread
bot_thread = threading.Thread(target = mermaid.start)
# Notify thread
notify_thread = threading.Thread(target = notify.start)
# Start threads
listener_thread.start()
bot_thread.start()
notify_thread.start()
def main():
# Configuration
nickname = "XXX"
server = "irc.quakenet.org"
port = 6667
channel = "XXX"
listenerport = 1234
paths = "XXX"
daemonize()
talker, mermaid, notify = launch(nickname, server, port, channel, listenerport, paths)
create_threads(talker, mermaid, notify)
if __name__ == '__main__':
main()
| python |
from tweetsole.authorizer import Authorizer
import pytest
import os
def test_has_password():
auth = Authorizer("test")
file = open(auth.path + "/test.enc", 'w+')
file.write("test, test, test,test")
output = auth.has_password()
os.remove(auth.path + "/test.enc")
assert output == True
def test_user_exists():
auth = Authorizer("test")
file = open(auth.path + "/test.csv", 'w+')
file.write("test, test, test,test")
output = auth.user_exists()
os.remove(auth.path + "/test.csv")
assert output == True
def test_split_keys():
keys = [1, 4, 5, 6]
auth = Authorizer()
assert sum(auth.split_keys(keys)) == 16
| python |
#!/usr/bin/env python
import sys
import os
from sets import Set
#-----------------------------------------
# UTILS:
#-----------------------------------------
def Execute(command):
print(command)
os.system(command)
def Execute_py(command, thisTask, step):
print(command)
scriptName = str(step)+'_'+str(thisTask)+'_'+"script.sh"
f = open(scriptName,"w")
f.write("#!/bin/bash\n")
f.write("python "+command)
f.close()
os.system("bash "+scriptName)
def getCommand(config,error,seed,thisTask,step,numDialogs,path):
# removed the -l policy settings - do this in config now.
return "{}/simulate.py -C {} -r {} -s {} -n {} --nocolor > tra_{}_{}.log".format(path,config,str(error),\
str(seed),str(numDialogs),str(thisTask),str(step))
def seed(step, totalDialogues, totalTasks, thisTask):
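    # Each (step, task) pair gets its own disjoint block of `totalDialogues`
    # seeds (offset by 10), so no two grid jobs reuse the same random seed.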
return (step-1)*totalDialogues*totalTasks + (thisTask-1)*totalDialogues + 10
def getName(name,task, step):
return name+"_"+str(task)+"."+str(step)
def getDictParam(name,task, step):
fullname = getName(name, task, step)
dictionary = fullname+".dct"
parameters = fullname+".prm"
return [dictionary, parameters]
def addPrior(configname):
# TODO - this is wrong almost certain.
config=open(configname, 'a+')
for line in config:
if "[gpsarsa_" in line:
config.write("saveasprior = True"+"\n")
break
#config.write("\nMCGPTDPOLICY: SAVEASPRIOR = T\n")
config.close()
def extractGlobalandLocalPolicies(line):
elems = line.strip().split('=')[1].lstrip().split(';');
return elems
def getGlobalandLocalPolicies(configs, term="inpolicyfile"):
policyset=Set([]) # just use list?
for config in configs:
configfile=open(config, 'r')
for line in configfile:
if term in line:
elems=extractGlobalandLocalPolicies(line)
for elem in elems:
policyset.add(elem)
configfile.close()
names = list(policyset)
if len(names) ==1:
if names[0] == '':
names = []
return names
"""
def addConfig(configname, section, polname):
config = open(configname, 'a+')
for line in config:
if section in line:
# TODO - note this will only work with the one domain for now
config.write("inpolicyfile = "+polname+"\n")
config.write("outpolicyfile = "+polname+"\n")
break
config.close()
"""
#-----------------------------------------
# SCRIPT:
#-----------------------------------------
if len(sys.argv)<6:
print("usage: grid_pyGPtraining.py totaldialogues step pathtoexecutable errorrate config1 config2 config3...")
exit(1)
print(sys.argv)
totalDialogues = int(sys.argv[1])
step = int(sys.argv[2])
path = sys.argv[3]
error = int(sys.argv[4]) # int() doesn't actually matter here
configs = []
i=5
# as in run_grid_pyGPtraining.py -- only entering a single config
while i<len(sys.argv):
configs.append(sys.argv[i])
i=i+1
thisTask = 1
totalTasks = 10
if 'SGE_TASK_ID' in os.environ:
thisTask = int(os.environ['SGE_TASK_ID'])
totalTasks = int(os.environ['SGE_TASK_LAST'])
# Write the config file for this task and step number, working from raw config input
suffConfigs=[]
policynames = getGlobalandLocalPolicies(configs, term="outpolicyfile")
for i in range(len(configs)):
configName = configs[i].split('/')[-1]
suffConfig = str(thisTask)+"_"+str(step)+"_"+configName #+configs[i]
suffConfigs.append(suffConfig)
outfile=open(suffConfig, 'w');
openConfig = open(configs[i],'r')
foundIN, foundOUT = False, False
for line in openConfig:
# Note: need to be careful of comments in config file. will still be read here ...
if 'outpolicyfile' in line:
if '#' in line:
print("Warning - be carefull about comments in config - this isnt #inpolicyfile is it?")
#elems=extractGlobalandLocalPolicies(line)
elems = policynames
policies=[]
for elem in elems:
policies.append(getName(elem,thisTask, step)) # such that out has same task and step as config file
if len(policies) > 1:
policy=';'.join(policies)
else:
policy=''.join(policies)
outfile.write('inpolicyfile = ' +policy+"\n")
outfile.write('outpolicyfile = '+policy+"\n")
foundIN = True
continue
else:
# for rpg policy
EpsDenominator = 10000.0
start = 1 - (1-0.1)*float(step-1)*totalDialogues/EpsDenominator
if 'epsilon_start = 1' in line:
outfile.write('epsilon_start = '+ str(start) + '\n')
elif 'learning = True' in line:
outfile.write('learning = False\n')
elif 'inpolicyfile' in line:
continue
elif 'scale' in line:
outfile.write('scale = 1\n')
else:
outfile.write(line)
if not foundIN:
exit("you must specify inpolicyfile - can add section in this script here to write it to config")
outfile.close()
openConfig.close()
"""
if len(names) == 0 or len(names) == 1:
names = [ 'z' ]
"""
# Dont need this if explictly writing infile and outfile now
"""
for name in names:
[dictionary, parameters] = getDictParam(name,thisTask, step)
if step > 1:
[prevDictionary, prevParameters] = getDictParam(name,thisTask, step-1)
command="cp "+prevDictionary+" "+dictionary
Execute(command)
command="cp "+prevParameters+" "+parameters
Execute(command)
"""
"""
if len(names)==1:
[dictionary, parameters] = getDictParam(names[0],thisTask, step)
for config in suffConfigs:
# TODO - not sure how to deal with with. check with milica re how she wants to deal with in and out params etc
# see numprior and saveasprior options
policyName = dictionary[0:-4] # remove the .dct part
addConfig(config, section="[policy_", polname=policyName)
#addConfig(config, "OUT",dictionary, parameters)
#addConfig(config, "IN", dictionary, parameters)
"""
seed=seed(step, totalDialogues, totalTasks, thisTask);
if len(suffConfigs)>1:
for config in suffConfigs:
command=getCommand(config,error,seed,thisTask,step,totalDialogues,path)
Execute(command)
seed+=totalDialogues
else:
# if there is only one domain
"""
if step == 2:
f = open(suffConfigs[0],'r')
filedata = f.read()
f.close()
newdata = filedata.replace("epsilon_start = 1","epsilon_start = 0.55")
f = open(suffConfigs[0],'w')
f.write(newdata)
f.close()
elif step > 2:
f = open(suffConfigs[0],'r')
filedata = f.read()
f.close()
newdata = filedata.replace("epsilon_start = 0.55","epsilon_start = 0.1")
f = open(suffConfigs[0],'w')
f.write(newdata)
f.close()
"""
command=getCommand(suffConfigs[0],error,seed,thisTask,step,totalDialogues,path)
Execute_py(command, thisTask, step)
# NOT DEALING WITH PRIOR FOR NOW
"""
for config in suffConfigs:
addPrior(config)
command=getCommand(config,error,seed,thisTask,step,1,path)
Execute(command)
seed+=1
"""
#END OF FILE
| python |
import json
class Computer:
def __init__(self):
self.content_danmu = []
self.content_admin = []
def get_message_danmu(self, mode):
if self.content_danmu:
            # _danmu is the first danmu (bullet comment) stored in the list
_danmu = self.content_danmu[0]
if mode == 'json_danmu':
                # Gather all fields for the message
_type = 'danmu'
_text = self.get_text(_danmu)
_nickname = self.get_nickname(_danmu)
j = {"type": _type, "nickname": _nickname, "text": _text}
                # Serialize the dict to a JSON string
_msg = json.dumps(j, ensure_ascii=False)
print('(Danmu)' + _msg)
return _msg
def get_message_admin(self, mode):
if self.content_admin:
            # _admin is the first admin message stored in the list
_admin = self.content_admin[0]
if mode == 'json_admin':
                # Gather all fields for the message
_type = 'admin'
_text = self.get_text(_admin)
                # Extract the sender name and the gift from the text
_gift = self.get_gift(_text)
_nickname = self.get_gift_sender(_text)
j = {"type": _type, "nickname": _nickname, "gift": _gift}
                # Serialize the dict to a JSON string
_msg = json.dumps(j, ensure_ascii=False)
print('(Admin)' + _msg)
return _msg
def get_text(self, content):
return str(content['text'])
def get_nickname(self, content):
return content['nickname']
def get_uid(self, content):
return content['uid']
def pop_danmu(self):
self.content_danmu.pop(0)
def pop_admin(self):
self.content_admin.pop(0)
def get_gift_sender(self,text):
        start = text.find('谢谢') + 2  # find() returns the start of the match, so add the marker length to skip past it
end = text.find('赠送滴')
sender = str(text[start:end])
return sender
def get_gift(self, text):
        start = text.find('赠送滴') + 3  # find() returns the start of the match, so add the marker length to skip past it
end = text.find('~~~')
gift = str(text[start:end])
return gift
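    # For example (hypothetical text), given '谢谢Alice赠送滴小花花~~~',
    # get_gift_sender() returns 'Alice' and get_gift() returns '小花花'.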
# Message dict fields:
# 'text'        -- str -- danmu (bullet comment) text
# 'nickname'    -- str -- nickname
# 'uid'         -- int -- user id
# 'timeline'    -- str -- timestamp
# 'dm_type'     -- int -- danmu type
# 'guard_level' -- int -- guard level
# 'medel[00]'   -- int -- fan medal level
# 'medel[02]'   -- int -- fan medal streamer name
| python |
# build_compose.py
# ================
#
# This script builds the Docker Compose file used to launch all containers
# needed by the tool, with proper volume mounts, environment variables, and
# labels for behaviors and network conditions as specified in the configuration.
#
# The script generally assumes that it is being run from the root directory of
# the tool, however this can be overridden by passing in a command line option
# `--src`, `-s` specifying the path to the tool directory.
#
# In the event a custom configuration file is desired, the command line option
# `--config`, `-c` can be used to specify the path of the config file.
#
# The tool utilizes an environment file (.env) located in its root directory. If
# a different location is desired, the command line option `--env`, `-e` can be
# used to specify the path of the environment file.
#
# Collected data defaults to a `data/` directory in the root of the tool. To
# output data to a different directory, the command line option `--output`, `-o`
# can be used to specify the path to the data directory.
#
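# Example invocation (hypothetical paths; run from the tool's root directory):
#
#   python build_compose.py --src . --config config.json --env .env --output data/
#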
import argparse
import copy
import json
import pathlib
import yaml
from pathlib import Path
def main(tool_dir, config_file, env_file, data_dir):
print("""
Hello! Welcome to DANE.
____ _ _ _ _____ __/ \
| _ \ / \ | \ | | ____| ___/@ )
| | | |/ _ \ | \| | _| O \
| |_| / ___ \| |\ | |___ \_____) \
|____/_/ \_\_| \_|_____| U \_____\
""")
if config_file is None:
config_file = str(Path(tool_dir, 'config.json'))
with open(config_file, 'r') as infile:
config = json.load(infile)
with open(Path(tool_dir, 'docker/compose/base.yml'), 'r') as infile:
compose_base = yaml.full_load(infile)
with open(Path(tool_dir, 'docker/compose/components.yml'), 'r') as infile:
components = yaml.full_load(infile)
# Our compose file to write
compose = copy.deepcopy(compose_base)
# Get all desired network conditions
conditions = config['conditions']
# Get all target behavior scripts to run
behaviors = config['behaviors']
# For each set of desired network conditions, we'll add a network and corres-
# ponding `router` service into the compose file.
#
# Within each set of network conditions, add `client` services for each target
# behavior, connected to the proper network.
# The env and data paths are used in the Compose file and are therefore
# relative to the `built` directory in the tool. If the provided path is not
# relative then it must be absolute.
# We should also check that the env file exists.
if env_file is None:
path_to_check = Path(tool_dir, '.env')
if not path_to_check.exists():
print(f"""
Looks like your environment file doesn't exist yet. Path: {path_to_check}
We'll go ahead and create the file for you.
""")
with open(path_to_check, 'w') as outfile:
outfile.write("""
VPN_USERNAME=
VPN_USERGROUP=
VPN_PASSWORD=
""")
if config['vpn']['enabled']:
print(f"""
Since you have the VPN enabled, you'll need to add your login credentials now.
If you need guidance, consult https://dane-tool.github.io/dane/guide/quickstart
""")
input(f"Please add your VPN login credentials to {path_to_check} and press Enter when you're done.")
else:
print(f"""
Make sure to add your login credentials to the file if you plan on using a VPN!
""")
env_file = '../.env'
else:
env_file = str(Path(env_file).absolute())
if data_dir is None:
data_dir = '../data/'
else:
data_dir = str(Path(data_dir).absolute())
router = copy.deepcopy(components['router'])
compose['services']['router'] = router
for condition in conditions: # -- Networks, routers
latency = condition['latency']
loss = condition['loss']
random = condition['random']
later_latency = condition['later_latency']
later_loss = condition['later_loss']
later_start = condition['later_start']
# Create the network and router referencing it.
client_network = copy.deepcopy(components['network'])
router_network = copy.deepcopy(components['network'])
network_name = f'{latency}-{loss}-{random}-{later_latency}-{later_loss}-{later_start}'
client_network_name = f'client-lossem-{latency}-{loss}-{random}-{later_latency}-{later_loss}-{later_start}'
router_network_name = f'router-lossem-{latency}-{loss}-{random}-{later_latency}-{later_loss}-{later_start}'
compose['networks'][client_network_name] = client_network
compose['networks'][router_network_name] = router_network
lossem = copy.deepcopy(components['lossem'])
lossem_name = f'lossem-{network_name}'
lossem['volumes'].append(f'{data_dir}:/data/')
lossem['networks'][client_network_name] = lossem['networks'].pop('CLIENT_NETWORK')
lossem['networks'][client_network_name]['aliases'].pop()
lossem['networks'][client_network_name]['aliases'].append('lossem-' + client_network_name)
lossem['networks'][router_network_name] = lossem['networks'].pop('ROUTER_NETWORK')
lossem['networks'][router_network_name]['aliases'].pop()
lossem['networks'][router_network_name]['aliases'].append('lossem-' + router_network_name)
router['networks'][router_network_name] = dict()
router['networks'][router_network_name]['aliases'] = list()
router['networks'][router_network_name]['aliases'].append('router-' + router_network_name)
lossem['labels']['com.dane.lossem.latency'] = latency
lossem['labels']['com.dane.lossem.loss'] = loss
lossem['labels']['com.dane.lossem.random'] = random
lossem['labels']['com.dane.lossem.later_latency'] = later_latency
lossem['labels']['com.dane.lossem.later_loss'] = later_loss
lossem['labels']['com.dane.lossem.later_start'] = later_start
compose['services'][lossem_name] = lossem
# Create the clients referencing each behavior. These should also reference
# the network and router we just added.
for behavior in behaviors: # -- Clients
client = copy.deepcopy(components['client'])
# If the behavior is to use a custom script, we strip out 'custom/'
# from the behavior to make the compose service name compatible.
behavior_name = behavior if not behavior.startswith('custom/') else behavior[len('custom/'):]
client_name = f'client-{network_name}-{behavior_name}'
client['depends_on'].append(lossem_name)
client['networks'].append(client_network_name)
client['labels']['com.dane.behavior'] = behavior
client['env_file'].append(env_file)
client['volumes'].append(f'{data_dir}:/data/')
# Configure whether or not the vpn will be set up, the host address,
# etc by passing labels to each client.
client['labels']['com.dane.vpn.enabled'] = config['vpn']['enabled']
client['labels']['com.dane.vpn.server'] = config['vpn']['server']
# Specify shared memory
client['shm_size'] = config['system']['shared_memory_size']
# NOTE: This doesn't handle duplicates/replicas. The service name
# will be the same and thus will share the same key in the dict.
compose['services'][client_name] = client
built_file = Path(tool_dir, 'built/docker-compose.yml')
built_file.parent.mkdir(parents=True, exist_ok=True)
with open(built_file, 'w') as outfile:
outfile.writelines([
'# Built by `build_compose.py` during `compose` phase of tool use.\n',
'# Please do not edit, your changes will be overwritten during the next run.\n',
'\n'
])
yaml.dump(compose, outfile)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-s', '--src',
default='.',
help='Path to the root directory of the tool.'
)
parser.add_argument(
'-c', '--config',
default=None,
help='File path of the desired configuration file.'
)
parser.add_argument(
'-e', '--env',
default=None,
help='File path of the desired environment file.'
)
parser.add_argument(
'-o', '--output',
default=None,
help='Path to the data output directory for the tool.'
)
args = parser.parse_args()
tool_dir = args.src
config_file = args.config
env_file = args.env
data_dir = args.output
main(tool_dir, config_file, env_file, data_dir)
| python |
import unittest
from Spheral import *
#-------------------------------------------------------------------------------
# Base class to unit test the ConstantBoundary boundary condition.
#-------------------------------------------------------------------------------
class ConstantBoundaryTest:
def testApplyBoundary(self):
assert self.nodes.numInternalNodes == self.n
assert self.nodes.numGhostNodes == 0
self.boundary.setGhostNodes(self.nodes)
self.boundary.applyGhostBoundary(self.nodes.massDensity())
self.boundary.applyGhostBoundary(self.field)
assert self.nodes.numGhostNodes == self.nghost
assert self.boundary.numConstantNodes == self.nghost
ghostNodes = self.boundary.ghostNodes(self.nodes)
assert len(ghostNodes) == self.nghost
for i in ghostNodes:
r = self.nodes.positions()[i].magnitude()
assert r > self.rmax and r < self.rbound
assert abs(self.field[i] + r) < self.tiny
assert abs(self.nodes.massDensity()[i] - self.rho) < self.tiny
#-------------------------------------------------------------------------------
# 1-D test.
#-------------------------------------------------------------------------------
class ConstantBoundaryTest1d(ConstantBoundaryTest, unittest.TestCase):
def setUp(self):
self.tiny = 1.0e-5
from DistributeNodes import distributeNodes1d
gamma = 5.0/3.0
mu = 1.0
neighborSearchType = Neighbor1d.NeighborSearchType.GatherScatter
numGridLevels = 10
topGridCellSize = 0.25
origin = Vector1d(0.0)
kernelExtent = 2.0
self.rho = 1.0
H1 = SymTensor1d(1.0/0.01)
self.eos = GammaLawGasMKS1d(gamma, mu)
self.nodes = SphNodeList1d(self.eos)
self.neighbor = NestedGridNeighbor1d(self.nodes,
neighborSearchType,
numGridLevels,
topGridCellSize,
origin,
kernelExtent)
self.nodes.registerNeighbor(self.neighbor)
self.n = 100
self.nghost = 20
self.rmin = 0.0
self.rmax = 1.0
self.rbound = 1.2
distributeNodes1d([(self.nodes, self.n + self.nghost, (self.rmin, self.rbound))])
self.nodes.setMass(ScalarField1d(self.nodes, 0.5))
self.nodes.setHfield(SymTensorField1d(self.nodes, H1))
self.nodes.setMassDensity(ScalarField1d(self.nodes, self.rho))
constantNodeIDs = vector_of_int()
for i in xrange(self.n, self.n + self.nghost):
constantNodeIDs.append(i)
self.field = ScalarField1d(self.nodes)
for i in constantNodeIDs:
self.field[i] = -(self.nodes.positions()[i].magnitude())
self.boundary = ConstantBoundary1d(self.nodes, constantNodeIDs)
assert self.boundary.numConstantNodes == self.nghost
self.nodes.deleteNodes(constantNodeIDs)
assert self.nodes.numNodes == self.n
return
#-------------------------------------------------------------------------------
# 2-D test.
#-------------------------------------------------------------------------------
class ConstantBoundaryTest2d(ConstantBoundaryTest, unittest.TestCase):
def setUp(self):
self.tiny = 1.0e-5
from GenerateNodeDistribution2d import GenerateNodeDistribution2d
from ParMETISDistributeNodes import distributeNodes2d
gamma = 5.0/3.0
mu = 1.0
neighborSearchType = Neighbor2d.NeighborSearchType.GatherScatter
numGridLevels = 10
topGridCellSize = 0.25
origin = Vector2d(0.0)
kernelExtent = 2.0
self.rho = 1.0
seed = "constantDTheta"
self.eos = GammaLawGasMKS2d(gamma, mu)
self.nodes = SphNodeList2d(self.eos)
self.neighbor = NestedGridNeighbor2d(self.nodes,
neighborSearchType,
numGridLevels,
topGridCellSize,
origin,
kernelExtent)
self.nodes.registerNeighbor(self.neighbor)
nRadial, nTheta = 50, 50
nRadialGhost, nThetaGhost = 10, 50
self.rmin = 0.0
self.rmax = 1.0
self.rbound = 1.2
generator = GenerateNodeDistribution2d(nRadial, nTheta, self.rho, seed,
rmin = self.rmin,
rmax = self.rbound,
nNodePerh = 2.01)
n1 = generator.globalNumNodes()
nodeInfo = distributeNodes2d([(self.nodes, n1, generator)])
self.nodes.setMassDensity(ScalarField2d(self.nodes, self.rho))
constantNodeIDs = vector_of_int()
for i in xrange(n1):
if self.nodes.positions()[i].magnitude() > self.rmax:
constantNodeIDs.append(i)
self.nghost = len(constantNodeIDs)
self.n = self.nodes.numNodes - self.nghost
self.field = ScalarField2d(self.nodes)
for i in constantNodeIDs:
self.field[i] = -(self.nodes.positions()[i].magnitude())
self.boundary = ConstantBoundary2d(self.nodes, constantNodeIDs)
assert self.boundary.numConstantNodes == self.nghost
self.nodes.deleteNodes(constantNodeIDs)
assert self.nodes.numNodes == self.n
return
#-------------------------------------------------------------------------------
# 3-D test.
#-------------------------------------------------------------------------------
class ConstantBoundaryTest3d(ConstantBoundaryTest, unittest.TestCase):
def setUp(self):
self.tiny = 1.0e-5
from GenerateNodeDistribution3d import GenerateNodeDistribution3d
from ParMETISDistributeNodes import distributeNodes3d
gamma = 5.0/3.0
mu = 1.0
neighborSearchType = Neighbor3d.NeighborSearchType.GatherScatter
numGridLevels = 10
topGridCellSize = 10.0
origin = Vector3d(0.0)
kernelExtent = 2.0
self.rho = 1.0
seed = "lattice"
self.eos = GammaLawGasMKS3d(gamma, mu)
self.nodes = SphNodeList3d(self.eos)
self.neighbor = NestedGridNeighbor3d(self.nodes,
neighborSearchType,
numGridLevels,
topGridCellSize,
origin,
kernelExtent)
self.nodes.registerNeighbor(self.neighbor)
nx, ny, nz = 20, 20, 20
nxGhost, nyGhost, nzGhost = 10, 10, 10
xmin, xmax = (-1.2, -1.2, -1.2), (1.2, 1.2, 1.2)
self.rmin = 0.0
self.rmax = 1.0
self.rbound = 1.2
generator = GenerateNodeDistribution3d(nx + nxGhost,
ny + nyGhost,
nz + nzGhost, self.rho, seed,
xmin = xmin,
xmax = xmax,
rmin = self.rmin,
rmax = self.rbound,
nNodePerh = 2.01)
n1 = generator.globalNumNodes()
nodeInfo = distributeNodes3d([(self.nodes, n1, generator)])
self.nodes.setMassDensity(ScalarField3d(self.nodes, self.rho))
constantNodeIDs = vector_of_int()
for i in xrange(n1):
if self.nodes.positions()[i].magnitude() > self.rmax:
constantNodeIDs.append(i)
self.nghost = len(constantNodeIDs)
self.n = self.nodes.numNodes - self.nghost
self.field = ScalarField3d(self.nodes)
for i in constantNodeIDs:
self.field[i] = -(self.nodes.positions()[i].magnitude())
self.boundary = ConstantBoundary3d(self.nodes, constantNodeIDs)
assert self.boundary.numConstantNodes == self.nghost
self.nodes.deleteNodes(constantNodeIDs)
assert self.nodes.numNodes == self.n
return
if __name__ == "__main__":
unittest.main()
| python |