from . import Plugin
class CatchallPlugin(Plugin):
    """
    Turns metrics that aren't matched by any other plugin into something a bit more useful (than not having them at all).
    Another way to look at it: plugin:catchall is the list of targets you could organize better ;)
    Note that the assigned tags (i.e. source tags) are best guesses. We can't know for sure!
    (this description goes for all catchall plugins)
    """
    priority = -5
targets = [
{
'match': '^(?P<n1>[^\.=]+)\.?(?P<n2>[^\.=]*)\.?(?P<n3>[^\.=]*)\.?(?P<n4>[^\.=]*)\.?(?P<n5>[^\.=]*)\.?(?P<n6>[^\.=]*)\.?(?P<n7>[^\.=]*)$',
'target_type': 'unknown',
'configure': [
lambda self, target: self.add_tag(target, 'what', 'unknown'),
lambda self, target: self.add_tag(target, 'source', 'unknown')
]
},
]
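# Illustration only (the plugin is normally loaded by graph-explorer, not run
# directly): the catchall regex above splits a dotted metric name into up to
# seven best-guess groups. The sample metric below is hypothetical.
if __name__ == '__main__':
    import re
    pattern = re.compile(
        r'^(?P<n1>[^\.=]+)\.?(?P<n2>[^\.=]*)\.?(?P<n3>[^\.=]*)\.?(?P<n4>[^\.=]*)'
        r'\.?(?P<n5>[^\.=]*)\.?(?P<n6>[^\.=]*)\.?(?P<n7>[^\.=]*)$')
    match = pattern.match('servers.web01.cpu.idle')
    if match:
        # unmatched groups come back as empty strings (n5..n7 here)
        print(match.groupdict())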
# vim: ts=4 et sw=4:
|
python
|
import sys
import logging
import numpy as np
import sympy as sp
import ANNarchy_future.api as api
import ANNarchy_future.parser as parser
class Symbol(sp.core.Symbol):
"""Subclass of sp.core.Symbol allowing to store additional attributes.
"""
    def __new__(cls, name, method='euler'):
        obj = sp.core.Symbol.__new__(cls, name)
        obj.method = method
return obj
class PreNeuron(object):
"""
Placeholder for presynaptic attributes.
"""
pass
class PostNeuron(object):
"""
Placeholder for postsynaptic attributes.
"""
pass
class StandaloneObject(object):
"""
    Mimics a Neuron or Synapse in standalone mode.
"""
def __init__(self, attributes):
self.attributes = attributes
class Equations(object):
"""Context to define equations.
It should be primarily used inside a `Neuron` or `Synapse` class,
but can also be used in a standalone mode by providing a list of attributes:
```python
with Equations(symbols=['tau', 'v', 'r']) as n:
n.dv_dt = (n.cast(0.04) - n.v)/n.tau
n.r = sp.tanh(n.v)
print(n)
```
"""
def __init__(self,
symbols : list = None,
method : str ='euler',
neuron = None,
synapse = None ):
"""Creates the Equations context.
Args:
symbols: list of attributes when in standalone mode.
method: numerical method (euler, midpoint, exponential, rk4, event-driven)
neuron: Neuron instance (passed by the population).
synapse: Synapse instance (passed by the projection).
"""
# Logger
self._logger = logging.getLogger(__name__)
self._logger.debug("Equations() created.")
# Standalone mode
if neuron is None and synapse is None and symbols is not None:
self.object = StandaloneObject(symbols)
self.object._random_variables = {}
self._logger.info("Custom symbols: " + str(symbols))
elif neuron is not None:
self.object = neuron
if not hasattr(neuron, '_random_variables'):
self.object._random_variables = {}
elif synapse is not None:
self.object = synapse
if not hasattr(synapse, '_random_variables'):
self.object._random_variables = {}
else:
self._logger.error("Equations() requires one argument among `symbols`, `neuron` or `synapse`.")
sys.exit(1)
# Numerical method
self.method = method
# Built-in symbols
self.symbols = parser.symbols_dict.copy()
# List of tuples (name, Equation)
self.equations = []
# List of random variables
self.random_variables = {}
# Start recording assignments
self._started = False
###########################################################################
# Context management
###########################################################################
def __enter__(self):
if isinstance(self.object, api.Neuron):
for attr in self.object.attributes:
# Symbol
symbol = sp.Symbol(attr)
self.symbols[attr] = symbol
setattr(self, attr, symbol)
if attr in self.object._parser.variables:
# Add derivative
symbol = sp.Symbol("d" + attr + "/dt")
self.symbols['d'+attr+'_dt'] = symbol
setattr(self, 'd'+attr+'_dt', symbol)
self._logger.debug("Neuron symbols: " + str(self.symbols))
elif isinstance(self.object, api.Synapse):
for attr in self.object.attributes:
# Symbol
symbol = sp.Symbol(attr)
self.symbols[attr] = symbol
setattr(self, attr, symbol)
if attr in self.object._parser.variables:
# Add derivative
symbol = sp.Symbol("d" + attr + "/dt")
self.symbols['d'+attr+'_dt'] = symbol
setattr(self, 'd'+attr+'_dt', symbol)
self.pre = PreNeuron()
self.post = PostNeuron()
for attr in self.object.pre_attributes:
# Symbol
symbol = sp.Symbol("pre."+attr)
self.symbols["pre."+attr] = symbol
setattr(self.pre, attr, symbol)
for attr in self.object.post_attributes:
# Symbol
symbol = sp.Symbol("post."+attr)
self.symbols["post."+attr] = symbol
setattr(self.post, attr, symbol)
self._logger.debug("Synapse symbols: " + str(self.symbols))
else: # Custom set of variables
for attr in self.object.attributes:
# Symbol
symbol = sp.Symbol(attr)
self.symbols[attr] = symbol
setattr(self, attr, symbol)
# Derivative if needed
symbol = sp.Symbol("d" + attr + "/dt")
self.symbols['d'+attr+'_dt'] = symbol
setattr(self, 'd'+attr+'_dt', symbol)
self._started = True
return self
def __exit__(self, exc_type, exc_value, traceback):
self._logger.info(str(self))
def __str__(self):
string = ""
for name, dist in self.random_variables.items():
string += name + " = " + dist.human_readable() + "\n"
for name, eq in self.equations:
string += sp.ccode(self.symbols[name]) + " = " + sp.ccode(eq) + "\n"
return string
def __getattribute__(self, name):
return object.__getattribute__(self, name)
def __setattr__(self, name, value):
# After __enter__(), track modifications to the variables
if hasattr(self, '_started') and self._started:
# Do not assign equations to symbols, just store them
if name in self.symbols.keys():
self.equations.append((name, value))
else:
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, value)
###########################################################################
# Built-in vocabulary
###########################################################################
@property
def t(self):
"Current time in ms."
return self.symbols['t']
@property
def dt(self):
"Step size in ms."
return self.symbols['dt']
def ite(self, cond, then, els):
"""If-then-else ternary operator.
Equivalent to:
```python
def ite(cond, then, els):
if cond:
return then
else:
return els
```
Args:
cond: condition.
then: returned value when cond is true.
            els: returned value when cond is false.
"""
return sp.Piecewise((then, cond), (els, True))
    def clip(self, val: sp.Symbol, min: sp.Symbol = None, max: sp.Symbol = None):
"""Sets the lower and upper bounds of a variable.
Equivalent to:
```python
def clip(val, min, max):
if val < min:
return min
elif val > max:
return max
else:
return val
```
Args:
val: variable.
min: lower bound.
max: upper bound.
"""
if min is None and max is None: # Do nothing
return val
elif min is not None and max is None: # Lower bound
return sp.Piecewise((min, val<min), (val, True))
elif min is None and max is not None: # Upper bound
return sp.Piecewise((max, val>max), (val, True))
else: # Two-sided clip
return sp.Piecewise((min, val<min), (max, val>max), (val, True))
def cast(self, val:float) -> sp.Symbol:
"""Cast floating point numbers to symbols in order to avoid numerical errors.
Args:
val (float):
"""
return sp.Symbol(str(float(val)))
###########################################################################
# Random distributions
###########################################################################
def Uniform(self, min:float, max:float):
"""
Uniform distribution between `min` and `max`.
Args:
min: lower bound.
max: upper bound.
"""
name = "__rand__" + str(len(self.object._random_variables))
obj = parser.RandomDistributions.Uniform(name, min, max)
self.random_variables[name] = obj
self.object._random_variables[name] = obj
return sp.Symbol(name)
def Normal(self, mu:float, sigma:float):
"""
Normal distribution with mean `mu` and standard deviation `sigma`.
Args:
mu: mean.
sigma: standard deviation.
"""
name = "__rand__" + str(len(self.object._random_variables))
obj = parser.RandomDistributions.Normal(name, mu, sigma)
self.random_variables[name] = obj
self.object._random_variables[name] = obj
return sp.Symbol(name)
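# Minimal standalone sketch mirroring the class docstring above; the symbol names
# 'tau', 'v' and 'r' are illustrative and assume the ANNarchy_future package is importable.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    with Equations(symbols=['tau', 'v', 'r']) as n:
        # leaky integration of v, then a bounded nonlinearity for r
        n.dv_dt = (n.cast(0.04) - n.v) / n.tau
        n.r = n.clip(sp.tanh(n.v), n.cast(0.0), n.cast(1.0))
    print(n)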
|
python
|
from juriscraper.OpinionSite import OpinionSite
from datetime import datetime
import re
class Site(OpinionSite):
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
self.url = "http://www.courts.state.va.us/scndex.htm"
def _get_case_names(self):
path = "//p[./a[contains(./@href, '.pdf')]]/b/text()"
return list(self.html.xpath(path))
def _get_docket_numbers(self):
path = "//p[./a[contains(./@href, '.pdf')]]/a/text()"
return [
doc.strip()
for doc in self.html.xpath(path)
if doc.strip().isdigit()
]
def _get_case_dates(self):
dates = []
path = "//p[./a[contains(./@href, '.pdf')]]/text()"
pattern = r"^\s*\d{2}/\d{2}/\d{4}"
for s in self.html.xpath(path):
date_str = re.findall(pattern, s)
if len(date_str):
dates.append(
datetime.strptime(date_str[0].strip(), "%m/%d/%Y").date()
)
return dates
def _get_download_urls(self):
path = "//p[./a[contains(./@href, '.pdf')]]/a[2]/@href"
urls = [url for url in self.html.xpath(path) if url.endswith(".pdf")]
return urls
def _get_precedential_statuses(self):
return ["Published"] * len(self.case_names)
|
python
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from core.views import JobList, JobDetail, JobCreate
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'emplea_do.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', JobList.as_view()),
url(r'^job/new/$', JobCreate.as_view()),
url(r'^job/(?P<pk>\d+)/$', JobDetail.as_view()),
url(r'^admin/', include(admin.site.urls)),
)
|
python
|
import RPi.GPIO as GPIO
import time
from pathlib import Path
import os
import sys
relay_signal = 4
# GPIO setup
GPIO.setmode(GPIO.BCM)
GPIO.setup(relay_signal, GPIO.OUT)
# pulls the signal pin high signalling the relay switch to close
def garage_btn_down(pin):
GPIO.output(pin, GPIO.HIGH) # Close OSC
# pulls the signal pin low signalling the relay switch to open
def garage_btn_up(pin):
GPIO.output(pin, GPIO.LOW) # Open OSC
# presses the garage door button for 1 second
def press_button():
garage_btn_down(relay_signal)
time.sleep(1)
garage_btn_up(relay_signal)
# returns true if garage_is_open status file exists
def garage_is_open():
return os.path.exists('/home/pi/garage_is_open')
# returns true if garage_is_moving status file exists
def garage_is_moving():
return os.path.exists('/home/pi/garage_is_moving')
# reverses the garage while in motion
def reverse_door():
press_button()
print("garage button pressed (stopping)")
press_button()
print("garage button pressed (reversing)")
# main entry point
if __name__ == '__main__':
try:
if "--open" in sys.argv:
if not garage_is_open() and not garage_is_moving():
press_button()
print("garage button pressed (opening)")
Path('/home/pi/garage_is_moving').touch()
elif not garage_is_open() and garage_is_moving():
reverse_door()
else:
print("ignoring since garage is already open or in transit")
elif "--close" in sys.argv:
if garage_is_open() and not garage_is_moving():
Path('/home/pi/garage_is_moving').touch()
press_button()
print("garage button pressed (closing)")
else:
print("ignoring since garage is already closed or in transit")
except KeyboardInterrupt:
pass
finally:
GPIO.cleanup()
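# Example invocations (the script name is a placeholder):
#   python garage.py --open    # presses the button unless the door is already open or moving
#   python garage.py --close   # presses the button only when the door is open and not moving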
|
python
|
# Common libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Restrict minor warnings
import warnings
warnings.filterwarnings('ignore')
# Import test and train data
df_train = pd.read_csv('../input/train.csv')
df_Test = pd.read_csv('../input/test.csv')
df_test = df_Test
# From both train and test data
df_train = df_train.drop(['Soil_Type7', 'Soil_Type15','Soil_Type8', 'Soil_Type25'], axis = 1)
df_test = df_test.drop(['Soil_Type7', 'Soil_Type15','Soil_Type8', 'Soil_Type25'], axis = 1)
# Also drop 'Id'
df_train = df_train.iloc[:,1:]
df_test = df_test.iloc[:,1:]
# Taking only non-categorical values
Size = 10
X_temp = df_train.iloc[:,:Size]
X_test_temp = df_test.iloc[:,:Size]
r,c = df_train.shape
X_train = np.concatenate((X_temp,df_train.iloc[:,Size:c-1]),axis=1)
y_train = df_train.Cover_Type.values
r,c = df_test.shape
X_test = np.concatenate((X_test_temp, df_test.iloc[:,Size:c]), axis = 1)
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
# Setting parameters
x_data, x_test_data, y_data, y_test_data = train_test_split(X_train, y_train, test_size = 0.3)
rf_para = [{'n_estimators':[50, 100], 'max_depth':[5,10,15], 'max_features':[0.1, 0.3], \
'min_samples_leaf':[1,3], 'bootstrap':[True, False]}]
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
rfc = GridSearchCV(RandomForestClassifier(), param_grid=rf_para, cv = 10, n_jobs=-1)
rfc.fit(x_data, y_data)
rfc.best_params_
rfc.cv_results_
print ('Best accuracy obtained: {}'.format(rfc.best_score_))
print ('Parameters:')
for key, value in rfc.best_params_.items():
print('\t{}:{}'.format(key,value))
# Best params: {'max_features': 0.3, 'n_estimators': 100, 'bootstrap': False, 'max_depth': 15, 'min_samples_leaf': 1}
RFC = RandomForestClassifier(n_estimators=100, max_depth=10, max_features=0.3, bootstrap=True, min_samples_leaf=1,\
n_jobs=-1)
RFC.fit(X_train, y_train)
# y_pred = RFC.predict(X_test)
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
def plot_learning_curve(model,title, X, y,n_jobs = 1, ylim = None, cv = None,train_sizes = np.linspace(0.1, 1, 5)):
    # Figure parameters
plt.figure(figsize=(10,8))
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel('Training Examples')
plt.ylabel('Score')
train_sizes, train_score, test_score = learning_curve(model, X, y, cv = cv, n_jobs=n_jobs, train_sizes=train_sizes)
# Calculate mean and std
train_score_mean = np.mean(train_score, axis=1)
train_score_std = np.std(train_score, axis=1)
test_score_mean = np.mean(test_score, axis=1)
test_score_std = np.std(test_score, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_score_mean - train_score_std, train_score_mean + train_score_std,\
alpha = 0.1, color = 'r')
plt.fill_between(train_sizes, test_score_mean - test_score_std, test_score_mean + test_score_std,\
alpha = 0.1, color = 'g')
plt.plot(train_sizes, train_score_mean, 'o-', color="r", label="Training score")
plt.plot(train_sizes, test_score_mean, 'o-', color="g", label="Cross-validation score")
plt.legend(loc = "best")
return plt
# Plotting Learning Curve
title = 'Learning Curve(Random Forest)'
model = RFC
cv = ShuffleSplit(n_splits=50, test_size=0.2,random_state=0)
plot_learning_curve(model,title,X_train, y_train, n_jobs=-1,ylim=None,cv=cv)
plt.show()
from xgboost.sklearn import XGBClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import StratifiedKFold
from scipy.stats import randint, uniform
cv = StratifiedKFold(n_splits=10, shuffle=True)
params_dist_grid = {
'max_depth': [1, 5, 10],
'gamma': [0, 0.5, 1],
'n_estimators': randint(1, 1001), # uniform discrete random distribution
    'learning_rate': uniform(),  # uniform continuous distribution on [0, 1)
    'subsample': uniform(),  # uniform continuous distribution on [0, 1)
    'colsample_bytree': uniform(),  # uniform continuous distribution on [0, 1)
'reg_lambda':uniform(),
'reg_alpha':uniform()
}
xgbc_fixed = {'booster': 'gbtree', 'silent': 1}
bst_gridd = RandomizedSearchCV(estimator=XGBClassifier(**xgbc_fixed), param_distributions=params_dist_grid,\
scoring='accuracy', cv=cv, n_jobs=-1)
# bst_gridd.fit(X_train, y_train)
# bst_gridd.grid_scores_
# print ('Best accuracy obtained: {}'.format(bst_gridd.best_score_))
# print ('Parameters:')
# for key, value in bst_gridd.best_params_.items():
# print('\t{}:{}'.format(key,value))
# Best parameters selected using code in above cell
# Splitting the train data to test the best parameters
from sklearn.model_selection import train_test_split
seed = 123
x_data, x_test_data, y_data, y_test_data = train_test_split(X_train, y_train, test_size = 0.3,random_state=seed)
eval_set = [(x_test_data, y_test_data)]
XGBC = XGBClassifier(silent=1,n_estimators=641,learning_rate=0.2,max_depth=10,gamma=0.5,nthread=-1,\
reg_alpha = 0.05, reg_lambda= 0.35, max_delta_step = 1, subsample = 0.83, colsample_bytree = 0.6)
# Calculating error
XGBC.fit(x_data, y_data, early_stopping_rounds=100, eval_set=eval_set, eval_metric='merror', verbose=True)
pred = XGBC.predict(x_test_data)
accuracy = accuracy_score(y_test_data, pred)
print ('accuracy:%0.2f%%'%(accuracy*100))
xgbc_pred= XGBC.predict(X_test)
# saving to a csv file to make submission
solution = pd.DataFrame({'Id':df_Test.Id, 'Cover_Type':xgbc_pred}, columns = ['Id','Cover_Type'])
solution.to_csv('Xgboost_sol.csv', index=False)
|
python
|
# -*- coding: utf-8 -*-
"""
Gromov-Wasserstein transport method
===================================
"""
# Author: Erwan Vautier <[email protected]>
# Nicolas Courty <[email protected]>
#
# License: MIT License
import numpy as np
from .bregman import sinkhorn
from .utils import dist
def square_loss(a, b):
"""
Returns the value of L(a,b)=(1/2)*|a-b|^2
"""
return 0.5 * (a - b)**2
def kl_loss(a, b):
"""
Returns the value of L(a,b)=a*log(a/b)-a+b
"""
return a * np.log(a / b) - a + b
def tensor_square_loss(C1, C2, T):
"""
Returns the value of \mathcal{L}(C1,C2) \otimes T with the square loss
    function as the loss function of Gromov-Wasserstein discrepancy.
Where :
C1 : Metric cost matrix in the source space
C2 : Metric cost matrix in the target space
T : A coupling between those two spaces
The square-loss function L(a,b)=(1/2)*|a-b|^2 is read as :
L(a,b) = f1(a)+f2(b)-h1(a)*h2(b) with :
f1(a)=(a^2)/2
f2(b)=(b^2)/2
h1(a)=a
h2(b)=b
Parameters
----------
C1 : ndarray, shape (ns, ns)
Metric cost matrix in the source space
C2 : ndarray, shape (nt, nt)
        Metric cost matrix in the target space
T : ndarray, shape (ns, nt)
Coupling between source and target spaces
Returns
-------
tens : ndarray, shape (ns, nt)
\mathcal{L}(C1,C2) \otimes T tensor-matrix multiplication result
"""
C1 = np.asarray(C1, dtype=np.float64)
C2 = np.asarray(C2, dtype=np.float64)
T = np.asarray(T, dtype=np.float64)
def f1(a):
return (a**2) / 2
def f2(b):
return (b**2) / 2
def h1(a):
return a
def h2(b):
return b
tens = -np.dot(h1(C1), T).dot(h2(C2).T)
tens -= tens.min()
return tens
def tensor_kl_loss(C1, C2, T):
"""
    Returns the value of \mathcal{L}(C1,C2) \otimes T with the KL loss
    function as the loss function of Gromov-Wasserstein discrepancy.
Where :
C1 : Metric cost matrix in the source space
C2 : Metric cost matrix in the target space
T : A coupling between those two spaces
    The KL-loss function L(a,b)=a*log(a/b)-a+b is read as :
L(a,b) = f1(a)+f2(b)-h1(a)*h2(b) with :
f1(a)=a*log(a)-a
f2(b)=b
h1(a)=a
h2(b)=log(b)
Parameters
----------
C1 : ndarray, shape (ns, ns)
Metric cost matrix in the source space
C2 : ndarray, shape (nt, nt)
        Metric cost matrix in the target space
T : ndarray, shape (ns, nt)
Coupling between source and target spaces
Returns
-------
tens : ndarray, shape (ns, nt)
\mathcal{L}(C1,C2) \otimes T tensor-matrix multiplication result
References
----------
.. [12] Peyré, Gabriel, Marco Cuturi, and Justin Solomon,
"Gromov-Wasserstein averaging of kernel and distance matrices."
International Conference on Machine Learning (ICML). 2016.
"""
C1 = np.asarray(C1, dtype=np.float64)
C2 = np.asarray(C2, dtype=np.float64)
T = np.asarray(T, dtype=np.float64)
def f1(a):
return a * np.log(a + 1e-15) - a
def f2(b):
return b
def h1(a):
return a
def h2(b):
return np.log(b + 1e-15)
tens = -np.dot(h1(C1), T).dot(h2(C2).T)
tens -= tens.min()
return tens
def update_square_loss(p, lambdas, T, Cs):
"""
Updates C according to the L2 Loss kernel with the S Ts couplings
calculated at each iteration
Parameters
----------
p : ndarray, shape (N,)
masses in the targeted barycenter
lambdas : list of float
list of the S spaces' weights
T : list of S np.ndarray(ns,N)
the S Ts couplings calculated at each iteration
Cs : list of S ndarray, shape(ns,ns)
Metric cost matrices
Returns
----------
    C : ndarray, shape (N, N)
        updated C matrix
"""
tmpsum = sum([lambdas[s] * np.dot(T[s].T, Cs[s]).dot(T[s])
for s in range(len(T))])
ppt = np.outer(p, p)
return np.divide(tmpsum, ppt)
def update_kl_loss(p, lambdas, T, Cs):
"""
Updates C according to the KL Loss kernel with the S Ts couplings calculated at each iteration
Parameters
----------
p : ndarray, shape (N,)
weights in the targeted barycenter
lambdas : list of the S spaces' weights
T : list of S np.ndarray(ns,N)
the S Ts couplings calculated at each iteration
Cs : list of S ndarray, shape(ns,ns)
Metric cost matrices
Returns
----------
    C : ndarray, shape (N, N)
updated C matrix
"""
tmpsum = sum([lambdas[s] * np.dot(T[s].T, Cs[s]).dot(T[s])
for s in range(len(T))])
ppt = np.outer(p, p)
return np.exp(np.divide(tmpsum, ppt))
def gromov_wasserstein(C1, C2, p, q, loss_fun, epsilon,
max_iter=1000, tol=1e-9, verbose=False, log=False):
"""
Returns the gromov-wasserstein coupling between the two measured similarity matrices
(C1,p) and (C2,q)
The function solves the following optimization problem:
.. math::
\GW = arg\min_T \sum_{i,j,k,l} L(C1_{i,k},C2_{j,l})*T_{i,j}*T_{k,l}-\epsilon(H(T))
s.t. \GW 1 = p
\GW^T 1= q
\GW\geq 0
Where :
C1 : Metric cost matrix in the source space
C2 : Metric cost matrix in the target space
p : distribution in the source space
q : distribution in the target space
L : loss function to account for the misfit between the similarity matrices
H : entropy
Parameters
----------
C1 : ndarray, shape (ns, ns)
Metric cost matrix in the source space
C2 : ndarray, shape (nt, nt)
        Metric cost matrix in the target space
p : ndarray, shape (ns,)
distribution in the source space
q : ndarray, shape (nt,)
distribution in the target space
loss_fun : string
loss function used for the solver either 'square_loss' or 'kl_loss'
epsilon : float
Regularization term >0
max_iter : int, optional
Max number of iterations
tol : float, optional
Stop threshold on error (>0)
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
T : ndarray, shape (ns, nt)
coupling between the two spaces that minimizes :
\sum_{i,j,k,l} L(C1_{i,k},C2_{j,l})*T_{i,j}*T_{k,l}-\epsilon(H(T))
"""
C1 = np.asarray(C1, dtype=np.float64)
C2 = np.asarray(C2, dtype=np.float64)
T = np.outer(p, q) # Initialization
    cpt = 0
    err = 1
    if log:
        # keep the error history in a dict when logging is requested
        log = {'err': []}
while (err > tol and cpt < max_iter):
Tprev = T
if loss_fun == 'square_loss':
tens = tensor_square_loss(C1, C2, T)
elif loss_fun == 'kl_loss':
tens = tensor_kl_loss(C1, C2, T)
T = sinkhorn(p, q, tens, epsilon)
if cpt % 10 == 0:
# we can speed up the process by checking for the error only all
# the 10th iterations
err = np.linalg.norm(T - Tprev)
if log:
log['err'].append(err)
if verbose:
if cpt % 200 == 0:
print('{:5s}|{:12s}'.format(
'It.', 'Err') + '\n' + '-' * 19)
print('{:5d}|{:8e}|'.format(cpt, err))
cpt += 1
if log:
return T, log
else:
return T
def gromov_wasserstein2(C1, C2, p, q, loss_fun, epsilon,
max_iter=1000, tol=1e-9, verbose=False, log=False):
"""
Returns the gromov-wasserstein discrepancy between the two measured similarity matrices
(C1,p) and (C2,q)
The function solves the following optimization problem:
.. math::
\GW_Dist = \min_T \sum_{i,j,k,l} L(C1_{i,k},C2_{j,l})*T_{i,j}*T_{k,l}-\epsilon(H(T))
Where :
C1 : Metric cost matrix in the source space
C2 : Metric cost matrix in the target space
p : distribution in the source space
q : distribution in the target space
L : loss function to account for the misfit between the similarity matrices
H : entropy
Parameters
----------
C1 : ndarray, shape (ns, ns)
Metric cost matrix in the source space
C2 : ndarray, shape (nt, nt)
        Metric cost matrix in the target space
p : ndarray, shape (ns,)
distribution in the source space
q : ndarray, shape (nt,)
distribution in the target space
loss_fun : string
loss function used for the solver either 'square_loss' or 'kl_loss'
epsilon : float
Regularization term >0
max_iter : int, optional
Max number of iterations
tol : float, optional
Stop threshold on error (>0)
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
gw_dist : float
Gromov-Wasserstein distance
"""
if log:
gw, logv = gromov_wasserstein(
C1, C2, p, q, loss_fun, epsilon, max_iter, tol, verbose, log)
else:
gw = gromov_wasserstein(C1, C2, p, q, loss_fun,
epsilon, max_iter, tol, verbose, log)
if loss_fun == 'square_loss':
gw_dist = np.sum(gw * tensor_square_loss(C1, C2, gw))
elif loss_fun == 'kl_loss':
gw_dist = np.sum(gw * tensor_kl_loss(C1, C2, gw))
if log:
return gw_dist, logv
else:
return gw_dist
def gromov_barycenters(N, Cs, ps, p, lambdas, loss_fun, epsilon,
max_iter=1000, tol=1e-9, verbose=False, log=False, init_C=None):
"""
Returns the gromov-wasserstein barycenters of S measured similarity matrices
(Cs)_{s=1}^{s=S}
The function solves the following optimization problem:
.. math::
C = argmin_C\in R^NxN \sum_s \lambda_s GW(C,Cs,p,ps)
Where :
Cs : metric cost matrix
ps : distribution
Parameters
----------
N : Integer
Size of the targeted barycenter
Cs : list of S np.ndarray(ns,ns)
Metric cost matrices
ps : list of S np.ndarray(ns,)
sample weights in the S spaces
p : ndarray, shape(N,)
weights in the targeted barycenter
lambdas : list of float
list of the S spaces' weights
    loss_fun : string
        loss function used for the solver, either 'square_loss' or 'kl_loss'
epsilon : float
Regularization term >0
max_iter : int, optional
Max number of iterations
tol : float, optional
        Stop threshold on error (>0)
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
    init_C : ndarray, shape (N, N), optional
        initial value for the C matrix provided by the user (a random matrix is used if not given)
Returns
-------
C : ndarray, shape (N, N)
        Similarity matrix in the barycenter space (permuted arbitrarily)
"""
S = len(Cs)
Cs = [np.asarray(Cs[s], dtype=np.float64) for s in range(S)]
lambdas = np.asarray(lambdas, dtype=np.float64)
# Initialization of C : random SPD matrix (if not provided by user)
if init_C is None:
xalea = np.random.randn(N, 2)
C = dist(xalea, xalea)
C /= C.max()
else:
C = init_C
    cpt = 0
    err = 1
    error = []
    if log:
        # keep the error history in a dict when logging is requested
        log = {'err': []}
while(err > tol and cpt < max_iter):
Cprev = C
        T = [gromov_wasserstein(Cs[s], C, ps[s], p, loss_fun, epsilon,
                                max_iter, 1e-5, verbose, log=False) for s in range(S)]
if loss_fun == 'square_loss':
C = update_square_loss(p, lambdas, T, Cs)
elif loss_fun == 'kl_loss':
C = update_kl_loss(p, lambdas, T, Cs)
if cpt % 10 == 0:
# we can speed up the process by checking for the error only all
# the 10th iterations
err = np.linalg.norm(C - Cprev)
error.append(err)
if log:
log['err'].append(err)
if verbose:
if cpt % 200 == 0:
print('{:5s}|{:12s}'.format(
'It.', 'Err') + '\n' + '-' * 19)
print('{:5d}|{:8e}|'.format(cpt, err))
cpt += 1
return C
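# Hedged usage sketch (not part of the public API; it relies on this module's
# package-relative sinkhorn import, so call it from within the package). Point
# counts and epsilon are arbitrary choices.
def _gromov_usage_example():
    ns, nt = 20, 30
    xs = np.random.randn(ns, 2)
    xt = np.random.randn(nt, 2)
    # intra-space Euclidean distance matrices, rescaled to [0, 1]
    C1 = np.sqrt(((xs[:, None, :] - xs[None, :, :]) ** 2).sum(axis=2))
    C2 = np.sqrt(((xt[:, None, :] - xt[None, :, :]) ** 2).sum(axis=2))
    C1 /= C1.max()
    C2 /= C2.max()
    # uniform weights on both point clouds
    p = np.ones(ns) / ns
    q = np.ones(nt) / nt
    T = gromov_wasserstein(C1, C2, p, q, 'square_loss', epsilon=5e-3)
    gw_dist = gromov_wasserstein2(C1, C2, p, q, 'square_loss', epsilon=5e-3)
    return T, gw_dist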
|
python
|
# coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class BaseImageResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'hdp_stacks': 'list[StackDetailsJson]',
'hdf_stacks': 'list[StackDetailsJson]',
'_date': 'str',
'description': 'str',
'os': 'str',
'os_type': 'str',
'uuid': 'str',
'version': 'str',
'repo': 'dict(str, str)',
'images': 'dict(str, dict(str, str))',
'stack_details': 'StackDetailsJson',
'default_image': 'bool',
'package_versions': 'dict(str, str)'
}
attribute_map = {
'hdp_stacks': 'hdpStacks',
'hdf_stacks': 'hdfStacks',
'_date': 'date',
'description': 'description',
'os': 'os',
'os_type': 'osType',
'uuid': 'uuid',
'version': 'version',
'repo': 'repo',
'images': 'images',
'stack_details': 'stackDetails',
'default_image': 'defaultImage',
'package_versions': 'packageVersions'
}
def __init__(self, hdp_stacks=None, hdf_stacks=None, _date=None, description=None, os=None, os_type=None, uuid=None, version=None, repo=None, images=None, stack_details=None, default_image=False, package_versions=None):
"""
BaseImageResponse - a model defined in Swagger
"""
self._hdp_stacks = None
self._hdf_stacks = None
self.__date = None
self._description = None
self._os = None
self._os_type = None
self._uuid = None
self._version = None
self._repo = None
self._images = None
self._stack_details = None
self._default_image = None
self._package_versions = None
if hdp_stacks is not None:
self.hdp_stacks = hdp_stacks
if hdf_stacks is not None:
self.hdf_stacks = hdf_stacks
if _date is not None:
self._date = _date
if description is not None:
self.description = description
if os is not None:
self.os = os
if os_type is not None:
self.os_type = os_type
if uuid is not None:
self.uuid = uuid
if version is not None:
self.version = version
if repo is not None:
self.repo = repo
if images is not None:
self.images = images
if stack_details is not None:
self.stack_details = stack_details
if default_image is not None:
self.default_image = default_image
if package_versions is not None:
self.package_versions = package_versions
@property
def hdp_stacks(self):
"""
Gets the hdp_stacks of this BaseImageResponse.
:return: The hdp_stacks of this BaseImageResponse.
:rtype: list[StackDetailsJson]
"""
return self._hdp_stacks
@hdp_stacks.setter
def hdp_stacks(self, hdp_stacks):
"""
Sets the hdp_stacks of this BaseImageResponse.
:param hdp_stacks: The hdp_stacks of this BaseImageResponse.
:type: list[StackDetailsJson]
"""
self._hdp_stacks = hdp_stacks
@property
def hdf_stacks(self):
"""
Gets the hdf_stacks of this BaseImageResponse.
:return: The hdf_stacks of this BaseImageResponse.
:rtype: list[StackDetailsJson]
"""
return self._hdf_stacks
@hdf_stacks.setter
def hdf_stacks(self, hdf_stacks):
"""
Sets the hdf_stacks of this BaseImageResponse.
:param hdf_stacks: The hdf_stacks of this BaseImageResponse.
:type: list[StackDetailsJson]
"""
self._hdf_stacks = hdf_stacks
@property
def _date(self):
"""
Gets the _date of this BaseImageResponse.
:return: The _date of this BaseImageResponse.
:rtype: str
"""
return self.__date
@_date.setter
def _date(self, _date):
"""
Sets the _date of this BaseImageResponse.
:param _date: The _date of this BaseImageResponse.
:type: str
"""
self.__date = _date
@property
def description(self):
"""
Gets the description of this BaseImageResponse.
:return: The description of this BaseImageResponse.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this BaseImageResponse.
:param description: The description of this BaseImageResponse.
:type: str
"""
self._description = description
@property
def os(self):
"""
Gets the os of this BaseImageResponse.
:return: The os of this BaseImageResponse.
:rtype: str
"""
return self._os
@os.setter
def os(self, os):
"""
Sets the os of this BaseImageResponse.
:param os: The os of this BaseImageResponse.
:type: str
"""
self._os = os
@property
def os_type(self):
"""
Gets the os_type of this BaseImageResponse.
:return: The os_type of this BaseImageResponse.
:rtype: str
"""
return self._os_type
@os_type.setter
def os_type(self, os_type):
"""
Sets the os_type of this BaseImageResponse.
:param os_type: The os_type of this BaseImageResponse.
:type: str
"""
self._os_type = os_type
@property
def uuid(self):
"""
Gets the uuid of this BaseImageResponse.
:return: The uuid of this BaseImageResponse.
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""
Sets the uuid of this BaseImageResponse.
:param uuid: The uuid of this BaseImageResponse.
:type: str
"""
self._uuid = uuid
@property
def version(self):
"""
Gets the version of this BaseImageResponse.
:return: The version of this BaseImageResponse.
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""
Sets the version of this BaseImageResponse.
:param version: The version of this BaseImageResponse.
:type: str
"""
self._version = version
@property
def repo(self):
"""
Gets the repo of this BaseImageResponse.
:return: The repo of this BaseImageResponse.
:rtype: dict(str, str)
"""
return self._repo
@repo.setter
def repo(self, repo):
"""
Sets the repo of this BaseImageResponse.
:param repo: The repo of this BaseImageResponse.
:type: dict(str, str)
"""
self._repo = repo
@property
def images(self):
"""
Gets the images of this BaseImageResponse.
:return: The images of this BaseImageResponse.
:rtype: dict(str, dict(str, str))
"""
return self._images
@images.setter
def images(self, images):
"""
Sets the images of this BaseImageResponse.
:param images: The images of this BaseImageResponse.
:type: dict(str, dict(str, str))
"""
self._images = images
@property
def stack_details(self):
"""
Gets the stack_details of this BaseImageResponse.
:return: The stack_details of this BaseImageResponse.
:rtype: StackDetailsJson
"""
return self._stack_details
@stack_details.setter
def stack_details(self, stack_details):
"""
Sets the stack_details of this BaseImageResponse.
:param stack_details: The stack_details of this BaseImageResponse.
:type: StackDetailsJson
"""
self._stack_details = stack_details
@property
def default_image(self):
"""
Gets the default_image of this BaseImageResponse.
:return: The default_image of this BaseImageResponse.
:rtype: bool
"""
return self._default_image
@default_image.setter
def default_image(self, default_image):
"""
Sets the default_image of this BaseImageResponse.
:param default_image: The default_image of this BaseImageResponse.
:type: bool
"""
self._default_image = default_image
@property
def package_versions(self):
"""
Gets the package_versions of this BaseImageResponse.
:return: The package_versions of this BaseImageResponse.
:rtype: dict(str, str)
"""
return self._package_versions
@package_versions.setter
def package_versions(self, package_versions):
"""
Sets the package_versions of this BaseImageResponse.
:param package_versions: The package_versions of this BaseImageResponse.
:type: dict(str, str)
"""
self._package_versions = package_versions
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, BaseImageResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
python
|
"""initialize the connection and wait for a message directed at it"""
from irc import *
from decision import *
import os
import random
channel = "#bitswebteam"
server = "irc.freenode.net"
nickname = "EisBot"
irc = IRC()
irc.connect(server, channel, nickname)
while 1:
text = irc.get_text()
    print(text)
if "PRIVMSG" in text and channel in text and "@choose" in text:
irc.send(channel, decision.decide(text))
elif "PRIVMSG" in text and channel in text and "@adventure" in text:
irc.send(channel, "The adventure begins!..")
adventure.start()
|
python
|
import pickle
import argparse
import cv2
from tqdm import tqdm
from lib.config import Config
def parse_args():
parser = argparse.ArgumentParser(description="Tool to generate qualitative results videos")
parser.add_argument("--pred", help=".pkl file to load predictions from")
parser.add_argument("--cfg", default="config.yaml", help="Config file")
parser.add_argument("--cover", default="tusimple_cover.png", help="Cover image file")
parser.add_argument("--out", default="video.avi", help="Output filename")
parser.add_argument("--view", action="store_true", help="Show predictions instead of creating video")
return parser.parse_args()
def add_cover_img(video, cover_path, frames=90):
cover = cv2.imread(cover_path)
for _ in range(frames):
video.write(cover)
def create_video(filename, width, height, fps=30):
fourcc = cv2.VideoWriter_fourcc(*'MP42')
video = cv2.VideoWriter(filename, fourcc, float(fps), (width, height))
return video
def main():
args = parse_args()
cfg = Config(args.cfg)
dataset = cfg.get_dataset('test')
height, width = cfg['datasets']['test']['parameters']['img_size']
print('Using resolution {}x{}'.format(width, height))
if not args.view:
video = create_video(args.out, width, height)
# add_cover_img(video, args.cover)
with open(args.pred, "rb") as pred_file:
predictions = pickle.load(pred_file)
for idx, pred in tqdm(zip(range(len(dataset)), predictions), total=len(dataset)):
if idx < 2200: continue
if idx > 3000: break
det_pred, cls_pred = pred
assert det_pred.shape[0] == 1 # batch size == 1
frame = dataset.draw_annotation(idx,
pred=det_pred[0].cpu().numpy(),
cls_pred=cls_pred[0].cpu().numpy() if cls_pred is not None else None)
assert frame.shape[:2] == (height, width)
if args.view:
cv2.imshow('frame', frame)
cv2.waitKey(0)
else:
video.write(frame)
if not args.view:
video.release()
print('Video saved as {}'.format(args.out))
if __name__ == '__main__':
main()
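# Example invocations (file names are placeholders; --pred expects a pickle of
# (det_pred, cls_pred) tuples matching the test dataset order):
#   python gen_video.py --pred predictions.pkl --cfg config.yaml --out video.avi
#   python gen_video.py --pred predictions.pkl --view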
|
python
|
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# author: Wim Meeussen
import threading
import rclpy
import tf2_py as tf2
import tf2_ros
from tf2_msgs.srv import FrameGraph
# TODO(vinnamkim): It seems rosgraph is not ready
# import rosgraph.masterapi
from time import sleep
from rclpy.duration import Duration
from rclpy.task import Future
class Buffer(tf2.BufferCore, tf2_ros.BufferInterface):
"""
Standard implementation of the :class:`tf2_ros.BufferInterface` abstract data type.
Inherits from :class:`tf2_ros.buffer_interface.BufferInterface` and :class:`tf2.BufferCore`.
    Stores known frames and offers a ROS service, "tf2_frames", which responds to client requests
with a response containing a :class:`tf2_msgs.FrameGraph` representing the relationship of
known frames.
"""
def __init__(self, cache_time = None, node = None):
"""
Constructor.
:param cache_time: (Optional) How long to retain past information in BufferCore.
        :param node: (Optional) If given, a 'tf2_frames' service is created on this node that responds with all frames as YAML.
"""
if cache_time is not None:
tf2.BufferCore.__init__(self, cache_time)
else:
tf2.BufferCore.__init__(self)
tf2_ros.BufferInterface.__init__(self)
self._new_data_callbacks = []
self._callbacks_to_remove = []
self._callbacks_lock = threading.RLock()
if node is not None:
self.srv = node.create_service(FrameGraph, 'tf2_frames', self.__get_frames)
def __get_frames(self, req, res):
return FrameGraph.Response(frame_yaml=self.all_frames_as_yaml())
def set_transform(self, *args, **kwargs):
super().set_transform(*args, **kwargs)
self._call_new_data_callbacks()
def set_transform_static(self, *args, **kwargs):
super().set_transform_static(*args, **kwargs)
self._call_new_data_callbacks()
def _call_new_data_callbacks(self):
with self._callbacks_lock:
for callback in self._new_data_callbacks:
callback()
# Remove callbacks after to avoid modifying list being iterated on
for callback in self._callbacks_to_remove:
self._new_data_callbacks.remove(callback)
self._callbacks_to_remove.clear()
def _remove_callback(self, callback):
with self._callbacks_lock:
# Actually remove the callback later
self._callbacks_to_remove.append(callback)
def lookup_transform(self, target_frame, source_frame, time, timeout=Duration()):
"""
Get the transform from the source frame to the target frame.
:param target_frame: Name of the frame to transform into.
:param source_frame: Name of the input frame.
:param time: The time at which to get the transform. (0 will get the latest)
:param timeout: (Optional) Time to wait for the target frame to become available.
:return: The transform between the frames.
:rtype: :class:`geometry_msgs.msg.TransformStamped`
"""
self.can_transform(target_frame, source_frame, time, timeout)
return self.lookup_transform_core(target_frame, source_frame, time)
async def lookup_transform_async(self, target_frame, source_frame, time):
"""
        Get the transform from the source frame to the target frame asynchronously.
:param target_frame: Name of the frame to transform into.
:param source_frame: Name of the input frame.
:param time: The time at which to get the transform. (0 will get the latest)
:return: the transform
:rtype: :class:`geometry_msgs.msg.TransformStamped`
"""
await self.wait_for_transform_async(target_frame, source_frame, time)
return self.lookup_transform_core(target_frame, source_frame, time)
def lookup_transform_full(self, target_frame, target_time, source_frame, source_time, fixed_frame, timeout=Duration()):
"""
Get the transform from the source frame to the target frame using the advanced API.
:param target_frame: Name of the frame to transform into.
:param target_time: The time to transform to. (0 will get the latest)
:param source_frame: Name of the input frame.
:param source_time: The time at which source_frame will be evaluated. (0 will get the latest)
:param fixed_frame: Name of the frame to consider constant in time.
:param timeout: (Optional) Time to wait for the target frame to become available.
:return: The transform between the frames.
:rtype: :class:`geometry_msgs.msg.TransformStamped`
"""
self.can_transform_full(target_frame, target_time, source_frame, source_time, fixed_frame, timeout)
return self.lookup_transform_full_core(target_frame, target_time, source_frame, source_time, fixed_frame)
async def lookup_transform_full_async(self, target_frame, target_time, source_frame, source_time, fixed_frame):
"""
        Get the transform from the source frame to the target frame using the advanced API asynchronously.
:param target_frame: Name of the frame to transform into.
:param target_time: The time to transform to. (0 will get the latest)
:param source_frame: Name of the input frame.
:param source_time: The time at which source_frame will be evaluated. (0 will get the latest)
:param fixed_frame: Name of the frame to consider constant in time.
:return: the transform
:rtype: :class:`geometry_msgs.msg.TransformStamped`
"""
await self.wait_for_transform_full_async(target_frame, target_time, source_frame, source_time, fixed_frame)
return self.lookup_transform_full_core(target_frame, target_time, source_frame, source_time, fixed_frame)
def can_transform(self, target_frame, source_frame, time, timeout=Duration(), return_debug_tuple=False):
"""
Check if a transform from the source frame to the target frame is possible.
:param target_frame: Name of the frame to transform into.
:param source_frame: Name of the input frame.
:param time: The time at which to get the transform. (0 will get the latest)
:param timeout: (Optional) Time to wait for the target frame to become available.
        :param return_debug_tuple: (Optional) If true, return a tuple representing debug information.
:return: True if the transform is possible, false otherwise.
:rtype: bool
"""
clock = rclpy.clock.Clock()
if timeout != Duration():
start_time = clock.now()
# TODO(vinnamkim): rclpy.Rate is not ready
# See https://github.com/ros2/rclpy/issues/186
# r = rospy.Rate(20)
while (clock.now() < start_time + timeout and
not self.can_transform_core(target_frame, source_frame, time)[0] and
(clock.now() + Duration(seconds=3.0)) >= start_time): # big jumps in time are likely bag loops, so break for them
# r.sleep()
sleep(0.02)
core_result = self.can_transform_core(target_frame, source_frame, time)
if return_debug_tuple:
return core_result
return core_result[0]
def can_transform_full(self, target_frame, target_time, source_frame, source_time, fixed_frame, timeout=Duration(),
return_debug_tuple=False):
"""
Check if a transform from the source frame to the target frame is possible (advanced API).
Must be implemented by a subclass of BufferInterface.
:param target_frame: Name of the frame to transform into.
:param target_time: The time to transform to. (0 will get the latest)
:param source_frame: Name of the input frame.
:param source_time: The time at which source_frame will be evaluated. (0 will get the latest)
:param fixed_frame: Name of the frame to consider constant in time.
:param timeout: (Optional) Time to wait for the target frame to become available.
        :param return_debug_tuple: (Optional) If true, return a tuple representing debug information.
:return: True if the transform is possible, false otherwise.
:rtype: bool
"""
clock = rclpy.clock.Clock()
if timeout != Duration():
start_time = clock.now()
# TODO(vinnamkim): rclpy.Rate is not ready
# See https://github.com/ros2/rclpy/issues/186
# r = rospy.Rate(20)
while (clock.now() < start_time + timeout and
not self.can_transform_full_core(target_frame, target_time, source_frame, source_time, fixed_frame)[0] and
(clock.now() + Duration(seconds=3.0)) >= start_time): # big jumps in time are likely bag loops, so break for them
# r.sleep()
sleep(0.02)
core_result = self.can_transform_full_core(target_frame, target_time, source_frame, source_time, fixed_frame)
if return_debug_tuple:
return core_result
return core_result[0]
def wait_for_transform_async(self, target_frame, source_frame, time):
"""
Wait for a transform from the source frame to the target frame to become possible.
:param target_frame: Name of the frame to transform into.
:param source_frame: Name of the input frame.
:param time: The time at which to get the transform. (0 will get the latest)
:return: A future that becomes true when the transform is available
:rtype: rclpy.task.Future
"""
fut = rclpy.task.Future()
if self.can_transform_core(target_frame, source_frame, time)[0]:
# Short cut, the transform is available
fut.set_result(True)
return fut
def _on_new_data():
try:
if self.can_transform_core(target_frame, source_frame, time)[0]:
fut.set_result(True)
except BaseException as e:
fut.set_exception(e)
self._new_data_callbacks.append(_on_new_data)
fut.add_done_callback(lambda _: self._remove_callback(_on_new_data))
return fut
def wait_for_transform_full_async(self, target_frame, target_time, source_frame, source_time, fixed_frame):
"""
Wait for a transform from the source frame to the target frame to become possible.
:param target_frame: Name of the frame to transform into.
:param target_time: The time to transform to. (0 will get the latest)
:param source_frame: Name of the input frame.
:param source_time: The time at which source_frame will be evaluated. (0 will get the latest)
:param fixed_frame: Name of the frame to consider constant in time.
:return: A future that becomes true when the transform is available
:rtype: rclpy.task.Future
"""
fut = rclpy.task.Future()
if self.can_transform_full_core(target_frame, target_time, source_frame, source_time, fixed_frame)[0]:
# Short cut, the transform is available
fut.set_result(True)
return fut
def _on_new_data():
try:
if self.can_transform_full_core(target_frame, target_time, source_frame, source_time, fixed_frame)[0]:
fut.set_result(True)
except BaseException as e:
fut.set_exception(e)
self._new_data_callbacks.append(_on_new_data)
fut.add_done_callback(lambda _: self._remove_callback(_on_new_data))
return fut
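# Hedged usage sketch (assumes a running ROS 2 system publishing /tf; the frame
# names 'map' and 'base_link' are placeholders). The TransformListener feeds the
# buffer while we poll for the latest transform.
def _buffer_usage_example():
    from rclpy.time import Time
    rclpy.init()
    node = rclpy.create_node('tf2_buffer_example')
    buffer = Buffer(node=node)
    listener = tf2_ros.TransformListener(buffer, node)  # keeps the buffer filled
    try:
        while rclpy.ok():
            rclpy.spin_once(node, timeout_sec=0.1)
            try:
                # Time() == 0 means "latest available transform"
                t = buffer.lookup_transform('map', 'base_link', Time())
            except (tf2.LookupException, tf2.ConnectivityException, tf2.ExtrapolationException):
                continue
            node.get_logger().info('translation: {}'.format(t.transform.translation))
            break
    finally:
        node.destroy_node()
        rclpy.shutdown()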
|
python
|
#!/usr/bin/env python
"""
Copyright (c) 2018 Keitaro AB
Use of this source code is governed by an MIT license
that can be found in the LICENSE file or at
https://opensource.org/licenses/MIT.
"""
"""
Executable module for checking github version updates for repositories from repositories.txt
and alerting the slack channel in case of new unrecorded one
"""
import urllib.request
import json
import os
import etcd3
def check_version(repositories):
"""
For list of repositories, check their versions and recorded ones
:param repositories:
:return:
"""
for repository in repositories:
# user/repository
name_path = get_name_path(repository)
# github api url for requesting tag list
url = get_api_url(name_path)
# get the tag list
tags = get_tags(url)
check_records(name_path, tags[0])
def get_repositories():
"""
Parse repository list from file
:return:
"""
repositories = []
with open('/app/repositories.txt', 'r') as file:
content = file.readlines()
        repositories = [row.strip() for row in content if row.strip() and not row.strip().startswith('#')]
return repositories
def alert_slack(repository, new_version):
"""
Alerts the slack channel with formatting and content of message
:param repository:
:param new_version:
:param slack_hook:
:return:
"""
slack_hook = os.environ.get('SLACK_HOOK')
slack_channel = os.environ.get('SLACK_CHANNEL', '#random')
request_dict = dict()
request_dict['attachments'] = []
attachment = dict()
attachment['fallback'] = "New version ({}) found for {}!".format(new_version, repository)
attachment['color'] = '#4286f4'
attachment['title'] = "New version found for {}!".format(repository)
attachment['fields'] = [
{
'title': "Repository: _*{}*_".format(repository),
'value': "New version: `{}`".format(new_version)
},
{
'value': "Url: https://github.com/{}/releases/tag/{}".format(repository, new_version)
}
]
request_dict['attachments'].append(attachment)
request_dict['channel'] = slack_channel
request_dict['link_names'] = 1
request_dict['username'] = 'version-check-bot'
request_dict['icon_emoji'] = ':arrow_up:'
# convert dict to json, and encode it with utf-8
request_data = json.dumps(request_dict).encode('utf-8')
request = urllib.request.Request(slack_hook, request_data, {'Content-type': 'application/json'}, method="POST")
response = urllib.request.urlopen(request)
# TODO: parse response
return response
def get_tags(url):
"""
Function for obtaining tags from url
:param url:
:return tags:
"""
reader = urllib.request.urlopen(url)
json_raw = reader.read()
reader.close()
json_parsed = json.loads(json_raw)
tags = []
for tag_object in json_parsed:
tag = tag_object['ref'].split('/')[-1]
# skip beta and alpha tags, and the minio bad first old tag
if 'beta' not in tag and 'alpha' not in tag and 'release-1434511043' not in tag:
tags.append(tag)
return sorted(tags, reverse=True)
def get_name_path(url):
"""
Function for getting the repository name from given github url
:param url:
:return:
"""
# remove trailing slash
if url[-1] == '/':
url = url[:-1]
url_split = url.split('/')
if len(url_split) > 2:
        return url_split[-2] + '/' + url_split[-1]
# if only user/repo is given instead of full url
return url
def get_api_url(for_repository):
"""
Get the api url for GET request to obtain tags
:param for_repository:
:return:
"""
api_url = 'https://api.github.com/repos/{}/git/refs/tags'.format(for_repository)
return api_url
def etcd_client():
"""
Creates an etcd client instance with connection
:return: returns an etcd client instance
"""
host = os.environ.get('ETCD_HOST')
port = os.environ.get('ETCD_PORT')
etcd = etcd3.client(host=host, port=port)
return etcd
def check_records(repository, version):
"""
Checks if there is a record for the repository with that version,
if not, sends an alert
:param repository:
:param version:
:return:
"""
etcd = etcd_client()
records = [(x[1].key.decode('utf-8'), x[0].decode('utf-8')) for x in etcd.get_prefix('/version-check')]
etcd_key_path = '/version-check/{}'.format(repository)
# get recorded version
recorded_version_bytes = etcd.get(etcd_key_path)[0]
# if none found, send alert and update records
if recorded_version_bytes is None:
alert_slack(repository, version)
etcd.put(etcd_key_path, version)
return True
# decode the bytes
recorded_version = recorded_version_bytes.decode('utf-8')
# if the recorded version is different, update and send alert
if recorded_version != version:
alert_slack(repository, version)
etcd.put(etcd_key_path, version)
return True
return False
if __name__ == '__main__':
check_version(get_repositories())
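# Example setup (values and the script name are placeholders): repositories are read
# from /app/repositories.txt, one "user/repo" or full GitHub URL per line, '#' for comments.
#
#   export ETCD_HOST=127.0.0.1
#   export ETCD_PORT=2379
#   export SLACK_HOOK=https://hooks.slack.com/services/XXX/YYY/ZZZ
#   export SLACK_CHANNEL='#random'
#   python version_check.py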
|
python
|
from enum import Enum
class Order(Enum):
ASC = "ASC"
DESC = "DESC"
class DatabaseType(Enum):
PSYCOPG2 = "PSYCOPG2"
SQLITE3 = "SQLITE3"
class Fetch(Enum):
ONE = "ONE"
ALL = "ALL"
|
python
|
# -*- coding:utf-8 -*-
import threading
local_value = threading.local()
def test_put_thread(value):
local_value.value = value
test_read_thread()
def test_read_thread():
print(local_value.value + " in thread: %s" % threading.current_thread().name)
t1 = threading.Thread(target=test_put_thread, args=("张三",), name='Thread-A')
t2 = threading.Thread(target=test_put_thread, args=("李四",), name='Thread-B')
t1.start()
t2.start()
t1.join()
t2.join()
|
python
|
from flask import (Blueprint, render_template, redirect, request, url_for,
abort, flash)
from itsdangerous import URLSafeTimedSerializer
from app import app, models, db
from app.models import Survey, SurveyOptions
from app.forms import survey as survey_forms
from sqlalchemy import func
# Create a survey blueprint
surveybp = Blueprint('surveybp', __name__, url_prefix='/survey')
# Function to Create a New Survey
@surveybp.route('/create_survey', methods=['GET','POST'])
def create_survey():
form = survey_forms.CreateSurvey()
if form.validate_on_submit():
# Create a survey with options
        survey_name = form.survey_name.data
        created_by_user_id = form.created_by_user_id.data
        survey_options_list = []
survey_options_list.append(form.option1.data)
survey_options_list.append(form.option2.data)
if form.option3.data:
survey_options_list.append(form.option3.data)
if form.option4.data:
survey_options_list.append(form.option4.data)
if form.option5.data:
survey_options_list.append(form.option5.data)
if form.option6.data:
survey_options_list.append(form.option6.data)
if form.option7.data:
survey_options_list.append(form.option7.data)
if form.option8.data:
survey_options_list.append(form.option8.data)
# Get max survey_id so we can add survey and options to respective db tables
max_survey_id = db.session.query(func.max(Survey.id)).scalar()
new_survey = Survey(id=max_survey_id+1, survey_name=survey_name, created_by_user_id=created_by_user_id)
db.session.add(new_survey)
for option in survey_options_list:
new_survey_option = SurveyOptions(survey_id=max_survey_id+1, option_name=option)
db.session.add(new_survey_option)
# Insert the survey and survey_options into the database
db.session.commit()
flash('Added survey to database!', 'positive')
return redirect(url_for('index'))
return render_template('survey/create_survey.html', form=form, title='Create Survey')
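# Hedged sketch (assumption, not the project's actual form class): CreateSurvey above
# is assumed to be a Flask-WTF form shaped roughly like this, with option3..option8
# left optional:
#
#     from flask_wtf import FlaskForm
#     from wtforms import StringField, IntegerField, SubmitField
#     from wtforms.validators import DataRequired, Optional
#
#     class CreateSurvey(FlaskForm):
#         survey_name = StringField('Survey name', validators=[DataRequired()])
#         created_by_user_id = IntegerField('Created by', validators=[DataRequired()])
#         option1 = StringField('Option 1', validators=[DataRequired()])
#         option2 = StringField('Option 2', validators=[DataRequired()])
#         option3 = StringField('Option 3', validators=[Optional()])
#         # ... option4 through option8 follow the same optional pattern
#         submit = SubmitField('Create')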
|
python
|
# coding: utf-8
import os
import sys
from threading import Thread
sys.path.append(os.path.join(os.path.dirname(__file__), '../util'))
from pluginbase import PluginBase
class NotifierBase(PluginBase):
def __init__(self):
self.modtype = 'notifier'
super().__init__()
    def start_notification(self, event_id, message, datapaths=None):
        """This method is called directly by home-recorder."""
        datapaths = datapaths if datapaths is not None else []
self.notifier_thread = Thread(target=self.notify, args=(event_id, message, datapaths))
self.notifier_thread.start()
def join(self, timeout=None):
self.notifier_thread.join(timeout)
def notify(self, event_id, message, datapaths):
"""
        notify() is called indirectly by home-recorder for notifying events and errors.
Override this method in your notifier.
Arguments are:
- event_id: Unique id of the event.
- message: message string to be notified.
- datapaths: A list of abs paths.
"""
pass
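# Hedged example (not shipped with the project): a minimal notifier built on the base
# class above. The print-based body is illustrative; a real plugin would talk to mail,
# Slack, etc.
#
#     class ConsoleNotifier(NotifierBase):
#         def notify(self, event_id, message, datapaths):
#             # Runs in the worker thread started by start_notification().
#             print('[event %s] %s' % (event_id, message))
#             for path in datapaths:
#                 print('  attachment: %s' % path)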
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, FinByz Tech Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.utils import add_days
class TruckPartsInventory(Document):
def validate(self):
        if self.truck_part in ("Battery", "Tyre"):
if not self.serial_number:
frappe.throw(_("Please specify serial number"))
def before_save(self):
exp_life = frappe.db.get_value("Item", self.truck_part, 'lifespan')
self.expected_life = exp_life
if self.manufacturing_date:
self.expected_end_life = add_days(self.manufacturing_date, self.expected_life)
else:
self.expected_end_life = add_days(self.purchase_date, self.expected_life)
|
python
|
import pygame
import json
class Spritesheet:
# Takes in Spritesheet file name. Assumes a png and a json file exist
def __init__(self, filename):
self.filename = filename
self.sprite_sheet = pygame.image.load(filename).convert()
self.meta_data = self.filename.replace('png', 'json')
        with open(self.meta_data) as f:
            self.data = json.load(f)
# helper function for parse sprite
def get_sprite(self, x, y, w, h):
sprite = pygame.Surface((w, h))
sprite.set_colorkey((0,0,0))
sprite.blit(self.sprite_sheet,(0, 0),(x, y, w, h))
return sprite
# Takes in Sprite name and returns the selected sprite from the spritesheet
def parse_sprite(self, name):
sprite = self.data['frames'][name]['frame']
x, y, w, h = sprite["x"], sprite["y"], sprite["w"], sprite["h"]
image = self.get_sprite(x, y, w, h)
return image
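# Hedged usage sketch (file names and the frame key are placeholders): assumes a
# 'player.png' with a matching 'player.json' in the TexturePacker-style
# {"frames": {name: {"frame": {"x", "y", "w", "h"}}}} layout that parse_sprite() reads.
#
#     pygame.init()
#     screen = pygame.display.set_mode((640, 480))   # convert() requires a display
#     sheet = Spritesheet('player.png')
#     idle_frame = sheet.parse_sprite('player_idle.png')
#     screen.blit(idle_frame, (0, 0))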
|
python
|
"""
This configuration file contains common data for
parametrizing various tests such as isotherms.
"""
PRESSURE_PARAM = [
(1, {}), # Standard return
(100000, {
'pressure_unit': 'Pa'
}), # Unit specified
(0.128489, {
'pressure_mode': 'relative'
}), # Mode relative
(12.8489, {
'pressure_mode': 'relative%'
}), # Mode relative%
(0.128489, {
'pressure_unit': 'Pa',
'pressure_mode': 'relative'
}), # Mode and unit specified
(3, {
'limits': (2.3, 5.0)
}), # Range specified
]
LOADING_PARAM = [
(1, {}), # Standard return
(0.001, {
'loading_unit': 'mol'
}), # Loading unit specified
(0.876484, {
'loading_basis': 'volume_gas',
'loading_unit': 'cm3'
}), # Loading basis specified
(1000, {
'material_unit': 'kg'
}), # Adsorbent unit specified
(2, {
'material_basis': 'volume',
'material_unit': 'cm3',
}), # Adsorbent basis specified
(0.0280135, {
'loading_basis': 'fraction',
}), # Fractional weight (will be 1/1000 mol * 28.01 g/mol)
(2.80134, {
'loading_basis': 'percent',
}), # Percent weight
(0.01, {
'loading_basis': 'fraction',
'material_basis': 'molar',
'material_unit': 'mmol',
}), # Fractional molar (will be 1/1000 mol * 10 g/mol)
(0.081274, {
'loading_basis': 'fraction',
'material_basis': 'volume',
'material_unit': 'cm3',
}), # Fractional volume
(
56.02696, {
'loading_basis': 'mass',
'loading_unit': 'kg',
'material_basis': 'volume',
'material_unit': 'm3',
}
), # All specified
(3.0, {
'limits': (2.3, 5.0)
}), # Range specified
]
PRESSURE_AT_PARAM = [
(1, 1, {}), # Standard return
(1, 1, {
'branch': 'ads'
}), # Branch specified
(1, 100000, {
'pressure_unit': 'Pa'
}), # Pressure unit specified
(2, 0.256978, {
'pressure_mode': 'relative'
}), # Pressure mode specified
(0.002, 2, {
'loading_unit': 'mol'
}), # Loading unit specified
(0.02808, 1.00237, {
'loading_basis': 'mass',
'loading_unit': 'g'
}), # Loading mode specified
(1000, 1, {
'material_unit': 'kg'
}), # Loading basis specified
(2, 1, {
'material_basis': 'volume',
'material_unit': 'cm3'
}), # Adsorbent basis specified
(
0.1, 0.229334, {
'pressure_mode': 'relative',
'pressure_unit': 'Pa',
'loading_basis': 'mass',
'loading_unit': 'g',
'material_basis': 'volume',
'material_unit': 'cm3',
}
), # All specified
]
LOADING_AT_PARAM = [
(1, 1, {}), # Standard return
(1, 1, {
'branch': 'ads'
}), # Branch specified
(100000, 1, {
'pressure_unit': 'Pa'
}), # Pressure unit specified
(0.256978, 2, {
'pressure_mode': 'relative'
}), # Pressure mode specified
(2, 0.002, {
'loading_unit': 'mol'
}), # Loading unit specified
(1.00237, 0.02808, {
'loading_basis': 'mass',
'loading_unit': 'g'
}), # Loading mode specified
(1, 1000, {
'material_unit': 'kg'
}), # Loading basis specified
(1, 2, {
'material_basis': 'volume',
'material_unit': 'cm3'
}), # Adsorbent basis specified
(
0.229334, 0.1, {
'pressure_unit': 'Pa',
'pressure_mode': 'relative',
'loading_basis': 'mass',
'loading_unit': 'g',
'material_basis': 'volume',
'material_unit': 'cm3',
}
), # All specified
]
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import platform
import sys
if platform.system() != 'Linux':
sys.exit(-1)
from .killer import Killer
from .utils import load_config
parser = argparse.ArgumentParser(description='ssh key killer')
parser.add_argument('-c', '--config', dest='config', metavar='config', default="/etc/ssh-key-killer/",
type=str, help="a config file or a directory contains config files")
parser.add_argument('--verify', action="store_true", help="verify the config file")
args = parser.parse_args()
if args.verify:
load_config(args.config)
else:
Killer(configfile=args.config).invalid()
|
python
|
# coding: utf-8
'''
@Date : 2021-05-05
@Author : Zekang Li
@Mail : [email protected]
@Homepage: zekangli.com
'''
import torch
import string
import torch.nn as nn
from transformers import *
SPECIAL_TOKENS = ["<bos>", "<eos>", "<info>", "<speaker1>", "<speaker2>", "<empty>", "<pad>"]
SPECIAL_TOKENS_DICT = {'bos_token': "<bos>", 'eos_token': "<eos>", 'additional_special_tokens': ["<info>", "<speaker1>", "<speaker2>", "<empty>"], 'pad_token': "<pad>"}
def tokenize(obj,tokenizer):
if isinstance(obj, str):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
if isinstance(obj, dict):
return dict((n, tokenize(o, tokenizer)) for n, o in obj.items())
return list(tokenize(o, tokenizer) for o in obj)
def build_input_from_dialogue(conv, tokenizer):
bos, eos, info, speaker1, speaker2, empty, pad = [x[0] for x in tokenize(SPECIAL_TOKENS, tokenizer)]
sentence_index = [0]
conv_seq = []
label_seq = []
token_type_seq = []
for i in range(len(conv)):
if len(conv_seq) + len(conv[i][:128]) + 10 > 1000:
break
if i % 2 == 0:
speaker = 0
conv_seq.append(speaker1)
else:
speaker = 1
conv_seq.append(speaker2)
label_seq.append(-100)
token_type_seq.append(speaker)
conv_seq.extend(conv[i][:128])
label_seq.extend(conv[i][:128])
token_type_seq.extend([speaker]*len(conv[i][:128]))
conv_seq.append(eos)
label_seq.append(eos)
token_type_seq.append(speaker)
conv_seq.append(empty)
label_seq.append(-100)
token_type_seq.append(speaker)
sentence_index.append(len(conv_seq)-1)
conv_seq = torch.LongTensor(conv_seq)
label_seq = torch.LongTensor(label_seq)
sentence_index = torch.LongTensor(sentence_index)
token_type_seq = torch.LongTensor(token_type_seq)
return conv_seq, label_seq, sentence_index, token_type_seq
def norm(a):
mean = torch.mean(a, dim=1).unsqueeze(1)
std = torch.std(a, dim=1).unsqueeze(1)
return (a - mean) / std
def add_punc(text):
r = ""
for i in text:
if i in string.punctuation:
r += " "
r += i
else:
r += i
return r.replace(" ", " ")
class FlowScore:
def __init__(self, model_path):
self.cuda = "cuda" if torch.cuda.is_available() else "cpu"
self.model = torch.load(model_path, map_location=self.cuda)
self.model.planing_model.device = self.cuda
self.tokenizer = GPT2Tokenizer.from_pretrained("./models/")
self.tokenizer.add_special_tokens(SPECIAL_TOKENS_DICT)
self.model.eval()
self.cos = nn.CosineSimilarity(dim=2, eps=1e-9)
def score(self, dialogue):
with torch.no_grad():
conv_seq, _, sentence_index, token_type_seq = build_input_from_dialogue(tokenize(dialogue, self.tokenizer),
self.tokenizer)
            conv_seq = conv_seq.unsqueeze(0).to(self.cuda)
            sentence_index = sentence_index.unsqueeze(0).to(self.cuda)
            token_type_seq = token_type_seq.unsqueeze(0).to(self.cuda)
conv_hidden_state = self.model.speak_model(conv_seq, token_type_ids=token_type_seq)[0]
sentence_hidden = conv_hidden_state.index_select(1, sentence_index[0])
output, loss_plan = self.model.planing_model(sentence_hidden)
sentence_hidden_delta = sentence_hidden[:, 1:, :] - sentence_hidden[:, :-1, :]
output_delta = output[:, :-1, :] - sentence_hidden[:, :-1, :]
sentence_hidden_len = torch.sqrt(torch.sum(sentence_hidden_delta ** 2, dim=-1))
output_len = torch.sqrt(torch.sum(output_delta ** 2, dim=-1))
min_len = torch.min(sentence_hidden_len, output_len)
x = self.cos(sentence_hidden_delta, output_delta) * (min_len ** 2 / (sentence_hidden_len * output_len))
DPKS_score = torch.pow(2, -torch.mean(torch.log(((x + 1) / 2)[0, 3::2])))
return DPKS_score.item()
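# Hedged usage sketch: the model path and the local "./models/" tokenizer directory
# are assumptions carried over from the constructor above; the dialogue is a dummy
# alternating-speaker list in the shape expected by build_input_from_dialogue().
#
#     scorer = FlowScore("flow_model.pt")
#     dialogue = ["Hi , how are you ?", "I am fine , thanks .", "Great to hear !"]
#     print(scorer.score(dialogue))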
|
python
|
import os
import json, decimal
import boto3
from boto3.dynamodb.conditions import Key, Attr
tableName = os.environ.get('ACCESS_TABLE_NAME')
def handler(event, context):
client = boto3.resource('dynamodb')
table = client.Table(tableName)
print(table.table_status)
print(event)
username = event['requestContext']['authorizer']['claims']['cognito:username']
res = table.scan(FilterExpression=Key('shared_user').eq(username))
data = res['Items']
    while 'LastEvaluatedKey' in res:
        res = table.scan(FilterExpression=Key('shared_user').eq(username),
                         ExclusiveStartKey=res['LastEvaluatedKey'])
data.extend(res['Items'])
usersWithAccess = []
for entry in data:
usersWithAccess.append(entry['username'])
body = {
'users_with_access': usersWithAccess
}
response = {
"statusCode": 200,
"body": json.dumps(body),
"headers": {
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Headers": "*"
}
}
print(response)
return response
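# Hedged note: the handler above expects an API Gateway proxy event that passed through
# a Cognito user-pool authorizer, i.e. roughly
#   {"requestContext": {"authorizer": {"claims": {"cognito:username": "alice"}}}}
# with that username used to filter the 'shared_user' attribute in the scan.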
|
python
|
A_0219_9 = {0: {'A': 0.2420334938061355, 'C': -0.12964532145406094, 'E': -0.30716253311729963, 'D': -0.20136070573849837, 'G': 0.09315559829464488, 'F': 0.414085377622071, 'I': -0.11091042509341746, 'H': -0.40154576951031906, 'K': -0.11190222068525454, 'M': -0.16860437072777687, 'L': 0.20901824078912526, 'N': 0.032287878091663676, 'Q': 0.11764813950492131, 'P': -0.4510381206036012, 'S': -0.2651472265026922, 'R': -0.4731550123861466, 'T': -0.28413749083180867, 'W': 0.2955906501791419, 'V': -0.09635304020464032, 'Y': 0.7408657058985775}, 1: {'A': -0.41943263805417924, 'C': -0.6061935504333876, 'E': -0.6076150068101372, 'D': -0.607213243223602, 'G': -0.42689370995900866, 'F': -0.26210331331656417, 'I': -0.10967350782637802, 'H': -0.6288986336633852, 'K': -0.6139266374902476, 'M': 1.190384146568963, 'L': 1.4017298248353962, 'N': -0.6067032731427604, 'Q': 0.05055904458391644, 'P': -0.6281390090418096, 'S': -0.607352492311383, 'R': -0.6344244536958641, 'T': -0.4094068917461391, 'W': -0.607724675232467, 'V': -0.3341434179358044, 'Y': -0.5821559530728522}, 2: {'A': 0.03151441917923293, 'C': -0.5230670093085569, 'E': -0.43100928136539085, 'D': 0.09133983002015222, 'G': -0.10854506054292079, 'F': -0.039475930714978576, 'I': -0.25845810564074134, 'H': 0.05885721963591881, 'K': -0.3618581378273778, 'M': 0.9018137470311799, 'L': 0.2321708275679429, 'N': 0.07831164228003454, 'Q': 0.22144506774368122, 'P': 0.05214050830759903, 'S': 0.08094336297852421, 'R': -0.558514130048989, 'T': -0.15441584094409533, 'W': 0.18996836396566266, 'V': -0.5017537954964252, 'Y': 0.369764095683107}, 3: {'A': -0.28626087976730247, 'C': -0.025125042713760825, 'E': 0.47178602649403406, 'D': 0.7623297880080667, 'G': 0.3136472556573316, 'F': -0.3657171680025016, 'I': -0.21747542126833005, 'H': -0.5194043975409929, 'K': -0.293285801247425, 'M': -0.22636485330335548, 'L': -0.1553118171029022, 'N': -0.0979392609940178, 'Q': 0.17191291166213873, 'P': 0.05158066081625252, 'S': 0.10243891992817417, 'R': -0.3305725976478324, 'T': -0.2792916406115506, 'W': -0.31829212874127993, 'V': -0.47387241520285656, 'Y': -0.40070222297241603}, 4: {'A': -0.0664630090303782, 'C': 0.08906413209160546, 'E': -0.01631783995225214, 'D': 0.2811505654670672, 'G': 0.2699699503925373, 'F': -0.23462556888397043, 'I': -0.29073835177154284, 'H': -0.3009509959974252, 'K': -0.14121867364784013, 'M': 0.0678370240476132, 'L': -0.24882748064831306, 'N': 0.18968866265159356, 'Q': -0.37324808981501906, 'P': -0.09042667420234526, 'S': 0.09594153430999246, 'R': -0.5370676695087087, 'T': 0.18691516307466047, 'W': 0.7048833323442629, 'V': 0.08399414782445343, 'Y': 0.5009958670368024}, 5: {'A': 0.07878350356281144, 'C': 0.2795393513032944, 'E': -0.09436084459364735, 'D': -0.009535790747915907, 'G': -0.03321434575342667, 'F': -0.25484921353296797, 'I': 0.35957002643872255, 'H': -0.06394434795697493, 'K': -0.1414000989242711, 'M': -0.028836961840174796, 'L': 0.1073770390964023, 'N': -0.12028478909114225, 'Q': 0.07539197454994266, 'P': -0.1708262132646714, 'S': 0.2049606141156023, 'R': -0.5078537547759676, 'T': 0.07290481680957006, 'W': -0.29945826510496754, 'V': 0.1495075300850585, 'Y': 0.013283264778613241}, 6: {'A': 0.01411226130059054, 'C': 0.06692290277721402, 'E': -0.06340146779041687, 'D': 0.1746866532150916, 'G': -0.25992464000755505, 'F': 0.5009868235293345, 'I': 0.06355712998714634, 'H': 0.35077213192757506, 'K': -0.26925800741860245, 'M': -0.013849751311046675, 'L': -0.32758688479932485, 'N': -0.22385598526874687, 'Q': -0.06336166705835866, 'P': 0.47510097331107193, 
'S': -0.2906131684624784, 'R': -0.47252967044892613, 'T': -0.11343812404340384, 'W': 0.221827025663051, 'V': -0.2039985639445891, 'Y': 0.29109835352637226}, 7: {'A': -0.2214844136003379, 'C': 0.253972043605802, 'E': -0.11180878646892282, 'D': 0.04866386625383542, 'G': 0.21129609322137066, 'F': 0.09675763577527521, 'I': -0.478466495950241, 'H': -0.22278119575014163, 'K': 0.0024895662503172412, 'M': -0.08637762364986003, 'L': -0.16451622726927184, 'N': 0.14658405355475607, 'Q': -0.22460008840640644, 'P': 0.06863736746483798, 'S': 0.02806708910872769, 'R': 0.17046998008080952, 'T': 0.12640810468428135, 'W': -0.325349814172603, 'V': -0.051103104718504995, 'Y': 0.4213757307211304}, 8: {'A': 0.48788428252771016, 'C': -0.02730369832594655, 'E': -0.6113264720739239, 'D': -0.6061935504333876, 'G': -4.0, 'F': -0.553306353566414, 'I': 0.3385355359756546, 'H': -0.612884043508607, 'K': -0.6261305872802506, 'M': 0.006175759526607426, 'L': 0.1606962734073028, 'N': -0.5384273463457844, 'Q': -0.530233857024553, 'P': -4.0, 'S': -0.22209665774387793, 'R': -0.6118447637583928, 'T': 0.3905794756125614, 'W': -0.6207885684222972, 'V': 1.2769558604632072, 'Y': -0.6997587692314469}, -1: {'slope': 0.10404992803361, 'intercept': -0.3897501239679909}}
|
python
|
# from typing import Dict
# import os
# from unittest import TestCase, main
#
# import json
#
# from monolithcaching import CacheManager
#
#
# class TestCacheManager(TestCase):
#
# @staticmethod
# def get_meta_data(meta_data_path: str) -> Dict:
# with open(meta_data_path) as json_file:
# meta = json.load(json_file)
# return meta
#
# def test_cache(self):
# test = CacheManager()
# test.create_cache()
#
# cache_directory_check = test.worker.base_dir
# meta_file_path = test.worker.base_dir + "meta.json"
# meta_data = self.get_meta_data(meta_data_path=meta_file_path)
#
# self.assertEqual(True, os.path.isdir(cache_directory_check))
# self.assertEqual(test.worker.base_dir, test.worker.base_dir)
# self.assertEqual(None, test.worker._existing_cache)
# self.assertEqual(True, os.path.isfile(meta_file_path))
# self.assertEqual({}, meta_data)
#
# test.insert_meta(key="one", value=1)
# test.insert_meta(key="two", value=2)
#
# self.assertEqual({"one": 1, "two": 2}, self.get_meta_data(meta_data_path=meta_file_path))
# self.assertEqual({"one": 1, "two": 2}, test.meta)
#
# existing_cach_path = test.cache_path
# del test
#
# if os.environ.get("CI") is None:
# self.assertEqual(False, os.path.isdir(existing_cach_path))
#
# def test_lock(self):
# test = CacheManager()
# test.create_cache()
#
# self.assertEqual({}, test.meta)
# test.lock_cache()
# self.assertEqual({"locked": True}, test.meta)
# existing_cach_path = test.cache_path
#
# del test
#
# new_test = CacheManager()
# new_test.create_cache(existing_cache=existing_cach_path)
# self.assertEqual({"locked": True}, new_test.meta)
# self.assertEqual(True, new_test.worker._locked)
# self.assertEqual(True, os.path.isdir(existing_cach_path))
# new_test.unlock_cache()
# self.assertEqual({"locked": False}, new_test.meta)
# self.assertEqual(False, new_test.worker._locked)
# self.assertEqual(True, os.path.isdir(existing_cach_path))
#
# del new_test
#
# if os.environ.get("CI") is None:
# self.assertEqual(False, os.path.isdir(existing_cach_path))
#
#
# if __name__ == "__main__":
# main()
|
python
|
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
traversal1 = l1
traversal2 = l2
dummy = head = ListNode(0)
carry = 0
        while traversal1 is not None or traversal2 is not None or carry != 0:
if traversal1:
carry += traversal1.val
traversal1 = traversal1.next
if traversal2:
carry += traversal2.val
traversal2 = traversal2.next
head.next = ListNode(carry % 10)
head = head.next
            carry = carry // 10  # floor division keeps carry an int under Python 3
return dummy.next
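# Hedged supporting sketch: the method above is a LeetCode-style Solution method and
# assumes the usual singly linked ListNode. A minimal definition and driver could be:
#
#     class ListNode(object):
#         def __init__(self, val=0, next=None):
#             self.val = val
#             self.next = next
#
#     # 342 + 465 = 807; digits are stored in reverse order
#     l1 = ListNode(2, ListNode(4, ListNode(3)))
#     l2 = ListNode(5, ListNode(6, ListNode(4)))
#     result = Solution().addTwoNumbers(l1, l2)   # assumes the method lives on Solution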
|
python
|
# coding=utf-8
import json
from common import constant
from common import errcode
from common.mylog import logger
from dao.question.question_dao import QuestionDao
from dao.question.user_question_map_dao import UserQuestionMapDao
from handlers.base.base_handler import BaseHandler
from myutil import tools
class GetQuestionListByCategoryHandler(BaseHandler):
methods = ['POST']
def __init__(self):
expect_request_para = {
"category_id": None,
"page_num": None,
"page_size": None,
"common_param": None,
}
need_para = (
"category_id",
"page_num",
"page_size",
"common_param",
)
super(GetQuestionListByCategoryHandler, self).__init__(expect_request_para, need_para)
def _parse_and_check_parameters(self):
"""
        Parameter validation.
        :return: True if all parameters are valid, False otherwise
"""
if not super(GetQuestionListByCategoryHandler, self)._parse_and_check_parameters():
self.ret_code = errcode.PARAMETER_ERROR
self.ret_msg = "param error!"
return False
self.category_id = tools.str_to_int(self.para_map["category_id"], 0)
if self.category_id <= 0:
self.ret_code = errcode.PARAMETER_ERROR
self.ret_msg = "category param error!"
return False
self.page_num = tools.str_to_int(self.para_map["page_num"], 0)
if self.page_num <= 0:
self.ret_code = errcode.PARAMETER_ERROR
self.ret_msg = "page_num param error!"
return False
self.page_size = tools.str_to_int(self.para_map["page_size"], 0)
if self.page_size <= 0:
self.ret_code = errcode.PARAMETER_ERROR
self.ret_msg = "page_size param error!"
return False
if self.user_type == constant.USER_SOURCE_LOGIN_USER:
self.uid = self.rid
else:
self.uid = self.tid
return True
def _process_imp(self):
question_list = QuestionDao.get_by_category(self.category_id, self.page_num, self.page_size)
        # Normalize the answer field from a JSON string into a Python list
for question_item in question_list:
try:
question_item["answer"] = json.loads(question_item["answer"])
            except Exception as ex:
logger.error(ex, exc_info=1)
question_item["answer"] = []
continue
            # Record that this user has been served this question
UserQuestionMapDao.insert(self.user_type, self.uid, question_item["id"])
self.ret_code = errcode.NO_ERROR
self.ret_msg = 'ok'
self.ret_data = {
"questions": question_list
}
return
|
python
|
import pytest
from ethereum import tester
from functools import (
reduce
)
from fixtures import (
MAX_UINT,
fake_address,
token_events,
owner_index,
owner,
wallet_address,
get_bidders,
fixture_decimals,
contract_params,
get_token_contract,
token_contract,
create_contract,
print_logs,
create_accounts,
txnCost,
test_bytes,
event_handler,
)
from utils_logs import LogFilter
@pytest.fixture()
def proxy_contract(chain, create_contract):
AuctionProxy = chain.provider.get_contract_factory('Proxy')
proxy_contract = create_contract(AuctionProxy, [])
print_logs(proxy_contract, 'Payable', 'Proxy')
return proxy_contract
@pytest.fixture()
def proxy_erc223_contract(chain, create_contract):
AuctionProxy = chain.provider.get_contract_factory('ProxyERC223')
proxy_erc223_contract = create_contract(AuctionProxy, [])
return proxy_erc223_contract
@pytest.mark.parametrize('decimals', fixture_decimals)
def test_token_init(
chain,
web3,
wallet_address,
get_token_contract,
proxy_contract,
decimals):
(A, B, C, D, E) = web3.eth.accounts[:5]
auction = proxy_contract
multiplier = 10**(decimals)
initial_supply = 5000 * multiplier
# Transaction fails when auction address is invalid
with pytest.raises(TypeError):
token = get_token_contract([
0,
wallet_address,
initial_supply
], {'from': E}, decimals=decimals)
with pytest.raises(TypeError):
token = get_token_contract([
proxy_contract.address,
0,
wallet_address,
initial_supply
], {'from': E}, decimals=decimals)
with pytest.raises(TypeError):
token = get_token_contract([
fake_address,
wallet_address,
initial_supply
], {'from': E}, decimals=decimals)
# Test max uint - 2 as supply (has to be even)
token = get_token_contract([
proxy_contract.address,
wallet_address,
MAX_UINT - 1
], {'from': E}, decimals=decimals)
with pytest.raises(TypeError):
token = get_token_contract([
proxy_contract.address,
wallet_address,
MAX_UINT + 1
], {'from': E}, decimals=decimals)
# Transaction fails if initial_supply == 0
with pytest.raises(tester.TransactionFailed):
token = get_token_contract([
proxy_contract.address,
wallet_address,
0
], {'from': E}, decimals=decimals)
with pytest.raises(TypeError):
token = get_token_contract([
proxy_contract.address,
wallet_address,
-2
], {'from': E}, decimals=decimals)
# Fails when supply is an odd number; auction and wallet addresses
# are assigned a different number of tokens
with pytest.raises(tester.TransactionFailed):
token = get_token_contract([
proxy_contract.address,
wallet_address,
10000001,
], {'from': E}, decimals=decimals)
token = get_token_contract([
proxy_contract.address,
wallet_address,
initial_supply
], {'from': E}, decimals=decimals)
assert token.call().decimals() == decimals
@pytest.mark.parametrize('decimals', fixture_decimals)
def test_token_variable_access(
chain,
web3,
wallet_address,
get_token_contract,
proxy_contract,
decimals):
owner = web3.eth.coinbase
(A, B, C) = web3.eth.accounts[1:4]
multiplier = 10**(decimals)
initial_supply = 3000 * multiplier
token = get_token_contract([
proxy_contract.address,
wallet_address,
initial_supply
], {'from': owner}, decimals=decimals)
assert token.call().name() == 'Yobicash Token'
assert token.call().symbol() == 'YBC'
assert token.call().decimals() == decimals
assert token.call().totalSupply() == initial_supply
def test_token_balanceOf(
chain,
web3,
wallet_address,
token_contract,
proxy_contract,
contract_params):
token = token_contract(proxy_contract.address)
multiplier = 10**(contract_params['decimals'])
supply = contract_params['supply'] * multiplier
half_balance = supply // 2
assert token.call().balanceOf(proxy_contract.address) == half_balance
assert token.call().balanceOf(wallet_address) == half_balance
def transfer_tests(
bidders,
balances,
multiplier,
token,
event_handler):
(A, B, C) = bidders
ev_handler = event_handler(token)
with pytest.raises(TypeError):
token.transact({'from': A}).transfer(0, 10)
with pytest.raises(TypeError):
token.transact({'from': A}).transfer(fake_address, 10)
with pytest.raises(TypeError):
token.transact({'from': A}).transfer(B, MAX_UINT + 1)
with pytest.raises(TypeError):
token.transact({'from': A}).transfer(B, -5)
with pytest.raises(tester.TransactionFailed):
balance_A = token.call().balanceOf(A)
token.transact({'from': A}).transfer(B, balance_A + 1)
with pytest.raises(tester.TransactionFailed):
balance_B = token.call().balanceOf(B)
token.transact({'from': A}).transfer(B, MAX_UINT + 1 - balance_B)
txn_hash = token.transact({'from': A}).transfer(B, 0)
ev_handler.add(txn_hash, token_events['transfer'])
assert token.call().balanceOf(A) == balances[0]
assert token.call().balanceOf(B) == balances[1]
txn_hash = token.transact({'from': A}).transfer(B, 120)
ev_handler.add(txn_hash, token_events['transfer'])
assert token.call().balanceOf(A) == balances[0] - 120
assert token.call().balanceOf(B) == balances[1] + 120
txn_hash = token.transact({'from': B}).transfer(C, 66)
ev_handler.add(txn_hash, token_events['transfer'])
assert token.call().balanceOf(B) == balances[1] + 120 - 66
assert token.call().balanceOf(C) == balances[2] + 66
ev_handler.check()
def transfer_erc223_tests(
bidders,
balances,
multiplier,
token,
proxy,
token_erc223,
proxy_erc223,
event_handler):
(A, B, C) = bidders
ev_handler = event_handler(token_erc223)
test_data = test_bytes() # 32 bytes
test_data2 = test_bytes(value=20)
assert not test_data == test_data2
balance_A = token.call().balanceOf(A)
balance_proxy = token.call().balanceOf(proxy.address)
balance_proxy_erc223 = token.call().balanceOf(proxy_erc223.address)
with pytest.raises(TypeError):
token.transact({'from': A}).transfer(B, balance_A, 0)
# Make sure it fails when internal call of transfer(to, value) fails
with pytest.raises(tester.TransactionFailed):
token.transact({'from': A}).transfer(B, balance_A + 1, test_data)
with pytest.raises(tester.TransactionFailed):
token.transact({'from': A}).transfer(proxy_erc223.address, balance_A + 1, test_data)
# Receiver contracts without a tokenFallback
with pytest.raises(tester.TransactionFailed):
token.transact({'from': A}).transfer(proxy.address, balance_A, test_data)
# TODO FIXME erc223 transfer event not handled correctly
txn_hash = token.transact({'from': A}).transfer(proxy_erc223.address, balance_A, test_data)
# ev_handler.add(txn_hash, token_events['transfer'])
assert token.call().balanceOf(A) == 0
assert token.call().balanceOf(proxy_erc223.address) == balance_proxy_erc223 + balance_A
# Arbitrary tests to see if the tokenFallback function from the proxy is called
assert proxy_erc223.call().sender() == A
assert proxy_erc223.call().value() == balance_A
balance_B = token.call().balanceOf(B)
balance_proxy_erc223 = token.call().balanceOf(proxy_erc223.address)
txn_hash = token.transact({'from': B}).transfer(proxy_erc223.address, 0, test_data2)
# ev_handler.add(txn_hash, token_events['transfer'])
assert token.call().balanceOf(B) == balance_B
assert token.call().balanceOf(proxy_erc223.address) == balance_proxy_erc223
assert proxy_erc223.call().sender() == B
assert proxy_erc223.call().value() == 0
txn_hash = token.transact({'from': A}).transfer(proxy_erc223.address, 0)
# ev_handler.add(txn_hash, token_events['transfer'])
txn_hash = token.transact({'from': A}).transfer(proxy.address, 0)
# ev_handler.add(txn_hash, token_events['transfer'])
ev_handler.check()
@pytest.mark.parametrize('decimals', fixture_decimals)
def test_token_transfer(
chain,
web3,
wallet_address,
get_bidders,
get_token_contract,
token_contract,
proxy_contract,
proxy_erc223_contract,
decimals,
event_handler):
(A, B, C) = get_bidders(3)
multiplier = 10**(decimals)
token = get_token_contract([
proxy_contract.address,
wallet_address,
5000 * multiplier,
], decimals=decimals)
assert token.call().decimals() == decimals
token.transact({'from': wallet_address}).transfer(A, 3000)
token.transact({'from': wallet_address}).transfer(B, 2000)
token.transact({'from': wallet_address}).transfer(C, 1000)
transfer_tests(
(A, B, C),
[3000, 2000, 1000],
multiplier,
token,
event_handler)
token_erc223 = token_contract(proxy_erc223_contract.address)
token_erc223.transact({'from': wallet_address}).transfer(A, 3000)
token_erc223.transact({'from': wallet_address}).transfer(B, 2000)
token_erc223.transact({'from': wallet_address}).transfer(C, 1000)
transfer_erc223_tests(
(A, B, C),
[3000, 2000, 1000],
multiplier,
token,
proxy_contract,
token_erc223,
proxy_erc223_contract,
event_handler)
@pytest.mark.parametrize('decimals', fixture_decimals)
def test_token_approve(
web3,
wallet_address,
get_token_contract,
proxy_contract,
decimals,
event_handler):
(A, B, C) = web3.eth.accounts[1:4]
multiplier = 10**(decimals)
token = get_token_contract([
proxy_contract.address,
wallet_address,
5000 * multiplier
], decimals=decimals)
assert token.call().decimals() == decimals
ev_handler = event_handler(token)
token.transact({'from': wallet_address}).transfer(A, 3000)
token.transact({'from': wallet_address}).transfer(B, 2000)
token.transact({'from': wallet_address}).transfer(C, 1000)
with pytest.raises(TypeError):
token.transact({'from': A}).approve(0, B)
with pytest.raises(TypeError):
token.transact({'from': A}).approve(fake_address, B)
with pytest.raises(TypeError):
token.transact({'from': A}).approve(B, -3)
# We can approve more than we have
# with pytest.raises(tester.TransactionFailed):
txn_hash = token.transact({'from': A}).approve(B, 3000 + 1)
ev_handler.add(txn_hash, token_events['approve'])
txn_hash = token.transact({'from': A}).approve(A, 300)
ev_handler.add(txn_hash, token_events['approve'])
assert token.call().allowance(A, A) == 300
with pytest.raises(tester.TransactionFailed):
txn_hash = token.transact({'from': A}).approve(B, 300)
txn_hash = token.transact({'from': A}).approve(B, 0)
txn_hash = token.transact({'from': A}).approve(B, 300)
ev_handler.add(txn_hash, token_events['approve'])
txn_hash = token.transact({'from': B}).approve(C, 650)
ev_handler.add(txn_hash, token_events['approve'])
assert token.call().allowance(A, B) == 300
assert token.call().allowance(B, C) == 650
ev_handler.check()
@pytest.mark.parametrize('decimals', fixture_decimals)
def test_token_allowance(
web3,
wallet_address,
get_bidders,
get_token_contract,
proxy_contract,
decimals):
(A, B) = get_bidders(2)
multiplier = 10**(decimals)
token = get_token_contract([
proxy_contract.address,
wallet_address,
5000 * multiplier
], decimals=decimals)
assert token.call().decimals() == decimals
token.transact({'from': wallet_address}).transfer(A, 3000)
token.transact({'from': wallet_address}).transfer(B, 2000)
with pytest.raises(TypeError):
token.call().allowance(0, B)
with pytest.raises(TypeError):
token.call().allowance(fake_address, B)
with pytest.raises(TypeError):
token.call().allowance(A, 0)
with pytest.raises(TypeError):
token.call().allowance(A, fake_address)
assert token.call().allowance(A, B) == 0
assert token.call().allowance(B, A) == 0
token.transact({'from': A}).approve(B, 300)
assert token.call().allowance(A, B) == 300
@pytest.mark.parametrize('decimals', fixture_decimals)
def test_token_transfer_from(
chain,
web3,
wallet_address,
get_bidders,
get_token_contract,
proxy_contract,
decimals,
event_handler):
(A, B, C) = get_bidders(3)
multiplier = 10**(decimals)
token = get_token_contract([
proxy_contract.address,
wallet_address,
5000 * multiplier
], decimals=decimals)
assert token.call().decimals() == decimals
ev_handler = event_handler(token)
token.transact({'from': wallet_address}).transfer(A, 3000)
token.transact({'from': wallet_address}).transfer(B, 2000)
token.transact({'from': wallet_address}).transfer(C, 1000)
txn_hash = token.transact({'from': B}).approve(A, 300)
ev_handler.add(txn_hash, token_events['approve'])
assert token.call().allowance(B, A) == 300
with pytest.raises(TypeError):
token.transact({'from': A}).transferFrom(0, C, 10)
with pytest.raises(TypeError):
token.transact({'from': A}).transferFrom(B, 0, 10)
with pytest.raises(TypeError):
token.transact({'from': A}).transferFrom(fake_address, C, 10)
with pytest.raises(TypeError):
token.transact({'from': A}).transferFrom(B, fake_address, 10)
with pytest.raises(TypeError):
token.transact({'from': A}).transferFrom(B, C, MAX_UINT + 1)
with pytest.raises(TypeError):
token.transact({'from': A}).transferFrom(B, C, -5)
with pytest.raises(tester.TransactionFailed):
allowance_B = token.call().allowance(B, A)
token.transact({'from': A}).transferFrom(B, C, allowance_B + 1)
# We can allow more than the balance, but we cannot transfer more
with pytest.raises(tester.TransactionFailed):
balance_B = token.call().balanceOf(B)
token.transact({'from': B}).approve(A, balance_B + 10)
token.transact({'from': A}).transferFrom(B, C, balance_B + 10)
# Test for overflow
with pytest.raises(tester.TransactionFailed):
balance_B = token.call().balanceOf(B)
overflow = MAX_UINT + 1 - balance_B
token.transact({'from': B}).approve(A, overflow)
token.transact({'from': A}).transferFrom(B, C, overflow)
with pytest.raises(tester.TransactionFailed):
txn_hash = token.transact({'from': B}).approve(A, 300)
txn_hash = token.transact({'from': B}).approve(A, 0)
txn_hash = token.transact({'from': B}).approve(A, 300)
ev_handler.add(txn_hash, token_events['approve'])
assert token.call().allowance(B, A) == 300
balance_A = token.call().balanceOf(A)
balance_B = token.call().balanceOf(B)
balance_C = token.call().balanceOf(C)
txn_hash = token.transact({'from': A}).transferFrom(B, C, 0)
ev_handler.add(txn_hash, token_events['transfer'])
assert token.call().balanceOf(A) == balance_A
assert token.call().balanceOf(B) == balance_B
assert token.call().balanceOf(C) == balance_C
txn_hash = token.transact({'from': A}).transferFrom(B, C, 150)
ev_handler.add(txn_hash, token_events['transfer'])
assert token.call().balanceOf(A) == balance_A
assert token.call().balanceOf(B) == balance_B - 150
assert token.call().balanceOf(C) == balance_C + 150
ev_handler.check()
@pytest.mark.parametrize('decimals', fixture_decimals)
def test_burn(
chain,
web3,
wallet_address,
get_bidders,
get_token_contract,
proxy_contract,
decimals,
txnCost,
event_handler):
decimals = 18
eth = web3.eth
(A, B) = get_bidders(2)
multiplier = 10**(decimals)
initial_supply = 5000 * multiplier
token = get_token_contract([
proxy_contract.address,
wallet_address,
initial_supply
], decimals=decimals)
assert token.call().decimals() == decimals
ev_handler = event_handler(token)
token.transact({'from': wallet_address}).transfer(A, 3000)
token.transact({'from': wallet_address}).transfer(B, 2000)
with pytest.raises(TypeError):
token.transact({'from': B}).burn(-3)
with pytest.raises(TypeError):
token.transact({'from': B}).burn(MAX_UINT + 1)
with pytest.raises(tester.TransactionFailed):
token.transact({'from': B}).burn(0)
with pytest.raises(tester.TransactionFailed):
token.transact({'from': B}).burn(2000 + 1)
# Balance should not change besides transaction costs
tokens_B = token.call().balanceOf(B)
balance_B = eth.getBalance(B)
burnt = 250
txn_hash = token.transact({'from': B}).burn(burnt)
txn_cost = txnCost(txn_hash)
ev_handler.add(txn_hash, token_events['burn'])
assert token.call().totalSupply() == initial_supply - burnt
assert token.call().balanceOf(B) == tokens_B - burnt
assert balance_B == eth.getBalance(B) + txn_cost
tokens_B = token.call().balanceOf(B)
balance_B = eth.getBalance(B)
total_supply = token.call().totalSupply()
txn_hash = token.transact({'from': B}).burn(tokens_B)
txn_cost = txnCost(txn_hash)
assert token.call().totalSupply() == total_supply - tokens_B
assert token.call().balanceOf(B) == 0
assert balance_B == eth.getBalance(B) + txn_cost
ev_handler.check()
def test_event_handler(token_contract, proxy_contract, event_handler):
token = token_contract(proxy_contract.address)
ev_handler = event_handler(token)
fake_txn = 0x0343
# Add fake events with no transactions
ev_handler.add(fake_txn, token_events['deploy'])
ev_handler.add(fake_txn, token_events['setup'])
ev_handler.add(fake_txn, token_events['transfer'])
ev_handler.add(fake_txn, token_events['approve'])
ev_handler.add(fake_txn, token_events['burn'])
# This should fail
with pytest.raises(Exception):
ev_handler.check(1)
|
python
|
import progressbar
def test_with():
with progressbar.ProgressBar(max_value=10) as p:
for i in range(10):
p.update(i)
def test_with_stdout_redirection():
with progressbar.ProgressBar(max_value=10, redirect_stdout=True) as p:
for i in range(10):
p.update(i)
def test_with_extra_start():
with progressbar.ProgressBar(max_value=10) as p:
p.start()
p.start()
|
python
|
#!/usr/bin/env python
#
# Copyright 2016 Steve Kyle. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Serengeti TAXII Client Script
"""
import sys
import os
from datetime import datetime
import warnings
import glob
from . import args
from . import config
from . import service
def main(*argv, **kwargs):
    """Client Entry Point"""
    # Local names must not shadow the imported `args` and `config` modules;
    # reassigning them inside the function made the original references fail
    # before the locals were bound.
    parser = args.get_arg_parser()
    parsed_args = parser.parse_args()
    client_config = config.read_config()
    client_config.merge_args(parsed_args)
    request = client_config.get_request()
    response = service.handler(request)
    print(response)
if __name__ == '__main__':
main()
|
python
|
from .find_maximum_value_binary_tree import find_maximum_value, BST
def test_find_maximum_value_tree_with_one_value():
one_value = BST([5])
assert find_maximum_value(one_value) == 5
def test_find_maximum_value_tree_with_two_values():
one_value = BST([10, 2])
assert find_maximum_value(one_value) == 10
def test_find_maximum_value_balanced():
balanced = BST([10, 7, 3, 16, 12, 8, 20])
assert find_maximum_value(balanced) == 20
def test_find_maximum_value_left():
left = BST([10, 8, 6, 4])
assert find_maximum_value(left) == 10
def test_find_maximum_value_right():
right = BST([1, 3, 5, 7, 9])
assert find_maximum_value(right) == 9
|
python
|
import matplotlib.pyplot as plt
import networkx as nx
import os
import random
import warnings
from matplotlib.backends import backend_gtk3
from settings import OUTPUT_DIR
warnings.filterwarnings('ignore', module=backend_gtk3.__name__)
RESTART_PROBABILITY = 0.15
STEPS_MULTIPLIER = 100
def random_walk_sample(graph, n):
selected_nodes = set()
while len(selected_nodes) < n:
last_node = random.choice(list(graph.nodes))
selected_nodes.add(last_node)
for i in range(STEPS_MULTIPLIER * n):
last_node = random.choice(list(graph.neighbors(last_node)))
selected_nodes.add(last_node)
if len(selected_nodes) >= n:
break
subgraph = graph.subgraph(selected_nodes)
return subgraph
def save_graph_figure(graph, name):
plt.title(name, fontsize=16)
nx.draw(graph, node_size=100)
plt.savefig(os.path.join(OUTPUT_DIR, '%s.png' % name))
plt.close('all')
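# Hedged usage sketch: networkx ships small example graphs, so a quick smoke test could
# look like this (the OUTPUT_DIR configured in settings must exist for the figure save).
#
#     g = nx.karate_club_graph()
#     sample = random_walk_sample(g, 10)
#     save_graph_figure(sample, 'karate_random_walk_sample')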
|
python
|
from collections.abc import KeysView
from typing import Union
def find(iterable: Union[list, KeysView], key=lambda x: x):
for elem in iterable:
if key(elem):
return elem
return None
def index(iterable: list, key=lambda x: x) -> int:
x = 0
for elem in iterable:
if key(elem):
return x
x += 1
return -1
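# Brief usage example (added for illustration; only runs when executed directly):
if __name__ == '__main__':
    nums = [1, 3, 5, 8]
    print(find(nums, key=lambda x: x % 2 == 0))   # 8    (first even element)
    print(index(nums, key=lambda x: x % 2 == 0))  # 3    (its position)
    print(find(nums, key=lambda x: x > 100))      # None (nothing matches)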
|
python
|
from django.apps import AppConfig
class CalToolConfig(AppConfig):
name = 'cal_tool'
|
python
|
#
#
# 0=================================0
# | Kernel Point Convolutions |
# 0=================================0
#
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Callable script to start a training on MyhalCollision dataset
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Hugues THOMAS - 06/03/2020
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Common libs
import sys
import time
import signal
import os
os.environ.update(OMP_NUM_THREADS='1',
OPENBLAS_NUM_THREADS='1',
NUMEXPR_NUM_THREADS='1',
MKL_NUM_THREADS='1',)
import numpy as np
import torch
# Dataset
from slam.PointMapSLAM import pointmap_slam, detect_short_term_movables, annotation_process
from slam.dev_slam import bundle_slam, pointmap_for_AMCL
from torch.utils.data import DataLoader
from datasets.MyhalCollision import MyhalCollisionDataset, MyhalCollisionSlam, MyhalCollisionSampler, \
MyhalCollisionCollate
from utils.config import Config
from utils.trainer import ModelTrainer
from models.architectures import KPCollider
from os.path import exists, join
from os import makedirs
# ----------------------------------------------------------------------------------------------------------------------
#
# Config Class
# \******************/
#
class MyhalCollisionConfig(Config):
"""
Override the parameters you want to modify for this dataset
"""
####################
# Dataset parameters
####################
# Dataset name
dataset = 'MyhalCollision'
# Number of classes in the dataset (This value is overwritten by dataset class when Initializating dataset).
num_classes = None
# Type of task performed on this dataset (also overwritten)
dataset_task = ''
# Number of CPU threads for the input pipeline
input_threads = 16
#########################
# Architecture definition
#########################
# Define layers (only concerning the 3D architecture)
architecture = ['simple',
'resnetb_strided',
'resnetb',
'resnetb_strided',
'resnetb',
'resnetb_strided',
'resnetb',
'resnetb_strided',
'resnetb',
'nearest_upsample',
'unary',
'nearest_upsample',
'unary',
'nearest_upsample',
'unary',
'nearest_upsample',
'unary']
######################
# Collision parameters
######################
# Number of propagating layer
n_2D_layers = 30
# Total time propagated
T_2D = 3.0
# Size of 2D convolution grid
dl_2D = 0.12
# Power of the loss for the 2d predictions (use smaller prop loss when shared weights)
power_2D_init_loss = 1.0
power_2D_prop_loss = 50.0
neg_pos_ratio = 1.0
loss2D_version = 2
# Specification of the 2D networks composition
init_2D_levels = 4 # 3
init_2D_resnets = 3 # 2
prop_2D_resnets = 3 # 2
# Path to a pretrained 3D network. if empty, ignore, if 'todo', then only train 3D part of the network.
#pretrained_3D = 'Log_2021-01-27_18-53-05'
pretrained_3D = ''
# Detach the 2D network from the 3D network when backpropagating gradient
detach_2D = False
# Share weights for 2D network TODO: see if not sharing makes a difference
shared_2D = False
# Trainable backend 3D network
apply_3D_loss = True
#frozen_layers = ['encoder_blocks', 'decoder_blocks', 'head_mlp', 'head_softmax']
# Use visibility mask for training
use_visibility = False
###################
# KPConv parameters
###################
# Radius of the input sphere
in_radius = 8.0
val_radius = 8.0
n_frames = 3
in_features_dim = n_frames
max_in_points = -1
max_val_points = -1
# Choice of input features
first_features_dim = 100
# Number of batch
batch_num = 6
val_batch_num = 6
# Number of kernel points
num_kernel_points = 15
# Size of the first subsampling grid in meter
first_subsampling_dl = 0.06
# Radius of convolution in "number grid cell". (2.5 is the standard value)
conv_radius = 2.5
# Radius of deformable convolution in "number grid cell". Larger so that deformed kernel can spread out
deform_radius = 6.0
# Radius of the area of influence of each kernel point in "number grid cell". (1.0 is the standard value)
KP_extent = 1.2
# Behavior of convolutions in ('constant', 'linear', 'gaussian')
KP_influence = 'linear'
# Aggregation function of KPConv in ('closest', 'sum')
aggregation_mode = 'sum'
# Can the network learn modulations
modulated = False
# Batch normalization parameters
use_batch_norm = True
batch_norm_momentum = 0.02
# Deformable offset loss
# 'point2point' fitting geometry by penalizing distance from deform point to input points
# 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented)
deform_fitting_mode = 'point2point'
deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss
deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations
repulse_extent = 1.2 # Distance of repulsion for deformed kernel points
#####################
# Training parameters
#####################
# Maximal number of epochs
max_epoch = 1000
# Learning rate management
learning_rate = 1e-2
momentum = 0.98
lr_decays = {i: 0.1 ** (1 / 120) for i in range(1, max_epoch)}
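    # With 0.1 ** (1 / 120) applied every epoch, the learning rate is divided by 10
    # every 120 epochs (e.g. 1e-2 -> ~1e-3 around epoch 120, ~1e-4 around epoch 240).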
grad_clip_norm = 100.0
# Number of steps per epochs
epoch_steps = 500
# Number of validation examples per epoch
validation_size = 30
# Number of epoch between each checkpoint
checkpoint_gap = 20
# Augmentations
augment_scale_anisotropic = False
augment_symmetries = [False, False, False]
augment_rotation = 'vertical'
augment_scale_min = 0.99
augment_scale_max = 1.01
augment_noise = 0.001
augment_color = 1.0
    # Do we need to save convergence
saving = True
saving_path = None
# ----------------------------------------------------------------------------------------------------------------------
#
# Main Call
# \***************/
#
if __name__ == '__main__':
# NOT_NOW_TODO: Optimize online predictions
# > Try to parallelise the batch preprocessing for a single input frame.
# > Use OMP for neighbors processing
# > Use the polar coordinates to get neighbors???? (avoiding tree building time)
# > cpp extension for conversion into a 2D lidar_range_scan
#
############################
# Initialize the environment
############################
# Set which gpu is going to be used (auto for automatic choice)
GPU_ID = 'auto'
# Automatic choice (need pynvml to be installed)
if GPU_ID == 'auto':
print('\nSearching a free GPU:')
for i in range(torch.cuda.device_count()):
a = torch.cuda.list_gpu_processes(i)
print(torch.cuda.list_gpu_processes(i))
a = a.split()
if a[1] == 'no':
GPU_ID = a[0][-1:]
# Safe check no free GPU
if GPU_ID == 'auto':
print('\nNo free GPU found!\n')
a = 1/0
else:
print('\nUsing GPU:', GPU_ID, '\n')
# Set GPU visible device
os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID
chosen_gpu = int(GPU_ID)
###################
# Training sessions
###################
# Day used as map
map_day = '2020-10-02-13-39-05'
train_days_RandBounce = ['2021-05-15-23-15-09',
'2021-05-15-23-33-25',
'2021-05-15-23-54-50',
'2021-05-16-00-44-53',
'2021-05-16-01-09-43',
'2021-05-16-20-37-47',
'2021-05-16-20-59-49',
'2021-05-16-21-22-30',
'2021-05-16-22-26-45',
'2021-05-16-22-51-06',
'2021-05-16-23-34-15',
'2021-05-17-01-21-44',
'2021-05-17-01-37-09',
'2021-05-17-01-58-57',
'2021-05-17-02-34-27',
'2021-05-17-02-56-02',
'2021-05-17-03-54-39',
'2021-05-17-05-26-10',
'2021-05-17-05-41-45']
train_days_RandWand = ['2021-05-17-14-04-52',
'2021-05-17-14-21-56',
'2021-05-17-14-44-46',
'2021-05-17-15-26-04',
'2021-05-17-15-50-45',
'2021-05-17-16-14-26',
'2021-05-17-17-02-17',
'2021-05-17-17-27-02',
'2021-05-17-17-53-42',
'2021-05-17-18-46-44',
'2021-05-17-19-02-37',
'2021-05-17-19-39-19',
'2021-05-17-20-14-57',
'2021-05-17-20-48-53',
'2021-05-17-21-36-22',
'2021-05-17-22-16-13',
'2021-05-17-22-40-46',
'2021-05-17-23-08-01',
'2021-05-17-23-48-22',
'2021-05-18-00-07-26',
'2021-05-18-00-23-15',
'2021-05-18-00-44-33',
'2021-05-18-01-24-07']
train_days_RandFlow = ['2021-06-02-19-55-16',
'2021-06-02-20-33-09',
'2021-06-02-21-09-48',
'2021-06-02-22-05-23',
'2021-06-02-22-31-49',
'2021-06-03-03-51-03',
'2021-06-03-14-30-25',
'2021-06-03-14-59-20',
'2021-06-03-15-43-06',
'2021-06-03-16-48-18',
'2021-06-03-18-00-33',
'2021-06-03-19-07-19',
'2021-06-03-19-52-45',
'2021-06-03-20-28-22',
'2021-06-03-21-32-44',
'2021-06-03-21-57-08']
######################
# Automatic Annotation
######################
# Choose the dataset between train_days_RandBounce, train_days_RandWand, or train_days_RandFlow
train_days = np.array(train_days_RandBounce)
# Validation sessions
val_inds = [0, 1, 2]
train_inds = [i for i in range(len(train_days)) if i not in val_inds]
# Check if we need to redo annotation (only if there is no collison folder)
redo_annot = False
for day in train_days:
annot_path = join('../Data/Simulation/collisions', day)
if not exists(annot_path):
redo_annot = True
break
# train_days = ['2020-10-20-16-30-49']
# redo_annot = True
if redo_annot:
# Initiate dataset
slam_dataset = MyhalCollisionSlam(day_list=train_days, map_day=map_day)
# Create a refined map from the map_day.
# UNCOMMENT THIS LINE if you are using your own data for the first time
# COMMENT THIS LINE if you already have a nice clean map of the environment as a point cloud
# like this one: Data/Simulation/slam_offline/2020-10-02-13-39-05/map_update_0001.ply
# slam_dataset.refine_map()
# Groundtruth annotation
annotation_process(slam_dataset, on_gt=False)
# TODO: Loop closure for aligning days together when not simulation
# Annotation of preprocessed 2D+T point clouds for SOGM generation
slam_dataset.collision_annotation()
print('annotation finished')
##############
# Prepare Data
##############
print()
print('Data Preparation')
print('****************')
# Initialize configuration class
config = MyhalCollisionConfig()
# Override with configuration from previous 3D network if given
if config.pretrained_3D and config.pretrained_3D != 'todo':
# Check if path exists
previous_path = os.path.join('results', config.pretrained_3D)
if not exists(previous_path):
raise ValueError('Given path for previous 3D network does not exist')
# Load config
prev_config = MyhalCollisionConfig()
prev_config.load(previous_path)
# List of params we should not overwrite:
kept_params = ['n_2D_layers',
'T_2D',
'dl_2D',
'power_2D_init_loss',
'power_2D_prop_loss',
'neg_pos_ratio',
'init_2D_levels',
'init_2D_resnets',
'prop_2D_resnets',
'pretrained_3D',
'detach_2D',
'shared_2D',
'apply_3D_loss',
'frozen_layers',
'max_epoch',
'learning_rate',
'momentum',
'lr_decays',
'grad_clip_norm',
'epoch_steps',
'validation_size',
'checkpoint_gap',
'saving',
'saving_path',
'input_threads']
for attr_name, attr_value in vars(config).items():
if attr_name not in kept_params:
setattr(config, attr_name, getattr(prev_config, attr_name))
# Get path from argument if given
if len(sys.argv) > 1:
config.saving_path = sys.argv[1]
###############
# Previous chkp
###############
# Choose here if you want to start training from a previous snapshot (None for new training)
# Choose index of checkpoint to start from. If None, uses the latest chkp
chkp_idx = None
chosen_chkp = None
if config.pretrained_3D and config.pretrained_3D != 'todo':
# Check if path exists
chkp_path = os.path.join('results', config.pretrained_3D, 'checkpoints')
if not exists(chkp_path):
            raise ValueError('Given path for previous 3D network does not contain any checkpoints')
# Find all snapshot in the chosen training folder
chkps = [f for f in os.listdir(chkp_path) if f[:4] == 'chkp']
# Find which snapshot to restore
if chkp_idx is None:
chosen_chkp = 'current_chkp.tar'
else:
chosen_chkp = np.sort(chkps)[chkp_idx]
chosen_chkp = os.path.join('results', config.pretrained_3D, 'checkpoints', chosen_chkp)
#####################
# Init input pipeline
#####################
# Initialize datasets (dummy validation)
training_dataset = MyhalCollisionDataset(config, train_days[train_inds], chosen_set='training', balance_classes=True)
test_dataset = MyhalCollisionDataset(config, train_days[val_inds], chosen_set='validation', balance_classes=False)
# Initialize samplers
training_sampler = MyhalCollisionSampler(training_dataset)
test_sampler = MyhalCollisionSampler(test_dataset)
# Initialize the dataloader
training_loader = DataLoader(training_dataset,
batch_size=1,
sampler=training_sampler,
collate_fn=MyhalCollisionCollate,
num_workers=config.input_threads,
pin_memory=True)
test_loader = DataLoader(test_dataset,
batch_size=1,
sampler=test_sampler,
collate_fn=MyhalCollisionCollate,
num_workers=config.input_threads,
pin_memory=True)
# Calibrate max_in_point value
if config.max_in_points < 0:
config.max_in_points = 1e9
training_loader.dataset.max_in_p = 1e9
training_sampler.calib_max_in(config, training_loader, untouched_ratio=0.9, verbose=True)
if config.max_val_points < 0:
config.max_val_points = 1e9
test_loader.dataset.max_in_p = 1e9
test_sampler.calib_max_in(config, test_loader, untouched_ratio=0.95, verbose=True)
# Calibrate samplers
training_sampler.calibration(training_loader, verbose=True)
test_sampler.calibration(test_loader, verbose=True)
# debug_timing(training_dataset, training_loader)
# debug_timing(test_dataset, test_loader)
# debug_class_w(training_dataset, training_loader)
print('\nModel Preparation')
print('*****************')
# Define network model
t1 = time.time()
net = KPCollider(config, training_dataset.label_values, training_dataset.ignored_labels)
debug = False
if debug:
print('\n*************************************\n')
print(net)
print('\n*************************************\n')
for param in net.parameters():
if param.requires_grad:
print(param.shape)
print('\n*************************************\n')
print("Model size %i" % sum(param.numel() for param in net.parameters() if param.requires_grad))
print('\n*************************************\n')
# Freeze layers if necessary
if config.frozen_layers:
for name, child in net.named_children():
if name in config.frozen_layers:
for param in child.parameters():
if param.requires_grad:
param.requires_grad = False
child.eval()
# Define a trainer class
trainer = ModelTrainer(net, config, chkp_path=chosen_chkp, gpu_id=chosen_gpu)
print('Done in {:.1f}s\n'.format(time.time() - t1))
print('\nStart training')
print('**************')
# Training
trainer.train(net, training_loader, test_loader, config)
print('Forcing exit now')
os.kill(os.getpid(), signal.SIGINT)
|
python
|
#!/usr/bin/env python
"""Split large file into multiple pieces for upload to S3.
S3 only supports 5Gb files for uploading directly, so for larger CloudBioLinux
box images we need to use boto's multipart file support.
This parallelizes the task over available cores using multiprocessing.
Usage:
s3_multipart_upload.py <file_to_transfer> <bucket_name> [<s3_key_name>]
if <s3_key_name> is not specified, the filename will be used.
--norr -- Do not use reduced redundancy storage.
--public -- Make uploaded files public.
--cores=n -- Number of cores to use for upload
Files are stored at cheaper reduced redundancy storage by default.
"""
import os
import sys
import glob
import subprocess
import contextlib
import functools
import multiprocessing
from multiprocessing.pool import IMapIterator
from optparse import OptionParser
import boto
def main(transfer_file, bucket_name, s3_key_name=None, use_rr=True,
make_public=True, cores=None):
if s3_key_name is None:
s3_key_name = os.path.basename(transfer_file)
conn = boto.connect_s3()
bucket = conn.lookup(bucket_name)
if bucket is None:
bucket = conn.create_bucket(bucket_name)
mb_size = os.path.getsize(transfer_file) / 1e6
if mb_size < 50:
_standard_transfer(bucket, s3_key_name, transfer_file, use_rr)
else:
_multipart_upload(bucket, s3_key_name, transfer_file, mb_size, use_rr,
cores)
s3_key = bucket.get_key(s3_key_name)
if make_public:
s3_key.set_acl("public-read")
def upload_cb(complete, total):
sys.stdout.write(".")
sys.stdout.flush()
def _standard_transfer(bucket, s3_key_name, transfer_file, use_rr):
print " Upload with standard transfer, not multipart",
new_s3_item = bucket.new_key(s3_key_name)
new_s3_item.set_contents_from_filename(transfer_file, reduced_redundancy=use_rr,
cb=upload_cb, num_cb=10)
print
def map_wrap(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
return apply(f, *args, **kwargs)
return wrapper
def mp_from_ids(mp_id, mp_keyname, mp_bucketname):
"""Get the multipart upload from the bucket and multipart IDs.
This allows us to reconstitute a connection to the upload
from within multiprocessing functions.
"""
conn = boto.connect_s3()
bucket = conn.lookup(mp_bucketname)
mp = boto.s3.multipart.MultiPartUpload(bucket)
mp.key_name = mp_keyname
mp.id = mp_id
return mp
@map_wrap
def transfer_part(mp_id, mp_keyname, mp_bucketname, i, part):
"""Transfer a part of a multipart upload. Designed to be run in parallel.
"""
mp = mp_from_ids(mp_id, mp_keyname, mp_bucketname)
print " Transferring", i, part
with open(part) as t_handle:
mp.upload_part_from_file(t_handle, i+1)
os.remove(part)
def _multipart_upload(bucket, s3_key_name, tarball, mb_size, use_rr=True,
cores=None):
"""Upload large files using Amazon's multipart upload functionality.
"""
def split_file(in_file, mb_size, split_num=5):
prefix = os.path.join(os.path.dirname(in_file),
"%sS3PART" % (os.path.basename(s3_key_name)))
# require a split size between 5Mb (AWS minimum) and 250Mb
split_size = int(max(min(mb_size / (split_num * 2.0), 250), 5))
if not os.path.exists("%saa" % prefix):
cl = ["split", "-b%sm" % split_size, in_file, prefix]
subprocess.check_call(cl)
return sorted(glob.glob("%s*" % prefix))
mp = bucket.initiate_multipart_upload(s3_key_name, reduced_redundancy=use_rr)
with multimap(cores) as pmap:
for _ in pmap(transfer_part, ((mp.id, mp.key_name, mp.bucket_name, i, part)
for (i, part) in
enumerate(split_file(tarball, mb_size, cores)))):
pass
mp.complete_upload()
@contextlib.contextmanager
def multimap(cores=None):
"""Provide multiprocessing imap like function.
The context manager handles setting up the pool, worked around interrupt issues
and terminating the pool on completion.
"""
if cores is None:
cores = max(multiprocessing.cpu_count() - 1, 1)
def wrapper(func):
def wrap(self, timeout=None):
return func(self, timeout=timeout if timeout is not None else 1e100)
return wrap
IMapIterator.next = wrapper(IMapIterator.next)
pool = multiprocessing.Pool(cores)
yield pool.imap
pool.terminate()
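# Note on the monkeypatch above: in Python 2, waiting on pool results without a
# timeout blocks KeyboardInterrupt, so IMapIterator.next is wrapped to always
# pass a (huge) timeout; terminate() then cleans up the workers on exit.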
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-r", "--norr", dest="use_rr",
action="store_false", default=True)
parser.add_option("-p", "--public", dest="make_public",
action="store_true", default=False)
parser.add_option("-c", "--cores", dest="cores",
default=multiprocessing.cpu_count())
(options, args) = parser.parse_args()
if len(args) < 2:
print __doc__
sys.exit()
kwargs = dict(use_rr=options.use_rr, make_public=options.make_public,
cores=int(options.cores))
main(*args, **kwargs)
|
python
|
#!/usr/bin/env python
# coding: utf-8
###################################################################################################
#
# File : frame_study.py
#
# Author : P.Antilogus
#
# Version : 22 Feb 2019
#
# Goal : this python file reads raw data images, and can be used for specific sensor diagnostics like :
# - cte
# - overscan
# - noise
#
# Example : see notebooks using this package
#
# Remark : it is in the process of being cleaned up and simplified ... but this is not my top priority today ;-)
#
try:
import pyfits
except :
import astropy.io.fits as pyfits
import numpy as np
import glob
import os
import sys
import matplotlib.pyplot as plt
import matplotlib
import time
import pickle
matplotlib.rcParams['axes.formatter.useoffset'] = False
#
def image_area(image) :
# input : image ==> fits image
    # output : image section coordinates to be used in python arrays: ymin, ymax, xmin, xmax
    # -the input is an already opened FITS file (pyfits / astropy.io.fits)
    # -extract the image area to be used in python arrays from the DATASEC keyword
#
r=image[1].header['DATASEC'][1:-1].split(',')
x=r[0].split(':')
y=r[1].split(':')
#
return int(y[0])-1,int(y[1]),int(x[0])-1,int(x[1])
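# Worked example (hypothetical header value): with DATASEC = '[11:522,1:2002]'
# the function returns (0, 2002, 10, 522), i.e. the slices
# data[0:2002, 10:522] cover the illuminated image area.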
#
class Ifile :
    # Handle (select, etc.) file lists from the LSST CCD test bench
def __init__(self,dirall=['/Users/antilog/scratch/20160901'],Pickle=False,root_for_pickle='/sps/lsst/DataBE/lpnws5203',fkey={},verbose=False,Slow=True,single_t=False,nskip=0,nkeep=-1):
        # dirall : list of directories/files to read in; if fkey is set the file headers are used to select files,
        #          or (if Pickle is True) the files are read from the content of the pickle files, with fkey also used for selection
        # fkey   : {'selection_name' : {'fits_header_name' : {'key' : value}, ...}, ...} : a triple dictionary
        #          {selection_name : {header : {key : value}}} used to select files
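        # Example fkey (illustrative selection/header/keyword values only):
        #   fkey = {'flat_1s': {'first': 'reb3_000100.fz',
        #                       'last' : 'reb3_000500.fz',
        #                       'key'  : {0: {'TESTTYPE': 'FLAT'}}}}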
self.nkept=0
self.all_file=[]
self.clap=[]
self.selection=[]
self.fkey=fkey
self.directory=sorted(dirall)
self.stat={}
self.nseen=0
# loop on header
if Pickle :
self.all_file_from_pickle(dirall=dirall,root_for_pickle=root_for_pickle,fkey=fkey,verbose=verbose,Slow=Slow,single_t=single_t,nskip=nskip,nkeep=nkeep)
else :
self.all_file_from_dir(dirall=dirall,fkey=fkey,verbose=verbose,Slow=Slow,single_t=single_t,nskip=nskip,nkeep=nkeep)
return
def all_file_from_dir(self,dirall,fkey,verbose,Slow,single_t,nskip,nkeep):
        # dirname : can be a directory name, or a file pattern with *, as long as it ends with .fz
# ex : /Users/antilog/scratch/REB_DATA/20160513/linearity
# or : /Users/antilog/scratch/REB_DATA/20160513/linearity/reb3*.fz
# fkey : dictionary key word for image selection
# verbose : if True , print messages about each selected file ( default= False )
# single_t : keep only one file per exposure time (default False )
self.all_file=[]
old_time=[0.]
fits_is_open=False
#
# fill all_file from the header of the files in dirall .
for dirname in self.directory :
            # do we already have all the needed files ?
if nkeep> 0 and self.nkept==nkeep :
break
# build the list of file for this directory
if (len(os.path.splitext(dirname)[1])>0) :
file_list=glob.glob(dirname)
else :
file_list=glob.glob("%s/*.fz" % (dirname))
file_list.sort()
# loop on files to select them if needed
for filenamed in file_list :
#
keep=True
if len(fkey)==0 :
selection='Main'
else :
fits_is_open=False
dname, fname=os.path.split((filenamed))
# is there any extra selection based on Header , key , value ?
for selection, sel_id in fkey.items() :
local_keep=False
                        # selection on file name, if any
if 'first' in sel_id.keys() :
if fname < sel_id['first'] : continue
if 'last' in sel_id.keys() :
if fname > sel_id['last'] : continue
local_keep=True
if 'key' in sel_id.keys() :
if not(fits_is_open) :
fitsfile=pyfits.open(filenamed)
fits_is_open=True
for header, key_cur in sel_id['key'].items() :
if not ( local_keep ) : break
for key, value in key_cur.items() :
if ( key in fitsfile[header].header ) :
if ( fitsfile[header].header[key]!=value ):
local_keep=False
break
else :
local_keep=False
break
                    # this file is ok for the current selection ; remark : a file is assigned to the first selection it is compatible with
if local_keep : break
keep=local_keep
#
if (keep and single_t ) :
if not(fits_is_open) :
fitsfile=pyfits.open(filenamed)
fits_is_open=True
new_time=fitsfile[0].header['EXPTIME']
if new_time in old_time :
keep=False
else :
old_time.append(new_time)
if (keep) :
self.nseen+=1
if self.nseen>nskip :
if not(fits_is_open) :
fitsfile=pyfits.open(filenamed)
fits_is_open=True
self.all_file.append(datafile(fitsfile,Slow))
self.selection.append(selection)
# to be updated with a call to clap
#self.clap.append(new_time)
if verbose : print ('%d : Selected %s File %s ' % (self.nkept,selection,filenamed) )
self.nkept+=1
if self.nkept==nkeep and nkeep > 0 :
# we selected the number of files requested
fitsfile.close()
break
if fits_is_open :
fitsfile.close()
del fitsfile
fits_is_open=False
return
def all_file_from_pickle(self,dirall,root_for_pickle,fkey,verbose,Slow,single_t,nskip,nkeep):
old_time=[0.]
# fill all_file from the header of the files in dirall .
for dirname in self.directory :
            # do we already have all the needed files ?
if nkeep> 0 and self.nkept==nkeep :
break
# build the list of file for this directory
if (len(os.path.splitext(dirname)[1])>0) :
file_list=glob.glob(dirname)
else :
file_list=glob.glob("%s/*.pkl" % (dirname))
file_list.sort()
# loop on files to select them if needed
for pickle_file in file_list :
# open the pickle file
input=open(pickle_file,'rb')
file=pickle.load(input)
#
for i_cur in range(len(file)) :
#filename=file[i_cur].filename
dname=file[i_cur].dir
fname=file[i_cur].filename
clap=file[i_cur].clap
keep=True
if len(fkey)==0 :
selection='Main'
else :
#
# is there any extra selection based on Header , key , value ?
for selection, sel_id in fkey.items() :
local_keep=False
                        # selection on file name, if any
if 'first' in sel_id.keys() :
if fname < sel_id['first'] : continue
if 'last' in sel_id.keys() :
if fname > sel_id['last'] : continue
local_keep=True
                        # test key (=), key+ (>=), key- (<=)
if 'key' in sel_id.keys() :
for header, key_cur in sel_id['key'].items() :
if not ( local_keep ) : break
for key, value in key_cur.items() :
if ( key in file[i_cur].header[header] ) :
if (file[i_cur].header[header][key] is None) or ( file[i_cur].header[header][key]!=value ):
local_keep=False
break
else :
local_keep=False
break
                        # this file is ok for the current selection ; remark : a file is assigned to the first selection it is compatible with
if not(local_keep) : continue
#
if 'key+' in sel_id.keys() :
for header, key_cur in sel_id['key+'].items() :
if not ( local_keep ) : break
for key, value in key_cur.items() :
if ( key in file[i_cur].header[header] ) :
if (file[i_cur].header[header][key] is None) or ( file[i_cur].header[header][key]<value ):
local_keep=False
break
else :
local_keep=False
break
                        # this file is ok for the current selection ; remark : a file is assigned to the first selection it is compatible with
if not(local_keep) : continue
#
if 'key-' in sel_id.keys() :
for header, key_cur in sel_id['key-'].items() :
if not ( local_keep ) : break
for key, value in key_cur.items() :
if ( key in file[i_cur].header[header] ) :
if (file[i_cur].header[header][key] is None) or ( file[i_cur].header[header][key]>value ):
local_keep=False
break
else :
local_keep=False
break
                    # this file is ok for the current selection ; remark : a file is assigned to the first selection it is compatible with
if local_keep : break
keep=local_keep
#
if (keep and single_t ) :
new_time=file[i_cur].header['Primary']['EXPTIME']
if new_time in old_time :
keep=False
else :
old_time.append(new_time)
if (keep) :
self.nseen+=1
if self.nseen>nskip :
fitsfile=pyfits.open(root_for_pickle+'/'+dname+'/'+fname)
self.all_file.append(datafile(fitsfile,Slow))
fitsfile.close()
#
self.clap.append(clap)
self.selection.append(selection)
if verbose : print ('%d : Selected %s File %s ' % (self.nkept,selection,fname) )
self.nkept+=1
if self.nkept==nkeep and nkeep > 0 :
# we selected the number of files requested
break
return
def plot(self,plt_name='',on_screen=False) :
# define last to plot :
#
fig=plt.figure(figsize=(15,15))
title="Noise estimation from Overscan : %s " % (plt_name)
fig.suptitle(title)
iplt=1
ax=fig.add_subplot(3,3,iplt)
iplt+=1
return
class datafile :
def __init__(self, fitsfile,Slow=True):
'''
Construct all the necessary attributes for the datafile object.
Parameters :
        fitsfile (HDUList) : opened FITS file to process; all files of a run should come from the same raft-sensor
        Slow (bool) : compute extended image properties or not (Default : True)
        remark : for CTE analysis alone, Slow can be set to False, which is faster
'''
#
self.Image=[]
self.Hdu=[]
self.HduMax=0
self.fft=[]
self.w=[]
self.Mean=[]
self.Std=[]
self.Median=[]
self.MedPScan=[]
self.StdPScan=[]
self.MeanSScan=[]
self.MedSScan=[]
self.StdSScan=[]
self.StdSScanOS=[]
self.MeanPScan=[]
self.StdPScanOS=[]
self.Mean_col=[]
self.Std_col=[]
self.Mean_line=[]
self.Median_line=[]
self.Std_line=[]
self.Std_l60=[]
self.Attenuator=0.
self.CCD_COND={}
self.Range=0.
self.PreExp=0.
self.PostExp=0.
# image area
first_line,first_p_over,first_col,first_s_over=image_area(fitsfile)
self.first_col=first_col
self.first_s_over=first_s_over
self.first_line=first_line
self.first_p_over=first_p_over
#
try :
self.exptime=float(fitsfile[0].header['EXPTIME'])
except :
# Paris test bench key value for exposure time is different
self.exptime=float(fitsfile[0].header['EXPOSURE'])
try :
self.ccdslot=(fitsfile[0].header['CCDSLOT']).strip()
self.raftbay=(fitsfile[0].header['RAFTBAY']).strip()
except :
self.ccdslot=''
self.raftbay=''
self.fluxs_last=[]
self.fluxp_last=[]
self.fluxs_last_std=[]
#self.fluxs_last_var=[]
self.fluxp_last_std=[]
self.fluxp_used=[]
#
self.over4_col_std=[]
self.over4_line_std=[]
#
#
# self.Date=JdUtc(fitsfile[0].header['DATE']).Jd for i in range(len(fitsfile)):
for i in range(1,min(17,len(fitsfile))):
if ( fitsfile[i].header['XTENSION'].strip()=='IMAGE' ) :
self.Hdu.append(i)
self.HduMax=i
                # Remark : for the moment we don't know which REB slice we are looking at
# for e2v and BNL data it's [8:]
# self.Channel.append(int(fitsfile[i].header['EXTNAME'][8:]))
# for Paris data it's [5:]
#self.Channel.append(int(fitsfile[i].header['EXTNAME'][5:]))
# self.Image.append(np.copy(fitsfile[i].data))
self.Image.append(fitsfile[i].header['EXTNAME'].strip())
# image
# Mean and noise
self.Median.append(np.median(fitsfile[i].data[first_line:first_p_over,first_col:first_s_over]))
if Slow :
self.Mean.append(fitsfile[i].data[first_line:first_p_over,first_col:first_s_over].mean())
self.Std.append(fitsfile[i].data[first_line:first_p_over,first_col:first_s_over].std())
# line OverScan
self.MedPScan.append(np.median(fitsfile[i].data[first_p_over+5:,first_col:first_s_over]))
self.MeanPScan.append(fitsfile[i].data[first_p_over+5:,first_col:first_s_over].mean())
self.StdPScan.append(fitsfile[i].data[first_p_over+5:,first_col:first_s_over].std())
                    # Serial over-scan, skipping the first columns to remove CTE / overshoot effects
self.MedSScan.append(np.median(fitsfile[i].data[:,first_s_over+5:]))
self.MeanSScan.append(fitsfile[i].data[:,first_s_over+5:].mean())
self.StdSScan.append(fitsfile[i].data[:,first_s_over+5:].std())
                    # information for a 2D diagnostic of the overscan : is the overscan flat as a function of the column ? of the line ?
# --- data in the overscan corner ( pixels are overscan in line and column )
self.over4_col_std.append(fitsfile[i].data[first_p_over:,first_s_over:].std(axis=1).mean())
self.over4_line_std.append(fitsfile[i].data[first_p_over:,first_s_over:].std(axis=0).mean())
if Slow :
                    # Same but bias subtracted
mean_line=np.median(fitsfile[i].data[first_p_over:,:],axis=0)
mean_column=np.median(fitsfile[i].data[:,first_s_over:],axis=1)
last_l=len(fitsfile[i].data[:,0])
last_s=len(fitsfile[i].data[0,:])
rawl=np.zeros((last_l-first_p_over,last_s))
raws=np.zeros((last_l,last_s-first_s_over))
for l in range(first_p_over,last_l) :
rawl[l-first_p_over,:]=fitsfile[i].data[l,:]-mean_line
self.StdPScanOS.append((rawl[:,first_col:].std(axis=1)).mean())
#
for c in range(first_s_over,last_s) :
raws[:,c-first_s_over]=fitsfile[i].data[:,c]-mean_column
self.StdSScanOS.append((raws[first_line:,:].std(axis=0)).mean())
                    # average along the columns and lines
#self.Mean_col.append(fitsfile[i].data[first_line:first_p_over,:].mean(axis=0))
#self.Mean_line.append(fitsfile[i].data[:,first_col:first_s_over].mean(axis=1))
#self.Median_line.append(np.median(fitsfile[i].data[:,first_col:first_s_over],axis=1))
#self.Std_col.append(fitsfile[i].data[first_line:first_p_over,:].std(axis=0))
#self.Std_line.append(fitsfile[i].data[:,first_col:first_s_over].std(axis=1))
#self.Std_l60.append(fitsfile[i].data[:60,first_col:first_s_over].std())
# For CTE Serie
#
# REMARK : The size cut ( 28 , line +/- 10 ... ) are hard wired and used in CTE part of the code to compute statistic !!!!
#
self.fluxs_last.append(fitsfile[i].data[first_line+10:first_p_over-10,first_s_over-1:first_s_over+28].mean(axis=0)-fitsfile[i].data[first_line+10:first_p_over-10,first_s_over+15:].mean())
#
self.fluxs_last_std.append(fitsfile[i].data[first_line+10:first_p_over-10,first_s_over-1:first_s_over+28].std(axis=0)/np.sqrt(float(first_p_over-10-first_line-10)))
#
#self.fluxs_last_var.append(fitsfile[i].data[first_line+100:first_p_over-100,first_s_over-1:first_s_over+28].std(axis=0)**2-fitsfile[i].data[first_line+100:first_p_over-100,first_s_over+15:].std()**2)
# For CTE //
#
# fluxp=np.array([ fitsfile[i].data[first_p_over-1:first_p_over+28,icol] - np.median(fitsfile[i].data[first_p_over+5:,icol ]) for icol in range(first_col+10,first_s_over-10) ])
overscan_offset= np.median(fitsfile[i].data[first_p_over+5:,first_col+10:first_s_over-10],axis=0)
#
                # we do a median per slice, to kill outliers, while keeping a statistical precision < 1 adu ... still not as precise as a mean
#self.fluxp_last.append((np.median(fluxp[0:100],axis=0)+np.median(fluxp[100:200],axis=0)+np.median(fluxp[200:300],axis=0)+np.median(fluxp[300:400],axis=0)+np.median(fluxp[400:500],axis=0))/5.)
                # the correct version : kill outliers (there are outliers in case of blooming columns); to speed up we just cut based on the last physical column
self.fluxp_last.append(np.zeros((29)))
self.fluxp_last_std.append(np.zeros((29)))
self.fluxp_used.append(np.zeros((29)))
#
last_line=fitsfile[i].data[first_p_over-1,first_col+10:first_s_over-10]-overscan_offset
last_line_median=np.median(last_line)
last_line_std=5*last_line.std()
column_ok=[icol for icol in range(len(last_line)) if np.abs(last_line[icol]-last_line_median) < last_line_std ]
#
for j in range(29):
fluxp_truncated=(fitsfile[i].data[first_p_over-1+j,first_col+10:first_s_over-10]-overscan_offset)[column_ok]
self.fluxp_last[-1][j]=np.mean(fluxp_truncated)
self.fluxp_last_std[-1][j]=np.std(fluxp_truncated)/np.sqrt(len(fluxp_truncated))
self.fluxp_used[-1][j]=len(fluxp_truncated)
#fluxp=np.array([ fitsfile[i].data[first_p_over-1:first_p_over+28,icol] - np.median(fitsfile[i].data[first_p_over+5:,icol ]) for icol in range(first_col+10,first_s_over-10) ])
#self.fluxp_last.append(np.median(fluxp,axis=0))
# self.fluxp_last.append(np.median(fitsfile[i].data[first_p_over-1:first_p_over+28,first_col+10:first_s_over-10],axis=1)-np.median(fitsfile[i].data[first_p_over-1:first_p_over+28,first_s_over+5:first_s_over+15]))
else:
# last image section read
break
return
class cte :
def __init__(self, all_file, gain=[0.704650434205,0.68883578783,0.688459358774,0.696697494642,0.689209827484,0.696579402812,0.698973006751,0.689613072912,0.682880384357,0.696206655845,0.690349506621,0.691506176017,0.690763478766,0.689762341309,0.694801544092,0.850025229184 ],serie=True):
#
nb_f_max=len(all_file)
#
self.cte_flux=np.zeros((16,nb_f_max))
self.cte_time=np.zeros((nb_f_max))
self.cte_ftime=np.zeros((nb_f_max))
self.cte_flux_s=np.zeros((16,nb_f_max))
self.cte_y=np.zeros((16,nb_f_max,28))
self.cte_y_s=np.zeros((16,nb_f_max,28))
self.cte_y_std=np.zeros((16,nb_f_max,28))
self.cte_y_s_std=np.zeros((16,nb_f_max,28))
self.ylev=np.zeros((16,nb_f_max))
self.ylev_std=np.zeros((16,nb_f_max))
self.nb_file=np.zeros((nb_f_max))
self.serie=serie
self.cte_noise_s=np.zeros((16,nb_f_max))
self.cte_noise_s_std=np.zeros((16,nb_f_max))
self.overscan_std=np.zeros((16,nb_f_max))
self.over8_18=np.zeros((16,nb_f_max))
self.over8_18_std=np.zeros((16,nb_f_max))
# pixel number in unit of CCD
self.i_f=0
#
if nb_f_max==0 : return
#
self.first_file=all_file[0]
#
cte_noise_std=np.zeros((16,nb_f_max,28))
#
for f in all_file :
im_flux=np.median(np.array(f.Median))
if self.i_f>0 and f.exptime in self.cte_time[0:self.i_f] :
all_cur=np.flatnonzero(self.cte_time[0:self.i_f] == f.exptime)
                # Attention : it could be that we have the same exposure time but not the same flux (extra filter)
found_cur=-1
for cur_cur in all_cur :
ratio=max(self.cte_ftime[cur_cur]/im_flux,im_flux/self.cte_ftime[cur_cur])
if ratio<1.1 :
found_cur=cur_cur
if found_cur > -1 :
i_cur=found_cur
else:
i_cur=self.i_f
self.cte_ftime[i_cur]=im_flux
self.cte_time[i_cur]=f.exptime
self.i_f+=1
else :
i_cur=self.i_f
self.cte_ftime[i_cur]=im_flux
self.cte_time[i_cur]=f.exptime
self.i_f+=1
for ch in range(f.HduMax) :
if serie :
# CTE serie
if ch==0 :
# print ('%s ,time %f, Channel %d, flux %f (flux last col %f) , image %f , signal dispersion %f , scan serie %f , scan serie dispersion %f ' % (f.filename,f.exptime,ch,f.Mean[ch]-f.MeanSScan[ch],f.fluxs_last[ch][0],f.Mean[ch], f.Std[ch],f.MeanSScan[ch],f.StdSScan[ch]))
self.first=f.first_s_over
                    # what matters in the CTE definition is how many transfers were done for the last column read, which is the size of the pre-scan + the size of the image (in the past we subtracted the prescan, which was an error)
self.nb_pixel=f.first_s_over
self.nb_file[i_cur]+=1
flux_last=f.fluxs_last[ch][0]
#
self.cte_y[ch,i_cur,:]+=f.fluxs_last[ch][1:]
self.cte_y_std[ch,i_cur,:]+=(f.fluxs_last_std[ch][1:])**2
cte_noise_std[ch,i_cur,:]+=(f.fluxs_last_std[ch][1:])**2*(float(f.first_p_over-10-f.first_line-10))
self.overscan_std[ch,i_cur]+=(f.over4_col_std[ch])**2
else :
# CTE //
if ch==0 :
# print ('%s ,time % f, Channel %d, flux %f (flux last line %f) , image %f , signal dispersion %f , scan // %f , scan // dispersion %f ' % (f.filename,f.exptime,ch,f.Mean[ch]-f.MedPScan[ch],f.fluxp_last[ch][0],f.Mean[ch], f.Std[ch],f.MedPScan[ch],f.StdPScan[ch]))
self.first=f.first_p_over
self.nb_pixel=f.first_p_over
self.nb_file[i_cur]+=1
flux_last=f.fluxp_last[ch][0]
#
self.cte_y[ch,i_cur,:]+=f.fluxp_last[ch][1:]
self.cte_y_std[ch,i_cur,:]+=f.fluxp_last_std[ch][1:]**2
cte_noise_std[ch,i_cur,:]+=(f.fluxp_last_std[ch][1:])**2*f.fluxp_used[ch][1:]
self.overscan_std[ch,i_cur]+=(f.over4_line_std[ch])**2
#if flux_last==0. : flux_last=1e-6
self.cte_flux[ch,i_cur]+=flux_last
# self.i_f+=1
#fl=np.argsort(self.cte_flux,axis=1)
ft=np.argsort(self.cte_ftime[0:self.i_f])
l_ft=len(ft)
#print('order in time ',ft)
self.lmax=np.zeros((16),dtype=np.int16)
# we take the number of amplifiers from the last file read
for ch in range(f.HduMax) :
l_k=0
cte_sig=np.zeros((l_ft))
#for l in fl[ch,:] :
for l in ft[:] :
# protection against divide by 0 improbable ?
if self.cte_flux[ch,l]==0 : self.cte_flux[ch,l]=1.0e-6
self.cte_y_s[ch,l_k,:]=self.cte_y[ch,l,:]*gain[ch]/self.nb_file[l]
                # remark on the 1/n below : sqrt(1/n)**2 is needed to get the error on the mean, not the dispersion
self.cte_y_s_std[ch,l_k,:]=np.sqrt(self.cte_y_std[ch,l,:])*gain[ch]/self.nb_file[l]
self.cte_noise_s[ch,l_k]=np.sqrt(cte_noise_std[ch,l,2:].mean(axis=0)/(self.nb_file[l]))*gain[ch]
self.cte_flux_s[ch,l_k]=self.cte_flux[ch,l]*gain[ch]/self.nb_file[l]
self.cte_noise_s_std[ch,l_k]=np.sqrt(cte_noise_std[ch,l,2:].std(axis=0)/(self.nb_file[l])/(26))*gain[ch]
l_k+=1
for l in range(1,l_ft) :
if self.cte_flux_s[ch,l]<self.cte_flux_s[ch,self.lmax[ch]] and self.cte_flux_s[ch,self.lmax[ch]] > 100000 :
self.lmax[ch]=l
break
self.lmax[ch]=l
if len(self.cte_flux_s[ch,:])==1 : self.lmax[ch]=1
self.ylev[ch,0:self.lmax[ch]]=(self.cte_y_s[ch,0:self.lmax[ch],0]+self.cte_y_s[ch,0:self.lmax[ch],1])/self.cte_flux_s[ch,0:self.lmax[ch]]/float(self.nb_pixel)
#self.ylev_std[ch,0:self.lmax[ch]]=self.ylev[ch,0:self.lmax[ch]]*np.sqrt((self.cte_y_s_std[ch,0:self.lmax[ch],0]/self.cte_y_s[ch,0:self.lmax[ch],0])**2+(self.cte_y_s_std[ch,0:self.lmax[ch],1]/self.cte_y_s[ch,0:self.lmax[ch],1])**2)
self.ylev_std[ch,0:self.lmax[ch]]=np.sqrt(self.cte_y_s_std[ch,0:self.lmax[ch],0]**2+self.cte_y_s_std[ch,0:self.lmax[ch],1]**2)/self.cte_flux_s[ch,0:self.lmax[ch]]/float(self.nb_pixel)
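            # Hedged reading of the quantities above: the charge-transfer inefficiency is
            # estimated as 1-CTE ~ (signal in overscan pixels 1+2) / (signal in the last
            # column or line x number of transfers), with nb_pixel the transfer count.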
# re-order and normalize Overscan data
self.overscan_std[ch,0:l_ft]=np.sqrt(self.overscan_std[ch,ft]/self.nb_file[ft])*gain[ch]
# overscan stability
self.over8_18[ch,0:self.lmax[ch]]=(self.cte_y_s[ch,0:self.lmax[ch],8:18]).mean(axis=1)
self.over8_18_std[ch,0:self.lmax[ch]]=np.sqrt(np.sum(self.cte_y_s_std[ch,0:self.lmax[ch],8:18]**2,axis=1))/10.
return
def print_cte(self,ccd_name,nf=0):
if self.serie :
print('Serial CTE for %s ----------------------------------------------------------------' % (ccd_name) )
else :
print(' // CTE for %s ----------------------------------------------------------------' % (ccd_name) )
#
print('Ch | flux | 1-CTE | Signal in Overscan | Overscan Noise Noise in |')
print(' | | | ov 1 ov 2 ov 8 to 18 | overscan corner |')
for n in range(nf,self.i_f) :
print('---------------------------------------------------------------------------------------------------------')
for ch in range(16) :
if n>=self.lmax[ch] :
print('%02d | % 6.0f | saturation (no eval) | % 6.02f % 6.02f % 6.02f+/-%5.02f | % 5.02f+/-%5.02f % 5.02f |' % (
ch,
self.cte_flux_s[ch,n],
self.cte_y_s[ch,n,0],
self.cte_y_s[ch,n,1],
self.over8_18[ch,n],self.over8_18_std[ch,n],
self.cte_noise_s[ch,n],self.cte_noise_s_std[ch,n],
self.overscan_std[ch,n]))
else :
print('%02d | % 6.0f | %9.3g+/-%9.3g | % 6.02f % 6.02f % 6.02f+/-%5.02f | % 5.02f+/-%5.02f % 5.02f |' % (
ch,
self.cte_flux_s[ch,n],
self.ylev[ch,n],self.ylev_std[ch,n],
self.cte_y_s[ch,n,0],
self.cte_y_s[ch,n,1],
self.over8_18[ch,n],self.over8_18_std[ch,n],
self.cte_noise_s[ch,n],self.cte_noise_s_std[ch,n],
self.overscan_std[ch,n])
)
print('---------------------------------------------------------------------------------------------------------')
return
def plot_cte(self,ch,ccd_name,nf=0,on_screen=False,root_dir='.',unit='e-') :
'''
plot_cte(self,ch,ccd_name,nf=0,on_screen=False,root_dir='.')
Plot the CTE results from cte class per channel
Parameters:
ch (int) : channel index ( = hdu number -1 in data file ) to plot
        ccd_name (str) : extra string used in captions to identify this plot series
                         (the CCD name itself, read from the file header, is added automatically;
                         this is more to identify the run or test level in the plot labels)
nf (int) : index of first flux entry to plot (default=0)
on_screen (bool) : do we plot on display (or just save png on disk ) (default=False)
root_dir (str) : top directory to save directory tree with plots
(default = '.' , directory used to save the plots will be ./raft_name/ccd_name/ch/ )
unit (str) : unit of flux used ( e- or ADU )
'''
#
root_plt=os.path.join(root_dir,self.first_file.raftbay,self.first_file.ccdslot,str(self.first_file.Hdu[ch]))
label_header=ccd_name+' '+self.first_file.raftbay+' '+self.first_file.ccdslot+' '+self.first_file.Image[ch]+' (hdu='+str(self.first_file.Hdu[ch])+')'
        # create the directory
os.makedirs(root_plt,exist_ok=True)
#
xx=[max(np.min(self.cte_flux_s[:,nf:self.lmax[ch]])*.9,10.),min(2.0e5,np.max(self.cte_flux_s[:,nf:self.lmax[ch]])*1.1)]
#
pix_col=['b','c']
pix_sym=['<','>']
fig=plt.figure(figsize=(10,12))
x=range(self.first,self.first+28)
if self.serie :
title="CTI Serial : "+label_header
yv=5.0e-6
else :
title="CTI // : "+label_header
yv=3.0e-6
yy=[yv,yv]
fig.suptitle(title,y=0.94)
#fig.tight_layout()
iplt=1
ax=fig.add_subplot(3,3,iplt)
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
iplt+=1
#ylev=(self.cte_y_s[ch,nf:self.lmax[ch],0]+self.cte_y_s[ch,nf:self.lmax[ch],1])/self.cte_flux_s[ch,nf:self.lmax[ch]]/float(self.nb_pixel)
label='%02d' % self.first_file.Hdu[ch]
#plt.plot(self.cte_flux_s[ch,nf:self.lmax[ch]],self.ylev[ch,nf:self.lmax[ch]],'o',color='r',label=label)
plt.errorbar(self.cte_flux_s[ch,nf:self.lmax[ch]],self.ylev[ch,nf:self.lmax[ch]], yerr=self.ylev_std[ch,nf:self.lmax[ch]],fmt='o', ecolor='r',label=label)
#print(self.ylev[ch,nf:self.lmax[ch]])
#print(self.ylev_std[ch,nf:self.lmax[ch]])
plt.plot(xx,yy,'g')
if self.serie :
plt.xlabel('<flux> of last column in '+unit)
else :
plt.xlabel('<flux> of last line in '+unit)
plt.xlim(xx[0],xx[1])
y_min=min(max(int(np.min(self.ylev[ch,nf:self.lmax[ch]])*500)/100.,1.e-8),1e-7)
y_max=5e-5
plt.ylim(y_min,y_max)
plt.ylabel('1-CTE')
plt.xscale('log')
if (abs(y_max/y_min>80.)) : plt.yscale('log')
# plt.locator_params(axis="both", tight=True, nbins=10)
plt.legend()
ax=fig.add_subplot(3,3,iplt)
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
iplt+=1
y_min=1.
y_max=0
for pix in range(2) :
ylev=self.cte_y_s[ch,nf:self.lmax[ch],pix]/self.cte_flux_s[ch,nf:self.lmax[ch]]/float(self.nb_pixel)
#
label="pix + %d " % (pix+1)
y_min=min(y_min,np.min(ylev))
y_max=max(0.,np.max(ylev))
plt.plot(self.cte_flux_s[ch,nf:self.lmax[ch]],ylev,pix_sym[pix],color=pix_col[pix],label=label)
plt.plot(xx,yy,'g')
if self.serie :
plt.xlabel('<flux> of last column in '+unit)
else :
plt.xlabel('<flux> of last line in '+unit)
#plt.ylabel('1-CTE')
y_min=min(max(y_min*.5,1.e-8),5e-7)
y_max=max(y_max*1.5,1e-5)
plt.ylim(y_min,y_max)
plt.xscale('log')
if (y_max/y_min>80.) : plt.yscale('log')
plt.xlim(xx[0],xx[1])
plt.legend()
ax=fig.add_subplot(3,3,iplt)
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
ax.yaxis.set_label_position("right")
ax.yaxis.tick_right()
iplt+=1
y_min=0.
y_max=0.
for pix in range(2) :
label="pix + %d " % (pix+1)
plt.plot(self.cte_flux_s[ch,nf:self.lmax[ch]],self.cte_y_s[ch,nf:self.lmax[ch],pix],pix_sym[pix],color=pix_col[pix],label=label)
y_min=min(-1.,np.min(self.cte_y_s[ch,nf:self.lmax[ch],0:1]))
y_max=max(1.,np.max(self.cte_y_s[ch,nf:self.lmax[ch],0:1])*1.1)
if self.serie :
plt.xlabel('<flux> of last column in '+unit)
else :
plt.xlabel('<flux> of last line in '+unit)
plt.ylabel('signal in overscan pixel(s) in '+unit)
plt.xscale('log')
if y_max > 10. :
plt.yscale('symlog')
plt.plot([xx[0],xx[1]],[0.,0.],'--',color='black')
#
plt.ylim(y_min,y_max)
plt.xlim(xx[0],xx[1])
plt.legend(loc=2)
#plt.xticks(ticks_flux)
#
ax=fig.add_subplot(3,3,iplt)
iplt+=1
#
xx=[self.cte_flux_s[ch,nf]*0.9,self.cte_flux_s[ch,self.lmax[ch]-1]*1.1]
yy=[0.,0.]
plt.plot(xx,yy,'b--')
plt.errorbar(self.cte_flux_s[ch,nf:self.lmax[ch]],
self.over8_18[ch,nf:self.lmax[ch]],yerr=self.over8_18_std[ch,nf:self.lmax[ch]],fmt='o',color='r', ecolor='r',label='Signal Overscan[8:18]')
if self.serie :
plt.xlabel('<flux> of last column in '+unit)
plt.ylabel('signal in '+unit+' in serial Overscan')
else :
plt.xlabel('<flux> of last line in '+unit)
plt.ylabel('signal in '+unit+' in // Overscan')
plt.xscale('log')
plt.ylim(min(np.min(self.over8_18[ch,nf:self.lmax[ch]])*1.2,-0.5),min(10.,max(0.5,np.max(self.over8_18[ch,nf:max(nf+1,self.lmax[ch]-1)])*1.5)))
plt.legend(loc=2)
ax=fig.add_subplot(3,3,iplt)
ax.yaxis.set_label_position("right")
iplt+=1
plt.plot(self.cte_flux_s[ch,nf:self.lmax[ch]],self.overscan_std[ch,nf:self.lmax[ch]],'<',label='Corner Noise')
plt.errorbar(self.cte_flux_s[ch,nf:self.lmax[ch]],
self.cte_noise_s[ch,nf:self.lmax[ch]],yerr=self.cte_noise_s_std[ch,nf:self.lmax[ch]],fmt='o',color='r', ecolor='r',label='Frame Noise')
try :
mean_noiseV=np.array([self.cte_noise_s[ch,ii] for ii in range(nf,self.lmax[ch]) if self.cte_flux_s[ch,ii] > 1000 and self.cte_flux_s[ch,ii] < 50000])
if len(mean_noiseV)>0 :
mean_noise=mean_noiseV.mean()
xx=[self.cte_flux_s[ch,nf],self.cte_flux_s[ch,self.lmax[ch]-1]]
yy=[mean_noise,mean_noise]
plt.plot(xx,yy,'b--')
except :
pass
if self.serie :
plt.xlabel('<flux> of last column in '+unit)
plt.ylabel('Noise from Serial Overscan in '+unit)
else :
plt.xlabel('<flux> of last line in '+unit)
plt.ylabel('Noise from // Overscan in '+unit)
plt.xscale('log')
ymin_cc=3.
ymax_cc=max(min(30.,np.max(self.cte_noise_s[ch,nf:max(nf+1,self.lmax[ch]-2)])*1.2),10.)
#print(ymax_cc,np.max(self.cte_noise_s[ch,nf:max(nf+1,self.lmax[ch]-5)])*1.5)
plt.ylim(ymin_cc,ymax_cc)
#if ymax_cc > 20 :
plt.legend(loc=2)
#else :
# plt.legend(loc=3)
#
ax=fig.add_subplot(3,3,iplt)
iplt+=1
flux=0.
l_last=nf
#lmax=len(self.cte_flux_s[ch,:])
count=0
im=0
#
y_min=0.
y_max=0.
#max_plt=max(int((self.lmax[ch]-nf)/4)+1,9)
for l in range(nf,self.lmax[ch]) :
if ((self.cte_flux_s[ch,l_last]/self.cte_flux_s[ch,l] < 0.9 ) and ( l_last < l )) :
                # first test : only plot results for points that differ enough ; second test : make sure we already selected something ; third test (l < lmax[ch]) : avoid plotting points that are too saturated
if im>1 :
if self.serie :
label="%5.1f %s in last Col. " % (self.cte_flux_s[ch,l_last:self.lmax[ch]].mean(axis=0),unit)
else :
label="%5.1f %s in last Line" % (self.cte_flux_s[ch,l_last:self.lmax[ch]].mean(axis=0),unit)
else :
label="%5.1f" % (self.cte_flux_s[ch,l_last:l].mean(axis=0))
yplt=self.cte_y_s[ch,l_last:l,:].mean(axis=0)
y_min=min(max(min(np.min(yplt)*1.2,0.),-10.),y_min)
y_max=max(min(np.max(yplt)*1.2,100.),y_max)
plt.plot(x,yplt,label=label)
l_last=l
count+=1
if count == 9 and im<2 and l < self.lmax[ch]-1 :
count = 0
#plt.yscale('log')
if self.serie :
plt.xlabel('column number (serial overscan)')
else :
plt.xlabel('line number (// overscan)')
if im==0 or im==1 :
plt.ylabel('Overscan Signal in '+unit)
ymax=max(y_max,y_min+1.)
plt.ylim(y_min,y_max)
if im==0 : plt.plot([x[0],x[-1]],[0.,0.],'--',color='black')
if y_max>80. :
plt.yscale('symlog')
plt.xlim(self.first,self.first+27)
if im == 0 :
ax.yaxis.set_label_position("right")
ax.yaxis.tick_right()
# plt.legend(bbox_to_anchor=(1.05, 1),loc=2, borderaxespad=0.)
#else :
plt.legend(loc=1)
ax=fig.add_subplot(3,3,iplt)
iplt+=1
y_min=0
y_max=0
im+=1
if count !=0 or l==nf :
if self.serie :
plt.xlabel('column number (serial overscan)')
else :
plt.xlabel('line number (// overscan)')
if im<2 : plt.ylabel('Overscan Signal in '+unit)
if self.serie :
label="%5.1f %s in last Col. " % (self.cte_flux_s[ch,l_last:self.lmax[ch]].mean(axis=0),unit)
else :
label="%5.1f %s in last Line" % (self.cte_flux_s[ch,l_last:self.lmax[ch]].mean(axis=0),unit)
yplt=self.cte_y_s[ch,l_last:self.lmax[ch],:].mean(axis=0)
y_min=min(max(min(np.min(yplt)*1.2,0.),-10.),y_min)
y_max=max(min(np.max(yplt)*1.2,100.),y_max)
plt.plot(x,yplt,label=label)
plt.xlim(self.first,self.first+27)
plt.ylim(y_min,y_max)
#plt.ylim(-10.,min(np.max(yplt)*1.2,100.))
if y_max>80. :
plt.yscale('symlog')
plt.legend(bbox_to_anchor=(1.05, 1),loc=2, borderaxespad=0.)
#plt.legend()
# Overscan noise
#ax=fig.add_subplot(3,3,iplt)
#iplt+=1
#
if on_screen : plt.show()
if self.serie :
plotfile=root_plt+'/cte_serial.png'
else :
plotfile=root_plt+'/cte_parallel.png'
fig.savefig(plotfile)
if not(on_screen) : plt.close(fig)
return
def cte_example():
get_ipython().magic('matplotlib inline')
print (' file =',sys.argv[1:-1],' TESTTYPE=',sys.argv[-1])
selection=sys.argv[-1]
    # fkey must follow the Ifile selection structure {selection_name: {'key': {header: {keyword: value}}}}; 0 addresses the primary HDU
    file=Ifile(dirall=sys.argv[1:-1],fkey={selection: {'key': {0: {'IMGTYPE': 'Acquisition', 'TESTTYPE': sys.argv[-1]}}}})
#
plt.interactive(1)
#file=Ifile(dirall=['/Users/antilog/scratch/e2v_190/20170314102625/*.fits'],fkey={})
#
    cte_data=cte(all_file=file.all_file,gain=[0.704650434205,0.68883578783,0.688459357874,0.696697494642,0.689209827484,0.696579402812,0.698973006751,0.689613072912,0.682880384357,0.696206655845,0.690349506621,0.691506176017,0.690763478766,0.689762341309,0.694801544092,0.850025229184 ])
    for ch in range(16) :
        cte_data.plot_cte(ch=ch,ccd_name=selection,nf=0,on_screen=True)
def fft_noise(h_all,channel=range(1,17),fplot=True,mean=False,start=1,int_pixel=1.8e-6,int_line=30.e-6,verbose=0,legend=True,xboundary=(20,500),yboundary=(30,2000),label='',color_v=None,two=True,axes=None,index=None) :
cmap=plt.get_cmap('nipy_spectral')
colors=[cmap(j)[:3] for j in np.linspace(0,1,17)]
if color_v != None :
color_val=color_v
else :
color_val=channel
#
nb_l=yboundary[1]-yboundary[0]
nb_c=xboundary[1]-xboundary[0]
nb_file=len(h_all)
nb_channel=len(channel)
freq_x = np.fft.rfftfreq(nb_c, d=int_pixel)[start:]
freqf_x = np.flipud(1./np.fft.rfftfreq(nb_c,1)[start:])
noise=np.zeros((nb_channel))
#
# image area
first_line,first_p_over,first_col,first_s_over=image_area(h_all[0])
first_good_overs=first_s_over+2
first_good_overp=first_p_over+2
#
for ich in range(nb_channel) :
ch=int(channel[ich])
for i_h in range(nb_file) :
h=h_all[i_h]
#
if i_h==0 :
(n_y,n_x)=np.shape(h[1].data)
                # delta time between 2 pixels from 2 different lines
delta_line=int_pixel*n_x+int_line
freq_y = np.fft.rfftfreq(nb_l, d=delta_line)[start:]
freqf_y = np.flipud(1./np.fft.rfftfreq(nb_l,d=delta_line/int_pixel)[start:])
freq=np.append(freq_y,freq_x)
freqf=np.append(freqf_x,freqf_y)
#
mean_line=np.median(h[ch].data[yboundary[0]:yboundary[1],:],axis=0)
mean_column=np.median(h[ch].data[:,xboundary[0]:xboundary[1]],axis=1)
if (ich==0 and i_h==0) or ( not(mean) and i_h==0 ) :
ff_x=np.zeros((int(nb_c/2)))
ff_y=np.zeros((int(nb_l/2)))
for l in range(yboundary[0],yboundary[1]) :
raw=h[ch].data[l,:]-mean_line
to_fft=raw[xboundary[0]:xboundary[1]]-raw[first_good_overs:].mean()
ff_x+=np.absolute(np.fft.rfft(to_fft))[start:]
for c in range(xboundary[0],xboundary[1]) :
raw=h[ch].data[:,c]-mean_column
to_fft=raw[yboundary[0]:yboundary[1]]-raw[first_good_overp:].mean()
ff_y+=np.absolute(np.fft.rfft(to_fft))[start:]
noise[ich]+=(h[ch].data[yboundary[0]:yboundary[1],first_good_overs:].std())**2
if verbose>1 : print ('channel %d noise %3.3f Overscan dispersion = %3.3f '%(ch,h[ch].data[yboundary[0]:yboundary[1],first_good_overs:].std(),(h[ch].data[yboundary[0]:yboundary[1],first_good_overs:].mean(axis=1)).std()))
if (i_h==nb_file-1) and ( ich==len(channel)-1 or not(mean) ) :
if mean :
                    # in fact we should divide by the number of FFT bins, not by the number of signal samples ... factor 2 ?
ff_xn=ff_x/nb_l/nb_c/nb_file/nb_channel/2.
ff_yn=ff_y/nb_l/nb_c/nb_file/nb_channel
xnorm=np.append(ff_yn,ff_xn)
label_ch=label+'<'+','.join(map(str,channel))+'>'
else :
ff_xn=ff_x/nb_l/nb_c/nb_file/2.
ff_yn=ff_y/nb_l/nb_c/nb_file
xnorm=np.append(ff_yn,ff_xn)
label_ch=label+'%d' % (ch)
if fplot :
if two :
if index!=None :
axes[index[0]].plot(freq_x,ff_xn,label=label_ch,color=colors[color_val[ich]])
axes[index[1]].plot(freq_y,ff_yn,label=label_ch,color=colors[color_val[ich]])
else :
plt.plot(freq_x,ff_xn,label=label_ch,color=colors[color_val[ich]])
plt.plot(freq_y,ff_yn,label=label_ch,color=colors[color_val[ich]])
else :
if index!=None :
axes[index[0]].plot(freq,xnorm,label=label_ch,color=colors[color_val[ich]])
else :
plt.plot(freq,xnorm,label=label_ch,color=colors[color_val[ich]])
else :
if two :
ff_xnf=np.flipud(ff_xn)
ff_ynf=np.flipud(ff_yn)
if index!=None :
axes[index[0]].plot(freqf_x,ff_xnf,label=label_ch,color=colors[color_val[ich]])
axes[index[1]].plot(freqf_y,ff_ynf,label=label_ch,color=colors[color_val[ich]])
else :
plt.plot(freqf_x,ff_xnf,label=label_ch,color=colors[color_val[ich]])
plt.plot(freqf_y,ff_ynf,label=label_ch,color=colors[color_val[ich]])
else :
xnormf=np.flipud(xnorm)
if index!=None :
axes[index[0]].plot(freqf,xnormf,label=label_ch,color=colors[color_val[ich]])
else :
plt.plot(freqf,xnormf,label=label_ch,color=colors[color_val[ich]])
if verbose :
argsort=np.argsort(xnorm)
sort=np.sort(xnorm)
ff_mean=np.mean(xnorm)
ff_sum=np.sum(xnorm)
        # do a quick and dirty bias on the sigma ...
ff_sig=xnorm.std()
#for i in range(1,len(argsort)) :
# if xnorm[argsort[-i]] < ff_mean+3*ff_sig : break
#ff_sig=sort[0:-i].std()
#
print(' sum(fft) %g <fft level> %g fft dispersion %g ' % (ff_sum,ff_mean,ff_sig))
if not(fplot) :
xnormf=np.flipud(xnorm)
for i in range(1,len(argsort)) :
if xnorm[argsort[-i]] < ff_mean+2*ff_sig : break
if i>1 and ( ( argsort[-i]+1 in argsort[-i+1:] ) or ( argsort[-i]-1 in argsort[-i+1:] )) : continue
print (' fft bin %d , delta(pixels) %f (%6.1f Hz) : %g ' % (argsort[-i]+1,freqf[-argsort[-i]-1],freq[argsort[-i]],xnorm[argsort[-i]]))
#
if legend :
if fplot :
            plt.xlabel('Frequency in Hz')
else :
plt.xlabel('Noise Period in Pixel(s)')
plt.legend(bbox_to_anchor=(1.05, 1),loc=2, borderaxespad=0.)
#plt.xscale('log')
#plt.yscale('log')
noise=np.sqrt(noise/nb_file)
return freq,xnorm,noise
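# Hedged usage sketch (the file name is illustrative): overlay the row/column
# noise spectra of the first 8 channels of a single frame:
#   h = pyfits.open('some_bias_frame.fz')
#   freq, spectrum, noise = fft_noise([h], channel=range(1, 9), mean=False)
#   plt.show(); h.close()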
def for_ever(top_dir='/data/frames',do_fft=False,do_cte=False,xboundary=(20,500),yboundary=(30,2000),gain=[1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.]) :
filename_old=''
old_dir=''
while True :
dir=top_dir+'/'+time.strftime('%Y%m%d')
#dir=top_dir
files = glob.glob(dir+'/*.fz')
if dir != old_dir :
print (' Scan Dir %s ========================================' % (dir) )
old_dir=dir
if files:
_,new_file =os.path.split( max(files, key=os.path.getctime) )
if new_file != filename_old :
filename=dir+'/'+new_file
                # so it's a new file, but it may not be fully written yet
time.sleep(5)
all_file=Ifile(dirall=[filename],single_t=False)
filename_old=new_file
print ('%s -------------------------------------------------------------------------' % (new_file) )
print ('Ch | mean median med. - std | <ov //> std std | <ov S.> std std |')
print (' | image image S_over image | ov // Fix//o | ov S. Fix S. |')
for ch in range(16) :
print ('%02d | % 6.0f % 6.0f % 6.0f % 8.02f | % 6.0f % 8.02f % 8.02f | % 6.0f % 8.02f % 8.02f |' % (ch,
all_file.all_file[0].Mean[ch],
all_file.all_file[0].Median[ch],
all_file.all_file[0].Median[ch]-all_file.all_file[0].MeanSScan[ch],
all_file.all_file[0].Std[ch],
all_file.all_file[0].MeanPScan[ch],
all_file.all_file[0].StdPScan[ch],
all_file.all_file[0].StdPScanOS[ch],
all_file.all_file[0].MeanSScan[ch],
all_file.all_file[0].StdSScan[ch],
all_file.all_file[0].StdSScanOS[ch]) )
print ('----------------------------------------------------------------------------------------------------')
if do_cte :
cte_s=cte(all_file=all_file.all_file,gain=gain,serie=True)
ccd=new_file
cte_s.print_cte(ccd_name=ccd,nf=0)
#for ch in range(16) :
# cte_s.plot_cte(ch=ch,ccd_name=ccd,nf=0,on_screen=True)
# plt.show()
cte_p=cte(all_file=all_file.all_file,gain=gain,serie=False)
cte_p.print_cte(ccd_name=ccd,nf=0)
#for ch in range(16) :
# cte_p.plot_cte(ch=ch,ccd_name=ccd,nf=0,on_screen=True)
# plt.show()
if do_fft :
fitsfile=pyfits.open(filename)
                    # fft_it is not defined in this module; the spectrum function above is fft_noise
                    fft_noise([fitsfile],channel=range(1,9),xboundary=xboundary,yboundary=yboundary)
                    fft_noise([fitsfile],channel=range(9,17),xboundary=xboundary,yboundary=yboundary)
plt.show()
fitsfile.close()
time.sleep(2)
return
|
python
|
#!/usr/bin/python
#
# Copyright 2012 Sonya Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import csv, re, xlrd
from usa import config, eia, dbsetup
from common import fileutils, sqlhelper
from common.dbconnect import db
# tables by measurement
pricetable = "%s.seds_price" % config.EIA_SCHEMA
usetable = "%s.seds_use_btu" % config.EIA_SCHEMA
# we need to parse three files before we can populate the yearly
# tables
data = {}
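# Resulting nesting (MSN value is illustrative):
#   data[2007]['PAAC'] == {'price': ..., 'use_btu': ..., 'ex': ...}
# where the 4-character MSN 'PAAC' encodes source 'PA' and sector 'AC'.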
def parse_measurement(filename, measurement, tracker):
filepath = fileutils.getcache(filename)
with open(filepath) as f:
csvf = csv.reader(f)
header = next(csvf)
for stryear in header[2:]:
year = int(stryear)
if year not in data:
data[year] = {}
for row in csvf:
if len(row) == len(header):
if row[0] == "US":
msn = row[1][:4]
for i in range(2, len(row)):
year = int(header[i])
value = row[i].strip()
if len(value):
if msn not in data[year]:
data[year][msn] = {measurement: value}
else:
data[year][msn][measurement] = value
source = msn[0:2]
sector = msn[2:4]
insert_values = [year, source, sector, float(value)]
if measurement == "price":
tracker.insert_row(pricetable, insert_values)
elif measurement == "use_btu":
tracker.insert_row(usetable, insert_values)
def create_consolidated_tables():
allsources = eia.fossilfuels + eia.elec_sources + \
eia.nuclear + eia.renewables
allsectors = ["TC", "AC", "CC", "IC", "RC"] + eia.elec_sectors
for year in config.STUDY_YEARS:
strings = {
"renewables": sqlhelper.set_repr(eia.renewables),
"elec_sources": sqlhelper.set_repr(eia.elec_sources),
"elec_sectors": sqlhelper.set_repr(eia.elec_sectors),
"allsources": sqlhelper.set_repr(allsources),
"allsectors": sqlhelper.set_repr(allsectors),
"from_table": "%s.seds_us_%d" % (config.EIA_SCHEMA, year),
"tablename": "%s.seds_short_%d" % (config.EIA_SCHEMA, year),
}
db.execute("DROP TABLE IF EXISTS %(tablename)s CASCADE" % strings)
db.execute("""
SELECT source, sector,
case when sum(use_btu) = 0 then 0
else sum(ex) / sum(use_btu) end as price,
sum(use_btu) as use_btu,
sum(ex) as ex
INTO %(tablename)s
FROM (SELECT case when source in %(renewables)s then 'RE'
when source in %(elec_sources)s then 'ES'
else source end as source,
case when sector in %(elec_sectors)s then 'EI'
else sector end as sector,
price, use_btu, ex
FROM %(from_table)s
WHERE source in %(allsources)s
AND sector in %(allsectors)s) e
GROUP by source, sector order by source, sector""" % strings)
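        # The query above folds renewables into 'RE', electricity sources into 'ES',
        # and electric-power sectors into 'EI', then recomputes price as
        # sum(expenditure) / sum(use_btu) for each consolidated (source, sector) pair.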
def doparse():
tracker = dbsetup.MultiTableStateTracker()
tracker.create_table(pricetable,
["year", "source", "sector", "price"],
["int", "char(2)", "char(2)", "float"],
cascade=True)
tracker.create_table(usetable,
["year", "source", "sector", "use_btu"],
["int", "char(2)", "char(2)", "float"],
cascade=True)
tracker.warmup()
parse_measurement("eia/pr_all.csv", "price", tracker)
parse_measurement("eia/use_all_btu.csv", "use_btu", tracker)
parse_measurement("eia/ex_all.csv", "ex", tracker)
tracker.flush()
# tables by year
years = sorted(data)
for year in years:
tablename = "eia.seds_us_%d" % year
tracker.create_table(tablename,
["source", "sector", "price", "use_btu", "ex"],
["char(2)", "char(2)", "float", "float", "float"],
cascade=True)
tracker.warmup()
msns = sorted(data[year])
for msn in msns:
values = data[year][msn]
source = msn[0:2]
sector = msn[2:4]
insert_values = [source, sector]
for field in ("price", "use_btu", "ex"):
                next_value = 0 # default missing measurements to 0 so every row still gets a value
if field in values:
# convert expenditures to the same units as io table
if field == "ex":
next_value = float(values[field]) * 1000
else:
next_value = float(values[field])
insert_values.append(next_value)
tracker.insert_row(tablename, insert_values)
tracker.flush()
create_consolidated_tables()
|
python
|
class Solution(object):
def lengthOfLongestSubstringTwoDistinct(self, s):
"""
:type s: str
:rtype: int
"""
if not s:
return 0
c = s[0]
maxlen = 0
chars = set([c])
llen = 0
start = 0
end = 0
for i, cc in enumerate(s):
if cc == c:
end += 1
llen += 1
elif cc in chars:
c = cc
start = i - 1
end = i
llen += 1
elif len(chars) == 1:
c = cc
chars.add(cc)
start = i - 1
end = i
llen += 1
else:
if llen > maxlen:
maxlen = llen
chars = set([c, cc])
c = cc
llen = end - start + 1
start = i - 1
end = i
# print cc, c, start, end, llen, chars
if llen > maxlen:
maxlen = llen
return maxlen
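# A more conventional sliding-window sketch for comparison (not part of the
# original submission): same O(n) idea, tracking the last-seen index of at
# most two distinct characters.
def length_of_longest_substring_two_distinct(s):
    last_seen = {}
    start = best = 0
    for i, ch in enumerate(s):
        last_seen[ch] = i
        if len(last_seen) > 2:
            # evict the character whose most recent occurrence is oldest
            oldest = min(last_seen, key=last_seen.get)
            start = last_seen.pop(oldest) + 1
        best = max(best, i - start + 1)
    return best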
|
python
|
import sys
from sqlalchemy import create_engine
import pandas as pd
import numpy as np
import re
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer, WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer
import joblib
import nltk
nltk.download(['punkt', 'stopwords', 'wordnet'])  # wordnet is required by WordNetLemmatizer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.multioutput import MultiOutputClassifier
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report
from nltk.stem.porter import *
def load_data(database_filepath):
##
"""
Load Data Function
Arguments:
database_filepath -> path to SQLite db
Output:
X -> feature DataFrame
Y -> label DataFrame
category_names -> used for data visualization (app)
"""
##
engine = create_engine(f'sqlite:///{database_filepath}')
df = pd.read_sql_table('InsertTableName',engine)
X = df['message']
Y = df.iloc[:, 4:]
del Y['child_alone']
return X , Y , Y.columns.values
def tokenize(text):
##
"""
Tokenize function
Arguments:
text -> list of text messages (english)
Output:
clean_tokens -> tokenized text, clean for ML modeling
"""
##
lemmatizer = WordNetLemmatizer()
# Convert to lowercase
text = text.lower()
# Remove punctuation characters
text = re.sub(r"[^a-zA-Z0-9]", " ", text)
# Split text into words using NLTK
words = word_tokenize(text)
# Remove stop words
words = [w for w in words if w not in stopwords.words("english")]
# Reduce words to their root form
stemmed = [PorterStemmer().stem(w) for w in words]
clean = [lemmatizer.lemmatize(t) for t in stemmed]
return clean
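# Illustrative example of the cleaning above (exact output depends on the NLTK
# data installed): tokenize("Flooding reported in the city center!") would give
# something like ['flood', 'report', 'citi', 'center'].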
def build_model():
##
"""
Build Model function
This function output is a Scikit ML Pipeline that process text messages
according to NLP best-practice and apply a classifier.
"""
##
pipeline = Pipeline([
('vect', TfidfVectorizer(tokenizer=tokenize)),
('clf', MultiOutputClassifier(estimator=LogisticRegression()))
])
parameters = {'clf__estimator__C': [0.1,1,10]}
cv = GridSearchCV(pipeline, parameters,cv=5)
return cv
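# Note: the grid above only tunes the classifier's C value. Hypothetical extra
# entries such as 'vect__ngram_range': [(1, 1), (1, 2)] could be added to the
# parameters dict to tune the TfidfVectorizer step as well.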
def evaluate_model(model, X_test, Y_test, category_names):
##
"""
Evaluate Model function
This function applies ML pipeline to a test set and prints out
model performance
Arguments:
model -> Scikit ML Pipeline
X_test -> test features
Y_test -> test labels
category_names -> label names (multi-output)
"""
##
    y_pred = pd.DataFrame(model.predict(X_test), columns=Y_test.columns)
    print(classification_report(Y_test.values.ravel(), y_pred.values.ravel()))
pass
def save_model(model, model_filepath):
##
"""
Save Model function
This function saves trained model as Pickle file, to be loaded later.
Arguments:
model -> GridSearchCV or Scikit Pipelin object
model_filepath -> destination path to save .pkl file
"""
##
joblib.dump(model,f'{model_filepath}')
pass
def main():
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print('Building model...')
model = build_model()
print('Training model...')
model.fit(X_train, Y_train)
print('Evaluating model...')
evaluate_model(model, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main()
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8
# Copyright 2017-2019 The FIAAS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from k8s.models.autoscaler import HorizontalPodAutoscaler
from mock import create_autospec
from requests import Response
from utils import TypeMatcher
from fiaas_deploy_daemon import ExtensionHookCaller
from fiaas_deploy_daemon.deployer.kubernetes.autoscaler import should_have_autoscaler, AutoscalerDeployer
from fiaas_deploy_daemon.specs.models import AutoscalerSpec, ResourcesSpec, ResourceRequirementSpec, \
LabelAndAnnotationSpec
LABELS = {"autoscaler_deployer": "pass through"}
AUTOSCALER_API = '/apis/autoscaling/v1/namespaces/default/horizontalpodautoscalers/'
def test_default_spec_should_create_no_autoscaler(app_spec):
assert should_have_autoscaler(app_spec) is False
def test_autoscaler_enabled_and_1_replica_gives_no_autoscaler(app_spec):
app_spec = app_spec._replace(
autoscaler=AutoscalerSpec(enabled=True, min_replicas=1, max_replicas=1, cpu_threshold_percentage=50))
assert should_have_autoscaler(app_spec) is False
def test_autoscaler_enabled_and_2_max_replicas_and_no_requested_cpu_gives_no_autoscaler(app_spec):
app_spec = app_spec._replace(
autoscaler=AutoscalerSpec(enabled=True, min_replicas=1, max_replicas=2, cpu_threshold_percentage=50))
assert should_have_autoscaler(app_spec) is False
def test_autoscaler_enabled_and_2_max_replicas_and__requested_cpu_gives_autoscaler(app_spec):
app_spec = app_spec._replace(
autoscaler=AutoscalerSpec(enabled=True, min_replicas=1, max_replicas=2, cpu_threshold_percentage=50))
app_spec = app_spec._replace(resources=ResourcesSpec(limits=[], requests=ResourceRequirementSpec(cpu=1, memory=1)))
assert should_have_autoscaler(app_spec)
class TestAutoscalerDeployer(object):
@pytest.fixture
def extension_hook(self):
return create_autospec(ExtensionHookCaller, spec_set=True, instance=True)
@pytest.fixture
def deployer(self, owner_references, extension_hook):
return AutoscalerDeployer(owner_references, extension_hook)
@pytest.mark.usefixtures("get")
def test_new_autoscaler(self, deployer, post, app_spec, owner_references, extension_hook):
app_spec = app_spec._replace(
autoscaler=AutoscalerSpec(enabled=True, min_replicas=2, max_replicas=4, cpu_threshold_percentage=50))
app_spec = app_spec._replace(
resources=ResourcesSpec(limits=[], requests=ResourceRequirementSpec(cpu=1, memory=1)))
expected_autoscaler = {
'metadata': pytest.helpers.create_metadata('testapp', labels=LABELS),
'spec': {
"scaleTargetRef": {
"kind": "Deployment",
"name": "testapp",
"apiVersion": "apps/v1"
},
"minReplicas": 2,
"maxReplicas": 4,
"targetCPUUtilizationPercentage": 50
},
}
mock_response = create_autospec(Response)
mock_response.json.return_value = expected_autoscaler
post.return_value = mock_response
deployer.deploy(app_spec, LABELS)
pytest.helpers.assert_any_call(post, AUTOSCALER_API, expected_autoscaler)
owner_references.apply.assert_called_once_with(TypeMatcher(HorizontalPodAutoscaler), app_spec)
extension_hook.apply.assert_called_once_with(TypeMatcher(HorizontalPodAutoscaler), app_spec)
@pytest.mark.usefixtures("get")
def test_new_autoscaler_with_custom_labels_and_annotations(self, deployer, post, app_spec, owner_references,
extension_hook):
app_spec = app_spec._replace(
autoscaler=AutoscalerSpec(enabled=True, min_replicas=2, max_replicas=4, cpu_threshold_percentage=50))
app_spec = app_spec._replace(
resources=ResourcesSpec(limits=[], requests=ResourceRequirementSpec(cpu=1, memory=1)))
labels = LabelAndAnnotationSpec(deployment={}, horizontal_pod_autoscaler={"custom": "label"}, ingress={},
service={}, pod={}, status={})
annotations = LabelAndAnnotationSpec(deployment={}, horizontal_pod_autoscaler={"custom": "annotation"},
ingress={}, service={}, pod={}, status={})
app_spec = app_spec._replace(labels=labels, annotations=annotations)
expected_autoscaler = {
'metadata': pytest.helpers.create_metadata('testapp', labels={"autoscaler_deployer": "pass through",
"custom": "label"},
annotations={"custom": "annotation"}),
'spec': {
"scaleTargetRef": {
"kind": "Deployment",
"name": "testapp",
"apiVersion": "apps/v1"
},
"minReplicas": 2,
"maxReplicas": 4,
"targetCPUUtilizationPercentage": 50
}
}
mock_response = create_autospec(Response)
mock_response.json.return_value = expected_autoscaler
post.return_value = mock_response
deployer.deploy(app_spec, LABELS)
pytest.helpers.assert_any_call(post, AUTOSCALER_API, expected_autoscaler)
owner_references.apply.assert_called_once_with(TypeMatcher(HorizontalPodAutoscaler), app_spec)
extension_hook.apply.assert_called_once_with(TypeMatcher(HorizontalPodAutoscaler), app_spec)
def test_no_autoscaler_gives_no_post(self, deployer, delete, post, app_spec):
deployer.deploy(app_spec, LABELS)
delete.assert_called_with(AUTOSCALER_API + app_spec.name)
pytest.helpers.assert_no_calls(post)
def test_no_autoscaler_gives_no_put(self, deployer, delete, put, app_spec):
deployer.deploy(app_spec, LABELS)
delete.assert_called_with(AUTOSCALER_API + app_spec.name)
pytest.helpers.assert_no_calls(put)
|
python
|
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <[email protected]>
# (c) 1998-2022 all rights reserved
# support
import qed
# custom properties
def selectors(default={}, **kwds):
"""
A map from selector names to their legal values
"""
# build the trait descriptor and return it
return qed.properties.dict(
schema=qed.properties.tuple(schema=qed.properties.str(), default=()),
default=default, **kwds)
# end of file
|
python
|
#!/usr/bin/env python
import numpy
from numpy.random import RandomState
from sklearn.datasets import make_friedman1
from sklearn.model_selection import train_test_split
from typing import Tuple, Union
from backprop.network import Network
_demo_problem_num_train_samples: int = 1000
_demo_problem_num_test_samples: int = 100
_demo_num_uninformative_columns: int = 0
_random_state = 0
def demo_backprop(
num_train_samples: int = _demo_problem_num_train_samples,
num_test_samples: int = _demo_problem_num_test_samples,
num_uninformative_columns: int = _demo_num_uninformative_columns,
random_state: Union[int, None, RandomState] =_random_state
):
random_state = random_state if isinstance(random_state, RandomState) \
else RandomState(random_state)
# make training and test data sets for demo
inputs_train, inputs_test, outputs_train, outputs_test = make_test_problem(
n_train_samples=num_train_samples, n_test_samples=num_test_samples,
n_uninformative=num_uninformative_columns, random_state=random_state
)
# build network
num_inputs = inputs_train.shape[1]
num_outputs = outputs_train.shape[1]
num_hidden = 2 * num_inputs * num_outputs
# make a network with a single hidden layer with num_hidden nodes
network = Network(num_inputs, num_hidden, num_outputs,
random_state=random_state)
# to make two hidden layers, could do:
# network = Network(num_inputs, num_hidden, num_hidden, num_outputs)
# train network on training set
network.train_online(inputs=inputs_train, correct_outputs=outputs_train)
# predict results on test set
predict_test = network.predict(inputs_test)
# calculate error
err = ((predict_test - outputs_test)**2).sum(axis=1).mean(axis=0)
print('Cross-validated error: %.3g' % err)
def make_test_problem(
n_train_samples: int = _demo_problem_num_train_samples,
n_test_samples: int = _demo_problem_num_test_samples,
n_uninformative: int = 0,
random_state: Union[int, None, RandomState] = _random_state
) -> Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray]:
n_samples = n_train_samples + n_test_samples
assert n_uninformative >= 0
n_features = 5 + n_uninformative
inputs, outputs = make_friedman1(
n_samples=n_samples, n_features=n_features, random_state=random_state
)
if inputs.ndim == 1:
inputs = numpy.reshape(inputs, inputs.shape + (1,))
if outputs.ndim == 1:
outputs = numpy.reshape(outputs, outputs.shape + (1,))
inputs_train, inputs_test, outputs_train, outputs_test = train_test_split(
inputs, outputs,
train_size=n_train_samples, test_size=n_test_samples,
random_state=random_state
)
return inputs_train, inputs_test, outputs_train, outputs_test
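# For reference: make_friedman1 generates y = 10*sin(pi*x1*x2) + 20*(x3-0.5)**2
# + 10*x4 + 5*x5 + noise, so only the first five features are informative; any
# extra n_uninformative columns are pure distractors for the network.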
if __name__ == "__main__":
demo_backprop()
|
python
|
# Copyright 2020 Board of Trustees of the University of Illinois.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import utils.datasetutils as datasetutils
class Capability():
def __init__(self, injson):
self.name = None
self.description = None
self.isOpenSource = None
self.apiDocUrl = None
self.deploymentDetails = None
self.apiBaseUrl = None
self.version = None
self.healthCheckUrl = None
self.status = None
self.dataDeletionEndpointDetails = None
self.contacts = None
# self.creationDate = None
# self.lastModifiedDate = None
self, restjson = datasetutils.update_capability_dataset_from_json(self, injson)
def set_name(self, name):
self.name = name
def get_name(self):
return self.name
def set_description(self, description):
self.description = description
def get_description(self):
return self.description
def set_is_open_source(self, isOpenSource):
self.isOpenSource = isOpenSource
def get_is_open_source(self):
return self.isOpenSource
def set_api_doc_url(self, apiDocUrl):
self.apiDocUrl = apiDocUrl
def get_api_doc_url(self):
return self.apiDocUrl
def set_deployment_details(self, deploymentDetails):
self.deploymentDetails = deploymentDetails
def get_deployment_details(self):
return self.deploymentDetails
def set_docker_image_name(self, dockerImageName):
self.dockerImageName = dockerImageName
def get_docker_image_name(self):
return self.dockerImageName
def set_environment_variables(self, environmentVariables):
self.environmentVariables = environmentVariables
def get_environment_variables(self):
return self.environmentVariables
def set_database_details(self, databaseDetails):
self.databaseDetails = databaseDetails
def get_database_details(self):
return self.databaseDetails
def set_version(self, version):
self.version = version
def get_version(self):
return self.version
def set_health_check_url(self, healthCheckUrl):
self.healthCheckUrl = healthCheckUrl
def get_health_check_url(self):
return self.healthCheckUrl
def set_auth_method(self, authMethod):
self.authMethod = authMethod
def get_auth_method(self):
return self.authMethod
def set_status(self, status):
self.status = status
def get_status(self):
return self.status
def set_data_deletion_endpoint_details(self, dataDeletionEndpointDetails):
self.dataDeletionEndpointDetails = dataDeletionEndpointDetails
def get_data_deletion_endpoint_details(self):
return self.dataDeletionEndpointDetails
def set_contacts(self, contacts):
self.contacts = contacts
def get_contacts(self):
return self.contacts
# def set_creation_date(self, creationDate):
# self.creationDate = creationDate
#
# def get_creation_date(self):
# return self.creationDate
#
# def set_last_modified_date(self, lastModifiedDate):
# self.lastModifiedDate = lastModifiedDate
#
# def get_last_modified_date(self):
# return self.lastModifiedDate
|
python
|
import numpy as np
from mrcnn import utils
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
"""Given the anchors and GT boxes, compute overlaps and identify positive
anchors and deltas to refine them to match their corresponding GT boxes.
anchors: [num_anchors, (y1, x1, y2, x2)]
gt_class_ids: [num_gt_boxes] Integer class IDs.
gt_boxes: [num_gt_boxes, IMAGE_SOURCES, (y1, x1, y2, x2)]
Returns:
rpn_match: [N] (int32) matches between anchors and GT boxes.
1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_bbox: [N, IMAGE_SOURCES, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
"""
n_boxes, n_image_source = gt_boxes.shape[:2]
# RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
# RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, n_image_source, 4))
# master boxes
master_gt_boxes = gt_boxes[:, 0, :]
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
# crowd_ix = np.where(gt_class_ids < 0)[0]
# if crowd_ix.shape[0] > 0:
# # Filter out crowds from ground truth class IDs and boxes
# non_crowd_ix = np.where(gt_class_ids > 0)[0]
# crowd_boxes = master_gt_boxes[crowd_ix]
# gt_class_ids = gt_class_ids[non_crowd_ix]
# master_gt_boxes = master_gt_boxes[non_crowd_ix]
# # Compute overlaps with crowd boxes [anchors, crowds]
# crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
# crowd_iou_max = np.amax(crowd_overlaps, axis=1)
# no_crowd_bool = (crowd_iou_max < 0.001)
# else:
# # All anchors don't intersect a crowd
# no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)
# Compute overlaps [num_anchors, num_gt_boxes]
overlaps = utils.compute_overlaps(anchors, master_gt_boxes)
# Match anchors to GT Boxes
# If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
# If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
# Neutral anchors are those that don't match the conditions above,
# and they don't influence the loss function.
# However, don't keep any GT box unmatched (rare, but happens). Instead,
# match it to the closest anchor (even if its max IoU is < 0.3).
#
# 1. Set negative anchors first. They get overwritten below if a GT box is
# matched to them. Skip boxes in crowd areas.
anchor_iou_argmax = np.argmax(overlaps, axis=1)
anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
# rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
rpn_match[anchor_iou_max < 0.3] = -1
# 2. Set an anchor for each GT box (regardless of IoU value).
# If multiple anchors have the same IoU match all of them
gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:,0]
rpn_match[gt_iou_argmax] = 1
# 3. Set anchors with high overlap as positive.
rpn_match[anchor_iou_max >= 0.7] = 1
# Subsample to balance positive and negative anchors
# Don't let positives be more than half the anchors
ids = np.where(rpn_match == 1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
if extra > 0:
# Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# Same for negative proposals
ids = np.where(rpn_match == -1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
np.sum(rpn_match == 1))
if extra > 0:
# Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# For positive anchors, compute shift and scale needed to transform them
# to match the corresponding GT boxes.
ids = np.where(rpn_match == 1)[0]
ix = 0 # index into rpn_bbox
# TODO: use box_refinement() rather than duplicating the code here
for i, a in zip(ids, anchors[ids]):
# Anchor
a_h = a[2] - a[0]
a_w = a[3] - a[1]
a_center_y = a[0] + 0.5 * a_h
a_center_x = a[1] + 0.5 * a_w
for idx_image_source in range(n_image_source):
# Closest gt box (it might have IoU < 0.7)
gt = gt_boxes[anchor_iou_argmax[i], idx_image_source]
# Convert coordinates to center plus width/height.
# GT Box
gt_h = gt[2] - gt[0]
gt_w = gt[3] - gt[1]
gt_center_y = gt[0] + 0.5 * gt_h
gt_center_x = gt[1] + 0.5 * gt_w
# Compute the bbox refinement that the RPN should predict.
rpn_bbox[ix, idx_image_source] = [
(gt_center_y - a_center_y) / a_h,
(gt_center_x - a_center_x) / a_w,
np.log(gt_h / a_h),
np.log(gt_w / a_w),
]
# Normalize
rpn_bbox[ix, idx_image_source] /= config.RPN_BBOX_STD_DEV
ix += 1
return rpn_match, rpn_bbox
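# Hedged usage sketch, not part of the original module: the tiny config class and the
# box values below are illustrative assumptions; any Mask R-CNN style config that
# exposes RPN_TRAIN_ANCHORS_PER_IMAGE and RPN_BBOX_STD_DEV should work the same way.
if __name__ == "__main__":
    class _DemoConfig:
        RPN_TRAIN_ANCHORS_PER_IMAGE = 256
        RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
    # Two anchors and a single GT box observed from one image source.
    demo_anchors = np.array([[0, 0, 10, 10], [5, 5, 20, 20]], dtype=np.float64)
    demo_gt_class_ids = np.array([1])
    demo_gt_boxes = np.array([[[4, 4, 18, 18]]], dtype=np.float64)  # [num_gt_boxes, IMAGE_SOURCES, 4]
    demo_match, demo_bbox = build_rpn_targets((128, 128, 3), demo_anchors,
                                              demo_gt_class_ids, demo_gt_boxes, _DemoConfig())
    print(demo_match, demo_bbox.shape)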
|
python
|
from django import forms
from django.contrib import admin
from django.utils.safestring import mark_safe
from .models import Episode, ShowDate
from .tasks import generate_peaks
class ShowsCommonModelAdminMixin:
save_on_top = True
list_filter = ("published", "show_code")
list_display_links = ("show_code", "display_name")
@admin.display(description="Name", ordering="name", empty_value=mark_safe("<em>Untitled</em>"))
def display_name(self, obj):
return obj.name or None
def get_fields(self, request, obj=None):
fields = list(super().get_fields(request, obj=obj))
if obj is None:
for field in self.get_readonly_fields(request, obj=obj):
fields.remove(field)
return fields
class EpisodeAdminModelForm(forms.ModelForm):
name_from_ffprobe = forms.BooleanField(
label="Generate name from file's metadata.",
required=False,
help_text=(
'Attempt to extract name from metadata on save. Will attempt to do so in <strong>"artist - title"</strong>'
" format."
),
)
class EpisodeAdmin(ShowsCommonModelAdminMixin, admin.ModelAdmin):
form = EpisodeAdminModelForm
fields = (
"show_code",
"published",
"slug",
"asset_url",
"name",
"name_from_ffprobe",
"description",
"date",
"duration",
"guid",
"has_peaks",
)
readonly_fields = ("guid", "has_peaks")
list_display = ("published", "show_code", "display_name", "date", "date", "duration", "has_peaks")
@admin.display(description="Peaks", boolean=True)
def has_peaks(self, obj):
return bool(obj.peaks)
def save_model(self, request, obj, form, change):
if form.cleaned_data["name_from_ffprobe"]:
obj.name = " - ".join(filter(None, (obj.ffprobe.artist, obj.ffprobe.title)))
super().save_model(request, obj, form, change)
generate_peaks(obj)
class ShowDateAdmin(ShowsCommonModelAdminMixin, admin.ModelAdmin):
fields = ("show_code", "published", "name", "dates", "start_time", "duration", "end_time")
list_display = ("published", "show_code", "display_name", "start_time", "end_time", "duration")
readonly_fields = ("end_time",)
admin.site.register(Episode, EpisodeAdmin)
admin.site.register(ShowDate, ShowDateAdmin)
|
python
|
""" Events emitted by the artist model """
from dataclasses import dataclass
from typing import List, Optional
from OpenCast.domain.event.event import Event, ModelId
@dataclass
class ArtistCreated(Event):
name: str
ids: List[ModelId]
thumbnail: Optional[str]
@dataclass
class ArtistThumbnailUpdated(Event):
thumbnail: str
@dataclass
class ArtistDeleted(Event):
ids: List[ModelId]
@dataclass
class ArtistVideosUpdated(Event):
ids: List[ModelId]
|
python
|
from unittest import TestCase
from lie2me import Field
class CommonTests(object):
def get_instance(self):
return self.Field()
def test_submitting_empty_value_on_required_field_returns_error(self):
field = self.get_instance()
field.required = True
value, error = field.submit(field.empty_value())
self.assertTrue(error)
def test_submitting_empty_value_on_optional_field_does_not_return_error(self):
field = self.get_instance()
field.required = False
value, error = field.submit(field.empty_value())
self.assertFalse(error)
def test_field_is_required_by_default(self):
field = self.get_instance()
value, error = field.submit(field.empty_value())
self.assertTrue(error)
def test_field_with_default_is_not_required(self):
field = self.get_instance()
field.default = self.valid_default
value, error = field.submit(field.empty_value())
self.assertFalse(error)
def test_field_instance_can_overwrite_specific_messages(self):
field = self.get_instance()
field.messages = {'required': 'Lorem ipsum'}
value, error = field.submit(None)
self.assertIn('Lorem ipsum', str(error))
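# Hedged usage sketch, not executed here: concrete test cases are expected to mix
# CommonTests into a unittest.TestCase and provide a `Field` class plus a
# `valid_default`; `SomeField` below is a placeholder, not a real lie2me class.
#
#   class SomeFieldCommonTests(CommonTests, TestCase):
#       Field = SomeField
#       valid_default = 'a valid value'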
|
python
|
import os
import sys
from RLTest import Env
from redisgraph import Graph, Node, Edge
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from base import FlowTestsBase
redis_graph = None
male = ["Roi", "Alon", "Omri"]
female = ["Hila", "Lucy"]
class testGraphMixLabelsFlow(FlowTestsBase):
def __init__(self):
self.env = Env()
global redis_graph
redis_con = self.env.getConnection()
redis_graph = Graph("G", redis_con)
self.populate_graph()
def populate_graph(self):
nodes = {}
# Create entities
for m in male:
node = Node(label="male", properties={"name": m})
redis_graph.add_node(node)
nodes[m] = node
for f in female:
node = Node(label="female", properties={"name": f})
redis_graph.add_node(node)
nodes[f] = node
for n in nodes:
for m in nodes:
if n == m: continue
edge = Edge(nodes[n], "knows", nodes[m])
redis_graph.add_edge(edge)
redis_graph.commit()
# Connect a single node to all other nodes.
def test_male_to_all(self):
query = """MATCH (m:male)-[:knows]->(t) RETURN m,t ORDER BY m.name"""
actual_result = redis_graph.query(query)
self.env.assertEquals(len(actual_result.result_set), (len(male) * (len(male + female)-1)))
def test_male_to_male(self):
query = """MATCH (m:male)-[:knows]->(t:male) RETURN m,t ORDER BY m.name"""
actual_result = redis_graph.query(query)
self.env.assertEquals(len(actual_result.result_set), (len(male) * (len(male)-1)))
def test_male_to_female(self):
query = """MATCH (m:male)-[:knows]->(t:female) RETURN m,t ORDER BY m.name"""
actual_result = redis_graph.query(query)
self.env.assertEquals(len(actual_result.result_set), (len(male) * len(female)))
def test_female_to_all(self):
query = """MATCH (f:female)-[:knows]->(t) RETURN f,t ORDER BY f.name"""
actual_result = redis_graph.query(query)
self.env.assertEquals(len(actual_result.result_set), (len(female) * (len(male + female)-1)))
def test_female_to_male(self):
query = """MATCH (f:female)-[:knows]->(t:male) RETURN f,t ORDER BY f.name"""
actual_result = redis_graph.query(query)
self.env.assertEquals(len(actual_result.result_set), (len(female) * len(male)))
def test_female_to_female(self):
query = """MATCH (f:female)-[:knows]->(t:female) RETURN f,t ORDER BY f.name"""
actual_result = redis_graph.query(query)
self.env.assertEquals(len(actual_result.result_set), (len(female) * (len(female)-1)))
def test_all_to_female(self):
query = """MATCH (f)-[:knows]->(t:female) RETURN f,t ORDER BY f.name"""
actual_result = redis_graph.query(query)
self.env.assertEquals(len(actual_result.result_set), (len(male) * len(female)) + (len(female) * (len(female)-1)))
def test_all_to_male(self):
query = """MATCH (f)-[:knows]->(t:male) RETURN f,t ORDER BY f.name"""
actual_result = redis_graph.query(query)
self.env.assertEquals(len(actual_result.result_set), (len(male) * (len(male)-1)) + len(female) * len(male))
def test_all_to_all(self):
query = """MATCH (f)-[:knows]->(t) RETURN f,t ORDER BY f.name"""
actual_result = redis_graph.query(query)
self.env.assertEquals(len(actual_result.result_set), (len(male+female) * (len(male+female)-1)))
|
python
|
import io
import logging
from typing import List, Union
LOG = logging.getLogger(__name__)
class FileHelper:
"""Encapsulates file related functions."""
def __init__(self, filepath, line_idx, contents):
self.filepath = filepath
self.line_idx = line_idx
self.contents = contents
@classmethod
def read_file(cls, fpath: str) -> List[str]:
"""Reads a file from FS. Returns a lis of strings from it.
:param fpath: File path
"""
with io.open(fpath, encoding='utf-8') as f:
data = f.read().splitlines()
return data
def write(self):
"""Writes updated contents back to a file."""
LOG.debug(f'Writing `{self.filepath}` ...')
with io.open(self.filepath, 'w', encoding='utf-8') as f:
f.write('\n'.join(self.contents))
def line_replace(self, value: str, offset: int = 0):
"""Replaces a line in file.
:param value: New line.
:param offset: Offset from line_idx
"""
target_idx = self.line_idx + offset
self.contents[target_idx] = value
def insert(self, value: Union[List[str], str], offset: int = 1):
"""Inserts a line (or many) into file.
:param value: New line(s).
:param offset: Offset from line_idx
"""
target_idx = self.line_idx + offset
if not isinstance(value, list):
value = [value]
self.contents[target_idx:target_idx] = value
def iter_after(self, offset: int) -> str:
"""Generator. Yields lines after line_idx
:param offset:
"""
target_idx = self.line_idx + offset
for line in self.contents[target_idx:]:
yield line
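# Hedged usage sketch, not part of the original module: 'some_file.txt' is a
# placeholder path and must exist for this to run.
if __name__ == '__main__':
    lines = FileHelper.read_file('some_file.txt')
    helper = FileHelper(filepath='some_file.txt', line_idx=0, contents=lines)
    helper.line_replace('# first line replaced')
    helper.insert(['# injected after the first line'], offset=1)
    helper.write()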
|
python
|
#!/usr/bin/python
import sys
import tarfile
import json
import gzip
import pandas as pd
import botometer
from pandas.io.json import json_normalize
## VARIABLE INITIATION
# system arguments
# arg 1 = 'rs' or 'sn'
# arg 2 = hour file 6, 7 or 8
# arg 3 = start row
# arg 4 = end row
# arg 5 = key selection, 1,2,3,4
# arg 6 = output file suffix (used in the output csv filename)
# sn 7 : total row 33277
# sn 8 : total row 53310
# rs 7 : 7230
# rs 8 : 10493
mashape_key = "QRraJnMT9KmshkpJ7iu74xKFN1jtp1IyBBijsnS5NGbEuwIX54"
if(int(sys.argv[5])==1):
twitter_app_auth = {
'consumer_key': 'qngvt8PPer3irSHHkx71gqpJg',
'consumer_secret': 'bAH258rRds9uWAi38kSwxgbJ1x0rAspasQACgOruuK4qnKsXld',
'access_token': '218041595-yrk9WyMnTjh4PBidhApb0DwryK83Wzr32IWi6bP4',
'access_token_secret': 'GCmOzFmzrOoAv59lCpKRQrC9e7H1P0449iaBW1rI66saS',
}
elif (int(sys.argv[5])==2):
twitter_app_auth = {
'consumer_key': 'xQkTg8KSU7HlEEvaD8EJA',
'consumer_secret': 'TMFRBmvGdGJtzwFJ3fyluPWszl5qCDuwBUqy0AGj0g',
'access_token': '218041595-JUmLw0xEtnJVrqn03DCirlZpnL1Z7taWwKYZYUPN',
'access_token_secret': 'cIdkjvTghunH6GGLRIjQW06ghyOFkX1w7jnurcJPVyIQw',
}
elif (int(sys.argv[5])==3):
twitter_app_auth = {
'consumer_key': 'sPzHpcj4jMital75nY7dfd4zn',
'consumer_secret': 'rTGm68zdNmLvnTc22cBoFg4eVMf3jLVDSQLOwSqE9lXbVWLweI',
'access_token': '4258226113-4UnHbbbxoRPz10thy70q9MtEk9xXfJGOpAY12KW',
'access_token_secret': '549HdasMEW0q2uV05S5s4Uj5SdCeEWT8dNdLNPiAeeWoX',
}
elif (int(sys.argv[5])==4):
twitter_app_auth = {
'consumer_key': 'wZnIRW0aMRmHuQ3Rh5c2v7al4',
'consumer_secret': 'ugFcKDc0WP7ktDw3Ch1ZddWknckkfFiH9ZvIKFDwg7k8ivDyFB',
'access_token': '218041595-JSRBUY3CJ55km9Jb0QnJA6lQnyRoPfvpq6lNAsak',
'access_token_secret': 'ck1wTLfMP5CeLAfnbkS3U7oKxY6e0xu9C7fosq3fNH8gO',
}
else:
twitter_app_auth = {
'consumer_key': 'kcnlkVFRADdxaWNtWNAy3LquT',
'consumer_secret': 'bAH258rRds9uWAi38kSwxgbJ1x0rAspasQACgOruuK4qnKsXld',
'access_token': '218041595-yrk9WyMnTjh4PBidhApb0DwryK83Wzr32IWi6bP4',
'access_token_secret': 'GCmOzFmzrOoAv59lCpKRQrC9e7H1P0449iaBW1rI66saS',
}
bom = botometer.Botometer(wait_on_ratelimit=True,
mashape_key=mashape_key,
**twitter_app_auth)
if(sys.argv[1]=='rs'):
input_file="data/distinct_userlist_rs_201606230"+sys.argv[2]+".csv"
else:
input_file="data/distinct_userlist_201606230"+sys.argv[2]+".csv"
bot_data = pd.read_csv(input_file, index_col = 0, names =['screen_name'])
print(len(bot_data))
distinct_uname=[]
for i in bot_data.values:
distinct_uname.append((str('@'+i).replace("['","")).replace("']",''))
botoresult = pd.DataFrame()
for screen_name, result in bom.check_accounts_in(distinct_uname[int(sys.argv[3]):int(sys.argv[4])]):
botoresult=botoresult.append(result, ignore_index=True)
output_bot=pd.concat([botoresult.user.apply(pd.Series), botoresult.scores.apply(pd.Series), botoresult.categories.apply(pd.Series)], axis=1)
print("bot result :",len(botoresult))
print("bot output :",len(output_bot))
output_file="data/outputbot_201606230"+sys.argv[2]+"_"+sys.argv[1]+"_"+sys.argv[6]+".csv"
output_bot.to_csv(output_file, sep=',', encoding='utf-8')
|
python
|
from analyzers.utility_functions import auth
from parsers.Parser import Parser
class HthParser(Parser):
__url = "https://fantasy.premierleague.com/api/leagues-h2h-matches/league/{}/?page={}&event={}"
__HUGE_HTH_LEAGUE = 19824
def __init__(self, team_id, leagues, current_event):
super().__init__(team_id)
self.__leagues = leagues
self.__current_event = current_event
"""
self.__leagues is a dictionary:
- keys are leagues codes
- values are strings = names of these leagues
result is a dictionary:
- keys are opponent ids
- values are strings = names of the league where the match is going to be played
"""
def get_opponents_ids(self):
result = {}
session = auth()
for key, value in self.__leagues.items():
# Ignoring this league because there's an issue with it
if key == self.__HUGE_HTH_LEAGUE:
continue
(opponent_id, (my_points, opponent_points)) = self.__get_opponent_id(session=session, league_code=key)
# Regular match
if opponent_id is not None:
result[opponent_id] = value
# H2H league with odd number of players:
# In this case, opponent is league's average score
# opponent_id[1] = average score
else:
result["AVERAGE"] = (my_points, opponent_points, value)
return result
# TO-DO: Issue with HUGE H2H leagues
# Example league: 19824
def __get_opponent_id(self, session, league_code, page_cnt=1):
new_url = self.__url.format(league_code, page_cnt, self.__current_event)
response = session.get(new_url).json()
# has_next = response["has_next"]
data = response["results"]
opponent_id = -1
points = -1
for element in data:
match = [element["entry_1_entry"], element["entry_2_entry"]]
points = [element["entry_1_points"], element["entry_2_points"]]
if match[0] == self._id:
opponent_id = match[1]
elif match[1] == self._id:
opponent_id = match[0]
points.reverse()
if opponent_id != -1:
result = (opponent_id, points)
return result
else:
return self.__get_opponent_id(session, league_code, page_cnt + 1)
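# Hedged usage sketch, not executed here: the team id, league code and league name are
# made up for illustration, and auth() must return a valid authenticated FPL session.
#
#   parser = HthParser(team_id=123456, leagues={321: "Example H2H league"}, current_event=10)
#   opponents = parser.get_opponents_ids()
#   # maps opponent entry ids (or "AVERAGE") to the league the match is played in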
|
python
|
import numpy as np
from math import log2
from scamp_filter.Item import Item as I
from termcolor import colored
def approx(target, depth=5, max_coeff=-1, silent=True):
coeffs = {}
total = 0.0
current = 256
for i in range(-8, depth):
if total == target:
break
# if the error is larger than half the current coefficient, spending this coefficient brings us closer to the target
if abs(total - target) > 1/2*current:
# decide which direction brings us closer to the target
if abs((total-current)-target) > abs(total + current - target):
coeffs[current] = 1
total += current
else:
coeffs[current] = -1
total -= current
current /= 2
if max_coeff > 0 and len(coeffs) >= max_coeff:
break
if not silent:
print("Target: %.5f\n" % target)
print("Error: %.5f\n" % (total-target))
print(coeffs)
return total, coeffs
def print_filter(filter):
print('----------------------')
for row in filter:
for item in row:
print('%5s'%str(item), end=' ')
print('')
print('----------------------')
def approx_filter(filter, depth=4, max_coeff=-1, verbose=0):
if verbose>1:
print(colored('>> Input filter', 'yellow'))
print_filter(filter)
if verbose>0:
print(colored('>> Approximating Filter', 'magenta'))
pre_goal = []
h, w = filter.shape
approximated_filter = np.zeros(filter.shape)
for (y, x), val in np.ndenumerate(filter):
a, coeffs = approx(val, depth, silent=True, max_coeff=max_coeff)
approximated_filter[y, x] = a
pre_goal = pre_goal + [I(int(-log2(c)), x-w//2, h//2-y) if weight == 1 else -I(int(-log2(c)), x-w//2, h//2-y) for c, weight in coeffs.items()]
if verbose>1:
print(colored('>> Approximated filter', 'yellow'))
print_filter(approximated_filter)
return pre_goal, approximated_filter
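# Hedged usage sketch, not part of the original module: the 3x3 kernel below is an
# arbitrary example used only to exercise approx()/approx_filter().
if __name__ == '__main__':
    demo_kernel = np.array([[0.0, 0.25, 0.0],
                            [0.25, -1.0, 0.25],
                            [0.0, 0.25, 0.0]])
    demo_goal, demo_approximated = approx_filter(demo_kernel, depth=6, verbose=2)
    print(demo_goal)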
|
python
|
###########################################################
# Re-bindings for unpickling
#
# We want to ensure class
# sage.modular.congroup_element.CongruenceSubgroupElement still exists, so we
# can unpickle safely.
#
###########################################################
from sage.modular.arithgroup.arithgroup_element import ArithmeticSubgroupElement
CongruenceSubgroupElement = ArithmeticSubgroupElement
|
python
|
"""Impementation for print_rel_notes."""
def print_rel_notes(
name,
repo,
version,
outs = None,
setup_file = "",
deps_method = "",
toolchains_method = "",
org = "bazelbuild",
changelog = None,
mirror_host = None):
tarball_name = ":%s-%s.tar.gz" % (repo, version)
# Must use Label to get a path relative to the rules_pkg repository,
# instead of the calling BUILD file.
print_rel_notes_helper = Label("//pkg/releasing:print_rel_notes")
tools = [print_rel_notes_helper]
cmd = [
"LC_ALL=C.UTF-8 $(location %s)" % str(print_rel_notes_helper),
"--org=%s" % org,
"--repo=%s" % repo,
"--version=%s" % version,
"--tarball=$(location %s)" % tarball_name,
]
if setup_file:
cmd.append("--setup_file=%s" % setup_file)
if deps_method:
cmd.append("--deps_method=%s" % deps_method)
if toolchains_method:
cmd.append("--toolchains_method=%s" % toolchains_method)
if changelog:
cmd.append("--changelog=$(location %s)" % changelog)
# We should depend on a changelog as a tool so that it is always built
# for the host configuration. If the changelog is generated on the fly,
# then we would have to run commands against our revision control
# system. That only makes sense locally on the host, because the
# revision history is never exported to a remote build system.
tools.append(changelog)
if mirror_host:
cmd.append("--mirror_host=%s" % mirror_host)
cmd.append(">$@")
native.genrule(
name = name,
srcs = [
tarball_name,
],
outs = outs or [name + ".txt"],
cmd = " ".join(cmd),
tools = tools,
)
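# Hedged usage sketch, not part of the original .bzl file: the values below are
# illustrative only and assume a BUILD file that loads this macro.
#
#   print_rel_notes(
#       name = "relnotes",
#       repo = "rules_pkg",
#       version = "0.7.0",
#   )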
|
python
|
#!/usr/bin/env python
# Run VGG benchmark series.
# Prepares and runs multiple tasks on multiple GPUs: one task per GPU.
# Waits if no GPUs available. For GPU availability check uses "nvidia-smi dmon" command.
# 2018 (C) Peter Bryzgalov @ CHITECH Stair Lab
import multigpuexec
import time
import os
import datetime
# Set GPU range
gpus = range(0, 1)
# Change hostname
host = "p3.2xlarge"
# Set number of runs
runs = 1
# Set mini-batch sizes
batchsizes = [7, 8, 9] + range(10, 200, 10) + range(200, 501, 50)
# Log algos
# batchsizes = [10, 20, 50, 100, 150, 200, 400, 500]
# Set algorithms
backfilterconvalgos = ["cudnn"]
algods = ["cudnn"] # Data gradient algorithm
algofwds = ["cudnn"]
benchmark = "VGG"
template = "VGG.dnntemplate"
datasetsize = 50000
date = datetime.datetime.today().strftime('%Y%m%d')
nvprof = False
with_memory = False
debuginfo = False
debuginfo_option = ""
if debuginfo:
debuginfo_option = " --debug"
tasks = []
command = "./run_dnnmark_template.sh -b test_{} --template {}".format(benchmark, template)
logdir = "logs/{}/dnnmark_{}_microseries_{}/".format(host, benchmark, date)
if not os.path.exists(logdir):
os.makedirs(logdir)
print "Logdir", logdir
logfile_base = "dnnmark_{}_{}".format(host, benchmark)
for batch in batchsizes:
for algod in algods:
for algo in backfilterconvalgos:
for algofwd in algofwds:
algod_opt = " --algod {}".format(algod)
logname = "{}_bs{}_algos{}-{}-{}".format(logfile_base, batch, algofwd, algo, algod)
for run in range(runs):
logfile = os.path.join(logdir, "{}_{:02d}.log".format(logname, run))
if os.path.isfile(logfile):
print "file", logfile, "exists."
else:
command_pars = command + " -n {} --algo {} --algod {} --algofwd {} -d {}{}".format(
batch, algo, algod, algofwd, datasetsize, debuginfo_option)
task = {"comm": command_pars, "logfile": logfile, "batch": batch, "nvsmi": with_memory}
tasks.append(task)
if nvprof:
iterations = 10
# print "BS: {}, Iterations: {}".format(batch,iterations)
nvlogname = "{}_iter{}".format(logname, iterations)
command_pars = command + " -n {} -d {} --algo {} --algod {} --algofwd {} --iter {} --warmup 0".format(
batch, datasetsize, algo, algod, algofwd, iterations)
logfile = os.path.join(logdir, "{}_%p.nvprof".format(nvlogname))
if os.path.isfile(logfile):
print "file", logfile, "exists."
else:
profcommand = "nvprof -u s --profile-api-trace none --unified-memory-profiling off --profile-child-processes --csv --log-file {} {}".format(
logfile, command_pars)
task = {"comm": profcommand, "logfile": logfile, "batch": batch, "nvsmi": False}
tasks.append(task)
print "Have", len(tasks), "tasks"
gpu = -1
for i in range(0, len(tasks)):
gpu = multigpuexec.getNextFreeGPU(gpus, start=gpu + 1, c=1, d=1, nvsmi=tasks[i]["nvsmi"], mode="dmon", debug=False)
gpu_info = multigpuexec.getGPUinfo(gpu)
f = open(tasks[i]["logfile"], "w+")
f.write(tasks[i]["comm"] + "\n")
f.write("b{}\n".format(tasks[i]["batch"]))
f.write("GPU{}: {}\n".format(gpu, gpu_info))
f.close()
print time.strftime("[%d %H:%M:%S]"),
multigpuexec.runTask(tasks[i], gpu, nvsmi=tasks[i]["nvsmi"], delay=0, debug=False)
print tasks[i]["logfile"]
print "{}/{} tasks".format(i + 1, len(tasks))
time.sleep(1)
|
python
|
from distutils.core import Extension, setup
import numpy as np
setup(
name="numpy_ctypes_example",
version="1.0",
description="numpy ctypes example",
author="Mateen Ulhaq",
author_email="[email protected]",
maintainer="[email protected]",
url="https://github.com/YodaEmbedding/experiments",
ext_modules=[
Extension(
name="lib",
sources=["lib.c"],
extra_compile_args=["-Ofast", "-march=native"],
include_dirs=[np.get_include()],
),
],
)
|
python
|
"""
Copyright 2015 Paul T. Grogan, Massachusetts Institute of Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Test cases for L{ofspy.context}.
"""
import unittest
from ..federation import Federation
from ..simulator import Simulator
from ..context import Context
from ..surface import Surface
from ..orbit import Orbit
from ..demand import Demand
from ..valueSchedule import ValueSchedule
class ContextTestCase(unittest.TestCase):
def setUp(self):
self.default = Context(seed=0)
self.locs = []
for s in range(6):
self.locs.append(Surface(s, name='SUR{0}'.format(s+1)))
self.locs.append(Orbit(s, 'LEO', name='LEO{0}'.format(s+1)))
self.locs.append(Orbit(s, 'MEO', name='MEO{0}'.format(s+1)))
self.locs.append(Orbit(s, 'GEO', name='GEO{0}'.format(s+1)))
self.evts = []
for d in range(8):
self.evts.append(Demand(None, 'SAR', 1,
ValueSchedule([(1,500),(4,400)], -50),
name='SAR1.{0}'.format(d+1)))
for d in range(12):
self.evts.append(Demand(None, 'SAR', 1,
ValueSchedule([(2,450),(5,350)], -100),
name='SAR2.{0}'.format(d+1)))
for d in range(23):
self.evts.append(Demand(None, 'SAR', 1,
ValueSchedule([(3,400),(6,300)], -150),
name='SAR3.{0}'.format(d+1)))
for d in range(8):
self.evts.append(Demand(None, 'VIS', 1,
ValueSchedule([(1,600),(4,500)], -50),
name='VIS1.{0}'.format(d+1)))
for d in range(17):
self.evts.append(Demand(None, 'VIS', 1,
ValueSchedule([(2,500),(5,400)], -100),
name='VIS2.{0}'.format(d+1)))
for d in range(8):
self.evts.append(Demand(None, 'VIS', 1,
ValueSchedule([(3,450),(6,350)], -150),
name='VIS3.{0}'.format(d+1)))
self.default = Context(locations=self.locs, events=self.evts,
federations=[Federation()], seed=0)
self.sim = Simulator(entities=[self.default],
initTime=0, timeStep=1, maxTime=3)
def tearDown(self):
self.default = None
self.locs = None
self.evts = None
def test_propagate(self):
self.assertEqual(self.default.propagate(self.locs[0], 0), self.locs[0])
self.assertEqual(self.default.propagate(self.locs[0], 1), self.locs[0])
self.assertEqual(self.default.propagate(self.locs[0], 2), self.locs[0])
self.assertEqual(self.default.propagate(self.locs[1], 0), self.locs[1])
self.assertEqual(self.default.propagate(self.locs[1], 1), self.locs[9])
self.assertEqual(self.default.propagate(self.locs[1], 2), self.locs[17])
self.assertEqual(self.default.propagate(self.locs[1], 3), self.locs[1])
self.assertEqual(self.default.propagate(self.locs[1], 4), self.locs[9])
self.assertEqual(self.default.propagate(self.locs[1], -1), self.locs[17])
self.assertEqual(self.default.propagate(self.locs[2], 0), self.locs[2])
self.assertEqual(self.default.propagate(self.locs[2], 1), self.locs[6])
self.assertEqual(self.default.propagate(self.locs[2], 2), self.locs[10])
self.assertEqual(self.default.propagate(self.locs[3], 0), self.locs[3])
self.assertEqual(self.default.propagate(self.locs[3], 1), self.locs[3])
self.assertEqual(self.default.propagate(self.locs[3], 2), self.locs[3])
def test_init(self):
self.assertEqual(self.default.currentEvents, [])
self.assertEqual(self.default.futureEvents, [])
self.assertEqual(self.default.pastEvents, [])
self.default.init(self.sim)
self.assertEqual(self.default.currentEvents, [])
self.assertNotEqual(self.default.futureEvents, [])
self.assertEqual(len(self.default.futureEvents),
len(self.default.events))
self.assertEqual(self.default.pastEvents, [])
def test_tick(self):
self.default.init(self.sim)
self.default.tick(self.sim)
def test_tock(self):
self.default.init(self.sim)
self.default.tick(self.sim)
self.default.tock()
self.assertEqual(len(self.default.currentEvents), 6)
self.assertEqual(len(self.default.futureEvents),
len(self.default.events) - 6)
|
python
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=invalid-overridden-method
import functools
from typing import ( # pylint: disable=unused-import
Union,
Optional,
Any,
IO,
Iterable,
AnyStr,
Dict,
List,
Tuple,
TYPE_CHECKING,
)
try:
from urllib.parse import urlparse, quote, unquote # pylint: disable=unused-import
except ImportError:
from urlparse import urlparse # type: ignore
from urllib2 import quote, unquote # type: ignore
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.async_paging import AsyncItemPaged
from .._shared.base_client_async import AsyncStorageAccountHostsMixin
from .._shared.request_handlers import add_metadata_headers, serialize_iso
from .._shared.response_handlers import (
return_response_headers,
process_storage_error,
return_headers_and_deserialized,
)
from .._deserialize import deserialize_queue_properties, deserialize_queue_creation
from .._generated.version import VERSION
from .._generated.aio import AzureQueueStorage
from .._generated.models import StorageErrorException, SignedIdentifier
from .._generated.models import QueueMessage as GenQueueMessage
from .._models import QueueMessage, AccessPolicy
from ._models import MessagesPaged
from .._shared.policies_async import ExponentialRetry
from .._queue_client import QueueClient as QueueClientBase
if TYPE_CHECKING:
from datetime import datetime
from azure.core.pipeline.policies import HTTPPolicy
from .._models import QueueSasPermissions, QueueProperties
class QueueClient(AsyncStorageAccountHostsMixin, QueueClientBase):
"""A client to interact with a specific Queue.
:param str account_url:
The URL to the storage account. In order to create a client given the full URI to the queue,
use the :func:`from_queue_url` classmethod.
:param queue_name: The name of the queue.
:type queue_name: str
:param credential:
The credentials with which to authenticate. This is optional if the
account URL already has a SAS token. The value can be a SAS token string,
an instance of a AzureSasCredential from azure.core.credentials, an account
shared access key, or an instance of a TokenCredentials class from azure.identity.
:keyword str api_version:
The Storage API version to use for requests. Default value is '2019-07-07'.
Setting to an older version may result in reduced feature compatibility.
:keyword str secondary_hostname:
The hostname of the secondary endpoint.
:keyword message_encode_policy: The encoding policy to use on outgoing messages.
Default is not to encode messages. Other options include :class:`TextBase64EncodePolicy`,
:class:`BinaryBase64EncodePolicy` or `None`.
:keyword message_decode_policy: The decoding policy to use on incoming messages.
Default value is not to decode messages. Other options include :class:`TextBase64DecodePolicy`,
:class:`BinaryBase64DecodePolicy` or `None`.
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_create_queue_client]
:end-before: [END async_create_queue_client]
:language: python
:dedent: 16
:caption: Create the queue client with url and credential.
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_create_queue_client_from_connection_string]
:end-before: [END async_create_queue_client_from_connection_string]
:language: python
:dedent: 8
:caption: Create the queue client with a connection string.
"""
def __init__(
self,
account_url, # type: str
queue_name, # type: str
credential=None, # type: Optional[Any]
**kwargs # type: Any
):
# type: (...) -> None
kwargs["retry_policy"] = kwargs.get("retry_policy") or ExponentialRetry(**kwargs)
loop = kwargs.pop('loop', None)
super(QueueClient, self).__init__(
account_url, queue_name=queue_name, credential=credential, loop=loop, **kwargs
)
self._client = AzureQueueStorage(self.url, pipeline=self._pipeline, loop=loop) # type: ignore
self._client._config.version = kwargs.get('api_version', VERSION) # pylint: disable=protected-access
self._loop = loop
@distributed_trace_async
async def create_queue(self, **kwargs):
# type: (Optional[Any]) -> None
"""Creates a new queue in the storage account.
If a queue with the same name already exists, the operation fails with
a `ResourceExistsError`.
:keyword dict(str,str) metadata:
A dict containing name-value pairs to associate with the queue as
metadata. Note that metadata names preserve the case with which they
were created, but are case-insensitive when set or read.
:keyword int timeout:
The server timeout, expressed in seconds.
:return: None or the result of cls(response)
:rtype: None
:raises: StorageErrorException
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_hello_world_async.py
:start-after: [START async_create_queue]
:end-before: [END async_create_queue]
:language: python
:dedent: 12
:caption: Create a queue.
"""
metadata = kwargs.pop('metadata', None)
timeout = kwargs.pop('timeout', None)
headers = kwargs.pop("headers", {})
headers.update(add_metadata_headers(metadata)) # type: ignore
try:
return await self._client.queue.create( # type: ignore
metadata=metadata, timeout=timeout, headers=headers, cls=deserialize_queue_creation, **kwargs
)
except StorageErrorException as error:
process_storage_error(error)
@distributed_trace_async
async def delete_queue(self, **kwargs):
# type: (Optional[Any]) -> None
"""Deletes the specified queue and any messages it contains.
When a queue is successfully deleted, it is immediately marked for deletion
and is no longer accessible to clients. The queue is later removed from
the Queue service during garbage collection.
Note that deleting a queue is likely to take at least 40 seconds to complete.
If an operation is attempted against the queue while it was being deleted,
an :class:`HttpResponseError` will be thrown.
:keyword int timeout:
The server timeout, expressed in seconds.
:rtype: None
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_hello_world_async.py
:start-after: [START async_delete_queue]
:end-before: [END async_delete_queue]
:language: python
:dedent: 16
:caption: Delete a queue.
"""
timeout = kwargs.pop('timeout', None)
try:
await self._client.queue.delete(timeout=timeout, **kwargs)
except StorageErrorException as error:
process_storage_error(error)
@distributed_trace_async
async def get_queue_properties(self, **kwargs):
# type: (Optional[Any]) -> QueueProperties
"""Returns all user-defined metadata for the specified queue.
The data returned does not include the queue's list of messages.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:return: User-defined metadata for the queue.
:rtype: ~azure.storage.queue.QueueProperties
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_get_queue_properties]
:end-before: [END async_get_queue_properties]
:language: python
:dedent: 16
:caption: Get the properties on the queue.
"""
timeout = kwargs.pop('timeout', None)
try:
response = await self._client.queue.get_properties(
timeout=timeout, cls=deserialize_queue_properties, **kwargs
)
except StorageErrorException as error:
process_storage_error(error)
response.name = self.queue_name
return response # type: ignore
@distributed_trace_async
async def set_queue_metadata(self, metadata=None, **kwargs):
# type: (Optional[Dict[str, Any]], Optional[Any]) -> None
"""Sets user-defined metadata on the specified queue.
Metadata is associated with the queue as name-value pairs.
:param metadata:
A dict containing name-value pairs to associate with the
queue as metadata.
:type metadata: dict(str, str)
:keyword int timeout:
The server timeout, expressed in seconds.
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_set_queue_metadata]
:end-before: [END async_set_queue_metadata]
:language: python
:dedent: 16
:caption: Set metadata on the queue.
"""
timeout = kwargs.pop('timeout', None)
headers = kwargs.pop("headers", {})
headers.update(add_metadata_headers(metadata)) # type: ignore
try:
return await self._client.queue.set_metadata( # type: ignore
timeout=timeout, headers=headers, cls=return_response_headers, **kwargs
)
except StorageErrorException as error:
process_storage_error(error)
@distributed_trace_async
async def get_queue_access_policy(self, **kwargs):
# type: (Optional[Any]) -> Dict[str, Any]
"""Returns details about any stored access policies specified on the
queue that may be used with Shared Access Signatures.
:keyword int timeout:
The server timeout, expressed in seconds.
:return: A dictionary of access policies associated with the queue.
:rtype: dict(str, ~azure.storage.queue.AccessPolicy)
"""
timeout = kwargs.pop('timeout', None)
try:
_, identifiers = await self._client.queue.get_access_policy(
timeout=timeout, cls=return_headers_and_deserialized, **kwargs
)
except StorageErrorException as error:
process_storage_error(error)
return {s.id: s.access_policy or AccessPolicy() for s in identifiers}
@distributed_trace_async
async def set_queue_access_policy(self, signed_identifiers, **kwargs):
# type: (Dict[str, AccessPolicy], Optional[Any]) -> None
"""Sets stored access policies for the queue that may be used with Shared
Access Signatures.
When you set permissions for a queue, the existing permissions are replaced.
To update the queue's permissions, call :func:`~get_queue_access_policy` to fetch
all access policies associated with the queue, modify the access policy
that you wish to change, and then call this function with the complete
set of data to perform the update.
When you establish a stored access policy on a queue, it may take up to
30 seconds to take effect. During this interval, a shared access signature
that is associated with the stored access policy will throw an
:class:`HttpResponseError` until the access policy becomes active.
:param signed_identifiers:
SignedIdentifier access policies to associate with the queue.
This may contain up to 5 elements. An empty dict
will clear the access policies set on the service.
:type signed_identifiers: dict(str, ~azure.storage.queue.AccessPolicy)
:keyword int timeout:
The server timeout, expressed in seconds.
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_set_access_policy]
:end-before: [END async_set_access_policy]
:language: python
:dedent: 16
:caption: Set an access policy on the queue.
"""
timeout = kwargs.pop('timeout', None)
if len(signed_identifiers) > 15:
raise ValueError(
"Too many access policies provided. The server does not support setting "
"more than 15 access policies on a single resource."
)
identifiers = []
for key, value in signed_identifiers.items():
if value:
value.start = serialize_iso(value.start)
value.expiry = serialize_iso(value.expiry)
identifiers.append(SignedIdentifier(id=key, access_policy=value))
signed_identifiers = identifiers # type: ignore
try:
await self._client.queue.set_access_policy(queue_acl=signed_identifiers or None, timeout=timeout, **kwargs)
except StorageErrorException as error:
process_storage_error(error)
@distributed_trace_async
async def send_message( # type: ignore
self,
content, # type: Any
**kwargs # type: Optional[Any]
):
# type: (...) -> QueueMessage
"""Adds a new message to the back of the message queue.
The visibility timeout specifies the time that the message will be
invisible. After the timeout expires, the message will become visible.
If a visibility timeout is not specified, the default value of 0 is used.
The message time-to-live specifies how long a message will remain in the
queue. The message will be deleted from the queue when the time-to-live
period expires.
If the key-encryption-key field is set on the local service object, this method will
encrypt the content before uploading.
:param obj content:
Message content. Allowed type is determined by the encode_function
set on the service. Default is str. The encoded message can be up to
64KB in size.
:keyword int visibility_timeout:
If not specified, the default value is 0. Specifies the
new visibility timeout value, in seconds, relative to server time.
The value must be larger than or equal to 0, and cannot be
larger than 7 days. The visibility timeout of a message cannot be
set to a value later than the expiry time. visibility_timeout
should be set to a value smaller than the time-to-live value.
:keyword int time_to_live:
Specifies the time-to-live interval for the message, in
seconds. The time-to-live may be any positive number or -1 for infinity. If this
parameter is omitted, the default time-to-live is 7 days.
:keyword int timeout:
The server timeout, expressed in seconds.
:return:
A :class:`~azure.storage.queue.QueueMessage` object.
This object is also populated with the content although it is not
returned from the service.
:rtype: ~azure.storage.queue.QueueMessage
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_send_messages]
:end-before: [END async_send_messages]
:language: python
:dedent: 16
:caption: Send messages.
"""
visibility_timeout = kwargs.pop('visibility_timeout', None)
time_to_live = kwargs.pop('time_to_live', None)
timeout = kwargs.pop('timeout', None)
self._config.message_encode_policy.configure(
require_encryption=self.require_encryption,
key_encryption_key=self.key_encryption_key,
resolver=self.key_resolver_function
)
encoded_content = self._config.message_encode_policy(content)
new_message = GenQueueMessage(message_text=encoded_content)
try:
enqueued = await self._client.messages.enqueue(
queue_message=new_message,
visibilitytimeout=visibility_timeout,
message_time_to_live=time_to_live,
timeout=timeout,
**kwargs
)
queue_message = QueueMessage(content=content)
queue_message.id = enqueued[0].message_id
queue_message.inserted_on = enqueued[0].insertion_time
queue_message.expires_on = enqueued[0].expiration_time
queue_message.pop_receipt = enqueued[0].pop_receipt
queue_message.next_visible_on = enqueued[0].time_next_visible
return queue_message
except StorageErrorException as error:
process_storage_error(error)
@distributed_trace_async
async def receive_message(self, **kwargs):
# type: (Optional[Any]) -> QueueMessage
"""Removes one message from the front of the queue.
When the message is retrieved from the queue, the response includes the message
content and a pop_receipt value, which is required to delete the message.
The message is not automatically deleted from the queue, but after it has
been retrieved, it is not visible to other clients for the time interval
specified by the visibility_timeout parameter.
If the key-encryption-key or resolver field is set on the local service object, the message will be
decrypted before being returned.
:keyword int visibility_timeout:
If not specified, the default value is 0. Specifies the
new visibility timeout value, in seconds, relative to server time.
The value must be larger than or equal to 0, and cannot be
larger than 7 days. The visibility timeout of a message cannot be
set to a value later than the expiry time. visibility_timeout
should be set to a value smaller than the time-to-live value.
:keyword int timeout:
The server timeout, expressed in seconds.
:return:
Returns a message from the Queue.
:rtype: ~azure.storage.queue.QueueMessage
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START receive_one_message]
:end-before: [END receive_one_message]
:language: python
:dedent: 12
:caption: Receive one message from the queue.
"""
visibility_timeout = kwargs.pop('visibility_timeout', None)
timeout = kwargs.pop('timeout', None)
self._config.message_decode_policy.configure(
require_encryption=self.require_encryption,
key_encryption_key=self.key_encryption_key,
resolver=self.key_resolver_function)
try:
message = await self._client.messages.dequeue(
number_of_messages=1,
visibilitytimeout=visibility_timeout,
timeout=timeout,
cls=self._config.message_decode_policy,
**kwargs
)
wrapped_message = QueueMessage._from_generated( # pylint: disable=protected-access
message[0]) if message != [] else None
return wrapped_message
except StorageErrorException as error:
process_storage_error(error)
@distributed_trace
def receive_messages(self, **kwargs):
# type: (Optional[Any]) -> AsyncItemPaged[QueueMessage]
"""Removes one or more messages from the front of the queue.
When a message is retrieved from the queue, the response includes the message
content and a pop_receipt value, which is required to delete the message.
The message is not automatically deleted from the queue, but after it has
been retrieved, it is not visible to other clients for the time interval
specified by the visibility_timeout parameter.
If the key-encryption-key or resolver field is set on the local service object, the messages will be
decrypted before being returned.
:keyword int messages_per_page:
A nonzero integer value that specifies the number of
messages to retrieve from the queue, up to a maximum of 32. If
fewer are visible, the visible messages are returned. By default,
a single message is retrieved from the queue with this operation.
`by_page()` can be used to provide a page iterator on the AsyncItemPaged if messages_per_page is set.
`next()` can be used to get the next page.
:keyword int visibility_timeout:
If not specified, the default value is 0. Specifies the
new visibility timeout value, in seconds, relative to server time.
The value must be larger than or equal to 0, and cannot be
larger than 7 days. The visibility timeout of a message cannot be
set to a value later than the expiry time. visibility_timeout
should be set to a value smaller than the time-to-live value.
:keyword int timeout:
The server timeout, expressed in seconds.
:return:
Returns a message iterator of dict-like Message objects.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.queue.QueueMessage]
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_receive_messages]
:end-before: [END async_receive_messages]
:language: python
:dedent: 16
:caption: Receive messages from the queue.
"""
messages_per_page = kwargs.pop('messages_per_page', None)
visibility_timeout = kwargs.pop('visibility_timeout', None)
timeout = kwargs.pop('timeout', None)
self._config.message_decode_policy.configure(
require_encryption=self.require_encryption,
key_encryption_key=self.key_encryption_key,
resolver=self.key_resolver_function
)
try:
command = functools.partial(
self._client.messages.dequeue,
visibilitytimeout=visibility_timeout,
timeout=timeout,
cls=self._config.message_decode_policy,
**kwargs
)
return AsyncItemPaged(command, results_per_page=messages_per_page, page_iterator_class=MessagesPaged)
except StorageErrorException as error:
process_storage_error(error)
@distributed_trace_async
async def update_message(
self,
message,
pop_receipt=None,
content=None,
**kwargs
):
# type: (Any, int, Optional[str], Optional[Any], Any) -> QueueMessage
"""Updates the visibility timeout of a message. You can also use this
operation to update the contents of a message.
This operation can be used to continually extend the invisibility of a
queue message. This functionality can be useful if you want a worker role
to "lease" a queue message. For example, if a worker role calls :func:`~receive_messages()`
and recognizes that it needs more time to process a message, it can
continually extend the message's invisibility until it is processed. If
the worker role were to fail during processing, eventually the message
would become visible again and another worker role could process it.
If the key-encryption-key field is set on the local service object, this method will
encrypt the content before uploading.
:param message:
The message object or id identifying the message to update.
:type message: str or ~azure.storage.queue.QueueMessage
:param str pop_receipt:
A valid pop receipt value returned from an earlier call
to the :func:`~receive_messages` or :func:`~update_message` operation.
:param obj content:
Message content. Allowed type is determined by the encode_function
set on the service. Default is str.
:keyword int visibility_timeout:
Specifies the new visibility timeout value, in seconds,
relative to server time. The new value must be larger than or equal
to 0, and cannot be larger than 7 days. The visibility timeout of a
message cannot be set to a value later than the expiry time. A
message can be updated until it has been deleted or has expired.
The message object or message id identifying the message to update.
:keyword int timeout:
The server timeout, expressed in seconds.
:return:
A :class:`~azure.storage.queue.QueueMessage` object. For convenience,
this object is also populated with the content, although it is not returned by the service.
:rtype: ~azure.storage.queue.QueueMessage
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_update_message]
:end-before: [END async_update_message]
:language: python
:dedent: 16
:caption: Update a message.
"""
visibility_timeout = kwargs.pop('visibility_timeout', None)
timeout = kwargs.pop('timeout', None)
try:
message_id = message.id
message_text = content or message.content
receipt = pop_receipt or message.pop_receipt
inserted_on = message.inserted_on
expires_on = message.expires_on
dequeue_count = message.dequeue_count
except AttributeError:
message_id = message
message_text = content
receipt = pop_receipt
inserted_on = None
expires_on = None
dequeue_count = None
if receipt is None:
raise ValueError("pop_receipt must be present")
if message_text is not None:
self._config.message_encode_policy.configure(
self.require_encryption, self.key_encryption_key, self.key_resolver_function
)
encoded_message_text = self._config.message_encode_policy(message_text)
updated = GenQueueMessage(message_text=encoded_message_text)
else:
updated = None # type: ignore
try:
response = await self._client.message_id.update(
queue_message=updated,
visibilitytimeout=visibility_timeout or 0,
timeout=timeout,
pop_receipt=receipt,
cls=return_response_headers,
queue_message_id=message_id,
**kwargs
)
new_message = QueueMessage(content=message_text)
new_message.id = message_id
new_message.inserted_on = inserted_on
new_message.expires_on = expires_on
new_message.dequeue_count = dequeue_count
new_message.pop_receipt = response["popreceipt"]
new_message.next_visible_on = response["time_next_visible"]
return new_message
except StorageErrorException as error:
process_storage_error(error)
@distributed_trace_async
async def peek_messages(self, max_messages=None, **kwargs):
# type: (Optional[int], Optional[Any]) -> List[QueueMessage]
"""Retrieves one or more messages from the front of the queue, but does
not alter the visibility of the message.
Only messages that are visible may be retrieved. When a message is retrieved
for the first time with a call to :func:`~receive_messages`, its dequeue_count property
is set to 1. If it is not deleted and is subsequently retrieved again, the
dequeue_count property is incremented. The client may use this value to
determine how many times a message has been retrieved. Note that a call
to peek_messages does not increment the value of dequeue_count, but returns
this value for the client to read.
If the key-encryption-key or resolver field is set on the local service object,
the messages will be decrypted before being returned.
:param int max_messages:
A nonzero integer value that specifies the number of
messages to peek from the queue, up to a maximum of 32. By default,
a single message is peeked from the queue with this operation.
:keyword int timeout:
The server timeout, expressed in seconds.
:return:
A list of :class:`~azure.storage.queue.QueueMessage` objects. Note that
next_visible_on and pop_receipt will not be populated as peek does
not pop the message and can only retrieve already visible messages.
:rtype: list(:class:`~azure.storage.queue.QueueMessage`)
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_peek_message]
:end-before: [END async_peek_message]
:language: python
:dedent: 16
:caption: Peek messages.
"""
timeout = kwargs.pop('timeout', None)
if max_messages and not 1 <= max_messages <= 32:
raise ValueError("Number of messages to peek should be between 1 and 32")
self._config.message_decode_policy.configure(
require_encryption=self.require_encryption,
key_encryption_key=self.key_encryption_key,
resolver=self.key_resolver_function
)
try:
messages = await self._client.messages.peek(
number_of_messages=max_messages, timeout=timeout, cls=self._config.message_decode_policy, **kwargs
)
wrapped_messages = []
for peeked in messages:
wrapped_messages.append(QueueMessage._from_generated(peeked)) # pylint: disable=protected-access
return wrapped_messages
except StorageErrorException as error:
process_storage_error(error)
@distributed_trace_async
async def clear_messages(self, **kwargs):
# type: (Optional[Any]) -> None
"""Deletes all messages from the specified queue.
:keyword int timeout:
The server timeout, expressed in seconds.
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_clear_messages]
:end-before: [END async_clear_messages]
:language: python
:dedent: 16
:caption: Clears all messages.
"""
timeout = kwargs.pop('timeout', None)
try:
await self._client.messages.clear(timeout=timeout, **kwargs)
except StorageErrorException as error:
process_storage_error(error)
@distributed_trace_async
async def delete_message(self, message, pop_receipt=None, **kwargs):
# type: (Any, Optional[str], Any) -> None
"""Deletes the specified message.
Normally after a client retrieves a message with the receive messages operation,
the client is expected to process and delete the message. To delete the
message, you must have the message object itself, or two items of data: id and pop_receipt.
The id is returned from the previous receive_messages operation. The
pop_receipt is returned from the most recent :func:`~receive_messages` or
:func:`~update_message` operation. In order for the delete_message operation
to succeed, the pop_receipt specified on the request must match the
pop_receipt returned from the :func:`~receive_messages` or :func:`~update_message`
operation.
:param message:
The message object or id identifying the message to delete.
:type message: str or ~azure.storage.queue.QueueMessage
:param str pop_receipt:
A valid pop receipt value returned from an earlier call
to the :func:`~receive_messages` or :func:`~update_message`.
:keyword int timeout:
The server timeout, expressed in seconds.
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_delete_message]
:end-before: [END async_delete_message]
:language: python
:dedent: 16
:caption: Delete a message.
"""
timeout = kwargs.pop('timeout', None)
try:
message_id = message.id
receipt = pop_receipt or message.pop_receipt
except AttributeError:
message_id = message
receipt = pop_receipt
if receipt is None:
raise ValueError("pop_receipt must be present")
try:
await self._client.message_id.delete(
pop_receipt=receipt, timeout=timeout, queue_message_id=message_id, **kwargs
)
except StorageErrorException as error:
process_storage_error(error)
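# ----------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the client implementation).
# `queue_client` is assumed to be an already-constructed instance of this async
# queue client; only methods defined above are used.
async def _example_peek_and_clear(queue_client):
    # Look at up to five visible messages without changing their visibility.
    for peeked in await queue_client.peek_messages(max_messages=5):
        print(peeked.id, peeked.content)
    # A message obtained from a receive call could then be passed to
    # update_message() / delete_message(); clear_messages() empties the queue.
    await queue_client.clear_messages()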
|
python
|
from utils.QtCore import *
class CustomAddCoinBtn (QPushButton):
def __init__(
self
):
super().__init__()
self.setCursor(Qt.PointingHandCursor)
self.setText('Add coin')
self.setObjectName('add_coin_btn')
self.setSizePolicy(QSizePolicy.Policy.Fixed, QSizePolicy.Policy.Fixed)
self.setStyleSheet(f'''
QPushButton {{
margin-left: 20px;
color: #777777;
font: 600 12pt "Segoe UI";
}}
QPushButton:hover {{
color: #252525;
}}
''')
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
return self.clicked.emit()
def mouseReleaseEvent(self, event):
if event.button() == Qt.LeftButton:
return self.released.emit()
|
python
|
from typing import Callable, List
import numpy as np
from qcodes.instrument.base import Instrument
from qcodes.instrument.parameter import Parameter
from qcodes.instrument.channel import InstrumentChannel, ChannelList
from qcodes.utils import validators as vals
from .SD_Module import SD_Module, keysightSD1, SignadyneParameter, with_error_check
# Functions to log method calls from the SD_AIN class
import re, sys, types
def logmethod(value):
def method_wrapper(self, *args, **kwargs):
input_str = ', '.join(map(str, args))
if kwargs:
input_str += (', ' if args else '') + ', '.join(
[f'{key}={val}' for key, val in kwargs.items()])
method_str = f'{value.__name__}({input_str})'
if not hasattr(self, '_method_calls'):
self._method_calls = []
self._method_calls += [method_str]
return value(self, *args, **kwargs)
return method_wrapper
def logclass(cls):
namesToCheck = cls.__dict__.keys()
for name in namesToCheck:
# unbound methods show up as mere functions in the values of
# cls.__dict__,so we have to go through getattr
value = getattr(cls, name)
if isinstance(value, types.FunctionType):
setattr(cls, name, logmethod(value))
return cls
model_channels = {'M3300A': 8}
class DigitizerChannel(InstrumentChannel):
"""Signadyne digitizer channel
Args:
parent: Parent Signadyne digitizer Instrument
name: channel name (e.g. 'ch1')
id: channel id (e.g. 1)
**kwargs: Additional kwargs passed to InstrumentChannel
"""
def __init__(self, parent: Instrument, name: str, id: int, **kwargs):
super().__init__(parent=parent, name=name, **kwargs)
self.SD_AIN = self._parent.SD_AIN
self.id = id
# For channelInputConfig
self.add_parameter(
'full_scale',
unit='V',
initial_value=1,
vals=vals.Numbers(0, 3),
# self.SD_AIN.channelMinFullScale(),
# self.SD_AIN.channelMaxFullScale()),
set_function=self.SD_AIN.channelInputConfig,
set_args=['full_scale', 'impedance', 'coupling'],
docstring=f'The full scale voltage for ch{self.id}'
)
# For channelTriggerConfig
self.add_parameter(
'impedance',
initial_value='50',
val_mapping={'high': 0, '50': 1},
get_function=self.SD_AIN.channelImpedance,
set_function=self.SD_AIN.channelInputConfig,
set_args=['full_scale', 'impedance', 'coupling'],
docstring=f'The input impedance of ch{self.id}. Note that for '
f'high input impedance, the measured voltage will not be '
f'the actual voltage'
)
self.add_parameter(
'coupling',
initial_value='AC',
val_mapping={'DC': 0, 'AC': 1},
get_function=self.SD_AIN.channelCoupling,
set_function=self.SD_AIN.channelInputConfig,
set_args=['full_scale', 'impedance', 'coupling'],
docstring=f'The coupling of ch{self.id}'
)
# For channelPrescalerConfig
self.add_parameter(
'prescaler',
initial_value=0,
vals=vals.Ints(0, 4095),
get_function=self.SD_AIN.channelPrescalerConfig,
set_function=self.SD_AIN.channelPrescalerConfig,
docstring=f'The sampling frequency prescaler for ch{self.id}. '
f'Sampling rate will be max_sampling_rate/(prescaler+1)'
)
# For DAQ config
self.add_parameter(
'points_per_cycle',
initial_value=0,
vals=vals.Ints(),
set_function=self.SD_AIN.DAQconfig,
set_args=['points_per_cycle', 'n_cycles',
'trigger_delay_samples', 'trigger_mode'],
docstring=f'The number of points per cycle for ch{self.id}'
)
self.add_parameter(
'n_cycles',
initial_value=-1,
vals=vals.Ints(),
set_function=self.SD_AIN.DAQconfig,
set_args=['points_per_cycle', 'n_cycles',
'trigger_delay_samples', 'trigger_mode'],
docstring=f'The number of cycles to collect on DAQ {self.id}'
)
self.add_parameter(
'trigger_mode',
initial_value='auto',
val_mapping={'auto': 0, 'software': 1, 'digital': 2, 'analog': 3},
set_function=self.SD_AIN.DAQconfig,
set_args=['points_per_cycle', 'n_cycles',
'trigger_delay_samples', 'trigger_mode'],
docstring=f'The trigger mode for ch{self.id}'
)
self.add_parameter(
'trigger_delay_samples',
initial_value=0,
vals=vals.Numbers(),
set_parser=int,
set_function=self.SD_AIN.DAQconfig,
set_args=['points_per_cycle', 'n_cycles',
'trigger_delay_samples', 'trigger_mode'],
docstring=f'The trigger delay (in samples) for ch{self.id}. '
f'Can be negative'
)
# For channelTriggerConfig
self.add_parameter(
'analog_trigger_edge',
initial_value='rising',
val_mapping={'rising': 1, 'falling': 2, 'both': 3},
set_function=self.SD_AIN.channelTriggerConfig,
set_args=['analog_trigger_edge', 'analog_trigger_threshold'],
docstring=f'The analog trigger edge for ch{self.id}. '
f'This is only used when the channel is set as the '
f'analog trigger channel'
)
self.add_parameter(
'analog_trigger_threshold',
initial_value=0,
vals=vals.Numbers(-3, 3),
set_function=self.SD_AIN.channelTriggerConfig,
set_args=['analog_trigger_edge', 'analog_trigger_threshold'],
docstring=f'the value in volts for the trigger threshold'
)
self.add_parameter(
'analog_trigger_mask',
initial_value=0,
vals=vals.Ints(),
set_function=self.SD_AIN.DAQanalogTriggerConfig,
docstring='the trigger mask you are using. Each bit signifies '
'which analog channel to trigger on. The channel trigger'
' behaviour must be configured separately (trigger_edge '
'and trigger_threshold). Needs to be double checked, but '
'it seems multiple analog trigger channels can be used.'
)
# For DAQ trigger Config
self.add_parameter(
'digital_trigger_mode',
initial_value='rising',
val_mapping={'active_high': 1, 'active_low': 2,
'rising': 3, 'falling': 4},
set_function=self.SD_AIN.DAQdigitalTriggerConfig,
set_args=['digital_trigger_source', 'digital_trigger_mode'],
docstring='The digital trigger mode. Can be `active_high`, '
'`active_low`, `rising`, `falling`'
)
self.add_parameter(
'digital_trigger_source',
initial_value='trig_in',
val_mapping={'trig_in': 0, **{f'pxi{k}': 4000+k for k in range(8)}},
set_function=self.SD_AIN.DAQdigitalTriggerConfig,
set_args=['digital_trigger_source', 'digital_trigger_mode'],
docstring='the trigger source you are using. Can be trig_in '
'(external IO) or pxi0 to pxi7'
)
# For DAQ read
self.add_parameter(
'n_points',
initial_value=0,
vals=vals.Ints(),
set_cmd=None,
docstring='the number of points to be read from specified DAQ'
)
self.add_parameter(
'timeout',
unit='s',
initial_value=-1,
vals=vals.Numbers(min_value=0),
set_cmd=None,
docstring=f'The read timeout in seconds. 0 means infinite. '
f'Warning: setting to 0 will freeze the digitizer until '
f'acquisition has completed.'
)
self.add_parameter(
'data_multiplier',
initial_value=1,
vals=vals.Numbers(),
set_cmd=None,
docstring=f'Value to multiply all acquisition data by'
)
def add_parameter(self, name: str,
parameter_class: type=SignadyneParameter, **kwargs):
"""Use SignadyneParameter by default"""
super().add_parameter(name=name, parameter_class=parameter_class,
parent=self, **kwargs)
@with_error_check
def start(self):
""" Start acquiring data or waiting for a trigger on the specified DAQ
Acquisition data can then be read using `daq_read`
Raises:
AssertionError if DAQstart was unsuccessful
"""
return self.SD_AIN.DAQstart(self.id)
@with_error_check
def read(self) -> np.ndarray:
""" Read from the specified DAQ.
Channel acquisition must first be started using `daq_start`
Uses channel parameters `n_points` and `timeout`
Returns:
Numpy array with acquisition data
Raises:
AssertionError if DAQread was unsuccessful
"""
value = self.SD_AIN.DAQread(self.id, self.n_points(),
int(self.timeout() * 1e3)) # ms
if not isinstance(value, int):
# DAQread returns an int error code on failure; only scale waveform arrays from raw int16 counts to volts
int_min, int_max = -0x8000, 0x7FFF
v_min, v_max = -self.full_scale(), self.full_scale()
relative_value = (value.astype(float) - int_min) / (int_max - int_min)
scaled_value = v_min + (v_max-v_min) * relative_value
else:
scaled_value = value
scaled_value *= self.data_multiplier()
return scaled_value
@with_error_check
def stop(self):
""" Stop acquiring data on the specified DAQ
Raises:
AssertionError if DAQstop was unsuccessful
"""
return self.SD_AIN.DAQstop(self.id)
@with_error_check
def flush(self):
""" Flush the DAQ channel
Raises:
AssertionError if DAQflush was unsuccessful
"""
return self.SD_AIN.DAQflush(self.id)
@with_error_check
def trigger(self):
""" Manually trigger the specified DAQ
Raises:
AssertionError if DAQtrigger was unsuccessful
"""
return self.SD_AIN.DAQtrigger(self.id)
class SD_DIG(SD_Module):
"""Qcodes driver for a generic Keysight Digitizer of the M32/33XX series.
This driver is written with the M3300A in mind.
This driver makes use of the Python library provided by Keysight as part of
the SD1 Software package (v.2.01.00).
Args:
name: the name of the digitizer card
model: Digitizer model (e.g. 'M3300A').
Used to retrieve number of channels if not specified
chassis: Signadyne chassis (usually 0).
slot: module slot in chassis (starting at 1)
channels: the number of input channels the specified card has
triggers: the number of pxi trigger inputs the specified card has
"""
def __init__(self,
name: str,
model: str,
chassis: int,
slot: int,
channels: int = None,
triggers: int = 8,
**kwargs):
super().__init__(name, model, chassis, slot, triggers, **kwargs)
if channels is None:
channels = model_channels[self.model]
# Create instance of keysight SD_AIN class
# We wrap it in a logclass so that any method call is recorded in
# self.SD_AIN._method_calls
self.SD_AIN = logclass(keysightSD1.SD_AIN)()
# store card-specifics
self.n_channels = channels
# Open the device, using the specified chassis and slot number
self.initialize(chassis=chassis, slot=slot)
# for triggerIOconfig
self.add_parameter(
'trigger_direction',
label='Trigger direction for trigger port',
val_mapping={'out': 0, 'in': 1},
set_cmd=self.SD_AIN.triggerIOconfig,
docstring='The trigger direction for digitizer trigger port'
)
# for clockSetFrequency
self.add_parameter(
'system_frequency',
label='System clock frequency',
vals=vals.Numbers(),
set_cmd=None,
initial_value=100e6,
# clockGetFrequency seems to give issues
# set_cmd=self.SD_AIN.clockSetFrequency,
# get_cmd=self.SD_AIN.clockGetFrequency,
docstring='The frequency of internal CLKsys in Hz'
)
# for clockGetSyncFrequency
self.add_parameter(
'sync_frequency',
label='Clock synchronization frequency',
vals=vals.Ints(),
get_cmd=self.SD_AIN.clockGetSyncFrequency,
docstring='The frequency of internal CLKsync in Hz'
)
self.add_parameter('trigger_io',
label='trigger io',
get_function=self.SD_AIN.triggerIOread,
set_function=self.SD_AIN.triggerIOwrite,
docstring='The trigger input value, 0 (OFF) or 1 (ON)',
val_mapping={'off': 0, 'on': 1})
channels = ChannelList(self,
name='channels',
chan_type=DigitizerChannel)
for ch in range(self.n_channels):
channel = DigitizerChannel(self, name=f'ch{ch}', id=ch)
setattr(self, f'ch{ch}', channel)
channels.append(channel)
self.add_submodule('channels', channels)
def add_parameter(self, name: str,
parameter_class: type=SignadyneParameter, **kwargs):
"""Use SignadyneParameter by default"""
super().add_parameter(name=name, parameter_class=parameter_class,
parent=self, **kwargs)
def initialize(self, chassis: int, slot: int):
"""Open connection to digitizer
Args:
chassis: Signadyne chassis number (usually 1)
slot: Module slot in chassis
Returns:
Name of digitizer
Raises:
AssertionError if connection to digitizer was unsuccessful
"""
digitizer_name = self.SD_AIN.getProductNameBySlot(chassis, slot)
assert isinstance(digitizer_name, str), \
f'No SD_DIG found at chassis {chassis}, slot {slot}'
result_code = self.SD_AIN.openWithSlot(digitizer_name, chassis, slot)
assert result_code > 0, f'Could not open SD_DIG error code {result_code}'
return digitizer_name
@with_error_check
def start_channels(self, channels: List[int]):
""" Start acquiring data or waiting for a trigger on the specified DAQs
Args:
channels: list of channels to start
Raises:
AssertionError if DAQstartMultiple was unsuccessful
"""
# DAQ channel mask, where LSB is for DAQ_0, bit 1 is for DAQ_1 etc.
channel_mask = sum(2**channel for channel in channels)
return self.SD_AIN.DAQstartMultiple(channel_mask)
@with_error_check
def stop_channels(self, channels: List[int]):
""" Stop acquiring data on the specified DAQs
Args:
channels: List of DAQ channels to stop
Raises:
AssertionError if DAQstopMultiple was unsuccessful
"""
# DAQ channel mask, where LSB is for DAQ_0, bit 1 is for DAQ_1 etc.
channel_mask = sum(2**channel for channel in channels)
return self.SD_AIN.DAQstopMultiple(channel_mask)
@with_error_check
def trigger_channels(self, channels):
""" Manually trigger the specified DAQs
Args:
channels: List of DAQ channels to trigger
Raises:
AssertionError if DAQtriggerMultiple was unsuccessful
"""
# DAQ channel mask, where LSB is for DAQ_0, bit 1 is for DAQ_1 etc.
channel_mask = sum(2**channel for channel in channels)
return self.SD_AIN.DAQtriggerMultiple(channel_mask)
@with_error_check
def flush_channels(self, channels: List[int]):
""" Flush the specified DAQ channels
Args:
channels: List of DAQ channels to flush
Raises:
AssertionError if DAQflushMultiple was unsuccessful
"""
# DAQ channel mask, where LSB is for DAQ_0, bit 1 is for DAQ_1 etc.
channel_mask = sum(2**channel for channel in channels)
return self.SD_AIN.DAQflushMultiple(channel_mask)
@with_error_check
def reset_clock_phase(self,
trigger_behaviour: int,
trigger_source: int,
skew: float = 0.0):
""" Reset the clock phase between CLKsync and CLKsys
Args:
trigger_behaviour:
trigger_source: the PXI trigger number
skew: the skew between PXI_CLK10 and CLKsync in multiples of 10ns
Raises:
AssertionError if clockResetPhase was unsuccessful
"""
return self.SD_AIN.clockResetPhase(trigger_behaviour, trigger_source, skew)
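# ----------------------------------------------------------------------------
# Usage sketch (illustrative only): bring up a digitizer, configure one channel
# and acquire a single software-triggered trace. The chassis/slot numbers are
# assumptions; only parameters and methods defined above are used.
def _example_acquisition():
    digitizer = SD_DIG('dig', model='M3300A', chassis=1, slot=8)
    ch = digitizer.channels[0]
    ch.points_per_cycle(1000)   # samples per acquisition cycle
    ch.n_cycles(1)              # acquire a single cycle
    ch.n_points(1000)           # number of points DAQread should return
    ch.trigger_mode('software')
    ch.timeout(10)              # read timeout in seconds
    ch.start()
    ch.trigger()                # issue the software trigger
    trace = ch.read()           # scaled to volts and multiplied by data_multiplier
    ch.stop()
    return trace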
|
python
|
# -*- coding: utf-8 -*-
from urllib import request
import sys, os, subprocess
import time, types, json
import os.path as op
import win32api, win32con, win32gui
# default_encoding = 'utf-8'
# if sys.getdefaultencoding() != default_encoding:
# reload(sys)
# sys.setdefaultencoding(default_encoding)
TRY_TIMES = 1
DEFAULT_PIC_PATH = ""
if DEFAULT_PIC_PATH == "":
DEFAULT_PIC_PATH = os.path.expanduser("~") + "\\Pictures\\Bing"
def schedule(a,b,c):
per = 100.0 * a * b / c
if per > 100 :
print("\r100.00%")
return
print("\r%.2f%%" % per, end="")
def get_pic_URL():
bing_json = ''
req = request.Request(
url = 'http://cn.bing.com/HPImageArchive.aspx?format=js&idx=0&n=1'
)
i = TRY_TIMES
while True:
try:
bing_json = request.urlopen(req).read()
except request.HTTPError as e:
print(e)
i = i - 1
if i == 0:
break
time.sleep(5)
else :
break
if bing_json:
bing_dic = json.loads(bing_json)
if bing_dic != None:
return "http://cn.bing.com%s" % bing_dic['images'][0]['url']
print("无法获取URL!")
return ""
def set_wallpaper(pic_path):
if sys.platform == 'win32':
k = win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, r'Control Panel\Desktop', 0, win32con.KEY_ALL_ACCESS)
curpath = win32api.RegQueryValueEx(k, 'Wallpaper')[0]
if curpath == pic_path:
pass
else:
# win32api.RegSetValueEx(k, "WallpaperStyle", 0, win32con.REG_SZ, "2")#2 for tile,0 for center
# win32api.RegSetValueEx(k, "TileWallpaper", 0, win32con.REG_SZ, "0")
win32gui.SystemParametersInfo(win32con.SPI_SETDESKWALLPAPER, pic_path, 1+2)
win32api.RegCloseKey(k)
else:
curpath = subprocess.getstatusoutput('gsettings get org.gnome.desktop.background picture-uri')[1][1:-1]
if curpath == pic_path:
pass
else:
subprocess.getstatusoutput('DISPLAY=:0 gsettings set org.gnome.desktop.background picture-uri "%s"' % pic_path)
try:
print("开始运行。")
localtime = time.localtime(time.time())
url = get_pic_URL()
if url != '':
print("URL:" + url)
pic_name = url.split('/')[-1].split('&')[0].split('OHR.')[-1]
pic_name = "%04d.%02d.%02d.%s" % (localtime.tm_year, localtime.tm_mon, localtime.tm_mday, pic_name)
pic_path = "%s\\%s" % (DEFAULT_PIC_PATH, pic_name)
if os.path.exists(pic_path):
print("图片已存在!")
exit()
print("图片名:" + pic_name)
print("开始下载...")
try:
request.urlretrieve(url, pic_path, schedule)
set_wallpaper(pic_path)
print("成功")
except Exception as e:
print(e)
exit()
except KeyboardInterrupt:
pass
|
python
|
from django.shortcuts import render
from django.urls import reverse, reverse_lazy
from django.views.generic import ListView, DetailView, DeleteView
from django.views.generic.edit import CreateView, UpdateView
from django.contrib.auth.mixins import PermissionRequiredMixin
from .models import AdvThreatEvent, NonAdvThreatEvent
from .models import AdvThreatSource, NonAdvThreatSource
from .models import Vulnerability, RiskCondition
from .models import Impact, RiskResponse
# Create your views here.
def index(request):
return render(request, 'risk/index.html')
def at_index(request):
return render(request, 'risk/at_index.html')
def nt_index(request):
return render(request, 'risk/nt_index.html')
def help_index(request):
return render(request, 'risk/help_index.html')
def help_adv_threat(request):
return render(request, 'risk/help_adv_threat.html')
def help_nonadv_threat(request):
return render(request, 'risk/help_nonadv_threat.html')
class ATEIndexView(ListView):
model = AdvThreatEvent
template_name = 'risk/ate_index.html'
context_object_name = 'ate_list'
permission_required = 'risk.view_advthreatevent'
def get_queryset(self):
return AdvThreatEvent.objects.order_by('-assigned_risk')
class NTEIndexView(ListView):
model = NonAdvThreatEvent
template_name = 'risk/nte_index.html'
context_object_name = 'nte_list'
permission_required = 'risk.view_nonadvthreatevent'
def get_queryset(self):
return NonAdvThreatEvent.objects.order_by('-assigned_risk')
class ATSIndexView(ListView):
model = AdvThreatSource
template_name = 'risk/ats_index.html'
context_object_name = 'ats_list'
permission_required = 'risk.view_advthreatsource'
class NTSIndexView(ListView):
model = NonAdvThreatSource
template_name = 'risk/nts_index.html'
context_object_name = 'nts_list'
permission_required = 'risk.view_nonadvthreatsource'
class VulnIndexView(ListView):
model = Vulnerability
template_name = 'risk/vuln_index.html'
context_object_name = 'vuln_list'
permission_required = 'risk.view_vulnerability'
class CondIndexView(ListView):
model = RiskCondition
template_name = 'risk/cond_index.html'
context_object_name = 'cond_list'
permission_required = 'risk.view_riskcondition'
class ImpactIndexView(ListView):
model = Impact
template_name = 'risk/impact_index.html'
context_object_name = 'impact_list'
permission_required = 'risk.view_impact'
class ResponseIndexView(ListView):
model = RiskResponse
template_name = 'risk/response_index.html'
context_object_name = 'response_list'
permission_required = 'risk.view_riskresponse'
class ATEDetailView(DetailView):
model = AdvThreatEvent
template_name = 'risk/ate_detail.html'
context_object_name = 'ate'
permission_required = 'risk.view_advthreatevent'
class NTEDetailView(DetailView):
model = NonAdvThreatEvent
template_name = 'risk/nte_detail.html'
context_object_name = 'nte'
permission_required = 'risk.view_nonadvthreatevent'
class ATSDetailView(DetailView):
model = AdvThreatSource
template_name = 'risk/ats_detail.html'
context_object_name = 'ats'
permission_required = 'risk.view_advthreatsource'
class NTSDetailView(DetailView):
model = NonAdvThreatSource
template_name = 'risk/nts_detail.html'
context_object_name = 'nts'
permission_required = 'risk.view_nonadvthreatsource'
class VulnDetailView(DetailView):
model = Vulnerability
template_name = 'risk/vuln_detail.html'
context_object_name = 'vuln'
permission_required = 'risk.view_vulnerability'
class CondDetailView(DetailView):
model = RiskCondition
template_name = 'risk/cond_detail.html'
context_object_name = 'cond'
permission_required = 'risk.view_riskcondition'
class ImpactDetailView(DetailView):
model = Impact
template_name = 'risk/impact_detail.html'
context_object_name = 'impact'
permission_required = 'risk.view_impact'
class ResponseDetailView(DetailView):
model = RiskResponse
template_name = 'risk/response_detail.html'
context_object_name = 'response'
permission_required = 'risk.view_riskresponse'
class ATECreateView(PermissionRequiredMixin, CreateView):
model = AdvThreatEvent
permission_required = 'risk.add_advthreatevent'
fields = ['name', 'desc', 'event_type', 'sources', 'relevance',
'info_source', 'tier', 'likelihood_initiation',
'likelihood_impact', 'vulnerabilities', 'impacts',
'responses', 'assigned_risk']
def get_success_url(self):
return reverse_lazy('risk:ate_detail', args=(self.object.id,))
class NTECreateView(PermissionRequiredMixin, CreateView):
model = NonAdvThreatEvent
permission_required = 'risk.add_nonadvthreatevent'
fields = ['name', 'desc', 'event_type', 'sources', 'relevance',
'info_source', 'tier', 'likelihood_initiation',
'likelihood_impact', 'risk_conditions', 'impacts',
'responses', 'assigned_risk']
def get_success_url(self):
return reverse_lazy('risk:nte_detail', args=(self.object.id,))
class ATSCreateView(PermissionRequiredMixin, CreateView):
model = AdvThreatSource
permission_required = 'risk.add_advthreatsource'
fields = ['name', 'desc', 'source_type', 'info_source', 'tier',
'in_scope', 'capability', 'intent', 'targeting']
def get_success_url(self):
return reverse_lazy('risk:ats_detail', args=(self.object.id,))
class NTSCreateView(PermissionRequiredMixin, CreateView):
model = NonAdvThreatSource
permission_required = 'risk.add_nonadvthreatsource'
fields = ['name', 'desc', 'source_type', 'info_source', 'tier',
'in_scope', 'range_of_effect']
def get_success_url(self):
return reverse_lazy('risk:nts_detail', args=(self.object.id,))
class VulnCreateView(PermissionRequiredMixin, CreateView):
model = Vulnerability
permission_required = 'risk.add_vulnerability'
fields = ['name', 'desc', 'vuln_type', 'severity',
'info_source', 'tier']
def get_success_url(self):
return reverse_lazy('risk:vuln_detail', args=(self.object.id,))
class CondCreateView(PermissionRequiredMixin, CreateView):
model = RiskCondition
permission_required = 'risk.add_riskcondition'
fields = ['name', 'desc', 'condition_type', 'pervasiveness',
'info_source', 'tier']
def get_success_url(self):
return reverse_lazy('risk:cond_detail', args=(self.object.id,))
class ImpactCreateView(PermissionRequiredMixin, CreateView):
model = Impact
permission_required = 'risk.add_impact'
fields = ['name', 'desc', 'impact_type', 'info_source', 'tier',
'severity', 'impact_tier']
def get_success_url(self):
return reverse_lazy('risk:impact_detail', args=(self.object.id,))
class ResponseCreateView(PermissionRequiredMixin, CreateView):
model = RiskResponse
permission_required = 'risk.add_riskresponse'
fields = ['name', 'desc', 'response_type', 'effectiveness', 'status']
def get_success_url(self):
return reverse_lazy('risk:response_detail', args=(self.object.id,))
class ATEUpdateView(PermissionRequiredMixin, UpdateView):
model = AdvThreatEvent
permission_required = 'risk.change_advthreatevent'
template_name = 'risk/advthreatevent_update_form.html'
fields = ['name', 'desc', 'event_type', 'sources', 'relevance',
'info_source', 'tier', 'likelihood_initiation',
'likelihood_impact', 'vulnerabilities', 'impacts',
'responses', 'assigned_risk']
def get_success_url(self):
return reverse_lazy('risk:ate_detail', args=(self.object.id,))
class NTEUpdateView(PermissionRequiredMixin, UpdateView):
model = NonAdvThreatEvent
permission_required = 'risk.change_nonadvthreatevent'
template_name = 'risk/nonadvthreatevent_update_form.html'
fields = ['name', 'desc', 'event_type', 'sources', 'relevance',
'info_source', 'tier', 'likelihood_initiation',
'likelihood_impact', 'risk_conditions', 'impacts',
'responses', 'assigned_risk']
def get_success_url(self):
return reverse_lazy('risk:nte_detail', args=(self.object.id,))
class ATSUpdateView(PermissionRequiredMixin, UpdateView):
model = AdvThreatSource
permission_required = 'risk.change_advthreatsource'
template_name = 'risk/advthreatsource_update_form.html'
fields = ['name', 'desc', 'source_type', 'info_source', 'tier',
'in_scope', 'capability', 'intent', 'targeting']
def get_success_url(self):
return reverse_lazy('risk:ats_detail', args=(self.object.id,))
class NTSUpdateView(PermissionRequiredMixin, UpdateView):
model = NonAdvThreatSource
permission_required = 'risk.change_nonadvthreatsource'
template_name = 'risk/nonadvthreatsource_update_form.html'
fields = ['name', 'desc', 'source_type', 'info_source', 'tier',
'in_scope', 'range_of_effect']
def get_success_url(self):
return reverse_lazy('risk:nts_detail', args=(self.object.id,))
class VulnUpdateView(PermissionRequiredMixin, UpdateView):
model = Vulnerability
permission_required = 'risk.change_vulnerability'
template_name = 'risk/vulnerability_update_form.html'
fields = ['name', 'desc', 'vuln_type', 'severity',
'info_source', 'tier']
def get_success_url(self):
return reverse_lazy('risk:vuln_detail', args=(self.object.id,))
class CondUpdateView(PermissionRequiredMixin, UpdateView):
model = RiskCondition
permission_required = 'risk.change_riskcondition'
template_name = 'risk/riskcondition_update_form.html'
fields = ['name', 'desc', 'condition_type', 'pervasiveness',
'info_source', 'tier']
def get_success_url(self):
return reverse_lazy('risk:cond_detail', args=(self.object.id,))
class ImpactUpdateView(PermissionRequiredMixin, UpdateView):
model = Impact
permission_required = 'risk.change_impact'
template_name = 'risk/impact_update_form.html'
fields = ['name', 'desc', 'impact_type', 'info_source', 'tier',
'severity', 'impact_tier']
def get_success_url(self):
return reverse_lazy('risk:impact_detail', args=(self.object.id,))
class ResponseUpdateView(PermissionRequiredMixin, UpdateView):
model = RiskResponse
permission_required = 'risk.change_riskresponse'
template_name = 'risk/riskresponse_update_form.html'
fields = ['name', 'desc', 'response_type', 'effectiveness', 'status']
def get_success_url(self):
return reverse_lazy('risk:response_detail', args=(self.object.id,))
class ATEDeleteView(PermissionRequiredMixin, DeleteView):
model = AdvThreatEvent
permission_required = 'risk.delete_advthreatevent'
template_name = 'risk/ate_delete.html'
context_object_name = 'ate'
def get_success_url(self):
return reverse('risk:ate_index')
class NTEDeleteView(PermissionRequiredMixin, DeleteView):
model = NonAdvThreatEvent
permission_required = 'risk.delete_nonadvthreatevent'
template_name = 'risk/nte_delete.html'
context_object_name = 'nte'
def get_success_url(self):
return reverse('risk:nte_index')
class ATSDeleteView(PermissionRequiredMixin, DeleteView):
model = AdvThreatSource
permission_required = 'risk.delete_advthreatsource'
template_name = 'risk/ats_delete.html'
context_object_name = 'ats'
def get_success_url(self):
return reverse('risk:ats_index')
class NTSDeleteView(PermissionRequiredMixin, DeleteView):
model = NonAdvThreatSource
permission_required = 'risk.delete_nonadvthreatsource'
template_name = 'risk/nts_delete.html'
context_object_name = 'nts'
def get_success_url(self):
return reverse('risk:nts_index')
class VulnDeleteView(PermissionRequiredMixin, DeleteView):
model = Vulnerability
permission_required = 'risk.delete_vulnerability'
template_name = 'risk/vuln_delete.html'
context_object_name = 'vuln'
def get_success_url(self):
return reverse('risk:vuln_index')
class CondDeleteView(PermissionRequiredMixin, DeleteView):
model = RiskCondition
permission_required = 'risk.delete_riskcondition'
template_name = 'risk/cond_delete.html'
context_object_name = 'cond'
def get_success_url(self):
return reverse('risk:cond_index')
class ImpactDeleteView(PermissionRequiredMixin, DeleteView):
model = Impact
permission_required = 'risk.delete_impact'
template_name = 'risk/impact_delete.html'
context_object_name = 'impact'
def get_success_url(self):
return reverse('risk:impact_index')
class ResponseDeleteView(PermissionRequiredMixin, DeleteView):
model = RiskResponse
permission_required = 'risk.delete_riskresponse'
template_name = 'risk/response_delete.html'
context_object_name = 'response'
def get_success_url(self):
return reverse('risk:response_index')
|
python
|
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""@package src.clm.urls
"""
from django.conf.urls import patterns, include, url
from clm.utils.decorators import decorated_functions
from clm.views.guest.user import *
from clm.views.guest.cluster import *
from clm.views.guest.message import *
from clm.views.guest.news import *
from clm.views.admin_clm.cluster import *
from clm.views.admin_clm.news import *
from clm.views.admin_clm.user import *
from clm.views.admin_cm.user import *
from clm.views.admin_cm.admin import *
from clm.views.admin_cm.cluster import *
from clm.views.admin_cm.farm import *
from clm.views.admin_cm.node import *
from clm.views.admin_cm.storage import *
from clm.views.admin_cm.template import *
from clm.views.admin_cm.vm import *
from clm.views.admin_cm.network import *
from clm.views.admin_cm.iso_image import *
from clm.views.admin_cm.storage_image import *
from clm.views.admin_cm.system_image import *
from clm.views.admin_cm.monia import *
from clm.views.admin_cm.public_ip import *
from clm.views.user.ctx import *
from clm.views.user.group import *
from clm.views.user.iso_image import *
from clm.views.user.storage_image import *
from clm.views.user.system_image import *
from clm.views.user.key import *
from clm.views.user.message import *
from clm.views.user.template import *
from clm.views.user.user import *
from clm.views.user.vm import *
from clm.views.user.farm import *
from clm.views.user.public_ip import *
from clm.views.user.network import *
from clm.views.user.admin import *
from clm.views.user.monia import *
global decorated_functions
urlpatterns = patterns('',)
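# Each view collected through the decorator machinery below is exposed under
# <module path relative to clm.views>/<function name>/, so a hypothetical
# function get_list in clm.views.user.vm would be served at user/vm/get_list/.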
for fun in decorated_functions:
urlpatterns += patterns('', url(r'^%s/%s/' % (fun.__module__.replace('clm.views.', '').replace('.', '/'),
fun.__name__), fun)
)
# TODO: Remove it when it will be logged somewhere
f = open('/tmp/log-clm', 'w')
for u in urlpatterns:
f.write(str(u) + '\n')
f.close()
|
python
|
import os
from optparse import make_option
from django.core.management import call_command, BaseCommand
from django.conf import settings
from fixture_generator.base import get_available_fixtures
from django.db.models.loading import get_app
class Command(BaseCommand):
"""
Regenerate fixtures for all applications.
"""
option_list = BaseCommand.option_list + (
make_option("--format", default="json", dest="format",
help="Specifies the output serialization format for fixtures."),
make_option("--indent", default=4, dest="indent", type="int",
help="Specifies the indent level to use when pretty-printing output"),
make_option("--not-natural", default=True, dest="use_natural_keys", action="store_false",
help="Don't use natural keys."),
make_option("--databases", dest="dbs", default="",
help="Comma separeted list of databases to dump. All databases are used by default")
)
args = '<app app ... app>'
def handle(self, *apps, **options):
fixtures = get_available_fixtures(apps or settings.INSTALLED_APPS)
for fixture in fixtures.itervalues():
if not isinstance(fixture.export, basestring):
continue
print fixture
app = get_app(fixture.app)
destdir = os.path.dirname(app.__file__)
if app.__file__.rsplit('.', 1)[0].endswith("__init__"):
destdir = os.path.dirname(destdir)
destdir = os.path.join(destdir, "fixtures")
call_command("generate_fixture", fixture.label, prefix=fixture.export, dest_dir=destdir, **options)
|
python
|
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
"""
Builtin tools that come with pyiron base.
"""
from abc import ABC
from pyiron_base.job.factory import JobFactory
__author__ = "Liam Huber"
__copyright__ = (
"Copyright 2021, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "Liam Huber"
__email__ = "[email protected]"
__status__ = "production"
__date__ = "Sep 7, 2021"
class Toolkit(ABC):
def __init__(self, project):
self._project = project
class BaseTools(Toolkit):
def __init__(self, project):
super().__init__(project)
self._job = JobFactory(project)
@property
def job(self) -> JobFactory:
return self._job
|
python
|
# -*- coding: utf-8 -*-
from yandex_checkout import ReceiptItem
from yandex_checkout.domain.common.receipt_type import ReceiptType
from yandex_checkout.domain.common.request_object import RequestObject
from yandex_checkout.domain.models.receipt_customer import ReceiptCustomer
from yandex_checkout.domain.models.settlement import Settlement
class ReceiptRequest(RequestObject):
__type = None
__send = None
__customer = None
__tax_system_code = None
__items = None
__settlements = None
__payment_id = None
__refund_id = None
@property
def type(self):
return self.__type
@type.setter
def type(self, value):
self.__type = str(value)
@property
def send(self):
return self.__send
@send.setter
def send(self, value):
if isinstance(value, bool):
self.__send = value
else:
raise TypeError('Invalid send value type in receipt_request')
@property
def customer(self):
return self.__customer
@customer.setter
def customer(self, value):
if isinstance(value, dict):
self.__customer = ReceiptCustomer(value)
elif isinstance(value, ReceiptCustomer):
self.__customer = value
else:
raise TypeError('Invalid customer value type in receipt_request')
@property
def tax_system_code(self):
return self.__tax_system_code
@tax_system_code.setter
def tax_system_code(self, value):
if isinstance(value, int):
self.__tax_system_code = value
else:
raise TypeError('Invalid tax_system_code value type in receipt_request')
@property
def items(self):
return self.__items
@items.setter
def items(self, value):
if isinstance(value, list):
items = []
for item in value:
if isinstance(item, dict):
items.append(ReceiptItem(item))
elif isinstance(item, ReceiptItem):
items.append(item)
else:
raise TypeError('Invalid item type in receipt.items')
self.__items = items
else:
raise TypeError('Invalid items value type in receipt_request')
@property
def settlements(self):
return self.__settlements
@settlements.setter
def settlements(self, value):
if isinstance(value, list):
items = []
for item in value:
if isinstance(item, dict):
items.append(Settlement(item))
elif isinstance(item, Settlement):
items.append(item)
else:
raise TypeError('Invalid settlement type in receipt.settlements')
self.__settlements = items
else:
raise TypeError('Invalid settlements value type in receipt_request')
@property
def payment_id(self):
return self.__payment_id
@payment_id.setter
def payment_id(self, value):
self.__refund_id = None
self.__payment_id = str(value)
@property
def refund_id(self):
return self.__refund_id
@refund_id.setter
def refund_id(self, value):
self.__payment_id = None
self.__refund_id = str(value)
def validate(self):
if self.type is None:
self.__set_validation_error('Receipt type not specified')
if self.send is None:
self.__set_validation_error('Receipt send not specified')
if self.customer is not None:
email = self.customer.email
phone = self.customer.phone
if not email and not phone:
self.__set_validation_error('Both email and phone values are empty in customer')
else:
self.__set_validation_error('Receipt customer not specified')
if not self.has_items():
self.__set_validation_error('Receipt items not specified')
if not self.has_settlements():
self.__set_validation_error('Receipt settlements not specified')
if self.type == ReceiptType.PAYMENT and self.payment_id is None:
self.__set_validation_error('Receipt payment_id not specified')
if self.type == ReceiptType.REFUND and self.refund_id is None:
self.__set_validation_error('Receipt refund_id not specified')
def has_items(self):
return bool(self.items)
def has_settlements(self):
return bool(self.settlements)
def __set_validation_error(self, message):
raise ValueError(message)
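# ----------------------------------------------------------------------------
# Hedged usage sketch: exercising the validation logic above. It assumes
# ReceiptRequest can be instantiated without arguments (via the RequestObject
# base class); with nothing but type and send set, validate() stops at the
# first missing piece.
if __name__ == '__main__':
    receipt = ReceiptRequest()
    receipt.type = ReceiptType.PAYMENT
    receipt.send = True
    try:
        receipt.validate()
    except ValueError as error:
        print(error)  # -> 'Receipt customer not specified'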
|
python
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Skywater 130 PDK support rules.
These rules generate PDK providers for downstream tools.
"""
load("//pdk:build_defs.bzl", "CornerInfo", "StandardCellInfo")
def _skywater_corner_impl(ctx):
# Choose user supplied root, or default to build directory.
standard_cell_root = ctx.attr.standard_cell_root
# Choose the build target name as the corner first unless overwritten.
corner = ctx.attr.corner if ctx.attr.corner else ctx.attr.name
corner_suffix = ""
args = ctx.actions.args()
if ctx.attr.with_leakage:
corner_suffix = "_pwrlkg"
args.add("--leakage")
if ctx.attr.with_ccsnoise:
corner_suffix = "_ccsnoise"
args.add("--ccsnoise")
timing_output = ctx.actions.declare_file("timing/{}__{}{}.lib".format(
ctx.attr.standard_cell_name,
corner,
corner_suffix,
))
args.add_all("-o", [timing_output.dirname])
args.add(standard_cell_root)
args.add(corner)
ctx.actions.run(
outputs = [timing_output],
inputs = ctx.files.srcs,
arguments = [args],
executable = ctx.executable._liberty_tool,
)
return [
DefaultInfo(files = depset([timing_output])),
CornerInfo(
liberty = timing_output,
with_ccsnoise = ctx.attr.with_ccsnoise,
with_leakage = ctx.attr.with_leakage,
corner_name = corner,
),
]
def _skywater_cell_library_impl(ctx):
corners = dict([(dep[CornerInfo].corner_name, dep[CornerInfo]) for dep in ctx.attr.process_corners])
return [
DefaultInfo(files = depset([])),
StandardCellInfo(corners = corners, default_corner = corners.get(ctx.attr.default_corner, None)),
]
skywater_cell_library = rule(
implementation = _skywater_cell_library_impl,
attrs = {
"srcs": attr.label_list(allow_files = True),
"process_corners": attr.label_list(
providers = [CornerInfo],
),
"default_corner": attr.string(mandatory = True),
},
)
skywater_corner = rule(
implementation = _skywater_corner_impl,
attrs = {
"srcs": attr.label_list(
allow_files = True,
allow_empty = False,
),
"corner": attr.string(
default = "",
doc = "The selected process corner to generate liberty files for.",
),
"standard_cell_root": attr.string(
default = "",
doc = "The root directory of the standard cell variants.",
mandatory = True,
),
"with_ccsnoise": attr.bool(
default = False,
doc = "Wheter to generate ccsnoise.",
),
"standard_cell_name": attr.string(
mandatory = True,
doc = "The name of the standar cell variant ex. sky130_fd_sc_hd",
),
"with_leakage": attr.bool(
default = False,
doc = "Wheter to generate leakage",
),
"_liberty_tool": attr.label(
default = Label("@com_google_skywater_pdk//:liberty"),
executable = True,
cfg = "exec",
),
},
)
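# ----------------------------------------------------------------------------
# Hedged usage sketch (BUILD file, target names and paths illustrative): one
# corner target feeding a cell library, using only the attributes declared above.
#
# skywater_corner(
#     name = "tt",
#     srcs = glob(["cells/**"]),
#     standard_cell_name = "sky130_fd_sc_hd",
#     standard_cell_root = "cells/sky130_fd_sc_hd",
# )
#
# skywater_cell_library(
#     name = "sky130_fd_sc_hd",
#     srcs = glob(["cells/**"]),
#     process_corners = [":tt"],
#     default_corner = "tt",
# )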
|
python
|
from flaky import flaky
from .. import SemparseTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
class TestAtisParserPredictor(SemparseTestCase):
@flaky
def test_atis_parser_uses_named_inputs(self):
inputs = {"utterance": "show me the flights to seattle"}
archive_path = self.FIXTURES_ROOT / "atis" / "serialization" / "model.tar.gz"
archive = load_archive(archive_path)
predictor = Predictor.from_archive(archive, "atis-parser")
result = predictor.predict_json(inputs)
action_sequence = result.get("best_action_sequence")
if action_sequence:
# An untrained model will likely get into a loop, and not produce any finished states.
# When the model gets into a loop it will not produce any valid SQL, so we don't get
# any actions. This basically just tests if the model runs.
assert len(action_sequence) > 1
assert all([isinstance(action, str) for action in action_sequence])
predicted_sql_query = result.get("predicted_sql_query")
assert predicted_sql_query is not None
@flaky
def test_atis_parser_predicted_sql_present(self):
inputs = {"utterance": "show me flights to seattle"}
archive_path = self.FIXTURES_ROOT / "atis" / "serialization" / "model.tar.gz"
archive = load_archive(archive_path)
predictor = Predictor.from_archive(archive, "atis-parser")
result = predictor.predict_json(inputs)
predicted_sql_query = result.get("predicted_sql_query")
assert predicted_sql_query is not None
@flaky
def test_atis_parser_batch_predicted_sql_present(self):
inputs = [{"utterance": "show me flights to seattle"}]
archive_path = self.FIXTURES_ROOT / "atis" / "serialization" / "model.tar.gz"
archive = load_archive(archive_path)
predictor = Predictor.from_archive(archive, "atis-parser")
result = predictor.predict_batch_json(inputs)
predicted_sql_query = result[0].get("predicted_sql_query")
assert predicted_sql_query is not None
|
python
|
# Generated by Django 3.2.3 on 2021-05-29 21:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('discordbot', '0021_auto_20210529_2045'),
]
operations = [
migrations.AddField(
model_name='member',
name='settings',
field=models.JSONField(default=dict, verbose_name='Settings'),
),
]
|
python
|
# Author: Jintao Huang
# Time: 2020-5-24
import torch.nn as nn
from .utils import FrozenBatchNorm2d
default_config = {
# backbone
"pretrained_backbone": True,
"backbone_norm_layer": nn.BatchNorm2d,
"backbone_freeze": ["conv_first", "layer1", "layer2"],
# "backbone_freeze": [""], # freeze backbone all
# anchor:
"anchor_scales": (1., 2 ** (1 / 3.), 2 ** (2 / 3.)), # scales on a single feature
"anchor_aspect_ratios": ((1., 1.), (0.7, 1.4), (1.4, 0.7)), # H, W
# focal loss
"alpha": 0.25,
"gamma": 2, # 1.5
# other:
"other_norm_layer": nn.BatchNorm2d,
}
config_dict = {
# resolution[% 128 == 0], backbone, fpn_channels, fpn_num_repeat, regressor_classifier_num_repeat,
# anchor_base_scale (anchor_size / stride) (base scale)
'efficientdet_d0': (512, 'efficientnet_b0', 64, 3, 3, 4.), #
'efficientdet_d1': (640, 'efficientnet_b1', 88, 4, 3, 4.), #
'efficientdet_d2': (768, 'efficientnet_b2', 112, 5, 3, 4.), #
'efficientdet_d3': (896, 'efficientnet_b3', 160, 6, 4, 4.), #
'efficientdet_d4': (1024, 'efficientnet_b4', 224, 7, 4, 4.), #
'efficientdet_d5': (1280, 'efficientnet_b5', 288, 7, 4, 4.),
'efficientdet_d6': (1280, 'efficientnet_b6', 384, 8, 5, 4.), #
'efficientdet_d7': (1536, 'efficientnet_b6', 384, 8, 5, 5.)
}
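# Example (hedged): unpacking a single entry of config_dict, with names taken
# from the comment above.
# (resolution, backbone, fpn_channels, fpn_num_repeat,
#  regressor_classifier_num_repeat, anchor_base_scale) = config_dict['efficientdet_d0']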
# Official configuration
# config_dict = {
# # resolution[% 128 == 0], backbone, fpn_channels, fpn_num_repeat, regressor_classifier_num_repeat,
# # anchor_base_scale (anchor_size / stride) (base scale)
# 'efficientdet_d0': (512, 'efficientnet_b0', 64, *2, 3, 4.), #
# 'efficientdet_d1': (640, 'efficientnet_b1', 88, *3, 3, 4.), #
# 'efficientdet_d2': (768, 'efficientnet_b2', 112, *4, 3, 4.), #
# 'efficientdet_d3': (896, 'efficientnet_b3', 160, *5, 4, 4.), #
# 'efficientdet_d4': (1024, 'efficientnet_b4', 224, *6, 4, 4.), #
# 'efficientdet_d5': (1280, 'efficientnet_b5', 288, 7, 4, 4.),
# 'efficientdet_d6': (*1408, 'efficientnet_b6', 384, 8, 5, 4.), #
# 'efficientdet_d7': (1536, 'efficientnet_b6', 384, 8, 5, 5.)
# }
|
python
|
# TOO EASY
T = int(input())
for _ in range(T):
lower, upper = map(int, input().split())
n = int(input())
# a < num <= b
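# Interactive binary search: guess the midpoint, then tighten the bounds based
# on the judge's TOO_SMALL / TOO_BIG reply; any other reply ends the test case.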
for _ in range(n):
mid = (lower+upper)//2
print(mid)
res = input()
if res == "TOO_SMALL":
lower = mid + 1
elif res == "TOO_BIG":
upper = mid - 1
else:
break
|
python
|
# -*- coding: utf-8 -*-
from odoo import _, api, fields, models
class Trainer(models.Model):
_name = "bista.trainer"
_description = "Bista Training Management System - Trainer"
_rec_name = "name"
profile_image = fields.Binary(string="Profile Image", attachment=True)
first_name = fields.Char(string="First Name", required=True)
last_name = fields.Char(string="Last Name")
name = fields.Char(string="Name", compute="_get_name", store=True)
@api.depends('first_name', 'last_name')
def _get_name(self):
for record in self:
if record.last_name:
record.name = record.first_name + ' ' + record.last_name
else:
record.name = record.first_name
class TrainerNotes(models.Model):
_name = "bista.trainer.note"
_description = "Bista Training Management System - Trainer Notes"
_rec_name = "subject"
added_by = fields.Many2one('res.users', string="Added By")
subject = fields.Char(string="Subject", required=True)
date = fields.Date(
string="Date", default=lambda self: fields.Date.today())
note = fields.Char(string="Note")
|
python
|
#coding: utf-8
#!python3
# 5) Ask for the price of a product and the discount percentage. Display the
# discount value and the final price to pay:
p_produto = float(input('Product price: '))
p_desconto = float(input('Discount percentage: '))
valor_desconto = p_produto * p_desconto / 100
p_produto_atual = p_produto - valor_desconto
print('Discount value: ', valor_desconto)
print('Price to pay: ', p_produto_atual)
|
python
|
# Generated by Django 2.2.16 on 2020-10-02 08:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('routes', '0047_source_id_nullable'),
('routes', '0048_athlete_activities_imported'),
]
operations = [
]
|
python
|
#!/usr/bin/env python3
import unittest
import numpy as np
from selfdrive.test.longitudinal_maneuvers.maneuver import Maneuver
def run_cruise_simulation(cruise, t_end=100.):
man = Maneuver(
'',
duration=t_end,
initial_speed=float(0.),
lead_relevancy=True,
initial_distance_lead=100,
cruise_values=[cruise],
prob_lead_values=[0.0],
breakpoints=[0.],
)
valid, output = man.evaluate()
assert valid
return output[-1,3]
class TestCruiseSpeed(unittest.TestCase):
def test_cruise_speed(self):
for speed in np.arange(5, 40, 5):
print(f'Testing {speed} m/s')
cruise_speed = float(speed)
simulation_steady_state = run_cruise_simulation(cruise_speed)
self.assertAlmostEqual(simulation_steady_state, cruise_speed, delta=.01, msg=f'Did not reach {speed} m/s')
if __name__ == "__main__":
unittest.main()
|
python
|
from __future__ import print_function
from mmstage import MicroManagerStage
|
python
|
# Copyright: 2006-2011 Brian Harring <[email protected]>
# License: GPL2/BSD
"""
exceptions thrown by the MergeEngine
"""
__all__ = ("ModificationError", "BlockModification",
"TriggerUnknownCset",
)
class ModificationError(Exception):
"""Base Exception class for modification errors"""
def __init__(self, trigger, msg):
self.trigger = trigger
self.msg = msg
Exception.__init__(self, "%s: modification error: %s" %
(self.trigger, self.msg))
class BlockModification(ModificationError):
"""Merging cannot proceed"""
def __str__(self):
return "Modification was blocked by %s: %s" % (
self.trigger.__class__.__name__, self.msg)
class TriggerUnknownCset(ModificationError):
"""Trigger's required content set isn't known"""
def __init__(self, trigger, csets):
if not isinstance(csets, (tuple, list)):
csets = (csets,)
ModificationError.__init__(self, "%s: trigger %r unknown cset: %r" %
(self.__class__, trigger, csets))
self.trigger, self.csets = trigger, csets
|
python
|
# Generated by Django 2.1.7 on 2019-04-16 15:14
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('materials', '0068_auto_20190415_2140'),
]
operations = [
migrations.RenameField(
model_name='dataset',
old_name='experimental',
new_name='is_experimental',
),
]
|
python
|
# -*- coding: utf-8 -*-
"""
flask_security.recoverable
~~~~~~~~~~~~~~~~~~~~~~~~~~
Flask-Security recoverable module
:copyright: (c) 2012 by Matt Wright.
:license: MIT, see LICENSE for more details.
"""
from flask import current_app as app
from werkzeug.local import LocalProxy
from werkzeug.security import safe_str_cmp
from .signals import password_reset, reset_password_instructions_sent
from .utils import config_value, encrypt_password, get_token_status, md5, \
send_mail, url_for_security
# Convenient references
_security = LocalProxy(lambda: app.extensions['security'])
_datastore = LocalProxy(lambda: _security.datastore)
def send_reset_password_instructions(user):
"""Sends the reset password instructions email for the specified user.
:param user: The user to send the instructions to
"""
token = generate_reset_password_token(user)
reset_link = url_for_security(
'reset_password', token=token, _external=True
)
send_mail(config_value('EMAIL_SUBJECT_PASSWORD_RESET'), user.email,
'reset_instructions',
user=user, reset_link=reset_link)
reset_password_instructions_sent.send(
app._get_current_object(), user=user, token=token
)
def send_password_reset_notice(user):
"""Sends the password reset notice email for the specified user.
:param user: The user to send the notice to
"""
if config_value('SEND_PASSWORD_RESET_NOTICE_EMAIL'):
send_mail(config_value('EMAIL_SUBJECT_PASSWORD_NOTICE'), user.email,
'reset_notice', user=user)
def generate_reset_password_token(user):
"""Generates a unique reset password token for the specified user.
:param user: The user to work with
"""
password_hash = md5(user.password) if user.password else None
data = [str(user.id), password_hash]
return _security.reset_serializer.dumps(data)
def reset_password_token_status(token):
"""Returns the expired status, invalid status, and user of a password reset
token. For example::
expired, invalid, user, data = reset_password_token_status('...')
:param token: The password reset token
"""
expired, invalid, user, data = get_token_status(
token, 'reset', 'RESET_PASSWORD', return_data=True
)
if not invalid:
if user.password:
password_hash = md5(user.password)
if not safe_str_cmp(password_hash, data[1]):
invalid = True
return expired, invalid, user
def update_password(user, password):
"""Update the specified user's password
:param user: The user to update_password
:param password: The unencrypted new password
"""
user.password = encrypt_password(password)
_datastore.put(user)
send_password_reset_notice(user)
password_reset.send(app._get_current_object(), user=user)
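# ----------------------------------------------------------------------------
# Hedged flow sketch (comments only) of how the helpers above fit together in
# a typical reset view:
#   1. send_reset_password_instructions(user) generates a token and mails a
#      link built with url_for_security('reset_password', token=token).
#   2. The view handling that link calls reset_password_token_status(token);
#      if the token is neither expired nor invalid, it calls
#      update_password(user, new_password), which re-encrypts the password,
#      persists it and sends the notice email.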
|
python
|
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report,accuracy_score,f1_score,precision_score,recall_score
from scikitplot.metrics import plot_confusion_matrix
class eval_metrics():
def __init__(self,targets,preds,classes):
try:
self.targets = targets.cpu().numpy()
self.preds = preds.cpu().numpy()
self.classes = classes
self.num_classes = len(self.classes)
except AttributeError:
self.targets = targets
self.preds = preds
self.classes = classes
self.num_classes = len(self.classes)
def plot_conf_matx(self,normalized=False):
fig, axs = plt.subplots(figsize=(16, 12))
plot_confusion_matrix(self.targets, self.preds, ax=axs,normalize=normalized)
tick_marks = np.arange(self.num_classes)
plt.xticks(tick_marks, self.classes, rotation=45)
plt.yticks(tick_marks, self.classes)
plt.savefig(os.path.join(os.getcwd(),'confusion_matrix.png'))
return fig
def accuracy(self):
return accuracy_score(self.targets,self.preds,normalize=True)
def f1_score_weighted(self):
return f1_score(self.targets,self.preds,average='weighted')
def precision_weighted(self):
return precision_score(self.targets,self.preds,average='weighted')
def recall_weighted(self):
return recall_score(self.targets,self.preds,average='weighted')
def classify_report(self):
return classification_report(self.targets,self.preds,
target_names=self.classes)
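# ----------------------------------------------------------------------------
# Hedged usage sketch (illustrative labels): the constructor accepts torch
# tensors or plain numpy arrays; here plain arrays for a tiny 3-class problem.
if __name__ == '__main__':
    example_targets = np.array([0, 1, 2, 2, 1])
    example_preds = np.array([0, 1, 2, 1, 1])
    metrics = eval_metrics(example_targets, example_preds, classes=['cat', 'dog', 'bird'])
    print(metrics.accuracy())           # fraction of correct predictions
    print(metrics.f1_score_weighted())  # weighted-average F1 over all classes
    print(metrics.classify_report())    # per-class precision/recall/F1 table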
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import itertools
from saliency_map import *
from utils import OpencvIo
class GaussianPyramidTest(unittest.TestCase):
def setUp(self):
oi = OpencvIo()
src = oi.imread('./images/fruit.jpg')
self.__gp = GaussianPyramid(src)
def test_get_intensity(self):
its = self.__gp._GaussianPyramid__get_intensity(10, 20, 30)
self.assertEqual(20, its)
self.assertNotEqual(type(1), type(its))
def test_get_colors(self):
real = self.__gp._GaussianPyramid__get_colors(0.9, 0.9, 0.9, 0.9, 10)
self.assertEqual([0.0, 0.0, 0.0, 0.0], real)
class FeatureMapTest(unittest.TestCase):
def setUp(self):
oi = OpencvIo()
src = oi.imread('./images/fruit.jpg')
gp = GaussianPyramid(src)
self.__fm = FeatureMap(gp.maps)
def test_scale_diff(self):
c, s = np.zeros((4, 6)), np.zeros((2, 3))
expect = np.ones((4, 6))
for y, x in itertools.product(xrange(len(s)), xrange(len(s[0]))):
s[y][x] = (-1) ** x
self.assertTrue(np.array_equal(expect, self.__fm._FeatureMap__scale_diff(c, s)))
def test_scale_color_diff(self):
c1, s1 = np.zeros((4, 6)), np.zeros((2, 3))
c2, s2 = np.zeros((4, 6)), np.zeros((2, 3))
expect = np.ones((4, 6))
for y, x in itertools.product(xrange(len(s1)), xrange(len(s1[0]))):
s1[y][x] = (-1) ** x
real = self.__fm._FeatureMap__scale_color_diff((c1, s1), (c2, s2))
self.assertTrue(np.array_equal(expect, real))
class ConspicuityMapTest(unittest.TestCase):
def setUp(self):
oi = OpencvIo()
src = oi.imread('./images/fruit.jpg')
gp = GaussianPyramid(src)
fm = FeatureMap(gp.maps)
self.__cm = ConspicuityMap(fm.maps)
def test_scale_add(self):
srcs = [np.ones((4, 6)), np.zeros((2, 3))]
expect = np.ones((4, 6))
self.assertTrue(np.array_equal(expect, self.__cm._ConspicuityMap__scale_add(srcs)))
class SaliencyMapTest(unittest.TestCase):
def setUp(self):
self.sm = SaliencyMap()
def suite():
suite = unittest.TestSuite()
suite.addTests(unittest.makeSuite(GaussianPyramidTest))
suite.addTests(unittest.makeSuite(FeatureMapTest))
suite.addTests(unittest.makeSuite(ConspicuityMapTest))
suite.addTests(unittest.makeSuite(SaliencyMapTest))
return suite
|
python
|
# MODULE: TypeRig / Core / Objects
# -----------------------------------------------------------
# (C) Vassil Kateliev, 2017-2020 (http://www.kateliev.com)
# (C) Karandash Type Foundry (http://www.karandash.eu)
#------------------------------------------------------------
# www.typerig.com
# No warranties. By using this you agree
# that you use it at your own risk!
__version__ = '0.26.0'
from collection import *
from cubicbezier import *
from line import *
from matrix import *
from point import *
from array import *
from transform import *
from utils import *
|
python
|
from pyomac.clustering.cluster_utils import (
ModalSet,
IndexedModalSet,
indexed_modal_sets_from_sequence,
modal_sets_from_lists,
modal_clusters,
single_set_statistics,
filter_clusters,
plot_indexed_clusters
)
|
python
|
# TextChart - Roll The Dice
import pygwidgets
from Constants import *
class TextView():
def __init__(self, window, oModel):
self.window = window
self.oModel = oModel
totalText = ['Roll total', '']
for rollTotal in range(MIN_TOTAL, MAX_TOTAL_PLUS_1):
totalText.append(rollTotal)
self.oTotalDisplay = pygwidgets.DisplayText(self.window, (200, 135), totalText,
fontSize=36, width=120, justified='right')
self.oCountDisplay = pygwidgets.DisplayText(self.window, (320, 135),
fontSize=36, width=120, justified='right')
self.oPercentDisplay = pygwidgets.DisplayText(self.window, (440, 135),
fontSize=36, width=120, justified='right')
def update(self):
nRounds, resultsDict, percentsDict = self.oModel.getRoundsRollsPercents()
countList = ['Count', ''] # extra empty string for a blank line
percentList = ['Percent', '']
for rollTotal in range(MIN_TOTAL, MAX_TOTAL_PLUS_1):
count = resultsDict[rollTotal]
percent = percentsDict[rollTotal]
countList.append(count)
# Build percent as a string with one decimal digit
percent = '{:.1%}'.format(percent)
percentList.append(percent)
self.oCountDisplay.setValue(countList)
self.oPercentDisplay.setValue(percentList)
def draw(self):
self.oTotalDisplay.draw()
self.oCountDisplay.draw()
self.oPercentDisplay.draw()
|
python
|
"""
Exercise 1
Write a function that takes a string as an argument and displays the letters
backward, one per line.
"""
def backwards(word):
x = len(word) - 1
while x >= 0:
print(word[x])
x -= 1
backwards("hello")
|
python
|
# with open('./space.text', 'w') as message:  # 'w' mode overwrites the file
#     message.write('This is the written data\n')
#     message.write('Writing another line of text\n')
#     message.write('Continuing to write\n')
with open('./space.text', 'a') as msg:  # 'a' mode appends to the file
    msg.write('Appending a line of text\n')
    msg.write('Appending one more line\n')
|
python
|
import modeli, dobi_zneske
from bottle import *
import hashlib # for computing MD5 hashes
import json,requests
import pandas as pd
secret = "to skrivnost je zelo tezko uganiti 1094107c907cw982982c42"
def get_administrator():
username = request.get_cookie('administrator', secret=secret)
return username
def get_user(auto_login = True):
"""Poglej cookie in ugotovi, kdo je prijavljeni uporabnik,
vrni njegov username in ime. Če ni prijavljen, presumeri
na stran za prijavo ali vrni None (advisno od auto_login).
"""
# Dobimo username iz piškotka
username = request.get_cookie('username', secret=secret)
# Preverimo, ali ta uporabnik obstaja
if username is not None:
r = modeli.mail(username)
if r is not None:
# uporabnik obstaja, vrnemo njegove podatke
return r
# Če pridemo do sem, uporabnik ni prijavljen, naredimo redirect, če ni administratorjevega coockie-ja
if auto_login and not get_administrator():
redirect('/prijava')
else:
return None
def password_md5(s):
"""Vrni MD5 hash danega UTF-8 niza. Gesla vedno spravimo v bazo
kodirana s to funkcijo."""
h = hashlib.md5()
h.update(s.encode('utf-8'))
return h.hexdigest()
@get('/')
def glavniMenu():
valute = modeli.seznam_valut()
data = requests.get(r'https://www.bitstamp.net/api/v2/order_book/ethbtc')
data = data.json()
bids = pd.DataFrame()
bids['quantity'] = [i[1] for i in data['bids']]
bids['price'] = [i[0] for i in data['bids']]
asks = pd.DataFrame()
asks['price'] = [i[0] for i in data['asks']]
asks['quantity'] = [i[1] for i in data['asks']]
asks.price = asks.price.apply(float)
asks.quantity = asks.quantity.apply(float)
bids.price = bids.price.apply(float)
bids.quantity = bids.quantity.apply(float)
bids_dict = {x[1]:x[0] for x in bids.itertuples(index=False)}
asks_dict = {x[0]:x[1] for x in asks.itertuples(index=False)}
bidask = dict()
bidask['asks'] = asks_dict
bidask['bids'] = bids_dict
data['asks'] = [{'price':float(i[0]), 'amount':float(i[1])} for i in data['asks']][:100]
data['bids'] = [{'price':float(i[0]), 'amount':float(i[1])} for i in data['bids']][:100]
return template('glavni.html', mail=None, geslo=None,ime=None,priimek=None, valute=valute,napaka_registriraj=None,napaka_prijava=None, orderbook=data)
@get('/static/<filename:path>')
def static(filename):
return static_file(filename, root='static')
@get('/oseba/<id_st>')
def oOsebi(id_st):
mail=get_user()
admin = get_administrator()
uporabnik = modeli.podatki(id_st)
vsota = 0
if admin or (uporabnik is not None and mail[0] == uporabnik[3]):
id, ime, priimek, mail, geslo = uporabnik
valute = modeli.seznam_valut()
lastnistvo = modeli.vsi_podatki(id_st)
for _, _ , _, nova_vrednost, kol, _ in lastnistvo:
vsota+=nova_vrednost*kol
vsota = round(vsota,2)
zasluzek = modeli.zasluzek(id)
return template('oseba.html', id=id, ime = ime, priimek=priimek, mail=mail,valute=valute,kolicina=None,lastnistvo=lastnistvo, zasluzek=zasluzek, vsota=vsota)
abort(404,"Not found: '/oseba/{0}'".format(id_st))
@post('/kupi')
def nakup():
mail = get_user()
admin = get_administrator()
id = request.forms.id
ime = request.forms.k
vrednost = request.forms.vrednost
kolicina = request.forms.kolicina
modeli.kupi_valuto(id, ime, vrednost, kolicina)
redirect('/oseba/'+str(id))
    return template('oseba.html', id=id, ime=ime, kolicina=kolicina, vrednost=vrednost, k=ime)
@post('/prodaj')
def prodaj():
mail = get_user()
admin = get_administrator()
id = request.forms.id
ime = request.forms.valut
vred = request.forms.vredn
kol = float(request.forms.kol)
kolicina = float(request.forms.kolicina)
kolicina = min(kol, kolicina)
modeli.prodaj_valuto(id, ime, kolicina, vred)
redirect('/oseba/'+str(id))
return template('oseba.html', id=id, ime = ime, kol=kol, vred=vred, kolicina=kolicina)
@get('/administrator')
def administrator():
if get_administrator():
valute = modeli.seznam_valut()
return template('administrator.html', valute=valute)
abort(404, "Not found: '/administrator'")
@get('/administrator/osebe')
def administrator_osebe():
if get_administrator():
sez = {}
rezultat = modeli.podatki_vsi()
for el in rezultat:
sez[el[0]]=modeli.zasluzek(el[0])
return template('seznam_oseb.html', rezultat=rezultat,zasluzek=sez)
abort(404,"Not found: '/administrator/osebe")
@get('/administrator/valute')
def administrator_valute():
if get_administrator():
rezultat = modeli.seznam_valut()
return template('seznam_valut.html', rezultat=rezultat)
abort(404,"Not found: '/administrator/valute")
@get('/isci')
def isci():
id_st = request.query.iskalniNiz
rezultat = modeli.podatki(id_st)
if rezultat is not None:
return template('isci.html', rezultat = rezultat)
@get('/registracija')
def glavni_r():
return template('registriraj.html', ime = None, priimek = None, mail = None, napaka_registriraj=None, geslo = None)
@post('/registracija')
def dodaj():
ime = request.forms.ime
priimek = request.forms.priimek
mail = request.forms.mail
geslo = password_md5(request.forms.geslo)
if ime and priimek and mail and geslo:
je_v_bazi = modeli.mail_v_bazi(mail)
if je_v_bazi or mail=="admin@admin":
redirect('/registracija')
return template('registriraj.html', ime=None, priimek=None, mail=None, geslo=None, napaka_registriraj = 'Uporabnik obstaja')
modeli.dodaj_osebo(ime, priimek, mail, geslo)
id_1 = modeli.id_st(mail)
response.set_cookie('username', mail, path='/', secret=secret)
redirect('/oseba/'+str(id_1))
return template('registriraj.html', ime = ime, priimek = priimek, mail = mail, geslo = geslo, napaka_registriraj=None)
#redirect('/registracija')
redirect('/#registracija')
return template('registriraj.html', ime=None, priimek=None, mail=None, geslo=None, napaka_registriraj = 'Neveljavna registracija')
@get('/oseba/<id>/spremeni')
def spremen(id):
if get_user() is not None:
return template('spremeni.html', ime = None, priimek = None, mail = get_user()[0], staro_geslo = None, geslo = None, napaka=None)
return template('spremeni.html', ime = None, priimek = None, mail = None, staro_geslo = None, geslo = None, napaka=None)
@post('/spremeni')
def spremeni():
    mail = get_user()[0]
id = modeli.id_st(mail)
ime = request.forms.ime or modeli.ime(id)
priimek = request.forms.priimek or modeli.priimek(id)
staro_geslo = request.forms.staro_geslo
geslo = password_md5(request.forms.geslo)
if password_md5(staro_geslo) == modeli.geslo(id):
modeli.spremeni_osebo(id, ime, priimek, mail, geslo)
modeli.spremeni_osebo(id, ime, priimek, mail, modeli.geslo(id))
response.set_cookie('username', mail, path='/', secret=secret)
redirect('/oseba/'+str(id))
return template('spremeni.html', ime = ime, priimek = priimek, staro_geslo = staro_geslo, mail = mail, geslo = geslo, napaka=None)
@get('/administrator/luzerji')
def luzerji():
if get_administrator():
rezultat = modeli.lozerji()
return template('loserji.html', lastnistvo=rezultat)
abort(404,"Not found: '/administrator/luzerji")
@get('/prijava')
def glavni():
return template('prijava.html', mail = None, napaka_prijava=None, geslo = None)
@post('/prijava')
def glavni_p():
mail = request.forms.mail
geslo = password_md5(request.forms.geslo)
if mail == "admin@admin" and geslo == password_md5("admin"):
response.set_cookie('administrator', mail, path='/', secret=secret)
redirect('/administrator')
return template('prijava.html', mail = mail, napaka_prijava=None, geslo = geslo)
id_s = modeli.id_st(mail)
podatki = modeli.podatki(id_s)
if podatki is not None:
_, _, _, email, psw = podatki
if email == mail and geslo == psw:
response.set_cookie('username', mail, path='/', secret=secret)
redirect('/oseba/'+str(id_s))
return template('prijava.html', mail = mail, napaka_prijava=None, geslo = geslo)
else:
redirect('/#prijava')
return template('prijava.html', mail=None, geslo=None, napaka_prijava='Neveljavna prijava')
else:
redirect('/#prijava')
return template('prijava.html', mail = None, geslo = None, napaka_prijava = 'Izpolni polja')
@get('/zapri_racun')
def odstrani_g():
return template('zapri_racun.html',mail=None,geslo=None,napaka=None)
@post('/zapri_racun')
def odstrani():
mail = request.forms.mail
geslo = password_md5(request.forms.geslo)
id = modeli.id_st(mail)
podatki = modeli.podatki(id)
if podatki is not None:
id_s, _, _, email, psw = podatki
if email == mail and geslo == psw and id==id_s:
modeli.zapri_racun(id)
redirect('/')
return template('zapri_racun.html', mail=mail, geslo=geslo,napaka=None)
redirect('/zapri_racun')
return template('zapri_racun.html', mail=mail, geslo=geslo, napaka='Nepravilno mail/geslo')
return template('zapri_racun.html', mail=None, geslo=None, napaka=None)
@post('/administrator/zapri_racun_admin')
def zapri_racun_admin():
id = request.forms.id
modeli.zapri_racun(id)
redirect('/administrator/luzerji')
@post('/administrator/zapri_racun_adm')
def zapri_racun_adm():
id = request.forms.id
modeli.zapri_racun(id)
redirect('/administrator/osebe')
@post('/administrator/zbrisi_valute')
def zbrisi_valuto():
id = request.forms.id
modeli.zbrisi_valuto(id)
redirect('/administrator/valute')
@post('/odstrani_valute')
def zbrisi_valute():
modeli.zbrisi_valute()
redirect('/administrator/valute')
@post('/zbrisi_osebe')
def zbrisi_osebe():
modeli.zbrisi_vse_osebe()
redirect('/administrator/osebe')
@get('/dodaj_valute')
def dodaj_valute():
if get_administrator():
rezultat = modeli.seznam_valut()
redirect('/administrator/valute')
return template('seznam_valut.html', rezultat=rezultat)
abort(404,"Not found: '/dodaj_valute")
@post('/dodaj_valute')
def dodaj_valute():
if get_administrator():
modeli.dodaj_valute()
rezultat = modeli.seznam_valut()
redirect('/administrator/valute')
return template('seznam_valut.html', rezultat=rezultat)
@get('/dodaj_nove_valute')
def dodaj_valute():
if get_administrator():
rezultat = modeli.seznam_valut()
redirect('/administrator/valute')
return template('seznam_valut.html', rezultat=rezultat)
abort(404,"Not found: '/dodaj_nove_valute")
@post('/dodaj_nove_valute')
def dodaj_nove_valute():
if get_administrator():
modeli.dodaj_nove_valute()
rezultat = modeli.seznam_valut()
redirect('/administrator/valute')
return template('seznam_valut.html', rezultat=rezultat)
@get('/oseba/<id>/zgodovina')
def zgodovina(id):
mail = get_user()
uporabnik = modeli.podatki(id)
if get_administrator() or uporabnik is not None and mail[0] == uporabnik[3]:
zgodovina_transakcij = modeli.vrni_zgodovino(id)
zasluzek = modeli.zasluzek(id)
return template('zgodovina.html',zasluzek=zasluzek,lastnistvo=zgodovina_transakcij)
else:
odjava()
@get('/odjavi')
def odjava():
response.delete_cookie('username')
redirect('/')
@get('/odjava')
def odjavi():
response.delete_cookie('administrator')
redirect('/')
# start the server on port 8080, see http://localhost:8080/
run(host='localhost', port=8080, debug=True, reloader=False)  # reloader causes problems when run from IDLE
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
filepath = sys.argv[1]
path, filename = os.path.split(filepath)
filename, ext = os.path.splitext(filename)
for i in os.listdir(os.getcwd()+'/'+path):
file_i, ext = os.path.splitext(i)
if i.startswith(filename+'_segmented') and ext == '.ttl':
# print 'Converting {0} to dot format'.format(file_i)
os.system('~/.virtualenvs/tc/bin/rdf2dot {2}/{0} > {2}/{1}'.format(i, file_i+'.dot', os.getcwd()+'/'+path))
|
python
|
from datetime import datetime, timedelta
import pendulum
import prefect
from prefect import task, Flow
from prefect.schedules import CronSchedule
import pandas as pd
from io import BytesIO
import zipfile
import requests
schedule = CronSchedule(
cron="*/30 * * * *",
start_date=pendulum.datetime(2021, 3, 12, 17, 00, tz='America/Sao_Paulo')
)
@task
def get_raw_data():
url = 'http://download.inep.gov.br/microdados/microdados_enem_2019.zip'
filebytes = BytesIO(
requests.get(url).content
)
logger = prefect.context.get('logger')
logger.info('Dados obtidos')
    # Extract the contents of the zip file
myzip = zipfile.ZipFile(filebytes)
myzip.extractall()
path = './DADOS/'
return path
@task
def aplica_filtros(path):
enade = pd.read_csv(path + 'MICRODADOS_ENEM_2019.csv',
sep=';', decimal=',', nrows=1000)
logger = prefect.context.get('logger')
logger.info(f'Colunas do df sao: {enade.columns}')
enade = enade.loc[
(enade.NU_IDADE > 20) &
(enade.NU_IDADE < 40) &
(enade.NT_GER > 0)
]
return enade
@task
def constroi_idade_centralizada(df):
idade = df[['NU_IDADE']]
idade['idadecent'] = idade.NU_IDADE - idade.NU_IDADE.mean()
return idade[['idadecent']]
@task
def constroi_idade_cent_quad(df):
idadecent = df.copy()
idadecent['idade2'] = idadecent.idadecent ** 2
return idadecent[['idade2']]
@task
def constroi_est_civil(df):
filtro = df[['QE_I01']]
filtro['estcivil'] = filtro.QE_I01.replace({
'A': 'Solteiro',
'B': 'Casado',
'C': 'Separado',
'D': 'Viuvo',
'E': 'Outro'
})
return filtro[['estcivil']]
@task
def constroi_cor(df):
filtro = df[['QE_I02']]
filtro['cor'] = filtro.QE_I02.replace({
'A': 'Branca',
'B': 'Preta',
'C': 'Amarela',
'D': 'Parda',
'E': 'Indigena',
'F': '',
' ': ''
})
return filtro[['cor']]
@task
def constroi_escopai(df):
    filtro = df[['QE_I04']]
filtro['escopai'] = filtro.QE_I04.replace({
'A': 0,
'B': 1,
'C': 2,
'D': 3,
'E': 4,
'F': 5
})
return filtro[['escopai']]
@task
def constroi_escomae(df):
    filtro = df[['QE_I05']]
filtro['escomae'] = filtro.QE_I05.replace({
'A': 0,
'B': 1,
'C': 2,
'D': 3,
'E': 4,
'F': 5
})
return filtro[['escomae']]
@task
def constroi_renda(df):
    filtro = df[['QE_I08']]
filtro['renda'] = filtro.QE_I08.replace({
'A': 0,
'B': 1,
'C': 2,
'D': 3,
'E': 4,
'F': 5,
'G': 6
})
return filtro[['renda']]
@task
def join_data(df, idadecent, idadequadrado, estcivil, cor,
escopai, escomae, renda):
final = pd.concat([df, idadecent, idadequadrado, estcivil, cor,
escopai, escomae, renda],
axis=1)
logger = prefect.context.get('logger')
logger.info(final.head().to_json())
final.to_csv('enade_tratato.csv', index=False)
with Flow('Enade', schedule) as flow:
path = get_raw_data()
filtro = aplica_filtros(path)
idadecent = constroi_idade_centralizada(filtro)
idadequadrado = constroi_idade_cent_quad(idadecent)
estcivil = constroi_est_civil(filtro)
cor = constroi_cor(filtro)
escomae = constroi_escomae(filtro)
escopai = constroi_escopai(filtro)
renda = constroi_renda(filtro)
j = join_data(filtro, idadecent, idadequadrado, estcivil, cor,
escomae, escopai, renda)
# prefect create project IGTI --description "Projetos do bootcamp de engenharia de dados do IGTI"
flow.register(project_name='IGTI', idempotency_key=flow.serialized_hash())
# prefect auth create-token -n my-runner-token -s RUNNER
flow.run_agent(token='dE5zGVFdfzZpNj6bTBcweg')
|
python
|
import ROOT as root
qMap_Ag_C0_V0 = root.TProfile2D("qMap_Ag_C0_V0","qMap_Ag_C0 (V0)",52,0,52,80,0,80,0,0);
qMap_Ag_C0_V0.SetBinEntries(3585,29768);
qMap_Ag_C0_V0.SetBinEntries(3586,79524);
qMap_Ag_C0_V0.SetBinEntries(3639,83953);
qMap_Ag_C0_V0.SetBinEntries(3640,124982);
qMap_Ag_C0_V0.SetBinEntries(3641,14345);
qMap_Ag_C0_V0.SetBinEntries(3693,31598);
qMap_Ag_C0_V0.SetBinEntries(3694,91098);
qMap_Ag_C0_V0.SetBinContent(3585,3245287);
qMap_Ag_C0_V0.SetBinContent(3586,1.615629e+07);
qMap_Ag_C0_V0.SetBinContent(3639,2.731302e+07);
qMap_Ag_C0_V0.SetBinContent(3640,3.14566e+08);
qMap_Ag_C0_V0.SetBinContent(3641,1444064);
qMap_Ag_C0_V0.SetBinContent(3693,3763256);
qMap_Ag_C0_V0.SetBinContent(3694,2.928397e+07);
qMap_Ag_C0_V0.SetBinError(3585,174261.7);
qMap_Ag_C0_V0.SetBinError(3586,278676.5);
qMap_Ag_C0_V0.SetBinError(3639,960499.6);
qMap_Ag_C0_V0.SetBinError(3640,4324021);
qMap_Ag_C0_V0.SetBinError(3641,12124.33);
qMap_Ag_C0_V0.SetBinError(3693,148045.1);
qMap_Ag_C0_V0.SetBinError(3694,865776);
qMap_Ag_C0_V0.SetMinimum(0);
qMap_Ag_C0_V0.SetEntries(455268);
qMap_Ag_C0_V0.SetStats(0);
qMap_Ag_C0_V0.SetContour(20);
qMap_Ag_C0_V0.SetContourLevel(0,0);
qMap_Ag_C0_V0.SetContourLevel(1,125.8445);
qMap_Ag_C0_V0.SetContourLevel(2,251.689);
qMap_Ag_C0_V0.SetContourLevel(3,377.5335);
qMap_Ag_C0_V0.SetContourLevel(4,503.3781);
qMap_Ag_C0_V0.SetContourLevel(5,629.2226);
qMap_Ag_C0_V0.SetContourLevel(6,755.0671);
qMap_Ag_C0_V0.SetContourLevel(7,880.9116);
qMap_Ag_C0_V0.SetContourLevel(8,1006.756);
qMap_Ag_C0_V0.SetContourLevel(9,1132.601);
qMap_Ag_C0_V0.SetContourLevel(10,1258.445);
qMap_Ag_C0_V0.SetContourLevel(11,1384.29);
qMap_Ag_C0_V0.SetContourLevel(12,1510.134);
qMap_Ag_C0_V0.SetContourLevel(13,1635.979);
qMap_Ag_C0_V0.SetContourLevel(14,1761.823);
qMap_Ag_C0_V0.SetContourLevel(15,1887.668);
qMap_Ag_C0_V0.SetContourLevel(16,2013.512);
qMap_Ag_C0_V0.SetContourLevel(17,2139.357);
qMap_Ag_C0_V0.SetContourLevel(18,2265.201);
qMap_Ag_C0_V0.SetContourLevel(19,2391.046);
ci = root.TColor.GetColor("#000099");
qMap_Ag_C0_V0.SetLineColor(ci);
qMap_Ag_C0_V0.GetXaxis().SetTitle("col");
qMap_Ag_C0_V0.GetXaxis().SetRange(17,29);
qMap_Ag_C0_V0.GetXaxis().SetNdivisions(508);
qMap_Ag_C0_V0.GetXaxis().SetLabelFont(42);
qMap_Ag_C0_V0.GetXaxis().SetLabelSize(0.05);
qMap_Ag_C0_V0.GetXaxis().SetTitleSize(0.05);
qMap_Ag_C0_V0.GetXaxis().SetTitleOffset(1.1);
qMap_Ag_C0_V0.GetXaxis().SetTitleFont(42);
qMap_Ag_C0_V0.GetYaxis().SetTitle("row");
qMap_Ag_C0_V0.GetYaxis().SetRange(55,76);
qMap_Ag_C0_V0.GetYaxis().SetLabelFont(42);
qMap_Ag_C0_V0.GetYaxis().SetLabelSize(0.05);
qMap_Ag_C0_V0.GetYaxis().SetTitleSize(0.05);
qMap_Ag_C0_V0.GetYaxis().SetTitleOffset(1.1);
qMap_Ag_C0_V0.GetYaxis().SetTitleFont(42);
qMap_Ag_C0_V0.GetZaxis().SetLabelFont(42);
qMap_Ag_C0_V0.GetZaxis().SetLabelSize(0.035);
qMap_Ag_C0_V0.GetZaxis().SetTitleSize(0.035);
qMap_Ag_C0_V0.GetZaxis().SetTitleFont(42);
|
python
|
from . import start_watcher
def main():
start_watcher()
|
python
|
from .market import Market, TradeException
import time
import hmac
import urllib.parse
import urllib.request
import requests
import hashlib
import config
import database
from datetime import datetime
class PrivateBter(Market):
url = "https://bter.com/api/1/private/"
def __init__(self):
super().__init__()
self.key = config.bter_key
self.secret = config.bter_secret
self.min_tx_volume = 0.001
try:
self.get_balances()
except Exception:
self.s_coin_balance = 0
self.p_coin_balance = 0
    def query(self, method, req=None):
        # generate POST data string (use a None default to avoid sharing a mutable dict)
        if req is None:
            req = {}
        req["nonce"] = int(time.time())
post_data = urllib.parse.urlencode(req)
# sign it
sign = hmac.new(self.secret.encode("ascii"), post_data.encode("ascii"), hashlib.sha512).hexdigest()
# extra headers for request
headers = {"Sign": sign, "Key": self.key}
full_url = self.url + method
try:
res = requests.post(full_url, data=req, headers=headers)
except Exception as e:
raise Exception("Error sending request to %s - %s" % (self.name, e))
try:
value = res.json()
except Exception as e:
raise Exception("Unable to decode response from %s - %s" %
(self.name, e))
return value
def get_open_orders(self):
# Might not be necessary
response = self.query('orderlist', {})
if not response["result"]:
raise TradeException(response["msg"])
return response
def _buy(self, amount, price):
"""Create a buy limit order"""
currency_pair = self.p_coin.lower() + "_" + self.s_coin.lower()
req = {"pair": currency_pair, "type": "BUY", "rate": price, "amount": amount}
response = self.query("placeorder", req)
if not response["result"]:
raise TradeException(response["msg"])
order_id = response['order_id']
# Check open order list to see if the most recent open order matches this order:
# match by price and primary coin type. If we find it output the real order id,
# otherwise return the dummy order id returned from the order request.
time.sleep(10)
open_orders = self.get_open_orders()['orders']
open_orders.sort(key=lambda x: x['id'], reverse=True)
if open_orders and float(open_orders[0]['sell_amount']) == (price * 1000) and \
open_orders[0]['buy_type'] == self.p_coin:
order_id = open_orders[0]['id']
return order_id
def _sell(self, amount, price):
"""Create a sell limit order"""
currency_pair = self.p_coin.lower() + "_" + self.s_coin.lower()
req = {"pair": currency_pair, "type": "SELL", "rate": price, "amount": amount}
response = self.query("placeorder", req)
if not response["result"]:
raise TradeException(response["msg"])
order_id = response['order_id']
# Check open order list to see if the most recent open order matches this order:
# match by price and primary coin type. If we find it output the real order id,
# otherwise return the dummy order id returned from the order request.
time.sleep(10)
open_orders = self.get_open_orders()['orders']
open_orders.sort(key=lambda x: x['id'], reverse=True)
if open_orders and float(open_orders[0]['buy_amount']) == (price * 1000) and \
open_orders[0]['sell_type'] == self.p_coin:
order_id = open_orders[0]['id']
return order_id
def update_order_status(self):
if not self.open_orders:
return
response = self.query('orderlist')
remaining_open_orders = []
completed_order_ids = []
for open_order in self.open_orders:
found_order = [found_order for found_order in response['orders'] if
found_order['id'] == open_order['order_id']]
if not found_order:
completed_order_ids.append(open_order['order_id'])
else:
remaining_open_orders.append(open_order)
if completed_order_ids:
self.open_orders = remaining_open_orders
database.order_completed(self.name, completed_order_ids)
def get_balances(self):
"""Get balance of primary coin and secondary coin"""
try:
res = self.query("getfunds")
if self.p_coin in res["available_funds"]:
self.p_coin_balance = float(res["available_funds"][self.p_coin])
else:
self.p_coin_balance = 0
if self.s_coin in res["available_funds"]:
self.s_coin_balance = float(res["available_funds"][self.s_coin])
else:
self.s_coin_balance = 0
except Exception:
raise Exception("Error getting balance")
|
python
|
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.launcher_util import run_experiment
import os
from rlkit.misc.asset_loader import sync_down
def experiment(variant):
from rlkit.core import logger
demo_path = sync_down(variant['demo_path'])
off_policy_path = sync_down(variant['off_policy_path'])
logdir = logger.get_snapshot_dir()
os.system('python -m BEAR.main' +
' --demo_data='+demo_path+
' --off_policy_data='+off_policy_path+
' --eval_freq='+variant['eval_freq']+
' --algo_name='+variant['algo_name']+
' --env_name='+variant['env_name']+
' --log_dir='+logdir+
' --lagrange_thresh='+variant['lagrange_thresh']+
' --distance_type='+variant['distance_type']+
' --mode='+variant['mode']+
' --num_samples_match='+variant['num_samples_match']+
' --lamda='+variant['lambda_']+
' --version='+variant['version']+
' --mmd_sigma='+variant['mmd_sigma']+
' --kernel_type='+variant['kernel_type']+
' --use_ensemble_variance='+variant['use_ensemble_variance'])
if __name__ == "__main__":
variant = dict(
demo_path='demos/ant_action_noise_15.npy',
off_policy_path='demos/ant_off_policy_15_demos_100.npy',
eval_freq='1000',
algo_name='BEAR',
env_name='Ant-v2',
lagrange_thresh='10.0',
distance_type='MMD',
mode='auto',
num_samples_match='5',
lambda_='0.0',
version='0.0',
mmd_sigma='10.0',
kernel_type='laplacian',
use_ensemble_variance='"False"',
)
search_space = {
'mmd_sigma':['10.0', '20.0'],
'num_samples_match':['5', '10', '20'],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
n_seeds = 1
mode = 'local'
exp_name = 'test'
# n_seeds = 1
# mode = 'ec2'
# exp_name = 'ant_bear_sweep_v1'
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
for _ in range(n_seeds):
run_experiment(
experiment,
exp_name=exp_name,
mode=mode,
unpack_variant=False,
variant=variant,
num_exps_per_instance=1,
use_gpu=False,
gcp_kwargs=dict(
preemptible=False,
),
)
|
python
|
# Generated by Django 3.2.7 on 2021-09-27 02:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='user_type',
field=models.CharField(choices=[('user', 'User'), ('admin', 'Admin')], default='user', max_length=200),
),
]
|
python
|
# encoding: utf-8
"""
route.py
Created by Thomas Mangin on 2015-06-22.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
from exabgp.protocol.family import SAFI
from exabgp.bgp.message.update.nlri.qualifier import RouteDistinguisher
from exabgp.configuration.core import Section
from exabgp.configuration.flow.match import ParseFlowMatch
from exabgp.configuration.flow.then import ParseFlowThen
from exabgp.configuration.flow.scope import ParseFlowScope
from exabgp.configuration.static.mpls import route_distinguisher
from exabgp.configuration.flow.parser import flow
from exabgp.configuration.flow.parser import next_hop
class ParseFlowRoute(Section):
syntax = (
'route give-me-a-name {\n'
' (optional) rd 255.255.255.255:65535|65535:65536|65536:65535;\n'
' next-hop 1.2.3.4; (to use with redirect-to-nexthop)\n'
' %s\n'
' %s\n'
' %s\n'
'}\n'
% (
'\n '.join(ParseFlowMatch.syntax.split('\n')),
'\n '.join(ParseFlowScope.syntax.split('\n')),
'\n '.join(ParseFlowThen.syntax.split('\n')),
)
)
known = {
'rd': route_distinguisher,
'route-distinguisher': route_distinguisher,
'next-hop': next_hop,
}
action = {
'rd': 'nlri-set',
'route-distinguisher': 'nlri-set',
'next-hop': 'nlri-nexthop',
}
assign = {
'rd': 'rd',
'route-distinguisher': 'rd',
}
name = 'flow/route'
def __init__(self, tokeniser, scope, error, logger):
Section.__init__(self, tokeniser, scope, error, logger)
def clear(self):
pass
def pre(self):
self.scope.append_route(flow(None))
return True
def post(self):
route = self.scope.get_route()
if route.nlri.rd is not RouteDistinguisher.NORD:
route.nlri.safi = SAFI.flow_vpn
return True
def _check(self, change):
self.logger.debug('warning: no check on flows are implemented', 'configuration')
return True
|
python
|
'''This file provides editor completions while working on DFHack using ycmd:
https://github.com/Valloric/ycmd
'''
# pylint: disable=import-error,invalid-name,missing-docstring,unused-argument
import os
import ycm_core
def DirectoryOfThisScript():
return os.path.dirname(os.path.abspath(__file__))
# We need to tell YouCompleteMe how to compile this project. We do this using
# clang's "Compilation Database" system, which essentially just dumps a big
# json file into the build folder.
# More details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# We don't use clang, but luckily CMake supports generating a database on its
# own, using:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
for potential_build_folder in ['build', 'build-osx']:
if os.path.exists(DirectoryOfThisScript() + os.path.sep + potential_build_folder
+ os.path.sep + 'compile_commands.json'):
database = ycm_core.CompilationDatabase(potential_build_folder)
break
else:
raise RuntimeError("Can't find dfhack build folder: not one of build, build-osx")
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
if not working_directory:
return list(flags)
new_flags = []
make_next_absolute = False
path_flags = ['-isystem', '-I', '-iquote', '--sysroot=']
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith('/'):
new_flag = os.path.join(working_directory, flag)
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith(path_flag):
path = flag[len(path_flag):]
new_flag = path_flag + os.path.join(working_directory, path)
break
if new_flag:
new_flags.append(new_flag)
return new_flags
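# Illustration with hypothetical values: calling
#   MakeRelativePathsInFlagsAbsolute(['-I', 'library/include'], '/home/user/dfhack/build')
# returns ['-I', '/home/user/dfhack/build/library/include'], since the path following
# '-I' is joined onto the working directory.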
def IsHeaderFile(filename):
extension = os.path.splitext(filename)[1]
return extension in ['.h', '.hxx', '.hpp', '.hh']
SOURCE_EXTENSIONS = ['.cpp', '.cxx', '.cc', '.c', '.m', '.mm']
def PotentialAlternatives(header):
dirname, filename = os.path.split(header)
basename, _ = os.path.splitext(filename)
source_dirs = [dirname]
if dirname.endswith(os.path.sep + 'include'):
# if we're in a folder 'include', also look in its parent
parent = os.path.abspath(os.path.join(dirname, os.path.pardir))
source_dirs.append(parent)
# and ../src (used by lua dependency)
source_dirs.append(os.path.join(parent, 'src'))
include_idx = dirname.rfind(os.path.sep + 'include' + os.path.sep)
if include_idx != -1:
# we're in a subfolder of a parent '/include/'
# .../include/subdir/path
# look in .../subdir/path
source_dirs.append(
dirname[:include_idx] +
os.path.sep +
dirname[include_idx + len('include') + 2*len(os.path.sep):]
)
for source_dir in source_dirs:
for ext in SOURCE_EXTENSIONS:
yield source_dir + os.path.sep + basename + ext
def GetCompilationInfoForFile(filename):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile(filename):
for alternative in PotentialAlternatives(filename):
if os.path.exists(alternative):
compilation_info = database.GetCompilationInfoForFile(
alternative
)
if compilation_info.compiler_flags_:
return compilation_info
return None
else:
return database.GetCompilationInfoForFile(filename)
def FlagsForFile(filename, **kwargs):
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile(filename)
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_
)
return {
'flags': final_flags,
'do_cache': True
}
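# Minimal manual check (illustration only; normally ycmd itself calls FlagsForFile).
# Pass a source file path on the command line and print the flags that would be used.
if __name__ == '__main__':
    import sys
    print(FlagsForFile(sys.argv[1]))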
|
python
|
from twisted.trial import unittest
from signing.persistence import Persistence
class PersistenceTests(unittest.TestCase):
def setUp(self):
self.persistence = Persistence()
def test_set_get(self):
d = self.persistence.set('somekey', 'somefield', 'somevalue')
d.addCallback(lambda _: self.persistence.get('somekey', 'somefield'))
return d.addCallback(self.assertEquals, 'somevalue')
def test_update(self):
d = self.persistence.set('updatekey', 'updatefield', 'firstvalue')
d.addCallback(lambda _: self.persistence.set('updatekey', 'updatefield', 'secondvalue'))
d.addCallback(lambda _: self.persistence.get('updatekey', 'updatefield'))
return d.addCallback(self.assertEquals, 'secondvalue')
def test_delete(self):
d = self.persistence.set('deletekey', 'deletefield', 'somevalue')
d.addCallback(lambda _: self.persistence.delete('deletekey', 'deletefield'))
d.addCallback(lambda _: self.persistence.get('deletekey', 'deletefield'))
return d.addCallback(self.assertEquals, None)
def test_getAll(self):
d = self.persistence.set('getallkey', 'getallfield', 'getallvalue')
d.addCallback(lambda _: self.persistence.set('getallkey', 'getallfield2', 'getallvalue2'))
d.addCallback(lambda _: self.persistence.get_all('getallkey'))
return d.addCallback(lambda result: self.assertTrue('getallfield' in result and 'getallfield2' in result))
def test_deleteAll(self):
d = self.persistence.set('deleteall_key', 'deleteall_field', 'deleteall_value')
d.addCallback(lambda _: self.persistence.delete('deleteall_key'))
d.addCallback(lambda _: self.persistence.get_all('deleteall_key'))
return d.addCallback(self.assertEquals, [])
|
python
|
""" Module storing the `~halotools.sim_manager.CachedHaloCatalog`,
the class responsible for retrieving halo catalogs from shorthand
keyword inputs such as ``simname`` and ``redshift``.
"""
import os
from warnings import warn
from copy import deepcopy
import numpy as np
from astropy.table import Table
from ..utils.python_string_comparisons import _passively_decode_string, compare_strings_py23_safe
try:
import h5py
_HAS_H5PY = True
except ImportError:
_HAS_H5PY = False
warn("Most of the functionality of the "
"sim_manager sub-package requires h5py to be installed,\n"
"which can be accomplished either with pip or conda. ")
from ..sim_manager import sim_defaults, supported_sims
from ..utils import broadcast_host_halo_property, add_halo_hostid
from .halo_table_cache import HaloTableCache
from .ptcl_table_cache import PtclTableCache
from .halo_table_cache_log_entry import get_redshift_string
from ..custom_exceptions import HalotoolsError, InvalidCacheLogEntry
__all__ = ('CachedHaloCatalog', )
class CachedHaloCatalog(object):
"""
Container class for the halo catalogs and particle data
that are stored in the Halotools cache log.
`CachedHaloCatalog` is used to retrieve halo catalogs
from shorthand keyword inputs such as
``simname``, ``halo_finder`` and ``redshift``.
The halos are stored in the ``halo_table`` attribute
in the form of an Astropy `~astropy.table.Table`.
If available, another `~astropy.table.Table` storing
a random downsampling of dark matter particles
is stored in the ``ptcl_table`` attribute.
See the Examples section below for details on how to
access and manipulate this data.
For a list of available snapshots provided by Halotools,
see :ref:`supported_sim_list`.
For information about the subhalo vs. host halo nomenclature
conventions used throughout Halotools, see :ref:`rockstar_subhalo_nomenclature`.
For a thorough discussion of the meaning of each column in the Rockstar halo catalogs,
see the appendix of `Rodriguez Puebla et al 2016 <http://arxiv.org/abs/1602.04813>`_.
"""
acceptable_kwargs = ('ptcl_version_name', 'fname', 'simname',
'halo_finder', 'redshift', 'version_name', 'dz_tol', 'update_cached_fname',
'preload_halo_table')
def __init__(self, *args, **kwargs):
"""
Parameters
------------
simname : string, optional
Nickname of the simulation used as a shorthand way to keep track
of the halo catalogs in your cache.
The simnames of the Halotools-provided catalogs are
'bolshoi', 'bolplanck', 'consuelo' and 'multidark'.
Default is set by the ``default_simname`` variable in the
`~halotools.sim_manager.sim_defaults` module.
halo_finder : string, optional
Nickname of the halo-finder used to generate the hlist file from particle data.
Default is set by the ``default_halo_finder`` variable in the
`~halotools.sim_manager.sim_defaults` module.
redshift : float, optional
Redshift of the halo catalog.
Default is set by the ``default_redshift`` variable in the
`~halotools.sim_manager.sim_defaults` module.
version_name : string, optional
Nickname of the version of the halo catalog.
Default is set by the ``default_version_name`` variable in the
`~halotools.sim_manager.sim_defaults` module.
ptcl_version_name : string, optional
            Nickname of the version of the particle catalog associated with
the halos.
This argument is typically only used if you have cached your own
particles via the `~halotools.sim_manager.UserSuppliedPtclCatalog` class.
Default is set by the ``default_version_name`` variable in the
`~halotools.sim_manager.sim_defaults` module.
fname : string, optional
Absolute path to the location on disk storing the hdf5 file
of halo data. If passing ``fname``, do not pass the metadata keys
``simname``, ``halo_finder``, ``version_name`` or ``redshift``.
update_cached_fname : bool, optional
If the hdf5 file storing the halos has been relocated to a new
disk location after storing the data in cache,
the ``update_cached_fname`` input can be used together with the
``fname`` input to update the cache log with the new disk location.
See :ref:`relocating_simulation_data_instructions` for
further instructions.
dz_tol : float, optional
            Tolerance within which to search for a catalog with a matching redshift.
Halo catalogs in cache with a redshift that differs by greater
than ``dz_tol`` will be ignored. Default is 0.05.
Examples
---------
If you followed the instructions in the
:ref:`download_default_halos` section of the :ref:`getting_started` guide,
then you can load the default halo catalog into memory by calling the
`~halotools.sim_manager.CachedHaloCatalog` with no arguments:
>>> halocat = CachedHaloCatalog() # doctest: +SKIP
The halos are stored in the ``halo_table`` attribute
in the form of an Astropy `~astropy.table.Table`.
>>> halos = halocat.halo_table # doctest: +SKIP
As with any Astropy `~astropy.table.Table`, the properties of the
halos can be accessed in the same manner as a Numpy structured array
or python dictionary:
>>> array_of_masses = halocat.halo_table['halo_mvir'] # doctest: +SKIP
>>> x_positions = halocat.halo_table['halo_x'] # doctest: +SKIP
Note that all keys of a cached halo catalog begin with the substring
``halo_``. This is a bookkeeping device used to help
the internals of Halotools differentiate
between halo properties and the properties of mock galaxies
populated into the halos with ambiguously similar names.
The ``simname``, ``halo_finder``, ``version_name`` and ``redshift``
keyword arguments fully specify the halo catalog that will be loaded.
Omitting any of them will select the corresponding default value
set in the `~halotools.sim_manager.sim_defaults` module.
>>> halocat = CachedHaloCatalog(redshift = 1, simname = 'multidark') # doctest: +SKIP
If you forget which catalogs you have stored in cache,
you have two options for how to remind yourself.
First, you can use the `~halotools.sim_manager.HaloTableCache` class:
>>> from halotools.sim_manager import HaloTableCache
>>> cache = HaloTableCache()
>>> for entry in cache.log: print(entry) # doctest: +SKIP
Alternatively, you can simply use a text editor to open the cache log,
which is stored as ASCII data in the following location on your machine:
$HOME/.astropy/cache/halotools/halo_table_cache_log.txt
See also
----------
:ref:`halo_catalog_analysis_quickstart`
:ref:`halo_catalog_analysis_tutorial`
"""
self._verify_acceptable_constructor_call(*args, **kwargs)
assert _HAS_H5PY, "Must have h5py package installed to use CachedHaloCatalog objects"
try:
dz_tol = kwargs['dz_tol']
except KeyError:
dz_tol = 0.05
self._dz_tol = dz_tol
try:
update_cached_fname = kwargs['update_cached_fname']
except KeyError:
update_cached_fname = False
self._update_cached_fname = update_cached_fname
self.halo_table_cache = HaloTableCache()
self._disallow_catalogs_with_known_bugs(**kwargs)
self.log_entry = self._determine_cache_log_entry(**kwargs)
self.simname = self.log_entry.simname
self.halo_finder = self.log_entry.halo_finder
self.version_name = self.log_entry.version_name
self.redshift = self.log_entry.redshift
self.fname = self.log_entry.fname
self._bind_additional_metadata()
try:
preload_halo_table = kwargs['preload_halo_table']
except KeyError:
preload_halo_table = False
if preload_halo_table is True:
_ = self.halo_table
del _
self._set_publication_list(self.simname)
def _set_publication_list(self, simname):
try:
simclass = supported_sims.supported_sim_dict[simname]
simobj = simclass()
self.publications = simobj.publications
except (KeyError, AttributeError):
self.publications = []
def _verify_acceptable_constructor_call(self, *args, **kwargs):
"""
"""
try:
assert len(args) == 0
except AssertionError:
msg = ("\nCachedHaloCatalog only accepts keyword arguments, not position arguments. \n")
raise HalotoolsError(msg)
for key in list(kwargs.keys()):
try:
assert key in self.acceptable_kwargs
except AssertionError:
msg = ("\nCachedHaloCatalog got an unexpected keyword ``" + key + "``\n"
"The only acceptable keywords are listed below:\n\n")
for acceptable_key in self.acceptable_kwargs:
msg += "``" + acceptable_key + "``\n"
raise HalotoolsError(msg)
def _determine_cache_log_entry(self, **kwargs):
"""
"""
try:
self.ptcl_version_name = kwargs['ptcl_version_name']
self._default_ptcl_version_name_choice = False
except KeyError:
self.ptcl_version_name = sim_defaults.default_ptcl_version_name
self._default_ptcl_version_name_choice = True
if 'fname' in kwargs:
fname = kwargs['fname']
if not os.path.isfile(fname):
msg = ("\nThe ``fname`` you passed to the CachedHaloCatalog "
"constructor is a non-existent path.\n")
raise HalotoolsError(msg)
try:
assert 'simname' not in kwargs
except AssertionError:
msg = ("\nIf you specify an input ``fname``, "
"do not also specify ``simname``.\n")
raise HalotoolsError(msg)
try:
assert 'halo_finder' not in kwargs
except AssertionError:
msg = ("\nIf you specify an input ``fname``, "
"do not also specify ``halo_finder``.\n")
raise HalotoolsError(msg)
try:
assert 'redshift' not in kwargs
except AssertionError:
msg = ("\nIf you specify an input ``fname``, "
"do not also specify ``redshift``.\n")
raise HalotoolsError(msg)
try:
assert 'version_name' not in kwargs
except AssertionError:
msg = ("\nIf you specify an input ``fname``, "
"do not also specify ``version_name``.\n")
raise HalotoolsError(msg)
return self._retrieve_matching_log_entry_from_fname(fname)
else:
try:
simname = str(kwargs['simname'])
self._default_simname_choice = False
except KeyError:
simname = sim_defaults.default_simname
self._default_simname_choice = True
try:
halo_finder = str(kwargs['halo_finder'])
self._default_halo_finder_choice = False
except KeyError:
halo_finder = sim_defaults.default_halo_finder
self._default_halo_finder_choice = True
try:
version_name = str(kwargs['version_name'])
self._default_version_name_choice = False
except KeyError:
version_name = sim_defaults.default_version_name
self._default_version_name_choice = True
try:
redshift = float(kwargs['redshift'])
self._default_redshift_choice = False
except KeyError:
redshift = sim_defaults.default_redshift
self._default_redshift_choice = True
return self._retrieve_matching_log_entry_from_metadata(
simname, halo_finder, version_name, redshift)
def _retrieve_matching_log_entry_from_fname(self, fname):
"""
"""
log_entry = self.halo_table_cache.determine_log_entry_from_fname(fname,
overwrite_fname_metadata=False)
if not compare_strings_py23_safe(log_entry.fname, fname):
if self._update_cached_fname is True:
old_fname = deepcopy(log_entry.fname)
log_entry = (
self.halo_table_cache.determine_log_entry_from_fname(fname,
overwrite_fname_metadata=self._update_cached_fname)
)
self.halo_table_cache.update_cached_file_location(
fname, old_fname)
else:
msg = ("\nThe ``fname`` you passed as an input to the "
"CachedHaloCatalog class \ndoes not match the ``fname`` "
"stored as metadata in the hdf5 file.\n"
"This means that at some point you manually relocated the catalog on disk \n"
"after storing its location in cache, "
"but you did not yet update the Halotools cache log. \n"
"When possible, try to keep your halo catalogs "
"at a fixed disk location \n"
"as this helps ensure reproducibility. \n"
"If the ``fname`` you passed to CachedHaloCatalog is the "
"new location you want to store the catalog, \n"
"then you can update the cache by calling the CachedHaloCatalog \n"
"constructor again and setting the ``update_cached_fname`` variable to True.\n")
raise HalotoolsError(msg)
return log_entry
def _retrieve_matching_ptcl_cache_log_entry(self):
"""
"""
ptcl_table_cache = PtclTableCache()
if len(ptcl_table_cache.log) == 0:
msg = ("\nThe Halotools cache log has no record of any particle catalogs.\n"
"If you have never used Halotools before, "
"you should read the Getting Started guide on halotools.readthedocs.io.\n"
"If you have previously used the package before, \n"
"try running the halotools/scripts/rebuild_ptcl_table_cache_log.py script.\n")
raise HalotoolsError(msg)
gen0 = ptcl_table_cache.matching_log_entry_generator(
simname=self.simname, version_name=self.ptcl_version_name,
redshift=self.redshift, dz_tol=self._dz_tol)
gen1 = ptcl_table_cache.matching_log_entry_generator(
simname=self.simname, version_name=self.ptcl_version_name)
gen2 = ptcl_table_cache.matching_log_entry_generator(simname=self.simname)
matching_entries = list(gen0)
msg = ("\nYou tried to load a cached particle catalog "
"with the following characteristics:\n\n")
if self._default_simname_choice is True:
msg += ("simname = ``" + str(self.simname) +
"`` (set by sim_defaults.default_simname)\n")
else:
msg += "simname = ``" + str(self.simname) + "``\n"
if self._default_ptcl_version_name_choice is True:
msg += ("ptcl_version_name = ``" + str(self.ptcl_version_name) +
"`` (set by sim_defaults.default_version_name)\n")
else:
msg += "ptcl_version_name = ``" + str(self.ptcl_version_name) + "``\n"
if self._default_redshift_choice is True:
msg += ("redshift = ``" + str(self.redshift) +
"`` (set by sim_defaults.default_redshift)\n")
else:
msg += "redshift = ``" + str(self.redshift) + "``\n"
msg += ("\nThere is no matching catalog in cache "
"within dz_tol = "+str(self._dz_tol)+" of these inputs.\n"
)
if len(matching_entries) == 0:
suggestion_preamble = ("\nThe following entries in the cache log "
"most closely match your inputs:\n\n")
alt_list1 = list(gen1) # discard the redshift requirement
if len(alt_list1) > 0:
msg += suggestion_preamble
for entry in alt_list1:
msg += str(entry) + "\n\n"
else:
alt_list2 = list(gen2) # discard the version_name requirement
if len(alt_list2) > 0:
msg += suggestion_preamble
for entry in alt_list2:
msg += str(entry) + "\n\n"
else:
msg += "There are no simulations matching your input simname.\n"
raise InvalidCacheLogEntry(msg)
elif len(matching_entries) == 1:
log_entry = matching_entries[0]
return log_entry
else:
msg += ("There are multiple entries in the cache log \n"
"within dz_tol = "+str(self._dz_tol)+" of your inputs. \n"
"Try using the exact redshift and/or decreasing dz_tol.\n"
"Now printing the matching entries:\n\n")
for entry in matching_entries:
msg += str(entry) + "\n"
raise InvalidCacheLogEntry(msg)
def _retrieve_matching_log_entry_from_metadata(self,
simname, halo_finder, version_name, redshift):
"""
"""
if len(self.halo_table_cache.log) == 0:
msg = ("\nThe Halotools cache log is empty.\n"
"If you have never used Halotools before, "
"you should read the Getting Started guide on halotools.readthedocs.io.\n"
"If you have previously used the package before, \n"
"try running the halotools/scripts/rebuild_halo_table_cache_log.py script.\n")
raise HalotoolsError(msg)
gen0 = self.halo_table_cache.matching_log_entry_generator(
simname=simname, halo_finder=halo_finder,
version_name=version_name, redshift=redshift,
dz_tol=self._dz_tol)
gen1 = self.halo_table_cache.matching_log_entry_generator(
simname=simname,
halo_finder=halo_finder, version_name=version_name)
gen2 = self.halo_table_cache.matching_log_entry_generator(
simname=simname, halo_finder=halo_finder)
gen3 = self.halo_table_cache.matching_log_entry_generator(
simname=simname)
matching_entries = list(gen0)
msg = ("\nYou tried to load a cached halo catalog "
"with the following characteristics:\n\n")
if self._default_simname_choice is True:
msg += ("simname = ``" + str(simname) +
"`` (set by sim_defaults.default_simname)\n")
else:
msg += "simname = ``" + str(simname) + "``\n"
if self._default_halo_finder_choice is True:
msg += ("halo_finder = ``" + str(halo_finder) +
"`` (set by sim_defaults.default_halo_finder)\n")
else:
msg += "halo_finder = ``" + str(halo_finder) + "``\n"
if self._default_version_name_choice is True:
msg += ("version_name = ``" + str(version_name) +
"`` (set by sim_defaults.default_version_name)\n")
else:
msg += "version_name = ``" + str(version_name) + "``\n"
if self._default_redshift_choice is True:
msg += ("redshift = ``" + str(redshift) +
"`` (set by sim_defaults.default_redshift)\n")
else:
msg += "redshift = ``" + str(redshift) + "``\n"
msg += ("\nThere is no matching catalog in cache "
"within dz_tol = "+str(self._dz_tol)+" of these inputs.\n"
)
if len(matching_entries) == 0:
suggestion_preamble = ("\nThe following entries in the cache log "
"most closely match your inputs:\n\n")
alt_list1 = list(gen1) # discard the redshift requirement
if len(alt_list1) > 0:
msg += suggestion_preamble
for entry in alt_list1:
msg += str(entry) + "\n\n"
else:
alt_list2 = list(gen2) # discard the version_name requirement
if len(alt_list2) > 0:
msg += suggestion_preamble
for entry in alt_list2:
msg += str(entry) + "\n\n"
else:
alt_list3 = list(gen3) # discard the halo_finder requirement
if len(alt_list3) > 0:
msg += suggestion_preamble
for entry in alt_list3:
msg += str(entry) + "\n\n"
else:
msg += "There are no simulations matching your input simname.\n"
raise InvalidCacheLogEntry(msg)
elif len(matching_entries) == 1:
log_entry = matching_entries[0]
return log_entry
else:
msg += ("There are multiple entries in the cache log \n"
"within dz_tol = "+str(self._dz_tol)+" of your inputs. \n"
"Try using the exact redshift and/or decreasing dz_tol.\n"
"Now printing the matching entries:\n\n")
for entry in matching_entries:
msg += str(entry) + "\n"
raise InvalidCacheLogEntry(msg)
@property
def halo_table(self):
"""
Astropy `~astropy.table.Table` object storing a catalog of dark matter halos.
You can access the array storing, say, halo virial mass using the following syntax:
>>> halocat = CachedHaloCatalog() # doctest: +SKIP
>>> mass_array = halocat.halo_table['halo_mvir'] # doctest: +SKIP
To see what halo properties are available in the catalog:
>>> print(halocat.halo_table.keys()) # doctest: +SKIP
"""
try:
return self._halo_table
except AttributeError:
if self.log_entry.safe_for_cache is True:
self._halo_table = Table.read(_passively_decode_string(self.fname), path='data')
self._add_new_derived_columns(self._halo_table)
return self._halo_table
else:
raise InvalidCacheLogEntry(self.log_entry._cache_safety_message)
def _add_new_derived_columns(self, t):
if 'halo_hostid' not in list(t.keys()):
add_halo_hostid(t)
if 'halo_mvir_host_halo' not in list(t.keys()):
broadcast_host_halo_property(t, 'halo_mvir')
def _bind_additional_metadata(self):
""" Create convenience bindings of all metadata to the `CachedHaloCatalog` instance.
"""
if not os.path.isfile(self.log_entry.fname):
msg = ("The following input fname does not exist: \n\n" +
self.log_entry.fname + "\n\n")
raise InvalidCacheLogEntry(msg)
f = h5py.File(self.log_entry.fname, 'r')
for attr_key in list(f.attrs.keys()):
if attr_key == 'redshift':
setattr(self, attr_key, float(get_redshift_string(f.attrs[attr_key])))
elif attr_key == 'Lbox':
self.Lbox = np.empty(3)
self.Lbox[:] = f.attrs['Lbox']
else:
setattr(self, attr_key, f.attrs[attr_key])
f.close()
matching_sim = self._retrieve_supported_sim()
if matching_sim is not None:
for attr in matching_sim._attrlist:
if hasattr(self, attr):
try:
a = _passively_decode_string(getattr(self, attr))
b = _passively_decode_string(getattr(matching_sim, attr))
assert np.all(a == b)
except AssertionError:
msg = ("The ``" + attr + "`` metadata of the hdf5 file \n"
"is inconsistent with the corresponding attribute of the \n" +
matching_sim.__class__.__name__ + " class in the "
"sim_manager.supported_sims module.\n"
"Double-check the value of this attribute in the \n"
"NbodySimulation sub-class you added to the supported_sims module. \n"
)
raise HalotoolsError(msg)
else:
setattr(self, attr, getattr(matching_sim, attr))
def _retrieve_supported_sim(self):
"""
"""
matching_sim = None
for clname in supported_sims.__all__:
try:
cl = getattr(supported_sims, clname)
obj = cl()
if isinstance(obj, supported_sims.NbodySimulation):
if compare_strings_py23_safe(self.simname, obj.simname):
matching_sim = obj
except TypeError:
pass
return matching_sim
@property
def ptcl_table(self):
"""
Astropy `~astropy.table.Table` object storing
a collection of ~1e6 randomly selected dark matter particles.
"""
try:
return self._ptcl_table
except AttributeError:
try:
ptcl_log_entry = self.ptcl_log_entry
except AttributeError:
self.ptcl_log_entry = (
self._retrieve_matching_ptcl_cache_log_entry()
)
ptcl_log_entry = self.ptcl_log_entry
if ptcl_log_entry.safe_for_cache is True:
self._ptcl_table = Table.read(_passively_decode_string(ptcl_log_entry.fname), path='data')
return self._ptcl_table
else:
raise InvalidCacheLogEntry(ptcl_log_entry._cache_safety_message)
def _disallow_catalogs_with_known_bugs(self, simname=sim_defaults.default_simname,
version_name=sim_defaults.default_version_name, **kwargs):
"""
"""
if (simname == 'bolplanck') and ('halotools_alpha_version' in version_name):
msg = ("The ``{0}`` version of the ``{1}`` simulation \n"
"is known to be spatially incomplete and should not be used.\n"
"See https://github.com/astropy/halotools/issues/598.\n"
"You can either download the original ASCII data and process it yourself, \n"
"or use version_name = ``halotools_v0p4`` instead.\n")
raise HalotoolsError(msg.format(version_name, simname))
|
python
|
import unittest, os
import cuisine
USER = os.popen("whoami").read()[:-1]
class Text(unittest.TestCase):
def testEnsureLine( self ):
some_text = "foo"
some_text = cuisine.text_ensure_line(some_text, "bar")
assert some_text == 'foo\nbar'
some_text = cuisine.text_ensure_line(some_text, "bar")
assert some_text == 'foo\nbar'
class Users(unittest.TestCase):
def testUserCheck( self ):
user_data = cuisine.user_check(USER)
print "USER_DATA", user_data
class Files(unittest.TestCase):
def testB( self ):
cuisine.file_read("/etc/passwd")
def testC( self ):
pass
class Packages(unittest.TestCase):
def testInstall( self ):
pass
#with cuisine.mode_sudo():
# cuisine.package_ensure("tmux")
class SSHKeys(unittest.TestCase):
def testKeygen( self ):
if cuisine.ssh_keygen(USER):
print "SSH keys already there"
else:
print "SSH keys created"
def testAuthorize( self ):
key = "ssh-dss XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX= user@cuisine"""
cuisine.ssh_authorize(USER, key)
# FIXME: Should check that the key is present, and only one
if __name__ == "__main__":
# We bypass fabric as we want the tests to be run locally
cuisine.mode_local()
unittest.main()
# EOF
|
python
|
import math
import numpy as np
from utils.functions.math.coordinate_trans import coordinate_transformation_in_angle
def circle_make(center_x, center_y, radius):
'''
Create circle matrix(2D)
Parameters
-------
center_x : float in meters
the center position of the circle coordinate x
center_y : float in meters
the center position of the circle coordinate y
radius : float in meters
Returns
-------
circle x : numpy.ndarray
circle y : numpy.ndarray
'''
point_num = 100
circle_xs = []
circle_ys = []
for i in range(point_num + 1):
circle_xs.append(center_x + radius * math.cos(i*2*math.pi/point_num))
circle_ys.append(center_y + radius * math.sin(i*2*math.pi/point_num))
return np.array(circle_xs), np.array(circle_ys)
def circle_make_with_angles(center_x, center_y, radius, angle):
'''
Create circle matrix with angle line matrix(2D)
Parameters
-------
center_x : float in meters
the center position of the circle coordinate x
center_y : float in meters
the center position of the circle coordinate y
radius : float in meters
angle : float in radians
Returns
-------
circle xs : numpy.ndarray
circle ys : numpy.ndarray
angle line xs : numpy.ndarray
angle line ys : numpy.ndarray
'''
point_num = 100
circle_xs = []
circle_ys = []
for i in range(point_num + 1):
circle_xs.append(center_x + radius * math.cos(i*2*math.pi/point_num))
circle_ys.append(center_y + radius * math.sin(i*2*math.pi/point_num))
angle_line_xs = [center_x, center_x + math.cos(angle) * radius]
angle_line_ys = [center_y, center_y + math.sin(angle) * radius]
return np.array(circle_xs), np.array(circle_ys), np.array(angle_line_xs), np.array(angle_line_ys)
def square_make_with_angles(center_x, center_y, size, angle):
'''
Create square matrix with angle line matrix(2D)
Parameters
-------
center_x : float in meters
the center x position of the square
center_y : float in meters
the center y position of the square
size : float in meters
the square's half-size
angle : float in radians
Returns
-------
square xs : numpy.ndarray
        length is 5 (counterclockwise from right-up)
square ys : numpy.ndarray
length is 5 (counterclockwise from right-up)
angle line xs : numpy.ndarray
angle line ys : numpy.ndarray
'''
    # start with the upper-right point
    # create the points counterclockwise
square_xys = np.array([[size, size], [-size, size], [-size, -size], [size, -size], [size, size]])
trans_points = coordinate_transformation_in_angle(square_xys.T, -angle) # this is inverse type
trans_points += np.array([[center_x], [center_y]])
square_xs = trans_points[0, :]
square_ys = trans_points[1, :]
angle_line_xs = [center_x, center_x + math.cos(angle) * size]
angle_line_ys = [center_y, center_y + math.sin(angle) * size]
return square_xs, square_ys, np.array(angle_line_xs), np.array(angle_line_ys)
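# A minimal usage sketch (not part of the original module): draw one circle and one
# rotated square with its heading line. Assumes matplotlib is installed; the center,
# radius, size and angle values are illustrative.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    circle_xs, circle_ys = circle_make(0.0, 0.0, 1.0)
    sq_xs, sq_ys, line_xs, line_ys = square_make_with_angles(2.0, 0.0, 0.5, math.pi / 4)
    plt.plot(circle_xs, circle_ys)
    plt.plot(sq_xs, sq_ys)
    plt.plot(line_xs, line_ys)
    plt.axis('equal')
    plt.show()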
|