content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M)
---|---
def voigt_peak_err(peak, A, dA, alphaD, dalphaD):
"""
Gives the error on the peak of the Voigt profile.
It assumes no correlation between the parameters and that they are
normally distributed.
:param peak: Peak of the Voigt profile.
:type peak: array
:param A: Area under the Voigt profile.
:type A: array
:param dA: Error on the area `A`.
:type dA: array
:param alphaD: HWHM of the Gaussian core.
:type alphaD: array
:param dalphaD: Error on the HWHM `alphaD`.
:type dalphaD: array
:return: Error on the peak of the Voigt profile.
"""
dpeak = abs(peak)*np.sqrt(np.power(dalphaD/alphaD, 2.) + np.power(dA/A, 2.))
return dpeak
| 5,353,900 |
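# Hedged usage sketch (not part of the dataset record above): assuming numpy is
# available and voigt_peak_err is importable together with its numpy import,
# this checks the quadrature error-propagation formula
# dpeak = |peak| * sqrt((dalphaD/alphaD)**2 + (dA/A)**2) on toy values where
# both relative errors are 10%.
import numpy as np

def _voigt_peak_err_demo():
    peak = np.array([10.0, 20.0])
    A, dA = np.array([2.0, 4.0]), np.array([0.2, 0.4])            # 10% relative error
    alphaD, dalphaD = np.array([1.0, 2.0]), np.array([0.1, 0.2])  # 10% relative error
    dpeak = voigt_peak_err(peak, A, dA, alphaD, dalphaD)
    # combined relative error is sqrt(0.01 + 0.01), about 14.1% of the peak
    assert np.allclose(dpeak, np.abs(peak) * np.sqrt(0.02))
    return dpeak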
def zip_recursive(destination, source_dir, rootfiles):
"""
Recursively zips source_dir into destination.
rootfiles should contain a list of files in the top level directory that
are to be included. Any top level files not in rootfiles will be omitted
from the zip file.
"""
zipped = zipfile.ZipFile(destination, 'w', zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(source_dir):
inRoot = False
if root == source_dir:
inRoot = True
if inRoot:
# prune in place, but iterate over a copy: removing entries from dirs while
# iterating over it skips elements and leaves unwanted directories in the zip
for d in list(dirs):
if d not in rootfiles:
dirs.remove(d)
for f in files[:]:
if inRoot:
try:
rootfiles.index(f)
except ValueError:
continue
fullpath = os.path.join(root, f)
zipped.write(fullpath)
zipped.close()
return destination
| 5,353,901 |
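# Hedged usage sketch (not part of the dataset record above): assuming
# zip_recursive is importable together with its os/zipfile imports, only the
# top-level entries listed in rootfiles end up in the archive; other top-level
# files and directories are skipped, while kept directories are zipped in full.
import os
import tempfile
import zipfile

def _zip_recursive_demo():
    src = tempfile.mkdtemp()
    os.makedirs(os.path.join(src, "keepdir"))
    os.makedirs(os.path.join(src, "skipdir"))
    for name in ("keep.txt", "skip.txt", os.path.join("keepdir", "inner.txt")):
        with open(os.path.join(src, name), "w") as fh:
            fh.write("demo")
    dest = os.path.join(tempfile.mkdtemp(), "out.zip")
    zip_recursive(dest, src, rootfiles=["keep.txt", "keepdir"])
    with zipfile.ZipFile(dest) as zf:
        names = zf.namelist()
    # keep.txt and keepdir/inner.txt are present; skip.txt and skipdir/ are not
    return names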
def discord_api_call(method: str, params: typing.Dict, func, data, token: str) -> typing.Any:
""" Calls Discord API. """
# This code is from my other repo -> https://github.com/kirillzhosul/python-discord-token-grabber
# Calling.
return func(
f"https://discord.com/api/{method}",
params=params,
headers={
"Authorization": f"{token}",
"Content-Type": "application/json"
},
data=data
)
| 5,353,902 |
def get_startup(config: Config) -> Startup:
"""Extracts and validates startup parameters from the application config
file for the active profile
"""
db_init_schema = config.extract_config_value(
('postgres', 'startup', 'init_schema'),
lambda x: x is not None and isinstance(x, bool),
lambda x: x,
'bool'
)
db_wipe_schema = config.extract_config_value(
('postgres', 'startup', 'wipe_schema'),
lambda x: x is not None and isinstance(x, bool),
lambda x: x,
'bool'
)
if db_wipe_schema and not db_init_schema:
logging.getLogger(__name__).warning(
"Configuration is set to wipe database schema, but not"
" re-initialize it afterward: despite configuration, schema will be"
" re-initialized"
)
db_init_schema = True
return Startup(
init_schema=db_init_schema,
wipe_schema=db_wipe_schema
)
| 5,353,903 |
def lambda_handler(event, context):
"""Call all scrapers."""
if 'country' in event:
country = event["country"]
if country == "poland-en":
poland_scraper.scrape_poland_en(event)
elif country == "poland-pl":
poland_scraper.scrape_poland_pl(event)
elif country == "poland-ua":
poland_scraper.scrape_poland_ua(event)
elif country == "hungary-hu":
hungary_scraper.scrape(event)
elif country == "moldova-ro":
moldova_scraper.scrape(event)
elif country == "romania-ro":
romania_scraper.scrape(event)
else:
for scraper in [
poland_scraper,
hungary_scraper,
moldova_scraper,
romania_scraper
]:
try:
scraper.scrape(event)
except Exception:
logging.exception('An error was encountered during scraping.')
| 5,353,904 |
def new_request(request):
"""Implements view that allows users to create new requests"""
user = request.user
if user.user_type == 'ADM':
return redirect('/admin')
if request.method == "POST":
request_type = request.POST.get('requestType')
if request_type == 'SC' and user.user_object.type == 'PR':
schedule = request.POST.getlist('schedule')
start_time = request.POST.get('start_time')
# Create schedule model
monday_start = tuesday_start = wednesday_start = None
thursday_start = friday_start = saturday_start = sunday_start = None
for day in schedule:
if day == 'MO':
monday_start = start_time
elif day == 'TU':
tuesday_start = start_time
elif day == 'WE':
wednesday_start = start_time
elif day == 'TH':
thursday_start = start_time
elif day == 'FR':
friday_start = start_time
elif day == 'SA':
saturday_start = start_time
elif day == 'SU':
sunday_start = start_time
schedule_model = Schedule.objects.get_or_create(monday_start=monday_start,
tuesday_start=tuesday_start,
wednesday_start=wednesday_start,
thursday_start=thursday_start,
friday_start=friday_start,
saturday_start=saturday_start,
sunday_start=sunday_start)[0]
request_change = None
else:
schedule_model = None
request_change = request.POST.get('request_change')
request = Request.objects.get_or_create(user_id=user, schedule_id=schedule_model,
request_change=request_change,
current_request_review_id=None,
request_type=request_type)[0]
request_review = RequestReview.objects.get_or_create(request_id=request,
status='P')[0]
request.current_request_review_id = request_review
request_review.save()
request.save()
# create new notification
notification = Notification.objects.get_or_create(notification_type='R', is_dismissed=False,
request=request)[0]
notification.save()
# sending emails for this request:
email_vendor.email_admin_new_request(request)
email_vendor.email_user_new_request(request)
return redirect('/requests')
else:
# GET Request
return render(request, 'applications/request_new.html')
| 5,353,905 |
def get_slice_test(eval_kwargs, test_kwargs, test_dataloader, robustness_testing_datasets):
"""
Args:
eval_kwargs (dict): evaluation settings merged into the dataloader kwargs
test_kwargs (dict): test settings; an optional 'sampler' entry is reused
test_dataloader (Dataloader): callable used to build the slice-test dataloader
robustness_testing_datasets (dict): robustness datasets, may contain a 'slice' entry
Returns:
the slice-test dataloader, or None if no 'slice' dataset is present
"""
slice_test = None
if 'slice' in robustness_testing_datasets:
slice_kwargs = {'dataset': robustness_testing_datasets['slice']}
if 'sampler' in test_kwargs:
slice_kwargs['sampler'] = test_kwargs['sampler']
slice_kwargs.update(eval_kwargs)
slice_test = test_dataloader(**slice_kwargs)
return slice_test
| 5,353,906 |
def transform_config(cfg, split_1='search:', split_2='known_papers:'):
"""Ugly function to make cfg.yml less ugly."""
before_search, after_search = cfg.split(split_1, 1)
search_default, papers_default = after_search.split(split_2, 1)
search, paper_comment = '', ''
for line in search_default.splitlines():
line = line.strip()
if line:
if line.startswith('-'):
search += ' '
elif line.startswith('# List of paper ids'):
paper_comment = line
continue
search += ' ' + line + '\n'
ok = papers_default
if '-' in papers_default:
ok = ' ['
for line in papers_default.splitlines():
line = line.strip()
if '-' in line:
ok += line.split('- ')[1] + ', '
ok = ok[:-2] + ']'
return f"{before_search}{split_1}\n{search}{paper_comment}\n{split_2}{ok}"
| 5,353,907 |
def filter_coords(raw_lasso, filter_mtx):
"""Filter the raw data corresponding to the new coordinates."""
filter_mtx_use = filter_mtx.copy()
filter_mtx_use["y"] = filter_mtx_use.index
lasso_data = pd.melt(filter_mtx_use, id_vars=["y"], var_name="x", value_name="MIDCounts")
lasso_data = lasso_data[lasso_data["MIDCounts"] != 0][["x", "y"]]
new_lasso = pd.merge(raw_lasso, lasso_data, on=["x", "y"], how="inner")
return new_lasso
| 5,353,908 |
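# Hedged toy example (not part of the dataset record above): assuming pandas is
# imported as pd and filter_coords is importable, a filter matrix whose columns
# are x coordinates and whose index holds y coordinates is melted into (x, y)
# pairs, and only spots with a non-zero filter value survive the inner merge.
import pandas as pd

def _filter_coords_demo():
    raw_lasso = pd.DataFrame({"x": [0, 0, 1, 1], "y": [0, 1, 0, 1],
                              "MIDCounts": [5, 6, 7, 8]})
    # rows indexed by y, columns named by x; zeros mark dropped spots
    filter_mtx = pd.DataFrame([[1, 0], [0, 1]], index=[0, 1], columns=[0, 1])
    filter_mtx.columns.name = "x"
    kept = filter_coords(raw_lasso, filter_mtx)
    # only (x=0, y=0) and (x=1, y=1) remain
    return kept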
def compute_threshold(predictions_list, dev_labels, f1=True):
"""
Determine the best threshold to use for classification.
Inputs:
predictions_list: prediction found by running the model
dev_labels: ground truth label to be compared with predictions_list
f1: True if using F1 score, False if using accuracy score
Returns:
best_threshold: threshold that yields the best F1 or accuracy score
"""
predictions_list = predictions_list.reshape(-1, 1)
dev_labels = dev_labels.reshape(-1, 1)
both = np.column_stack((predictions_list, dev_labels))
both = both[both[:, 0].argsort()]
predictions_list = both[:, 0].ravel()
dev_labels = both[:, 1].ravel()
accuracies = np.zeros(np.shape(predictions_list))
for i in range(np.shape(predictions_list)[0]):
score = predictions_list[i]
predictions = (predictions_list >= score) * 2 - 1
accuracy = accuracy_score(predictions, dev_labels)
if f1:
accuracy = f1_score(dev_labels, predictions)
accuracies[i] = accuracy
indices = np.argmax(accuracies)
best_threshold = np.mean(predictions_list[indices])
return best_threshold
| 5,353,909 |
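# Hedged usage sketch (not part of the dataset record above): assumes numpy and
# sklearn.metrics (accuracy_score / f1_score) are available to compute_threshold,
# and that labels are coded as +1 / -1. The returned threshold should separate
# the two toy score clusters.
import numpy as np

def _compute_threshold_demo():
    predictions_list = np.array([0.1, 0.2, 0.3, 0.7, 0.8, 0.9])
    dev_labels = np.array([-1, -1, -1, 1, 1, 1])
    best = compute_threshold(predictions_list, dev_labels, f1=False)
    # any score in (0.3, 0.7] classifies the toy data perfectly
    assert 0.3 < best <= 0.7
    return best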
def color2position(C, min=None, max=None):
"""
Converts the input color set into a points set
Parameters
----------
C : Tensor
the input color tensor
min : float (optional)
the minimum value for the points set. If None it will be set to -1 (default is None)
max : float (optional)
the maximum value for the points set. If None it will be set to +1 (default is None)
Returns
-------
Tensor
the points set tensor
"""
if min is None:
min = -1
if max is None:
max = 1
return torch.add(torch.mul(C, max-min), min)
| 5,353,910 |
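# Hedged toy example (not part of the dataset record above): assuming torch is
# available and color2position is importable, colors in [0, 1] are mapped
# linearly onto positions in [min, max] (default [-1, 1]): p = C * (max - min) + min.
import torch

def _color2position_demo():
    C = torch.tensor([0.0, 0.5, 1.0])
    p = color2position(C)
    assert torch.allclose(p, torch.tensor([-1.0, 0.0, 1.0]))
    return p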
def is_empty_parsed_graph(graph):
"""
Checks if the graph parsed from a web page contains only the "empty" statement emitted when no RDF was embedded in the page,
namely (<subjectURI>, <http://www.w3.org/ns/md#item>, <http://www.w3.org/1999/02/22-rdf-syntax-ns#nil>)
:param graph: an rdflib.Graph
:return: True if graph contains no "real" RDF, False otherwise
"""
if len(graph) > 1:
return False
for po in graph.predicate_objects(None):
if po == (URIRef(u'http://www.w3.org/ns/md#item'),
URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#nil')):
return True
return False
| 5,353,911 |
def fillinNaN(var,neighbors):
"""
Replace masked areas by iteratively filling values from unmasked neighboring cells
"""
for ii in range(var.shape[0]):
a = var[ii,:,:]
count = 0
while np.any(a.mask):
a_copy = a.copy()
for hor_shift,vert_shift in neighbors:
if not np.any(a.mask): break
a_shifted=np.roll(a_copy,shift=hor_shift,axis=1)
a_shifted=np.roll(a_shifted,shift=vert_shift,axis=0)
idx=~a_shifted.mask*a.mask
#print count, idx[idx==True].shape
a[idx]=a_shifted[idx]
count+=1
var[ii,:,:] = a
return var
| 5,353,912 |
def test_cache_complex():
"""
Test caching on a complicated expression with multiple symbols appearing
multiple times.
"""
expr = x ** 2 + (y - sy.exp(x)) * sy.sin(z - x * y)
symbol_names = {s.name for s in expr.free_symbols}
expr_t = aesara_code_(expr)
# Iterate through variables in the Aesara computational graph that the
# printed expression depends on
seen = set()
for v in aesara.graph.basic.ancestors([expr_t]):
# Owner-less, non-constant variables should be our symbols
if v.owner is None and not isinstance(v, aesara.graph.basic.Constant):
# Check it corresponds to a symbol and appears only once
assert v.name in symbol_names
assert v.name not in seen
seen.add(v.name)
# Check all were present
assert seen == symbol_names
| 5,353,913 |
def dtw_randomize(f_data, f_score, outpath, numrand=100):
""" Computes Dynamic Time Warping value between
(1) ChIP-seq enrichment - output of absolute_change_from_cutsite()
(2) insulation scores - output of wigvals_from_cutsite()
Permutation of insulation scores to match with ChIP-seq enrichment is performed after the
first iteration (# of iterations specified by numrand variable)
:param f_data: path to ChIP-seq enrichment output from absolute_change_from_cutsite()
:param f_score: path to insulation score output from wigvals_from_cutsite()
:param outpath: path to output file, extension omitted; function will add "_dtw-rand.csv"
:param numrand: (int >= 1) number of iterations to perform, where the first one has no
permutation of insulation score, while the remaining ones do.
output: csv file with first two columns recording chromosome and coordinate of cut site, then
subsequent columns contain values from dynamic time warping. Iterations after the first
one have the insulation scores permuted. Rows correspond to cut sites.
"""
data, score = m.load_nparray(f_data), m.load_nparray(f_score)
header = "chr, coord, " + ', '.join(['iter_%i' % n for n in range(numrand)])
dtwscore = []
datanone = data == 'None'
data, score = data[~datanone.any(axis=1)], score[~datanone.any(axis=1)] # remove rows with None
for n in range(numrand):
if n > 0:
np.random.shuffle(score)
if n % 10 == 0:
print("dtw_randomize(): Processing %i / %i." % (n, numrand))
for i, (data_i, score_i) in enumerate(zip(data, score)):
dtw_matrix = dtw(data_i[2:].astype(float), score_i[2:].astype(float))
if n == 0:
dtwscore.append([data_i[0], data_i[1], dtw_matrix.flat[-1]])
else:
dtwscore[i].append(dtw_matrix.flat[-1])
np.savetxt(outpath + "_dtw-rand.csv", np.asarray(dtwscore), fmt='%s', delimiter=',',
header=header)
| 5,353,914 |
def attach_driver(context):
"""
Attach a webdriver to the behave context
Uses behave configuration to select the driver
:param context: behave context
"""
context.base_url = context.config.userdata.get('server.url')
driver_name = context.config.userdata.get('browser.driver', DRIVER_CHROME)
headless = context.config.userdata.get('browser.headless', 'False')
if headless == 'True':
size = [int(s) for s in context.config.userdata.get('browser.size', '1920,1080').split(',')]
from pyvirtualdisplay import Display
display = Display(visible=0, size=(size[0], size[1]))
display.start()
context.display = display
context.uses_protractor = True
context.driver = _driver_from_config(driver_name)
context.driver.maximize_window()
| 5,353,915 |
def parse_plist_from_bytes(data):
"""
Convert a binary encoded plist to a dictionary.
:param data: plist data
:return: dictionary
"""
try:
from plistlib import loads, FMT_BINARY
return loads(data, fmt=FMT_BINARY)
except ImportError:
from bplistlib import loads
return loads(data, binary=True)
| 5,353,916 |
def istd(arrays, axis=-1, ddof=0, weights=None, ignore_nan=False):
"""
Streaming standard deviation of arrays. Weights are also supported.
This is equivalent to calling `numpy.std(axis = 2)` on a stack of images.
Parameters
----------
arrays : iterable of ndarrays
Arrays to be combined. This iterable can also be a generator.
axis : int, optional
Reduction axis. Default is to combine the arrays in the stream as if
they had been stacked along a new axis, then compute the standard deviation along this new axis.
If None, arrays are flattened. If `axis` is an int larger than
the number of dimensions in the arrays of the stream, standard deviation is computed
along the new axis.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
weights : iterable of ndarray, iterable of floats, or None, optional
Iterable of weights associated with the values in each item of `arrays`.
Each value in an element of `arrays` contributes to the standard deviation
according to its associated weight. The weights array can either be a float
or an array of the same shape as any element of `arrays`. If weights=None,
then all data in each element of `arrays` are assumed to have a weight equal to one.
ignore_nan : bool, optional
If True, NaNs are set to zero weight. Default is propagation of NaNs.
Yields
------
std: `~numpy.ndarray`
Standard deviation
See Also
--------
std : total standard deviation.
numpy.std : standard deviation calculation of dense arrays. Weights are not supported.
"""
yield from map(
np.sqrt,
ivar(
arrays=arrays, axis=axis, ddof=ddof, weights=weights, ignore_nan=ignore_nan
),
)
| 5,353,917 |
def bench_dot(lhs_row_dim, lhs_col_dim, rhs_col_dim, density,
rhs_density, dot_func, trans_lhs, lhs_stype,
rhs_stype, only_storage, distribution="uniform"):
""" Benchmarking both storage and dot
"""
lhs_nd = rand_ndarray((lhs_row_dim, lhs_col_dim), lhs_stype, density, distribution=distribution)
if not only_storage:
rhs_nd = rand_ndarray((lhs_col_dim, rhs_col_dim), rhs_stype,
density=rhs_density, distribution=distribution)
out = dot_func(lhs_nd, rhs_nd, trans_lhs)
mx.nd.waitall()
| 5,353,918 |
def projection_standardizer(emb):
"""Returns an affine transformation to translate an embedding to the centroid
of the given set of points."""
return Affine.translation(*(-emb.mean(axis=0)[:2]))
| 5,353,919 |
def raman_normalize(database_name):
"""Raman normaization - element-wise division of the eem spectra by area under the ramam peak.
See reference Murphy et al. "Measurement of Dissolved Organic Matter Fluorescence in Aquatic
Environments: An Interlaboratory Comparison" 2010 Environmental Science and Technology.
Args:
database_name (str): filename for hdf5 database
Note- 'Raman_Area' column is required in the metadata to use this function.
Returns:
no return - raman normalized eems are stored in h5 database under key 'eems_ru'
"""
from pandas import read_hdf
#load EEMs for normalization
try:
with h5py.File(database_name, 'r') as f:
eems = f['eems'][:]
except OSError:
raise OSError(database_name + ' not found - please run `pyeem.init_h5_database` and `pyeem.load_eems` first')
return
except KeyError:
raise KeyError('eem data not found - please run `pyeem.load_eems` first')
return
#load values for raman normalization
try:
#load raman area from the metadata stored in the h5 database as np.array
raman_area = np.array(read_hdf(database_name, 'meta')['Raman_Area'])
except KeyError:
raise KeyError('Raman_Area not found. This must be included in the meta data to use this function')
return
#test if function has already run (dataset 'eems_ru' should not exist)
with h5py.File(database_name, 'r') as f:
try:
test = f['eems_ru'][:]
raise Exception('`raman_normalize` function has already run on this dataset')
except KeyError:
pass
#initialize storage for normalized eems
eems_ru = np.zeros(eems.shape)
for i in tqdm(range(eems.shape[0])):
#separate eems into excitation and emission wavelengths and fluorescence values
ex = eems[i,0,1:]
em = eems[i,1:,0]
fl = eems[i,1:,1:]
#raman normalize
eems_ru[i, 1:, 1:] = eems[i, 1:, 1:] / raman_area[i]
#add excitation and emission values to new dataset
eems_ru[i,0,1:] = ex
eems_ru[i,1:,0] = em
update_eem_database(database_name, {'eems_ru': eems_ru,
'eems': eems_ru})
return
| 5,353,920 |
def forward_imputation(X_features, X_time):
"""
Fill X_features missing values with values, which are the same as its last measurement.
:param X_features: time series features for all samples
:param X_time: times, when observations were measured
:return: X_features, filled with last measurements instead of zeros (missing observations)
"""
time_length = [np.where(times == 0)[0][1] if np.where(times == 0)[0][0] == 0 else np.where(times == 0)[0][0] for times in X_time]
# impute times series features
for i, sample in enumerate(X_features):
for j, ts in enumerate(sample.T): # note the transposed matrix
first_observation = True
current_value = -1
for k, observation in enumerate(ts[:time_length[i]]):
if X_features[i, k, j] == 0 and first_observation:
continue
elif X_features[i, k, j] != 0:
current_value = X_features[i, k, j]
first_observation = False
elif X_features[i, k, j] == 0 and not first_observation:
X_features[i, k, j] = current_value
return X_features
| 5,353,921 |
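# Hedged toy example (not part of the dataset record above): assuming numpy is
# available and forward_imputation is importable, a single sample with one
# feature is observed at the first four time steps (the trailing zeros in
# X_time mark padding). Zeros inside the observed window are carried forward
# from the last non-zero measurement; the padding itself is left untouched.
import numpy as np

def _forward_imputation_demo():
    X_features = np.array([[[5.0], [0.0], [0.0], [7.0], [0.0], [0.0]]])  # (samples, time, features)
    X_time = np.array([[1.0, 2.0, 3.0, 4.0, 0.0, 0.0]])                  # zeros mark padding
    filled = forward_imputation(X_features.copy(), X_time)
    assert filled[0, :, 0].tolist() == [5.0, 5.0, 5.0, 7.0, 0.0, 0.0]
    return filled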
def calc_mlevel(ctxstr, cgmap, gtftree, pmtsize=1000):
"""
Compute the mean methylation level of promoter/gene/exon/intron/IGN in each gene
"""
inv_ctxs = {'X': 'CG', 'Y': 'CHG', 'Z': 'CHH'}
ign = defaultdict(list)
mtable = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
counter = defaultdict(lambda: defaultdict(int))
for chr in set(ctxstr) & set(cgmap) & set(gtftree):
mask = [1]*len(cgmap[chr])
for (gene_id, strand) in gtftree[chr]:
feature_mlevels = defaultdict(lambda: defaultdict(list))
gstart = min(gtftree[chr][(gene_id, strand)])[0]
gend = max(gtftree[chr][(gene_id, strand)])[1]
mask[gstart:gend] = [0]*(gend - gstart)
if strand == '+':
for (pos, (tag, mlevel)) in enumerate(izip(ctxstr[chr][gstart-pmtsize:gstart], cgmap[chr][gstart-pmtsize:gstart])):
tag = tag.upper()
if tag in inv_ctxs and mlevel != '-':
feature_mlevels[inv_ctxs[tag]]['pmt'].append(mlevel)
elif strand == '-':
for (pos, (tag, mlevel)) in enumerate(izip(ctxstr[chr][gend:gend+pmtsize], cgmap[chr][gend:gend+pmtsize])):
tag = tag.upper()
if tag in inv_ctxs and mlevel != '-':
feature_mlevels[inv_ctxs[tag]]['pmt'].append(mlevel)
for (pos, (tag, mlevel)) in enumerate(izip(ctxstr[chr][gstart:gend], cgmap[chr][gstart:gend])):
tag = tag.upper()
inexon = False
if tag in inv_ctxs and mlevel != '-':
feature_mlevels[inv_ctxs[tag]]['gene'].append(mlevel)
for exon in gtftree[chr][(gene_id, strand)]:
if exon[0] <= pos+gstart < exon[1]:
feature_mlevels[inv_ctxs[tag]]['exon'].append(mlevel)
inexon = True
break
if not inexon:
feature_mlevels[inv_ctxs[tag]]['intron'].append(mlevel)
for ctx in ['CG', 'CHG', 'CHH']:
for feature in feature_mlevels[ctx]:
counter[ctx][feature] += len(feature_mlevels[ctx][feature])
mtable[ctx][gene_id][feature] = np.mean(feature_mlevels[ctx][feature])
for (pos, (tag, mlevel)) in enumerate(izip(ctxstr[chr], cgmap[chr])):
tag = tag.upper()
if (tag in inv_ctxs) and (mask[pos] == 1) and (mlevel != '-'):
ign[inv_ctxs[tag]].append(mlevel)
for ctx in ign:
ign[ctx] = np.mean(ign[ctx])
cg_table = pd.DataFrame(mtable['CG']).T
cg_table = cg_table[['pmt', 'gene', 'exon', 'intron']]
chg_table = pd.DataFrame(mtable['CHG']).T
chg_table = chg_table[['pmt', 'gene', 'exon', 'intron']]
chh_table = pd.DataFrame(mtable['CHH']).T
chh_table = chh_table[['pmt', 'gene', 'exon', 'intron']]
return ign, cg_table, chg_table, chh_table
| 5,353,922 |
def _get_service(plugin):
"""
Return a service (ie an instance of a plugin class).
:param plugin: any of: the name of a plugin entry point; a plugin class; an
instantiated plugin object.
:return: the service object
"""
if isinstance(plugin, basestring):
try:
(plugin,) = iter_entry_points(
group=PLUGINS_ENTRY_POINT_GROUP,
name=plugin
)
except ValueError:
raise PluginNotFoundException(plugin)
return plugin.load()()
elif isinstance(plugin, _pca_Plugin):
return plugin
elif isclass(plugin) and issubclass(plugin, _pca_Plugin):
return plugin()
else:
raise TypeError("Expected a plugin name, class or instance", plugin)
| 5,353,923 |
def taiut1(tai1, tai2, dta):
"""
Wrapper for ERFA function ``eraTaiut1``.
Parameters
----------
tai1 : double array
tai2 : double array
dta : double array
Returns
-------
ut11 : double array
ut12 : double array
Notes
-----
The ERFA documentation is below.
- - - - - - - - - -
e r a T a i u t 1
- - - - - - - - - -
Time scale transformation: International Atomic Time, TAI, to
Universal Time, UT1.
Given:
tai1,tai2 double TAI as a 2-part Julian Date
dta double UT1-TAI in seconds
Returned:
ut11,ut12 double UT1 as a 2-part Julian Date
Returned (function value):
int status: 0 = OK
Notes:
1) tai1+tai2 is Julian Date, apportioned in any convenient way
between the two arguments, for example where tai1 is the Julian
Day Number and tai2 is the fraction of a day. The returned
UT11,UT12 follow suit.
2) The argument dta, i.e. UT1-TAI, is an observed quantity, and is
available from IERS tabulations.
Reference:
Explanatory Supplement to the Astronomical Almanac,
P. Kenneth Seidelmann (ed), University Science Books (1992)
Copyright (C) 2013-2017, NumFOCUS Foundation.
Derived, with permission, from the SOFA library. See notes at end of file.
"""
ut11, ut12, c_retval = ufunc.taiut1(tai1, tai2, dta)
check_errwarn(c_retval, 'taiut1')
return ut11, ut12
| 5,353,924 |
def store():
"""
Stores the data in the db
:Note: This clears everything from local memory and can't be retrieved without retrieve()
:return: None
"""
global is_inio
global data
global lbels
global cols
while True:
if not is_inio:
break
is_inio = True
with open(os.path.join(path, c_file), "a") as d:
for i in data:
for x in i:
d.write(str(x) + "\n")
data = []
with open(os.path.join(path, cd_file), "w") as f:
for i in lbels:
f.write(str(i) + "\n")
lbels = []
cols = None
is_inio = False
| 5,353,925 |
def fetch_ppn(ppn):
"""
"""
from SPARQLWrapper import SPARQLWrapper, JSON
ENDPOINT_URL = 'http://openvirtuoso.kbresearch.nl/sparql'
sparql = SPARQLWrapper(ENDPOINT_URL)
sqlquery = """
SELECT ?collatie WHERE {{
kbc:{ppn} dcterms:extent ?formaat, ?collatie .
FILTER (?formaat != ?collatie ) .
FILTER regex(?formaat, "^[0-9]{{1,2}}°", "i") .
}}
""".format(ppn=ppn)
sparql.setQuery(sqlquery)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
# {'head': {'link': [], 'vars': ['collatie']}, 'results': {'bindings': [{'collatie': {'value': '*`SUP`8`LO` A-S`SUP`8`LO` (S8 blank)', 'type': 'literal'}}], 'distinct': False, 'ordered': True}}
result = results['results']['bindings'][0]['collatie']['value']
return result
| 5,353,926 |
def prepare_link_title(
item: feedparser.FeedParserDict) -> feedparser.FeedParserDict:
"""
For an RSS item, returns the link, title and description
:param item:
:return:
"""
result = None
if item:
assert item.title, 'Not found title in item'
assert item.link, 'Not found link in item'
link = item.link.replace('https://www.google.com/url?rct=j&sa=t&url=',
'')
ge_ind = link.find('&ct=ga')
if ge_ind > -1:
link = link[0:ge_ind]
title = item.title.replace('<b>', '').replace('</b>', '')
item.link = link
item.title = title
result = item
return result
| 5,353,927 |
def api_docs_redirect():
""" Redirect to API docs """
return redirect('/api/v1', code=302)
| 5,353,928 |
def print_topics(model, vectorizer, top_n: int=10)-> List:
"""Print the top n words found by each topic model.
Args:
model: Sklearn LatentDirichletAllocation model
vectorizer: sklearn CountVectorizer
top_n (int): Number of words you wish to return
Source: https://towardsdatascience.com/end-to-end-topic-modeling-in-python-latent-dirichlet-allocation-lda-35ce4ed6b3e0
"""
for idx, topic in enumerate(model.components_):
print(f"Topic {idx}:")
print([(vectorizer.get_feature_names()[i], topic[i])
for i in topic.argsort()[:-top_n - 1:-1]])
return [vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n-1:-1]]
| 5,353,929 |
def simulate_eazy_sed_from_coeffs(
eazycoeffs, eazytemplatedata, z,
returnfluxunit='', returnwaveunit='A',
limitwaverange=True, savetofile='',
**outfile_kwargs):
"""
Generate a simulated SED from a given set of input eazy-py coefficients
and eazypy templates.
NB: Requires the eazy-py package to apply the IGM absorption!
(https://github.com/gbrammer/eazy-py)
Optional Args:
returnfluxunit: ['AB', 'flambda'] TODO: add Jy
'AB' = return log(flux) as AB magnitudes
'flambda' = return flux density in erg/s/cm2/A
returnwaveunit: ['A' or 'nm']
limitwaverange: limit the output wavelengths to the range covered by PFS
savetofile: filename for saving the output spectrum as a two-column ascii
data file (suitable for use with the SubaruPFS ETC from C. Hirata).
Returns
-------
obswave : observed-frame wavelength, Angstroms or nm
obsflux : flux density of best-fit template, erg/s/cm2/A or AB mag
"""
# the input data units are Angstroms for wavelength
# and cgs for flux: erg/cm2/s/Ang
obswave = eazytemplatedata[0] * (1 + z)
obsfluxmatrix = eazytemplatedata[1:]
sedsimflux = np.dot(eazycoeffs, obsfluxmatrix)
fnu_factor = 10 ** (-0.4 * (25 + 48.6))
flam_spec = 1. / (1 + z) ** 2
obsflux = sedsimflux * fnu_factor * flam_spec
try:
import eazy.igm
igmz = eazy.igm.Inoue14().full_IGM(z, obswave)
obsflux *= igmz
except:
pass
if limitwaverange:
# to simplify things, we only write out the data over the Subaru PFS
# + WFIRST prism wavelength range, from 200 to 2500 nm
# (3000 to 25000 Angstroms)
iuvoir = np.where((obswave>2000) & (obswave<25000))[0]
obswave = obswave[iuvoir]
obsflux = obsflux[iuvoir]
if returnfluxunit=='AB':
# convert from flux density f_lambda into AB mag:
mAB_from_flambda = lambda f_lambda, wave: -2.5 * np.log10(
3.34e4 * wave * wave * f_lambda / 3631)
obsflux = mAB_from_flambda(obsflux, obswave)
if returnwaveunit=='nm':
obswave = obswave / 10.
if savetofile:
out_table = Table()
outcol1 = Column(data=obswave, name='wave')
outcol2 = Column(data=obsflux, name='flux')
out_table.add_columns([outcol1, outcol2])
out_table.write(savetofile, **outfile_kwargs)
return obswave, obsflux
| 5,353,930 |
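# Hedged numeric check (not part of the dataset record above): the AB-magnitude
# conversion used above, mAB = -2.5*log10(3.34e4 * wave**2 * f_lambda / 3631),
# follows from f_nu = (lambda**2 / c) * f_lambda with wavelength in Angstroms
# and f_lambda in erg/s/cm2/A. A source with a constant f_nu of 3631 Jy should
# therefore come out at mAB = 0 at any wavelength.
import numpy as np

def _ab_mag_zero_point_check(wave_angstrom=5500.0):
    f_lambda = 3631.0 / (3.34e4 * wave_angstrom ** 2)  # f_lambda equivalent to 3631 Jy
    mab = -2.5 * np.log10(3.34e4 * wave_angstrom * wave_angstrom * f_lambda / 3631.0)
    assert abs(mab) < 1e-10
    return mab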
def get_timezones_all():
"""Dump the list of timezones from ptyz into a format suitable
for use with the Django Forms API's ChoiceField
"""
# TODO: Find a more user-friendly way of managing 500+ timezones
output = []
for tz in all_timezones:
output.append( (tz, tz) )
return output
| 5,353,931 |
def create_logistic_vector(input_vector, cutoff):
"""
Creates a vector of 1s and -1s based on an input vector of numbers with a cut-off point.
"""
output_vector = np.zeros(len(input_vector))
n = 0
for i in range(len(input_vector)):
if input_vector[i] > cutoff:
output_vector[i] = 1
else:
output_vector[i] = -1 # Set to -1 rather than 0 to help make later calculations easier.
n += 1
return output_vector
| 5,353,932 |
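# Hedged toy example (not part of the dataset record above): assuming numpy is
# available and create_logistic_vector is importable, values above the cutoff
# map to +1 and all others to -1.
import numpy as np

def _create_logistic_vector_demo():
    out = create_logistic_vector(np.array([0.2, 0.6, 0.4, 0.9]), cutoff=0.5)
    assert out.tolist() == [-1.0, 1.0, -1.0, 1.0]
    return out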
def profile(username):
""" user profile """
user = User.query.filter_by(username=username).first_or_404()
return render_template("user/profile.jinja.html", user=user)
| 5,353,933 |
def action_rescale(action):
"""Rescale Distribution actions to exp one"""
return np.array([0 if abs(a) < 0.5 else 10 ** (a-3) if a > 0 else -(10 ** (-a - 3)) for a in action * 3])
| 5,353,934 |
async def test_button_press(hass, entity_id_suffix, api_method_name) -> None:
"""Test pressing the button entities."""
client_mock = await init_integration(hass, electric_vehicle=True)
await hass.services.async_call(
BUTTON_DOMAIN,
SERVICE_PRESS,
{ATTR_ENTITY_ID: f"button.my_mazda3_{entity_id_suffix}"},
blocking=True,
)
await hass.async_block_till_done()
api_method = getattr(client_mock, api_method_name)
api_method.assert_called_once_with(12345)
| 5,353,935 |
def check_if_cards(list_of_cards):
"""Raise an exception if not valid cards.
Every card should be a rank character followed by a suit character.
"""
for i in list_of_cards:
if i[0] not in CARD_RANKS:
message = (
"'" + str(i) + "' is not a recognised card rank.\n"
"A valid rank is a single character as follows:\n"
"'A' (ace)\n"
"'2' (two)\n"
"'3' (three)\n"
"'4' (four)\n"
"'5' (five)\n"
"'6' (six)\n"
"'7' (seven)\n"
"'8' (eight)\n"
"'9' (nine)\n"
"'T' (ten)\n"
"'J' (jack)\n"
"'Q' (queen)\n"
"'K' (king)"
)
raise UnrecognisedCardRankError(message)
if i[1] not in CARD_SUITS:
message = (
"'" + str(i) + "' is not a recognised card suit.\n"
"A valid suit is a single character as follows:\n"
"'S' (spades)\n"
"'H' (hearts)\n"
"'D' (diamonds)\n"
"'C' (clubs)"
)
raise UnrecognisedCardSuitError(message)
| 5,353,936 |
def multi_knee(points: np.ndarray, t1: float = 0.99, t2: int = 3) -> np.ndarray:
"""
Recursive knee point detection based on the curvature equations.
It returns the knee points on the curve.
Args:
points (np.ndarray): numpy array with the points (x, y)
t1 (float): coefficient of determination threshold (default 0.99)
t2 (int): number of points threshold (default 3)
Returns:
np.ndarray: knee points on the curve
"""
return mk.multi_knee(knee, points, t1, t2)
| 5,353,937 |
def parse_dotted_path(path):
"""
Extracts attribute name from dotted path.
"""
try:
objects, attr = path.rsplit('.', 1)
except ValueError:
objects = None
attr = path
return objects, attr
| 5,353,938 |
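# Hedged usage example (not part of the dataset record above): rsplit('.', 1)
# keeps everything before the last dot as the object path and the remainder as
# the attribute name; a bare name yields (None, name).
def _parse_dotted_path_demo():
    assert parse_dotted_path("django.db.models") == ("django.db", "models")
    assert parse_dotted_path("models") == (None, "models")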
def check_get_btc(output_fields):
"""
Checks whether the price of btc is a float and
larger than zero.
"""
btc_price = output_fields[BTC_PRICE_FLD]
check_float_value(btc_price, BTC_PRICE_FLD)
| 5,353,939 |
def resource_media_fields(document, resource):
""" Returns a list of media fields defined in the resource schema.
:param document: the document eventually containing the media files.
:param resource: the resource being consumed by the request.
.. versionadded:: 0.3
"""
media_fields = app.config['DOMAIN'][resource]['_media']
return [field for field in media_fields if field in document]
| 5,353,940 |
def bump_version(target_version, upgrade, metadata_file_path):
"""Bumps target version in metadata"""
# load
raw_data: OrderedDict = ordered_safe_load(metadata_file_path.read_text())
# parse and validate
metadata = ServiceDockerData(**raw_data)
# get + bump + set
attrname = target_version.replace("-", "_")
current_version: str = getattr(metadata, attrname)
raw_data[target_version] = new_version = bump_version_string(
current_version, upgrade
)
# dump to file (preserving order!)
text = ordered_safe_dump(raw_data)
metadata_file_path.write_text(text)
click.echo(f"{target_version.title()} bumped: {current_version} → {new_version}")
| 5,353,941 |
def read_config_file(f):
"""Read a config file."""
if isinstance(f, basestring):
f = os.path.expanduser(f)
try:
config = ConfigObj(f, interpolation=False, encoding='utf8')
except ConfigObjError as e:
log(LOGGER, logging.ERROR, "Unable to parse line {0} of config file "
"'{1}'.".format(e.line_number, f))
log(LOGGER, logging.ERROR, "Using successfully parsed config values.")
return e.config
except (IOError, OSError) as e:
log(LOGGER, logging.WARNING, "You don't have permission to read "
"config file '{0}'.".format(e.filename))
return None
return config
| 5,353,942 |
def _parse_moving(message: List[str]) -> Tuple[Actions, str]:
"""Parses the incoming message list to determine if movement is found.
Args:
message: list of words in the player message
Returns: a tuple of the action and direction
"""
short_dir = ['n', 'ne', 'e', 'se', 's', 'sw', 'w', 'nw']
long_dir = [
'north', 'northeast', 'east', 'southeast', 'south', 'southwest',
'west', 'northwest'
]
for d in long_dir:
if d in message:
return (Actions.MOVE, d)
for d in short_dir:
if d in message:
direction = long_dir[short_dir.index(d)]
return (Actions.MOVE, direction)
return (Actions.UNKNOWN, '')
| 5,353,943 |
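# Hedged usage sketch (not part of the dataset record above): assumes the
# module's Actions enum exposes MOVE and UNKNOWN as used above. Short direction
# abbreviations are expanded to their long form before being returned.
def _parse_moving_demo():
    action, direction = _parse_moving(["go", "n"])
    assert (action, direction) == (Actions.MOVE, "north")
    action, direction = _parse_moving(["look", "around"])
    assert (action, direction) == (Actions.UNKNOWN, "")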
def checkheader(headerfile, name, arch):
"""check a header by opening it and comparing the results to the name and arch
we believe it to be for. if it fails raise URLGrabError(-1)"""
h = Header_Work(headerfile)
fail = 0
if h.hdr is None:
fail = 1
else:
if name != h.name() or arch != h.arch():
fail = 1
if fail:
raise URLGrabError(-1, _('Header cannot be opened or does not match %s, %s.') % (name, arch))
return
| 5,353,944 |
def _Net_forward_all(self, blobs=None, **kwargs):
"""
Run net forward in batches.
Take
blobs: list of blobs to extract as in forward()
kwargs: Keys are input blob names and values are blob ndarrays.
Refer to forward().
Give
all_outs: {blob name: list of blobs} dict.
"""
# Collect outputs from batches
all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
for batch in self._batch(kwargs):
outs = self.forward(blobs=blobs, **batch)
for out, out_blob in outs.items():
all_outs[out].extend(out_blob.copy())
# Package in ndarray.
for out in all_outs:
all_outs[out] = np.asarray(all_outs[out])
# Discard padding.
pad = len(next(iter(all_outs.values()))) - len(next(iter(kwargs.values())))
if pad:
for out in all_outs:
all_outs[out] = all_outs[out][:-pad]
return all_outs
| 5,353,945 |
def email_members_old(request, course_prefix, course_suffix):
"""
Displays the email form and handles email actions
Right now this is blocking and does not do any batching.
Will have to make it better
"""
error_msg=""
success_msg=""
form = EmailForm()
if request.method == "POST":
form = EmailForm(data=request.POST)
if form.is_valid():
sender = request.common_page_data['course'].title + ' Staff <[email protected]>'
recipient_qset = User.objects.none() #get recipients in a QuerySet
if form.cleaned_data['to'] == "all" :
recipient_qset = request.common_page_data['course'].get_all_members()
elif form.cleaned_data['to'] == "students" :
recipient_qset = request.common_page_data['course'].get_all_students()
elif form.cleaned_data['to'] == "staff" :
recipient_qset = request.common_page_data['course'].get_all_course_admins()
elif form.cleaned_data['to'] == "myself":
recipient_qset = User.objects.filter(id=request.user.id)
#pdb.set_trace()
courses.email_members.tasks.email_with_celery.delay(
form.cleaned_data['subject'],
form.cleaned_data['message'],
sender,
recipient_qset.values_list('email',flat=True),
course_title=request.common_page_data['course'].title,
course_url=request.build_absolute_uri(reverse('courses.views.main', args=[course_prefix, course_suffix])))
success_msg = "Your email was successfully queued for sending"
#form = EmailForm()
else:
error_msg = "Please fix the errors below:"
context = RequestContext(request)
return render_to_response('email/email.html',
{'form': form,
'error_msg': error_msg,
'success_msg': success_msg,
'course': request.common_page_data['course'],
'common_page_data': request.common_page_data},
context_instance=context)
| 5,353,946 |
def start_folders() -> None:
"""Creates the initial folders to save the data and plots.
:return: None -- The function creates folders and does not return a value.
"""
try:
os.mkdir(f"../data/epochs_sim")
os.mkdir(f"../plot/epochs_sim")
print("Folder to save data created")
print()
except FileExistsError as error:
print("Folder exists. The folder was not created")
print(error)
print()
| 5,353,947 |
def Padding_op(Image, strides, offset_x, offset_y):
"""
Takes an image and the offsets required to fit the output image dimensions with the
given strides, and computes the padding needed for a perfect fit.
:param Image:
:param strides:
:param offset_x:
:param offset_y:
:return: Padded image
"""
if config['volumetric']:
raise Exception("3D Padding not yet implemented!")
padding_x = strides[0] - offset_x
padding_y = strides[1] - offset_y
Padded_Image = np.zeros(shape=(Image.shape[0] + padding_x, Image.shape[1] + padding_y, Image.shape[2]),
dtype=Image.dtype)
Padded_Image[padding_x // 2:(padding_x // 2) + (Image.shape[0]), padding_y // 2:(padding_y // 2) + Image.shape[1],
:] = Image
return Padded_Image
| 5,353,948 |
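# Hedged arithmetic sketch (not part of the dataset record above): Padding_op
# itself needs an image array and a global `config`, so this only illustrates
# the padding arithmetic it performs. Given a stride of 32 and an offset of 4
# pixels along x, the image is padded by 32 - 4 = 28 pixels, split as evenly as
# integer division allows around the original image.
def _padding_arithmetic_demo(stride_x=32, offset_x=4, width=100):
    padding_x = stride_x - offset_x    # 28 extra pixels
    left = padding_x // 2              # 14 pixels before the image
    right = padding_x - left           # 14 pixels after it
    padded_width = width + padding_x   # 128
    assert left + width + right == padded_width
    return padded_width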
def area_triangle(point_a: array_like, point_b: array_like, point_c: array_like) -> np.float64:
"""
Return the area of a triangle defined by three points.
The points are the vertices of the triangle. They must be 3D or less.
Parameters
----------
point_a, point_b, point_c : array_like
The three vertices of the triangle.
Returns
-------
np.float64
The area of the triangle.
References
----------
http://mathworld.wolfram.com/TriangleArea.html
Examples
--------
>>> from skspatial.measurement import area_triangle
>>> area_triangle([0, 0], [0, 1], [1, 0])
0.5
>>> area_triangle([0, 0], [0, 2], [1, 1])
1.0
>>> area_triangle([3, -5, 1], [5, 2, 1], [9, 4, 2]).round(2)
12.54
"""
vector_ab = Vector.from_points(point_a, point_b)
vector_ac = Vector.from_points(point_a, point_c)
# Normal vector of plane defined by the three points.
vector_normal = vector_ab.cross(vector_ac)
return 0.5 * vector_normal.norm()
| 5,353,949 |
def file_exists(target, parse=False):
"""Checks if a file exists"""
if parse:
target = envar_parser(target)
return os.path.isfile(target)
| 5,353,950 |
def test_valid(line):
"""Test for 40 character hex strings
Print error on failure"""
base_error = '*** WARNING *** line in torrent list'
if len(line) != 40:
print(base_error, 'incorrect length:', line)
elif any(char not in HEX for char in line):
print(base_error, 'has non-hex digits:', line)
else:
return True
| 5,353,951 |
def copy_for_online(levels):
""" Generates a separate Puzzlescript source file for each level in the given sequence, and copies each in order to the system clipboard.
When called, this function first prints a prompt including the name of the first level in the progression to stdout. When the user
presses the Enter key on their keyboard, the function will generate a fully-functional Puzzlescript source file with that level and
that level alone, but not the level's name, and copy it to the system clipboard. The function will then print a prompt for the next
level, and so on until it runs out of levels.
Args:
levels: A sequence of Level objects, such as one returned by gen_progressions
"""
for elem in levels:
input('Press ENTER to copy {}'.format(elem.name))
pyperclip.copy(single_playable(elem, 'Please exit and return to the survey'))
| 5,353,952 |
def readIMAGCDF(filename, headonly=False, **kwargs):
"""
Reading Intermagnet CDF format (1.0,1.1,1.2)
"""
debug = kwargs.get('debug')
cdfdat = cdf.CDF(filename)
if debug:
logger.info("readIMAGCDF: FOUND IMAGCDF file created with version {}".format(cdfdat.version()))
if debug:
for line in cdfdat:
logger.info("{}".format(line))
# get Attribute list
attrslist = [att for att in cdfdat.attrs]
# get Data list
datalist = [att for att in cdfdat]
headers={}
arraylist = []
array = [[] for elem in KEYLIST]
startdate = cdfdat[datalist[-1]][0]
flagruleversion = ''
flagruletype = ''
flaglist = []
# #################################
# Get header info:
# #################################
if 'FormatDescription' in attrslist:
form = cdfdat.attrs['FormatDescription']
headers['DataFormat'] = str(cdfdat.attrs['FormatDescription'])
if 'FormatVersion' in attrslist:
vers = cdfdat.attrs['FormatVersion']
headers['DataFormat'] = str(form) + '; ' + str(vers)
if 'Title' in attrslist:
pass
if 'IagaCode' in attrslist:
headers['StationIAGAcode'] = str(cdfdat.attrs['IagaCode'])
headers['StationID'] = str(cdfdat.attrs['IagaCode'])
if 'ElementsRecorded' in attrslist:
headers['DataComponents'] = str(cdfdat.attrs['ElementsRecorded'])
if 'PublicationLevel' in attrslist:
headers['DataPublicationLevel'] = str(cdfdat.attrs['PublicationLevel'])
if 'PublicationDate' in attrslist:
headers['DataPublicationDate'] = str(cdfdat.attrs['PublicationDate'])
if 'ObservatoryName' in attrslist:
headers['StationName'] = str(cdfdat.attrs['ObservatoryName'])
if 'Latitude' in attrslist:
headers['DataAcquisitionLatitude'] = str(cdfdat.attrs['Latitude'])
if 'Longitude' in attrslist:
headers['DataAcquisitionLongitude'] = str(cdfdat.attrs['Longitude'])
if 'Elevation' in attrslist:
headers['DataElevation'] = str(cdfdat.attrs['Elevation'])
if 'Institution' in attrslist:
headers['StationInstitution'] = str(cdfdat.attrs['Institution'])
if 'VectorSensOrient' in attrslist:
headers['DataSensorOrientation'] = str(cdfdat.attrs['VectorSensOrient'])
if 'StandardLevel' in attrslist:
headers['DataStandardLevel'] = str(cdfdat.attrs['StandardLevel'])
if 'StandardName' in attrslist:
headers['DataStandardName'] = str(cdfdat.attrs['StandardName'])
if 'StandardVersion' in attrslist:
headers['DataStandardVersion'] = str(cdfdat.attrs['StandardVersion'])
if 'PartialStandDesc' in attrslist:
headers['DataPartialStandDesc'] = str(cdfdat.attrs['PartialStandDesc'])
if 'Source' in attrslist:
headers['DataSource'] = str(cdfdat.attrs['Source'])
if 'TermsOfUse' in attrslist:
headers['DataTerms'] = str(cdfdat.attrs['TermsOfUse'])
if 'References' in attrslist:
headers['DataReferences'] = str(cdfdat.attrs['References'])
if 'UniqueIdentifier' in attrslist:
headers['DataID'] = str(cdfdat.attrs['UniqueIdentifier'])
if 'ParentIdentifiers' in attrslist:
headers['SensorID'] = str(cdfdat.attrs.get('ParentIdentifiers'))
if 'ReferenceLinks' in attrslist:
headers['StationWebInfo'] = str(cdfdat.attrs['ReferenceLinks'])
if 'FlagRulesetType' in attrslist:
flagruletype = str(cdfdat.attrs['FlagRulesetType'])
if 'FlagRulesetVersion' in attrslist:
flagruleversion = str(cdfdat.attrs['FlagRulesetVersion'])
# New in 0.3.99 - provide a SensorID as well consisting of IAGA code, min/sec
# and numerical publevel
# IAGA code
if headers.get('SensorID','') == '':
try:
#TODO determine resolution
headers['SensorID'] = "{}_{}_{}".format(headers.get('StationIAGAcode','xxx').upper()+'sec',headers.get('DataPublicationLevel','0'),'0001')
except:
pass
# #################################
# Get data:
# #################################
# Reorder datalist and Drop time column
# #########################################################
# 1. Get the amount of Times columns and associated lengths
# #########################################################
#print "Analyzing file structure and returning values"
#print datalist
zpos = KEYLIST.index('z') # used for idf records
mutipletimerange = False
newdatalist = []
tllist = []
indexarray = np.asarray([])
for elem in datalist:
if elem.endswith('Times') and not elem.startswith('Flag'):
#print "Found Time Column"
# Get length
tl = int(str(cdfdat[elem]).split()[1].strip('[').strip(']'))
#print "Length", tl
tllist.append([tl,elem])
if len(tllist) < 1:
#print "No time column identified"
# Check for starttime and sampling rate in header
if 'StartTime' in attrslist and 'SamplingPeriod' in attrslist:
# TODO Write that function
st = str(cdfdat.attrs['StartTime'])
sr = str(cdfdat.attrs['SamplingPeriod'])
else:
logger.error("readIMAGCDF: No Time information available - aborting")
return
elif len(tllist) > 1:
tl = [el[0] for el in tllist]
if not max(tl) == min(tl):
logger.warning("readIMAGCDF: Time columns of different length. Choosing longest as basis")
newdatalist.append(['time',max(tllist)[1]])
try:
indexarray = np.nonzero(np.in1d(date2num(cdfdat[max(tllist)[1]][...]),date2num(cdfdat[min(tllist)[1]][...])))[0]
except:
indexarray = np.asarray([])
mutipletimerange = True
else:
logger.info("readIMAGCDF: Equal length time axes found - assuming identical time")
if 'GeomagneticVectorTimes' in datalist:
newdatalist.append(['time','GeomagneticVectorTimes'])
else:
newdatalist.append(['time',tllist[0][1]]) # Take the first one
else:
#print "Single time axis found in file"
newdatalist.append(['time',tllist[0][1]])
def Ruleset2Flaglist(flagginglist,rulesettype,rulesetversion):
if flagruletype in ['Conrad', 'conrad', 'MagPy','magpy']:
if flagruleversion in ['1.0','1',1]:
flagcolsconrad = [flagginglist[0],flagginglist[1],flagginglist[3],flagginglist[4],flagginglist[5],flagginglist[6],flagginglist[2]]
flaglisttmp = []
for elem in flagcolsconrad:
flaglisttmp.append(cdfdat[elem][...])
flaglist = np.transpose(flaglisttmp)
flaglist = [list(elem) for elem in flaglist]
return list(flaglist)
else:
logger.warning("readIMAGCDF: Could not interprete Ruleset")
if not flagruletype == '':
logger.info("readIMAGCDF: Found flagging ruleset {} vers.{} - extracting flagging information".format(flagruletype,flagruleversion))
flagginglist = [elem for elem in datalist if elem.startswith('Flag')]
flaglist = Ruleset2Flaglist(flagginglist,flagruletype,flagruleversion)
datalist = [elem for elem in datalist if not elem.endswith('Times') and not elem.startswith('Flag')]
# #########################################################
# 2. Sort the datalist according to KEYLIST
# #########################################################
for key in KEYLIST:
possvals = [key]
if key == 'x':
possvals.extend(['h','i'])
if key == 'y':
possvals.extend(['d','e'])
if key == 'df':
possvals.append('g')
if key == 'f':
possvals.append('s')
for elem in datalist:
try:
label = cdfdat[elem].attrs['LABLAXIS'].lower()
if label in possvals:
newdatalist.append([key,elem])
except:
pass # for lines which have no Label
if not len(datalist) == len(newdatalist)-1:
logger.warning("readIMAGCDF: error encountered in key assignment - please check")
# 3. Create equal length arrays, reducing all data to the primary Times column and filling NaNs for non-existing values
# (4. eventually completely drop time cols and just store start date and sampling period in header)
# Deal with scalar data (independent or whatever
for elem in newdatalist:
#print ("Here", elem)
if elem[0] == 'time':
try:
ar = date2num(cdfdat[elem[1]][...])
except:
ar = date2num(np.asarray([cdf.lib.tt2000_to_datetime(el) for el in cdfdat[elem[1]][...]]))
arlen= len(ar)
arraylist.append(ar)
ind = KEYLIST.index('time')
array[ind] = ar
else:
ar = cdfdat[elem[1]][...]
if elem[0] in NUMKEYLIST:
with np.errstate(invalid='ignore'):
ar[ar > 88880] = float(nan)
ind = KEYLIST.index(elem[0])
headers['col-'+elem[0]] = cdfdat[elem[1]].attrs['LABLAXIS'].lower()
headers['unit-col-'+elem[0]] = cdfdat[elem[1]].attrs['UNITS']
if len(indexarray) > 0 and elem[0] in ['f','df']: ## this is no good - point to depend_0
newar = np.asarray([np.nan]*arlen)
#print (len(newar),len(ar),len(indexarray))
newar[indexarray] = ar
#print (len(newar))
array[ind] = newar
arraylist.append(newar)
else:
array[ind] = ar
arraylist.append(ar)
# if idf -> add f column also to z
if elem[0] in ['f','F'] and headers.get('DataComponents','') in ['DIF','dif','idf','IDF'] and not len(array[zpos]) > 0:
array[zpos] = ar
arraylist.append(ar)
headers['col-z'] = cdfdat[elem[1]].attrs['LABLAXIS'].lower()
headers['unit-col-z'] = cdfdat[elem[1]].attrs['UNITS']
ndarray = np.array(array)
stream = DataStream()
stream = [LineStruct()]
result = DataStream(stream,headers,ndarray)
if not flagruletype == '' and len(flaglist) > 0:
result = result.flag(flaglist)
#t2 = datetime.utcnow()
#print "Duration for conventional stream assignment:", t2-t1
return result
| 5,353,953 |
def custom_formatter(code, msg):
""" 自定义结果格式化函数
:param code: 响应码
:param msg: 响应消息
"""
return {
"code": code,
"msg": "hello",
"sss": "tt",
}
| 5,353,954 |
def get_sampler_callback(rank, num_replicas, noniid=0, longtail=0):
"""
noniid: noniid controls the noniidness.
- noniid = 1 refers to completely noniid
- noniid = 0 refers to iid.
longtail: longtail controls the long-tailness.
- Class i takes (1-longtail) ** i percent of data.
"""
assert noniid >= 0 and noniid <= 1, f"`noniid` in [0, 1], get {noniid}"
assert longtail >= 0 and longtail <= 1, f"`longtail` in [0, 1], get {longtail}"
if longtail > 0:
return lambda x: NONIIDLTSampler(
alpha=1 - noniid,
beta=1 - longtail,
num_replicas=num_replicas,
rank=rank,
shuffle=True,
dataset=x,
)
if noniid == 0:
# Byzantine workers
return lambda x: DistributedSampler(
num_replicas=num_replicas,
rank=rank,
shuffle=True,
dataset=x,
)
if noniid > 0:
return lambda x: DecentralizedMixedSampler(
noniid_percent=noniid,
num_replicas=num_replicas,
rank=rank,
shuffle=True,
dataset=x,
)
raise NotImplementedError("")
| 5,353,955 |
def _test_broadcast_args(in_shape_1, in_shape_2):
"""One iteration of broadcast_args"""
shape_1 = np.array(in_shape_1).astype("int32")
shape_2 = np.array(in_shape_2).astype("int32")
with tf.Graph().as_default():
shape_1 = constant_op.constant(shape_1, shape=shape_1.shape, dtype=shape_1.dtype)
shape_2 = constant_op.constant(shape_2, shape=shape_2.shape, dtype=shape_2.dtype)
tf.raw_ops.BroadcastArgs(s0=shape_1, s1=shape_2)
compare_tf_with_tvm(None, "", "BroadcastArgs:0", opt_level=0)
| 5,353,956 |
def alloc_bitrate(frame_nos, chunk_frames, pref_bitrate, nrow_tiles, ncol_tiles):
"""
Allocates equal bitrate to all the tiles
"""
vid_bitrate = []
for i in range(len(chunk_frames)):
chunk = chunk_frames[i]
chunk_bitrate = [[-1 for x in range(ncol_tiles)] for y in range(nrow_tiles)]
chunk_weight = [[1. for x in range(ncol_tiles)] for y in range(nrow_tiles)]
total_weight = sum(sum(x) for x in chunk_weight)
for x in range(nrow_tiles):
for y in range(ncol_tiles):
chunk_bitrate[x][y] = chunk_weight[x][y]*pref_bitrate/total_weight
vid_bitrate.append(chunk_bitrate)
return vid_bitrate
| 5,353,957 |
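# Hedged usage sketch (not part of the dataset record above): with uniform tile
# weights the preferred bitrate is split equally, so every tile of a 2x2 grid
# receives a quarter of pref_bitrate in each chunk. frame_nos is unused by the
# equal-allocation variant above, so a placeholder is passed.
def _alloc_bitrate_demo():
    chunk_frames = [list(range(30)), list(range(30, 60))]  # two dummy chunks
    vid_bitrate = alloc_bitrate(frame_nos=None, chunk_frames=chunk_frames,
                                pref_bitrate=8000, nrow_tiles=2, ncol_tiles=2)
    assert vid_bitrate[0][0][0] == 8000 / 4
    return vid_bitrate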
def file_filter(extensions: Collection[str]) -> Any:
"""Register a page content filter for file extensions."""
def wrapper(f):
for ext in extensions:
_file_filters[ext] = f
return f
return wrapper
| 5,353,958 |
def epc_calc_img_size(reg_dict):
"""
Calculate the output image size from the EPC660 sensor
Parameters
----------
reg_dict : dict
Returns
----------
int
The number of rows
int
The number of columns in the image
"""
col_start, col_end, row_start, row_end = epc_calc_roi(reg_dict)
row_bin, col_bin = epc_calc_bin_mode(reg_dict)
row_binning, col_binning = epc_calc_binning(reg_dict)
row_div = 1
col_div = 1
if row_bin:
row_div = (1 << row_binning)
if col_bin:
col_div = (1 << col_binning)
nrows = (2*(row_end-row_start+1))/row_div
ncols = (col_end-col_start+1)/col_div
return nrows, ncols
| 5,353,959 |
def test():
"""
Run Find in Files widget test.
"""
# Standard library imports
from os.path import dirname
import sys
from unittest.mock import MagicMock
# Local imports
from spyder.utils.qthelpers import qapplication
app = qapplication()
plugin_mock = MagicMock()
plugin_mock.CONF_SECTION = 'find_in_files'
widget = FindInFilesWidget('find_in_files', plugin=plugin_mock)
widget.CONF_SECTION = 'find_in_files'
widget._setup()
widget.setup()
widget.resize(640, 480)
widget.show()
external_paths = [
dirname(__file__),
dirname(dirname(__file__)),
dirname(dirname(dirname(__file__))),
dirname(dirname(dirname(dirname(__file__)))),
]
for path in external_paths:
widget.add_external_path(path)
sys.exit(app.exec_())
| 5,353,960 |
def main():
"""Script entry point."""
vm_os, vm_packages = parse_cmdline()
print('OS:', vm_os)
print('Packages:', ', '.join(vm_packages))
download_vagrantfile(vm_os)
download_ansible_roles(vm_packages)
write_playbook(vm_packages)
create_workspace()
run_vagrant_up()
| 5,353,961 |
def get_rki_data(read_data=dd.defaultDict['read_data'],
file_format=dd.defaultDict['file_format'],
out_folder=dd.defaultDict['out_folder'],
no_raw=dd.defaultDict['no_raw'],
impute_dates=dd.defaultDict['impute_dates'],
make_plot=dd.defaultDict['make_plot'],
moving_average=dd.defaultDict['moving_average'],
split_berlin=dd.defaultDict['split_berlin'],
rep_date=dd.defaultDict['rep_date']
):
"""! Downloads the RKI data and provides different kind of structured data
The data is read either from the internet or from a json file (FullDataRKI.json), stored in an earlier run.
If the data is read from the internet, before changing anything the data is stored in FullDataRKI.json.
If data should be downloaded, it is checked if data contains all 16 states.
If not a different source is tried, in this case a column has to be renamed.
The file is read in or stored at the folder "out_folder"/Germany/.
To store and change the data we use pandas
While working with the data
- the column names are changed to english depending on defaultDict
- a new column "Date" is defined.
- we are only interested in the values where the parameters NeuerFall, NeuerTodesfall, NeuGenesen are larger than 0.
Negative values of these parameters are only useful
if one wants to compute the difference to the previous day.
For details we refer to the above mentioned webpage.
- For all different parameters and different columns the values are added up for whole germany for every date
and the cumulative sum is calculated. Unless something else is mentioned.
- For Berlin all districts can be merged into one [Default]. Otherwise, Berlin is divided into multiple districts and
different file names are used.
- Following data is generated and written to the mentioned filename
- All infected (current and past) for whole germany are stored in "infected_rki"
- All deaths whole germany are stored in "deaths_rki"
- Infected, deaths and recovered for whole germany are stored in "all_germany_rki"
- Infected split for states are stored in "infected_state_rki"
- Infected, deaths and recovered split for states are stored in "all_state_rki"
- Infected split for counties are stored in "infected_county_rki(_split_berlin)"
- Infected, deaths and recovered split for county are stored in "all_county_rki(_split_berlin)"
- Infected, deaths and recovered split for gender are stored in "all_gender_rki"
- Infected, deaths and recovered split for state and gender are stored in "all_state_gender_rki"
- Infected, deaths and recovered split for county and gender are stored in "all_county_gender_rki(_split_berlin)"
- Infected, deaths and recovered split for age are stored in "all_age_rki"
- Infected, deaths and recovered split for state and age are stored in "all_state_age_rki"
- Infected, deaths and recovered split for county and age are stored in "all_county_age_rki(_split_berlin)"
@param read_data False [Default] or True. Defines if data is read from file or downloaded.
@param file_format File format which is used for writing the data. Default defined in defaultDict.
@param out_folder Path to folder where data is written in folder out_folder/Germany.
@param no_raw True or False [Default]. Defines if unchanged raw data is saved or not.
@param impute_dates False [Default] or True. Defines if values for dates without new information are imputed.
@param make_plot False [Default] or True. Defines if plots are generated with matplotlib.
@param moving_average 0 [Default] or >0. Applies a 'moving_average'-days moving average on all time series
to smooth out weekend effects.
@param split_berlin True or False [Default]. Defines if Berlin's districts are kept separated or get merged.
"""
directory = os.path.join(out_folder, 'Germany/')
gd.check_dir(directory)
filename = "FullDataRKI"
if read_data:
# if already downloaded, just read the json file
file_in = os.path.join(directory, filename + ".json")
try:
df = pandas.read_json(file_in)
except ValueError as err:
raise FileNotFoundError("Error: The file: " + file_in + \
" does not exist. Call program without -r flag to get it.") \
from err
else:
# Supported data formats:
load = {
'csv': gd.loadCsv,
'geojson': gd.loadGeojson
}
# ArcGIS public data item ID:
itemId = 'dd4580c810204019a7b8eb3e0b329dd6_0'
# Get data:
df = load['csv'](itemId)
complete = check_for_completeness(df, merge_eisenach=True)
# try another possibility if df was empty or incomplete
if not complete:
print("Note: RKI data is incomplete. Trying another source.")
df = load['csv']("", "https://npgeo-de.maps.arcgis.com/sharing/rest/content/items/"
"f10774f1c63e40168479a1feb6c7ca74/data", "")
df.rename(columns={'FID': "ObjectId"}, inplace=True)
complete = check_for_completeness(df, merge_eisenach=True)
if complete:
# output data to not always download it
if not no_raw:
gd.write_dataframe(df, directory, filename, "json")
else:
print("Information: dataframe was incomplete for csv. Trying geojson.")
df = load['geojson'](itemId)
complete = check_for_completeness(df, merge_eisenach=True)
if not df.empty and complete:
if not no_raw:
gd.write_dataframe(df, directory, filename, "json")
else:
raise FileNotFoundError("Something went wrong, dataframe is empty for csv and geojson!")
# store dict values in parameter to not always call dict itself
Altersgruppe2 = dd.GerEng['Altersgruppe2']
Altersgruppe = dd.GerEng['Altersgruppe']
Geschlecht = dd.GerEng['Geschlecht']
AnzahlFall = dd.GerEng['AnzahlFall']
AnzahlGenesen = dd.GerEng['AnzahlGenesen']
AnzahlTodesfall = dd.GerEng['AnzahlTodesfall']
IdBundesland = dd.GerEng['IdBundesland']
Bundesland = dd.GerEng['Bundesland']
IdLandkreis = dd.GerEng['IdLandkreis']
Landkreis = dd.GerEng['Landkreis']
# translate column gender from German to English and standardize
df.loc[df.Geschlecht == 'unbekannt', ['Geschlecht']] = dd.GerEng['unbekannt']
df.loc[df.Geschlecht == 'W', ['Geschlecht']] = dd.GerEng['W']
df.loc[df.Geschlecht == 'M', ['Geschlecht']] = dd.GerEng['M']
df.loc[df.Altersgruppe == 'unbekannt', ['Altersgruppe']] = dd.GerEng['unbekannt']
df.loc[df.Altersgruppe2 == 'unbekannt', ['Altersgruppe2']] = dd.GerEng['unbekannt']
# change names of columns
df.rename(dd.GerEng, axis=1, inplace=True)
# Add column 'Date' with Date
# = reporting date if rep_date is set
# = reference date (date of disease onset) if IstErkrankungsbeginn = 1 else
# take Meldedatum (reporting date)
if rep_date:
df['Date'] = df['Meldedatum']
else:
df['Date'] = np.where(df['IstErkrankungsbeginn'] == 1, df['Refdatum'], df['Meldedatum'])
# remove leading zeros for ID_County (if not yet done)
df['ID_County'] = df['ID_County'].astype(int)
    # TODO: uncomment if Altersgruppe2 is provided again
# Add new column with Age with range 10 as spain data
# conditions = [
# (df[Altersgruppe2] == '0-4') & (df[Altersgruppe2] == '5-9'),
# (df[Altersgruppe2] == '10-14') & (df[Altersgruppe2] == '15-19'),
# (df[Altersgruppe2] == '20-24') & (df[Altersgruppe2] == '25-29'),
# (df[Altersgruppe2] == '30-34') & (df[Altersgruppe2] == '35-39'),
# (df[Altersgruppe2] == '40-44') & (df[Altersgruppe2] == '45-49'),
# (df[Altersgruppe2] == '50-54') & (df[Altersgruppe2] == '55-59'),
# (df[Altersgruppe2] == '60-64') & (df[Altersgruppe2] == '65-69'),
# (df[Altersgruppe2] == '70-74') & (df[Altersgruppe2] == '75-79'),
# ]
# choices = ['0-9', '10-19', '20-29', '30-39', '40-49', '50-59', '60-69', '70-79']
# df['Age10'] = np.select(conditions, choices, default=dd.GerEng['unbekannt'])
# convert "Datenstand" to real date:
df.Datenstand = pandas.to_datetime(df.Datenstand, format='%d.%m.%Y, %H:%M Uhr')
# Correct Timestamps:
for col in ['Date']:
df[col] = df[col].astype('datetime64[ns]')
# Date is either Refdatum or Meldedatum after column
# 'IstErkrankungsbeginn' has been added. See also rep_date option.
dateToUse = 'Date'
df.sort_values(dateToUse, inplace=True)
# Manipulate data to get rid of conditions: df.NeuerFall >= 0, df.NeuerTodesfall >= 0, df.NeuGenesen >=0
# There might be a better way
dfF = df
dfF.loc[dfF.NeuerFall < 0, [AnzahlFall]] = 0
dfF.loc[dfF.NeuerTodesfall < 0, [AnzahlTodesfall]] = 0
dfF.loc[dfF.NeuGenesen < 0, [AnzahlGenesen]] = 0
# get rid of unnecessary columns
    dfF = dfF.drop(['NeuerFall', 'NeuerTodesfall', 'NeuGenesen', "IstErkrankungsbeginn", "ObjectId",
                    "Meldedatum", "Datenstand", "Refdatum", Altersgruppe2], axis=1)
print("Available columns:", df.columns)
######## Data for whole Germany all ages ##########
# NeuerFall: Infected (incl. recovered) over "dateToUse":
# make sum for one "dateToUse"
# old way:
# gbNF = df[df.NeuerFall >= 0].groupby( dateToUse ).sum()
gbNF = df[df.NeuerFall >= 0].groupby(dateToUse).agg({AnzahlFall: sum})
# make cumulative sum of "AnzahlFall" for "dateToUse"
# old way:
# gbNF_cs = gbNF.AnzahlFall.cumsum()
gbNF_cs = gbNF.cumsum()
# output to json file
filename = 'infected'
if rep_date:
filename_orig = filename + '_repdate'
else:
filename_orig = filename
gd.write_dataframe(gbNF_cs.reset_index(), directory, filename_orig + '_rki', file_format)
if impute_dates or moving_average > 0:
gbNF_cs = modifyDataframeSeries.impute_and_reduce_df(
gbNF_cs.reset_index(),
{},
['Confirmed'],
impute='forward', moving_average=moving_average)
filename = gd.append_filename(filename, impute_dates, moving_average)
if rep_date:
filename = filename + '_repdate'
gd.write_dataframe(gbNF_cs, directory, filename + '_rki', file_format)
if make_plot:
# make plot
gbNF_cs.plot(title='COVID-19 infections', grid=True,
style='-o')
plt.tight_layout()
plt.show()
# Dead over Date:
gbNT = df[df.NeuerTodesfall >= 0].groupby(dateToUse).agg({AnzahlTodesfall: sum})
gbNT_cs = gbNT.cumsum()
# output
filename = 'deaths'
if rep_date:
filename_orig = filename + '_repdate'
else:
filename_orig = filename
gd.write_dataframe(gbNT_cs.reset_index(), directory, filename_orig + '_rki', file_format)
if impute_dates or moving_average > 0:
gbNT_cs = modifyDataframeSeries.impute_and_reduce_df(
gbNT_cs.reset_index(),
{},
['Deaths'],
impute='forward', moving_average=moving_average)
filename = gd.append_filename(filename, impute_dates, moving_average)
if rep_date:
filename = filename + '_repdate'
gd.write_dataframe(gbNT_cs.reset_index(), directory, filename + '_rki', file_format)
if make_plot:
gbNT_cs.plot(title='COVID-19 deaths', grid=True,
style='-o')
plt.tight_layout()
plt.show()
dfF.agg({AnzahlFall: sum, AnzahlTodesfall: sum, AnzahlGenesen: sum}) \
.plot(title='COVID-19 infections, deaths, recovered', grid=True,
kind='bar')
plt.tight_layout()
plt.show()
gbNF = df.groupby(dateToUse).agg({AnzahlFall: sum, AnzahlTodesfall: sum, AnzahlGenesen: sum})
gbNF_cs = gbNF.cumsum()
filename = 'all_germany'
if rep_date:
filename_orig = filename + '_repdate'
else:
filename_orig = filename
gd.write_dataframe(gbNF_cs.reset_index(), directory, filename_orig + '_rki', file_format)
if impute_dates or moving_average > 0:
gbNF_cs = modifyDataframeSeries.impute_and_reduce_df(
gbNF_cs.reset_index(),
{},
['Confirmed', 'Deaths', 'Recovered'],
impute='forward', moving_average=moving_average)
filename = gd.append_filename(filename, impute_dates, moving_average)
if rep_date:
filename = filename + '_repdate'
gd.write_dataframe(gbNF_cs, directory, filename + '_rki', file_format)
############## Data for states all ages ################
# NeuerFall: Infected (incl. recovered) over "dateToUse" for every state ("Bundesland"):
    # gbNFst = df[df.NeuerFall >= 0].groupby([IdBundesland, Bundesland, dateToUse]).AnzahlFall.sum()
gbNFst = df[df.NeuerFall >= 0].groupby([IdBundesland, Bundesland, dateToUse])\
.agg({AnzahlFall: sum})
gbNFst_cs = gbNFst.groupby(level=1).cumsum().reset_index()
# output
filename = 'infected_state'
if rep_date:
filename_orig = filename + '_repdate'
else:
filename_orig = filename
gd.write_dataframe(gbNFst_cs, directory, filename_orig + '_rki', file_format)
if impute_dates or moving_average > 0:
gbNFst_cs = modifyDataframeSeries.impute_and_reduce_df(
gbNFst_cs,
{dd.EngEng["idState"]: [k for k, v in dd.State.items()]},
['Confirmed'],
impute='forward', moving_average=moving_average)
filename = gd.append_filename(filename, impute_dates, moving_average)
if rep_date:
filename = filename + '_repdate'
gd.write_dataframe(gbNFst_cs, directory, filename + '_rki', file_format)
# output nested json
# gbNFst_cs.groupby(['IdBundesland', 'Bundesland'], as_index=False) \
# .apply(lambda x: x[[dateToUse,'AnzahlFall']].to_dict('r')) \
# .reset_index().rename(columns={0:'Dates'})\
# .to_json(directory + "gbNF_state_nested.json", orient='records')
# infected (incl recovered), deaths and recovered together
gbAllSt = dfF.groupby([IdBundesland, Bundesland, dateToUse]) \
.agg({AnzahlFall: sum, AnzahlTodesfall: sum, AnzahlGenesen: sum})
gbAllSt_cs = gbAllSt.groupby(level=1).cumsum().reset_index()
# output
filename = 'all_state'
if rep_date:
filename_orig = filename + '_repdate'
else:
filename_orig = filename
gd.write_dataframe(gbAllSt_cs, directory, filename_orig + '_rki', file_format)
if impute_dates or moving_average > 0:
gbAllSt_cs = modifyDataframeSeries.impute_and_reduce_df(
gbAllSt_cs,
{dd.EngEng["idState"]: [k for k, v in dd.State.items()]},
['Confirmed', 'Deaths', 'Recovered'],
impute='forward', moving_average=moving_average)
filename = gd.append_filename(filename, impute_dates, moving_average)
if rep_date:
filename = filename + '_repdate'
gd.write_dataframe(gbAllSt_cs, directory, filename + '_rki', file_format)
############# Data for counties all ages ######################
if not split_berlin:
df = geoger.merge_df_counties(
df, 11000, geoger.CountyMerging[11000],
sorting=['Date'],
columns=[dd.EngEng['date'],
dd.EngEng['gender'],
dd.EngEng['idState'],
dd.EngEng['state'],
dd.EngEng['county'],
dd.EngEng['ageRKI']])
# NeuerFall: Infected (incl. recovered) over "dateToUse" for every county ("Landkreis"):
gbNFc = df[df.NeuerFall >= 0].groupby([IdLandkreis, Landkreis, dateToUse]) \
.agg({AnzahlFall: sum})
gbNFc_cs = gbNFc.groupby(level=1).cumsum().reset_index()
# output
if split_berlin:
filename = 'infected_county_split_berlin'
if rep_date:
filename_orig = filename + '_repdate'
else:
filename_orig = filename
gd.write_dataframe(gbNFc_cs, directory, filename_orig + '_rki', file_format)
if impute_dates or moving_average > 0:
gbNFc_cs = modifyDataframeSeries.impute_and_reduce_df(
gbNFc_cs,
{dd.EngEng["idCounty"]: sorted(set(df[dd.EngEng["idCounty"]].unique()))},
['Confirmed'],
impute='forward', moving_average=moving_average)
filename = gd.append_filename(filename, impute_dates, moving_average)
if rep_date:
filename = filename + '_repdate'
gd.write_dataframe(gbNFc_cs, directory, filename + '_rki', file_format)
else:
filename = 'infected_county'
if rep_date:
filename_orig = filename + '_repdate'
else:
filename_orig = filename
gd.write_dataframe(gbNFc_cs, directory, filename_orig + '_rki', file_format)
if impute_dates or moving_average > 0:
gbNFc_cs = modifyDataframeSeries.impute_and_reduce_df(
gbNFc_cs,
{dd.EngEng["idCounty"]: sorted(set(df[dd.EngEng["idCounty"]].unique()))},
['Confirmed'],
impute='forward', moving_average=moving_average)
filename = gd.append_filename(filename, impute_dates, moving_average)
if rep_date:
filename = filename + '_repdate'
gd.write_dataframe(gbNFc_cs, directory, filename + '_rki', file_format)
# infected (incl recovered), deaths and recovered together
if not split_berlin:
dfF = geoger.merge_df_counties(
dfF, 11000, geoger.CountyMerging[11000],
sorting=[dd.EngEng['date']],
columns=[dd.EngEng['date'],
dd.EngEng['gender'],
dd.EngEng['idState'],
dd.EngEng['state'],
dd.EngEng['county'],
dd.EngEng['ageRKI']])
gbAllC = dfF.groupby([IdLandkreis, Landkreis, dateToUse]).\
agg({AnzahlFall: sum, AnzahlTodesfall: sum, AnzahlGenesen: sum})
gbAllC_cs = gbAllC.groupby(level=1).cumsum().reset_index()
# output
if split_berlin:
filename = 'all_county_split_berlin'
if rep_date:
filename_orig = filename + '_repdate'
else:
filename_orig = filename
gd.write_dataframe(gbAllC_cs, directory, filename_orig + '_rki', file_format)
if impute_dates or moving_average > 0:
gbAllC_cs = modifyDataframeSeries.impute_and_reduce_df(
gbAllC_cs,
{dd.EngEng["idCounty"]: sorted(set(df[dd.EngEng["idCounty"]].unique()))},
['Confirmed', 'Deaths', 'Recovered'],
impute='forward', moving_average=moving_average)
filename = gd.append_filename(filename, impute_dates, moving_average)
if rep_date:
filename = filename + '_repdate'
gd.write_dataframe(gbAllC_cs, directory, filename + '_rki', file_format)
else:
filename = 'all_county'
if rep_date:
filename_orig = filename + '_repdate'
else:
filename_orig = filename
gd.write_dataframe(gbAllC_cs, directory, filename_orig + '_rki', file_format)
if impute_dates or moving_average > 0:
gbAllC_cs = modifyDataframeSeries.impute_and_reduce_df(
gbAllC_cs,
{dd.EngEng["idCounty"]: sorted(set(df[dd.EngEng["idCounty"]].unique()))},
['Confirmed', 'Deaths', 'Recovered'],
impute='forward', moving_average=moving_average)
filename = gd.append_filename(filename, impute_dates, moving_average)
if rep_date:
filename = filename + '_repdate'
gd.write_dataframe(gbAllC_cs, directory, filename + '_rki', file_format)
######### Data whole Germany different gender ##################
# infected (incl recovered), deaths and recovered together
gbAllG = dfF.groupby([Geschlecht, dateToUse]) \
.agg({AnzahlFall: sum, AnzahlTodesfall: sum, AnzahlGenesen: sum})
gbAllG_cs = gbAllG.groupby(level=0).cumsum().reset_index()
# output
filename = 'all_gender'
if rep_date:
filename_orig = filename + '_repdate'
else:
filename_orig = filename
gd.write_dataframe(gbAllG_cs, directory, filename_orig + '_rki', file_format)
if impute_dates or moving_average > 0:
gbAllG_cs = modifyDataframeSeries.impute_and_reduce_df(
gbAllG_cs,
{dd.EngEng["gender"]: list(df[dd.EngEng["gender"]].unique())},
['Confirmed', 'Deaths', 'Recovered'],
impute='forward', moving_average=moving_average)
filename = gd.append_filename(filename, impute_dates, moving_average)
if rep_date:
filename = filename + '_repdate'
gd.write_dataframe(gbAllG_cs, directory, filename + '_rki', file_format)
if make_plot:
dfF.groupby(Geschlecht) \
.agg({AnzahlFall: sum, AnzahlTodesfall: sum, AnzahlGenesen: sum}) \
.plot(title='COVID-19 infections, deaths, recovered', grid=True,
kind='bar')
plt.tight_layout()
plt.show()
############################# Gender and State ######################################################
# infected (incl recovered), deaths and recovered together
gbAllGState = dfF.groupby([IdBundesland, Bundesland, Geschlecht, dateToUse]) \
.agg({AnzahlFall: sum, AnzahlTodesfall: sum, AnzahlGenesen: sum})
gbAllGState_cs = gbAllGState.groupby(level=[1, 2]).cumsum().reset_index()
# output
filename = 'all_state_gender'
if rep_date:
filename_orig = filename + '_repdate'
else:
filename_orig = filename
gd.write_dataframe(gbAllGState_cs, directory, filename_orig + '_rki', file_format)
if impute_dates or moving_average > 0:
gbAllGState_cs = modifyDataframeSeries.impute_and_reduce_df(
gbAllGState_cs,
{dd.EngEng["idState"]: geoger.get_state_ids(),
dd.EngEng["gender"]: list(df[dd.EngEng["gender"]].unique())},
['Confirmed', 'Deaths', 'Recovered'],
impute='forward', moving_average=moving_average)
filename = gd.append_filename(filename, impute_dates, moving_average)
if rep_date:
filename = filename + '_repdate'
gd.write_dataframe(gbAllGState_cs, directory, filename + '_rki', file_format)
############# Gender and County #####################
gbAllGCounty = dfF.groupby([IdLandkreis, Landkreis, Geschlecht, dateToUse]) \
.agg({AnzahlFall: sum, AnzahlTodesfall: sum, AnzahlGenesen: sum})
gbAllGCounty_cs = gbAllGCounty.groupby(level=[1, 2]).cumsum().reset_index()
# output
if split_berlin:
filename = 'all_county_gender_split_berlin'
if rep_date:
filename_orig = filename + '_repdate'
else:
filename_orig = filename
gd.write_dataframe(gbAllGCounty_cs, directory, filename_orig + '_rki', file_format)
if impute_dates or moving_average > 0:
gbAllGCounty_cs = modifyDataframeSeries.impute_and_reduce_df(
gbAllGCounty_cs,
{dd.EngEng["idCounty"]: sorted(set(df[dd.EngEng["idCounty"]].unique())),
dd.EngEng["gender"]: list(df[dd.EngEng["gender"]].unique())},
['Confirmed', 'Deaths', 'Recovered'],
impute='forward', moving_average=moving_average)
filename = gd.append_filename(filename, impute_dates, moving_average)
if rep_date:
filename = filename + '_repdate'
gd.write_dataframe(gbAllGCounty_cs, directory, filename + '_rki', file_format)
else:
filename = 'all_county_gender'
if rep_date:
filename_orig = filename + '_repdate'
else:
filename_orig = filename
gd.write_dataframe(gbAllGCounty_cs, directory, filename_orig + '_rki', file_format)
if impute_dates or moving_average > 0:
gbAllGCounty_cs = modifyDataframeSeries.impute_and_reduce_df(
gbAllGCounty_cs,
{dd.EngEng["idCounty"]: sorted(set(df[dd.EngEng["idCounty"]].unique())),
dd.EngEng["gender"]: list(df[dd.EngEng["gender"]].unique())},
['Confirmed', 'Deaths', 'Recovered'],
impute='forward', moving_average=moving_average)
filename = gd.append_filename(filename, impute_dates, moving_average)
if rep_date:
filename = filename + '_repdate'
gd.write_dataframe(gbAllGCounty_cs, directory, filename + '_rki', file_format)
######### Data whole Germany different ages ####################
# infected (incl recovered), deaths and recovered together
gbAllA = dfF.groupby([Altersgruppe, dateToUse]) \
.agg({AnzahlFall: sum, AnzahlTodesfall: sum, AnzahlGenesen: sum})
gbAllA_cs = gbAllA.groupby(level=0).cumsum().reset_index()
# output
filename = 'all_age'
if rep_date:
filename_orig = filename + '_repdate'
else:
filename_orig = filename
gd.write_dataframe(gbAllA_cs, directory, filename_orig + '_rki', file_format)
if impute_dates or moving_average > 0:
gbAllA_cs = modifyDataframeSeries.impute_and_reduce_df(
gbAllA_cs,
{dd.EngEng["ageRKI"]: sorted(set(df[dd.EngEng["ageRKI"]].unique()))},
['Confirmed', 'Deaths', 'Recovered'],
impute='forward', moving_average=moving_average)
filename = gd.append_filename(filename, impute_dates, moving_average)
if rep_date:
filename = filename + '_repdate'
gd.write_dataframe(gbAllA_cs, directory, filename + '_rki', file_format)
if make_plot:
dfF.groupby(Altersgruppe) \
.agg({AnzahlFall: sum, AnzahlTodesfall: sum, AnzahlGenesen: sum}) \
.plot(title='COVID-19 infections, deaths, recovered for diff ages', grid=True,
kind='bar')
plt.tight_layout()
plt.show()
# Dead by "Altersgruppe":
gbNTAG = df[df.NeuerTodesfall >= 0].groupby(Altersgruppe).agg({AnzahlTodesfall: sum})
gbNTAG.plot(title='COVID-19 deaths', grid=True,
kind='bar')
plt.tight_layout()
plt.show()
############################# Age and State ######################################################
##### Age_RKI #####
# infected (incl recovered), deaths and recovered together
gbAllAgeState = dfF.groupby([IdBundesland, Bundesland, Altersgruppe, dateToUse]) \
.agg({AnzahlFall: sum, AnzahlTodesfall: sum, AnzahlGenesen: sum})
gbAllAgeState_cs = gbAllAgeState.groupby(level=[1, 2]).cumsum().reset_index()
# output
filename = 'all_state_age'
if rep_date:
filename_orig = filename + '_repdate'
else:
filename_orig = filename
gd.write_dataframe(gbAllAgeState_cs, directory, filename_orig + '_rki', file_format)
if impute_dates or moving_average > 0:
gbAllAgeState_cs = modifyDataframeSeries.impute_and_reduce_df(
gbAllAgeState_cs,
{dd.EngEng["idState"]: geoger.get_state_ids(),
dd.EngEng["ageRKI"]: sorted(set(df[dd.EngEng["ageRKI"]].unique()))},
['Confirmed', 'Deaths', 'Recovered'],
impute='forward', moving_average=moving_average)
filename = gd.append_filename(filename, impute_dates, moving_average)
if rep_date:
filename = filename + '_repdate'
gd.write_dataframe(gbAllAgeState_cs, directory, filename + '_rki', file_format)
    # TODO: uncomment if Altersgruppe2 is provided again
##### Age5 and Age10#####
# infected (incl recovered), deaths and recovered together
# gbAllAgeState = dfF.groupby([IdBundesland, Bundesland, dd.GerEng['Altersgruppe2'], dateToUse]) \
# .agg({AnzahlFall: sum, AnzahlTodesfall: sum, AnzahlGenesen: sum})
# gbAllAgeState_cs = gbAllAgeState.groupby(level=[1, 2]).cumsum().reset_index()
# output
# gd.write_dataframe(gbAllAgeState_cs, directory, "all_state_age5_rki", file_format)
##### Age10 #####
# gbAllAgeState = dfF.groupby([IdBundesland, Bundesland, 'Age10', dateToUse]) \
# .agg({AnzahlFall: sum, AnzahlTodesfall: sum, AnzahlGenesen: sum})
# gbAllAgeState_cs = gbAllAgeState.groupby(level=[1, 2]).cumsum().reset_index()
# output
# gd.write_dataframe(gbAllAgeState_cs, directory, "all_state_age10_rki", file_format)
############# Age and County #####################
gbAllAgeCounty = dfF.groupby([IdLandkreis, Landkreis, Altersgruppe, dateToUse]) \
.agg({AnzahlFall: sum, AnzahlTodesfall: sum, AnzahlGenesen: sum})
gbAllAgeCounty_cs = gbAllAgeCounty.groupby(level=[1, 2]).cumsum().reset_index()
# output
if split_berlin:
filename = 'all_county_age_split_berlin'
if rep_date:
filename_orig = filename + '_repdate'
else:
filename_orig = filename
gd.write_dataframe(gbAllAgeCounty_cs, directory, filename_orig + '_rki', file_format)
if impute_dates or moving_average > 0:
gbAllAgeCounty_cs = modifyDataframeSeries.impute_and_reduce_df(
gbAllAgeCounty_cs,
{dd.EngEng["idCounty"]: sorted(set(df[dd.EngEng["idCounty"]].unique())),
dd.EngEng["ageRKI"]: sorted(set(df[dd.EngEng["ageRKI"]].unique()))},
['Confirmed', 'Deaths', 'Recovered'],
impute='forward', moving_average=moving_average)
filename = gd.append_filename(filename, impute_dates, moving_average)
if rep_date:
filename = filename + '_repdate'
gd.write_dataframe(gbAllAgeCounty_cs, directory, filename + '_rki', file_format)
else:
filename = 'all_county_age'
if rep_date:
filename_orig = filename + '_repdate'
else:
filename_orig = filename
gd.write_dataframe(gbAllAgeCounty_cs, directory, filename_orig + '_rki', file_format)
if impute_dates or moving_average > 0:
gbAllAgeCounty_cs = modifyDataframeSeries.impute_and_reduce_df(
gbAllAgeCounty_cs,
{dd.EngEng["idCounty"]: sorted(set(df[dd.EngEng["idCounty"]].unique())),
dd.EngEng["ageRKI"]: sorted(set(df[dd.EngEng["ageRKI"]].unique()))},
['Confirmed', 'Deaths', 'Recovered'],
impute='forward', moving_average=moving_average)
            filename = gd.append_filename(filename, impute_dates, moving_average)
if rep_date:
filename = filename + '_repdate'
gd.write_dataframe(gbAllAgeCounty_cs, directory, filename + '_rki', file_format)
    # TODO: uncomment if Altersgruppe2 is provided again
#### age5 ####
# gbAllAgeCounty = dfF.groupby([IdLandkreis, Landkreis, Altersgruppe2, dateToUse]) \
# .agg({AnzahlFall: sum, AnzahlTodesfall: sum, AnzahlGenesen: sum})
# gbAllAgeCounty_cs = gbAllAgeCounty.groupby(level=[1, 2]).cumsum().reset_index()
# if split_berlin:
# gd.write_dataframe(gbAllAgeCounty_cs, directory, "all_county_age5_split_berlin_rki", file_format)
# else:
# gd.write_dataframe(gbAllAgeCounty_cs, directory, "all_county_age5_rki", file_format)
#### age10 ####
# gbAllAgeCounty = dfF.groupby( [IdLandkreis, Landkreis, 'Age10', dateToUse])\
# .agg({AnzahlFall: sum, AnzahlTodesfall: sum, AnzahlGenesen: sum})
# gbAllAgeCounty_cs = gbAllAgeCounty.groupby(level=[1,2]).cumsum().reset_index()
# output
# if split_berlin:
# gd.write_dataframe(gbAllAgeCounty_cs, directory, "all_county_age10_split_berlin_rki", file_format)
# else:
# gd.write_dataframe(gbAllAgeCounty_cs, directory, "all_county_age10_rki", file_format)
| 5,353,962 |
def simple_url_formatter(endpoint, url):
"""
A simple URL formatter to use when no application context
is available.
:param str endpoint: the endpoint to use.
:param str url: the URL to format
"""
return u"/{}".format(url)
| 5,353,963 |
def generate_service(
name: str,
image: str,
ports: List[str] = [],
volumes: List[str] = [],
dependsOn: List[str] = [],
) -> str:
"""
    Creates a string with a docker-compose service specification.
    Each list argument holds the values to be added to the section of the
    service config named after the parameter, e.g. the volumes argument
    populates the volumes section.
"""
indent = ' '
service = "{s}{name}:\n{s}{s}image: {image}\n".format(
s=indent,
name=name,
image=image,
)
if ports:
service += "{s}ports:\n".format(s=indent*2)
for port in ports:
service += '{s}- "{port}"\n'.format(s=indent*3, port=port)
if volumes:
service += "{s}volumes:\n".format(s=indent*2)
for vol in volumes:
service += '{s}- {vol}\n'.format(s=indent*3, vol=vol)
if dependsOn:
service += "{s}depends_on:\n".format(s=indent*2)
for item in dependsOn:
service += '{s}- "{dep}"\n'.format(s=indent*3, dep=item)
return service
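# Usage sketch (not part of the original snippet): service name, image, ports and
# volumes below are illustrative assumptions, not values from the original project.
if __name__ == "__main__":
    print(generate_service(
        "web",
        "nginx:alpine",
        ports=["8080:80"],
        volumes=["./site:/usr/share/nginx/html:ro"],
        dependsOn=["api"],
    ))
    # Prints an indented docker-compose fragment such as:
    #   web:
    #     image: nginx:alpine
    #     ports:
    #       - "8080:80"
    #     volumes:
    #       - ./site:/usr/share/nginx/html:ro
    #     depends_on:
    #       - "api"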
| 5,353,964 |
def read_integer(msg=None, error_msg=None):
"""
Asks the user for an integer value (int or long)
    :param msg: The message displayed to the user.
    :param error_msg: The message displayed to the user in case they did not enter a valid int or long.
:return: An int or a long from the user.
"""
res = raw_input(msg)
try:
return int(res)
except (TypeError, ValueError):
pass
try:
return long(res)
except (TypeError, ValueError):
pass
if error_msg is not None:
print(error_msg)
return read_integer(msg=msg, error_msg=error_msg)
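# Usage sketch (Python 2, matching the snippet's raw_input/long usage); the prompt
# strings are illustrative.
# age = read_integer(msg="Enter a number: ",
#                    error_msg="That is not a valid integer, try again.")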
| 5,353,965 |
def set_async_call_stack_depth(maxDepth: int) -> dict:
"""Enables or disables async call stacks tracking.
Parameters
----------
maxDepth: int
Maximum depth of async call stacks. Setting to `0` will effectively disable collecting async
call stacks (default).
"""
return {
"method": "Debugger.setAsyncCallStackDepth",
"params": {"maxDepth": maxDepth},
}
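# Usage sketch: the returned dict is a Chrome DevTools Protocol command; sending it
# over an actual CDP session/transport is up to the caller and not shown here.
command = set_async_call_stack_depth(32)
# command == {"method": "Debugger.setAsyncCallStackDepth", "params": {"maxDepth": 32}}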
| 5,353,966 |
def main():
"""合計50問を生成して標準出力に出力する"""
print("No,条件式,True | False,説明")
ls = create_basic_conditions(10) + create_logical_conditions(20)
for no, i in enumerate(range(len(ls))):
print(no+1, ls[i], sep=',')
| 5,353,967 |
def print_output(name, src, toStdErr):
"""
Relay the output stream to stdout line by line
:param name:
:param src: source stream
:param toStdErr: flag set if stderr is to be the dest
:return:
"""
global needPassword
debug ("starting printer for %s" % name )
line = ""
while not finished:
(line, done) = read(src, line)
if done:
out(toStdErr, line + "\n")
flush(toStdErr)
if line.find("Enter password for") >= 0:
needPassword = True
line = ""
out(toStdErr, line)
# closedown: read remainder of stream
c = src.read(1)
while c!="" :
c = c.decode('utf-8')
out(toStdErr, c)
if c == "\n":
flush(toStdErr)
c = src.read(1)
flush(toStdErr)
src.close()
| 5,353,968 |
def save_empty_abundance_file(ngrid, outputfilepath='.'):
"""Dummy abundance file with only zeros"""
Z_atomic = np.arange(1, 31)
abundancedata = {'cellid': range(1, ngrid + 1)}
for atomic_number in Z_atomic:
abundancedata[f'Z={atomic_number}'] = np.zeros(ngrid)
# abundancedata['Z=28'] = np.ones(ngrid)
abundancedata = pd.DataFrame(data=abundancedata)
abundancedata = abundancedata.round(decimals=5)
abundancedata.to_csv(Path(outputfilepath) / 'abundances.txt', header=False, sep='\t', index=False)
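# Usage sketch: writes a tab-separated 'abundances.txt' (one row per grid cell,
# columns Z=1..30 all zero) into the given output folder; the path is an assumption.
# save_empty_abundance_file(ngrid=100, outputfilepath='/tmp')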
| 5,353,969 |
def percError(predicted, observed):
"""Percentage Error
Parameters
==========
predicted : array-like
Array-like (list, numpy array, etc.) of predictions
observed : array-like
Array-like (list, numpy array, etc.) of observed values of scalar
quantity
Returns
=======
perc : float
Array of forecast errors expressed as a percentage
"""
err, pred, obse = forecastError(predicted, observed, full=True)
res = err/obse
return 100*res
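# Usage sketch, assuming forecastError() (defined elsewhere in the original module)
# returns predicted - observed: a forecast of 110 against an observation of 100
# would come out as a +10% error.
# percError([110.0], [100.0])  # -> array([10.])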
| 5,353,970 |
def _dict_eq(a, b):
"""
Compare dictionaries using their items iterators and loading as much
as half of each into a local temporary store. For comparisons of ordered
dicts, memory usage is nil. For comparisons of dicts whose iterators
differ in sequence maximally, memory consumption is O(N). Execution time
is O(N).
:param a: one dict
:param b: another dict
:return: True if they're the same, false otherwise
"""
# The memory consumption here is to make a linear improvement in execution
# time. In the case of a dict backed by Redis, it is faster to iterate
# over N items than to retrieve each one, by a factor of 10 or more
# because of the reduced round-trips to the server.
size = len(a)
if size != len(b):
return False
# Iterate over both dicts. Compare items. If the same ones come up
# at the same time, great, they match. If different ones come up,
# store them in the am and bm collections of misses. Check for prior
# misses that may be matched by the new elements.
bi = iteritems(b)
am = {}
bm = {}
for ak, av in iteritems(a):
bk, bv = next(bi)
if ak == bk:
if av != bv:
return False
else: # keys differ
if ak in bm:
if bm[ak] == av:
del bm[ak]
else:
return False
else:
am[ak] = av
if bk in am:
if am[bk] == bv:
del am[bk]
else:
return False
else:
bm[bk] = bv
if len(am) + len(bm) > size:
return False
return len(am) + len(bm) == 0
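# Usage sketch: behaves like ==, but walks both item iterators so that (e.g. for a
# Redis-backed mapping) at most about half of each dict is buffered locally.
# _dict_eq({'a': 1, 'b': 2}, {'b': 2, 'a': 1})  -> True
# _dict_eq({'a': 1, 'b': 2}, {'a': 1, 'b': 3})  -> False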
| 5,353,971 |
def avoid_snakes(my_head: Dict[str, int], snakes: List[dict], possible_moves: List[str]) -> List[str]:
"""
my_head: Dictionary of x/y coordinates of the Battlesnake head.
e.g. {"x": 0, "y": 0}
snakes: List of dictionaries of x/y coordinates for every segment of a Battlesnake.
e.g. [ {"x": 0, "y": 0}, {"x": 1, "y": 0}, {"x": 2, "y": 0} ]
possible_moves: List of strings. Moves to pick from.
e.g. ["up", "down", "left", "right"]
return: The list of remaining possible_moves not blocked by other snakes
"""
for snake in snakes:
for segment in snake["body"]:
if my_head["x"] - 1 == segment["x"] and my_head["y"] == segment["y"]:
print("Segment to the left")
remove_move("left", possible_moves)
if my_head["x"] + 1 == segment["x"] and my_head["y"] == segment["y"]:
print("Segment to the right")
remove_move("right", possible_moves)
if my_head["x"] == segment["x"] and my_head["y"] - 1 == segment["y"]:
print("Segment below")
remove_move("down", possible_moves)
if my_head["x"] == segment["x"] and my_head["y"] + 1 == segment["y"]:
print("Segment above")
remove_move("up", possible_moves)
# We're going to be super conservative if we're near another head
# to avoid head on collisions
if my_head["x"] - 2 == snake["head"]["x"] and my_head["y"] == snake["head"]["y"]:
print("Dodge the head!")
remove_move("left", possible_moves)
if my_head["x"] + 2 == snake["head"]["x"] and my_head["y"] == snake["head"]["y"]:
print("Dodge the head!")
remove_move("right", possible_moves)
if my_head["x"] == snake["head"]["x"] and my_head["y"] - 2 == snake["head"]["y"]:
print("Dodge the head!")
remove_move("down", possible_moves)
if my_head["x"] == snake["head"]["x"] and my_head["y"] + 2 == snake["head"]["y"]:
print("Dodge the head!")
remove_move("up", possible_moves)
if my_head["x"] - 1 == snake["head"]["x"] and my_head["y"] + 1 == snake["head"]["y"]:
print("Dodge the head!")
remove_move("left", possible_moves)
remove_move("up", possible_moves)
if my_head["x"] - 1 == snake["head"]["x"] and my_head["y"] - 1 == snake["head"]["y"]:
print("Dodge the head!")
remove_move("left", possible_moves)
remove_move("down", possible_moves)
if my_head["x"] + 1 == snake["head"]["x"] and my_head["y"] + 1 == snake["head"]["y"]:
print("Dodge the head!")
remove_move("right", possible_moves)
remove_move("up", possible_moves)
if my_head["x"] + 1 == snake["head"]["x"] and my_head["y"] - 1 == snake["head"]["y"]:
print("Dodge the head!")
remove_move("right", possible_moves)
remove_move("down", possible_moves)
return possible_moves
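# Usage sketch with made-up coordinates (remove_move() is a helper defined elsewhere
# in the original module, so this call is illustrative rather than standalone):
# moves = avoid_snakes(
#     {"x": 5, "y": 5},
#     [{"head": {"x": 7, "y": 5}, "body": [{"x": 7, "y": 5}, {"x": 8, "y": 5}]}],
#     ["up", "down", "left", "right"],
# )
# "right" is dropped because an opposing head sits two squares to the right.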
| 5,353,972 |
def getChipZip(request, path):
"""Download the AutoPH file, converted to zip compression"""
from django.core.servers.basehttp import FileWrapper
import zipfile
logger = logging.getLogger(__name__)
path = os.path.join("/", path)
try:
name = os.path.basename(path)
name = name.split(".")[0]
# initialize zip archive file
zipfilename = os.path.join("/tmp", "%s.zip" % name)
zipobj = zipfile.ZipFile(zipfilename, mode="w", allowZip64=True)
# open tar.bz2 file, extract all members and write to zip archive
tf = tarfile.open(os.path.join(path))
for tarobj in tf.getmembers():
contents = tf.extractfile(tarobj)
zipobj.writestr(tarobj.name, contents.read())
zipobj.close()
response = HttpResponse(
FileWrapper(open(zipfilename)), mimetype="application/zip"
)
response["Content-Disposition"] = "attachment; filename=%s" % os.path.basename(
zipfilename
)
os.unlink(zipfilename)
return response
except Exception as inst:
logger.exception(traceback.format_exc())
ctxd = {
"error_state": 1,
"error": [["Error", "%s" % inst], ["Error type", "%s" % type(inst)]],
"locations_list": [],
"base_site_name": "Error",
"files": [],
"protonDiags": [],
}
ctx = RequestContext(request, ctxd)
return render_to_response(
"rundb/configure/ion_chips.html", context_instance=ctx
)
| 5,353,973 |
def m_unicom_online_time2_0(seq):
"""
    Get the code corresponding to the China Unicom time-on-network (tenure) bucket.
    :param seq: China Unicom time-on-network interval
    :return: code
    example:
        :seq: ["[0-1]"]
        :return: ["(0_6)"]
"""
if not seq:
return []
if seq[0] in ["[0-1]", "(1-2]", "[3-6]"]:
seq = ["(0_6)"]
elif seq[0] in ["[7-12]"]:
seq = ["[6_12)"]
elif seq[0] in ["[13-24]"]:
seq = ["[12_24)"]
elif seq[0] in ["[25-36]", "[37,+)"]:
seq = ["[24_+)"]
return seq
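# Usage sketch: maps the raw tenure bucket(s) onto the coarser encoded interval.
# m_unicom_online_time2_0(["[0-1]"])   -> ["(0_6)"]
# m_unicom_online_time2_0(["[7-12]"])  -> ["[6_12)"]
# m_unicom_online_time2_0([])          -> []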
| 5,353,974 |
def convert_config_gui_structure(config_gui_structure, port, instance_id,
is_port_in_database, conf):
"""
Converts the internal data structure to a dictionary which follows the
"Configuration file structure", see setup.rst
:param config_gui_structure: Data structure used to hold and show
configuration information in the Gui
:return A dictionary which follows the "Configuration file structure",
see setup.rst
"""
config_dict = identify_existing_config_file(port, conf.OPRP_DIR_PATH)
if not is_port_in_database and config_dict:
file_path = get_config_file_path(port, conf.OPRP_DIR_PATH)
LOGGER.error("The identified configuration file does not exist in the database. "
"File path: %s" % file_path)
if not (is_port_in_database and config_dict):
config_dict = get_default_client()
config_dict = clear_config_keys(config_dict)
if instance_id:
config_dict[CONFIG_DICT_INSTANCE_ID_KEY] = instance_id
if contains_dynamic_discovery_info(config_gui_structure):
gui_config = GuiConfig(config_gui_structure)
config_dict['srv_discovery_url'] = gui_config.get_dynamic_discovery_issuer()
elif config_gui_structure['fetchStaticProviderInfo']['showInputFields']:
config_dict = static_provider_info_to_config_file_dict(config_gui_structure,
config_dict)
config_dict = client_registration_to_config_file_dict(config_gui_structure, config_dict)
config_dict = subject_type_to_config_file_dict(config_dict, config_gui_structure)
config_dict = profile_to_config_file_dict(config_dict, config_gui_structure)
if config_gui_structure['webfingerSubject'] != "":
config_dict['webfinger_subject'] = config_gui_structure['webfingerSubject']
if config_gui_structure['loginHint'] != "":
config_dict['login_hint'] = config_gui_structure['loginHint']
if config_gui_structure['uiLocales'] != "":
config_dict['ui_locales'] = config_gui_structure['uiLocales']
if config_gui_structure['claimsLocales'] != "":
config_dict['claims_locales'] = config_gui_structure['claimsLocales']
if config_gui_structure['acrValues'] != "":
config_dict['acr_values'] = config_gui_structure['acrValues']
if config_gui_structure['webfinger_url'] != "":
config_dict['webfinger_url'] = config_gui_structure['webfinger_url']
if config_gui_structure['webfinger_email'] != "":
config_dict['webfinger_email'] = config_gui_structure['webfinger_email']
return config_dict
| 5,353,975 |
def message_similarity_hard(m1, m2):
"""
    Inputs: one-dimensional numpy arrays of arbitrary (matching) length.
"""
return int(np.all(m1==m2))
| 5,353,976 |
def test_uiuc_imports():
"""
Test that UIUC files can be imported without error
"""
for airfoil_file in AIRFOIL_FILES:
airfoil_file_name = os.path.basename(airfoil_file)
print(airfoil_file_name)
if airfoil_file_name in AIRFOIL_FILES_BLACKLIST:
continue
upper, lower = io.import_airfoil_data(airfoil_file)
| 5,353,977 |
def identity_block(input_tensor, kernel_size, filters, stage, block):
"""
The identity_block is the block that has no conv layer at shortcut
Arguments
input_tensor: input tensor
        kernel_size: default 3, the kernel size of the middle conv layer of the main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
"""
nb_filter1, nb_filter2, nb_filter3 = filters
bn_axis = 3
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Convolution2D(nb_filter1, 1, 1, name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Convolution2D(nb_filter2, kernel_size, kernel_size,
border_mode='same', name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Convolution2D(nb_filter3, 1, 1, name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
x = merge([x, input_tensor], mode='sum')
x = Activation('relu')(x)
return x
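# Usage sketch (legacy Keras 1 API, matching the snippet); the input shape below is
# an assumption for a typical ResNet stage-2 block:
# inp = Input(shape=(56, 56, 256))
# out = identity_block(inp, kernel_size=3, filters=[64, 64, 256], stage=2, block='a')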
| 5,353,978 |
def convert_to_squad(story_summary_content, question_content, set_type):
"""
    :param story_summary_content:
    :param question_content:
    :param set_type:
    :return: formatted SQuAD data
    In this initial version we focus only on the context and the question,
    so the answer part is ignored for now (dummy answers are inserted)
"""
squad_formatted_content = dict()
squad_formatted_content['version'] = 'narrativeqa_squad_format'
data = []
content = story_summary_content
if set_type != 'all':
content = story_summary_content[story_summary_content['set'] == set_type]
for datum in content.itertuples(index=False):
#print(datum.summary)
data_ELEMENT = dict()
data_ELEMENT['title'] = 'dummyTitle'
paragraphs = []
paragraphs_ELEMENT = dict()
superdocument = datum.summary
paragraphs_ELEMENT['context'] = superdocument
qas = []
sub_datum = question_content[question_content['document_id'] == datum.document_id]
for q_datum in sub_datum.itertuples():
# print(indx)
#print(q_datum)
qas_ELEMENT = dict()
ANSWERS_ELEMENT = dict()
qas_ELEMENT_ANSWERS = []
qas_ELEMENT['id'] = q_datum.document_id + '-' + str(q_datum.Index)
qas_ELEMENT['question'] = q_datum.question
ANSWERS_ELEMENT['answer_start'] = -1
ANSWERS_ELEMENT['text'] = 'dummyAnswer'
qas_ELEMENT_ANSWERS.append(ANSWERS_ELEMENT)
qas_ELEMENT['answers'] = qas_ELEMENT_ANSWERS
qas.append(qas_ELEMENT)
paragraphs_ELEMENT['qas'] = qas
paragraphs.append(paragraphs_ELEMENT)
data_ELEMENT['paragraphs'] = paragraphs
data.append(data_ELEMENT)
squad_formatted_content['data'] = data
return squad_formatted_content
| 5,353,979 |
def push_output(process, primary_fd, out_buffer: TextBuffer, process_state: ProcessState,
is_interactive_session: bool, on_error: callable):
"""
Receive output from running process and forward to streams, capture
:param process:
:param primary_fd:
:param out_buffer:
:param process_state:
:param is_interactive_session:
:param on_error:
:return:
"""
poller = select.epoll()
poller.register(primary_fd, select.EPOLLIN)
# terminal window size updating
terminal_update_time = 3 # 3 seconds
last_terminal_update = time()
should_update_terminal_size = True
try:
copy_terminal_size(sys.stdout, primary_fd)
except OSError as e:
if e.errno == 25:
should_update_terminal_size = False
else:
raise
if is_interactive_session:
poller.register(sys.stdin, select.EPOLLIN)
while process.poll() is None:
for r, flags in poller.poll(timeout=0.01):
try:
                if is_interactive_session and sys.stdin.fileno() == r:
d = os.read(r, 10240)
os.write(primary_fd, d)
                elif primary_fd == r:
o = os.read(primary_fd, 10240)
# terminal window size updating
if should_update_terminal_size and time() - last_terminal_update >= terminal_update_time:
copy_terminal_size(sys.stdout, primary_fd)
last_terminal_update = time()
# propagate to stdout
if o:
decoded = carefully_decode(o, 'utf-8')
sys.stdout.write(decoded)
sys.stdout.flush()
out_buffer.write(decoded)
if process_state.has_exited:
return True
except Exception as exc:
process_state.exception = exc
process_state.has_exited = True
on_error()
return
| 5,353,980 |
def check_conditions(conditions, variable_dict, domain_dict, domain_list):
"""A function that checks if the generated variables pass the conditions and generates new ones until they do.
:param conditions: The conditions of the template.
    :param variable_dict: Dict of variables.
:param domain_dict: the domain of the variables.
:param domain_list: a dict with the domain list.
    :return: Dict of variables that pass the conditions of the given template.
"""
conditions = remove_unnecessary(conditions)
# Check conditions --> if false: change a variable -> check conditions
inserted_conditions = string_replace(conditions, variable_dict)
while not parse_expr(latex_to_sympy(inserted_conditions), transformations=standard_transformations +
(convert_xor, implicit_multiplication_application,), global_dict=None, evaluate=True):
variable_to_change = choice(list(variable_dict.keys())) # Chose a random key from variable_dict
if domain_list[variable_to_change]:
variable_dict[variable_to_change] = make_number_from_list(domain_dict[variable_to_change])
else:
variable_dict[variable_to_change] = new_random_value(variable_to_change, domain_dict)
inserted_conditions = string_replace(conditions, variable_dict)
return variable_dict
| 5,353,981 |
def set_pin_on_teaching_page(request,
section_label,
pin=True):
"""
if pin=True, pin the section on teaching page
if pin=False, unpin the section from teaching page
@except InvalidSectionID
@except NotSectionInstructorException
@except UserCourseDisplay.DoesNotExist
"""
section = get_section_by_label(section_label)
check_section_instructor(section, get_person_of_current_user(request))
# not to pin a primary section
if section.is_primary_section:
return False
return UserCourseDisplay.set_pin(
get_user_model(request), section_label, pin)
| 5,353,982 |
def inject_python_resources() -> dict[str, Any]:
"""
Inject common resources to be used in Jinja templates.
"""
return dict(
isinstance=isinstance,
zip=zip,
enumerate=enumerate,
len=len,
str=str,
bool=bool,
int=int,
float=float,
)
| 5,353,983 |
def get_feature_subsets_options(study, data_types):
"""Given a study and list of data types, get the relevant feature
subsets
"""
feature_subsets = ['custom']
if 'expression' in data_types:
try:
feature_subsets.extend(study.expression.feature_subsets.keys())
except AttributeError:
pass
if 'splicing' in data_types:
try:
feature_subsets.extend(study.splicing.feature_subsets.keys())
except AttributeError:
pass
# Cast to "set" to get rid of duplicates, then back to list because you
# can't sort a set, then back to list after sorting because you get
# an iterator... yeah ....
feature_subsets = list(natural_sort(list(set(feature_subsets))))
# Make sure "variant" is first because all datasets have that
# first remove 'variant' if it is there, then add it at the front
try:
feature_subsets.pop(feature_subsets.index('variant'))
except ValueError:
pass
feature_subsets.insert(0, 'variant')
return feature_subsets
| 5,353,984 |
def _loop(params, context):
"""
Loop through some actions in the context of a L{Runner} run.
"""
for item in params['iterable']:
context.variables['item'] = item
result = yield context.runner.runActions(params['actions'], context)
defer.returnValue(result)
| 5,353,985 |
def sample_batch_annotate_files(storage_uri):
# os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = r"C:\Users\user\Desktop\doc_ai\rmi-insights-3e257c9c456c.json"
"""Perform batch file annotation."""
mime_type = "application/pdf"
client = vision_v1.ImageAnnotatorClient()
gcs_source = {"uri": storage_uri}
input_config = {"gcs_source": gcs_source, "mime_type": mime_type}
features = [{"type": enums.Feature.Type.DOCUMENT_TEXT_DETECTION}]
# The service can process up to 5 pages per document file.
# Here we specify the first, second, and last page of the document to be
# processed.
pages = [1, 2, 3]
requests = [{"input_config": input_config, "features": features, "pages": pages}]
response = client.batch_annotate_files(requests)
#Accessing Internal memory 1
f = open("/home/srinidhi/angular/uploads/visionoutput.txt","w+")
for image_response in response.responses[0].responses:
f.write(image_response.full_text_annotation.text)
f.close()
#Reading it line by line
f1 = open("/home/srinidhi/angular/uploads/visionoutput.txt","r")
list_output = []
line = f1.readlines()
    line = [x.rstrip('\n').rstrip() for x in line]
print(line)
#Storing in a dictionary
dict_output ={}
dict_output['data'] = line
#Uploading file to bucket
#Filename is the name you want to store in bucket
storage_client = storage.Client()
bucket = storage_client.get_bucket('sample_pdf')
filename ="visionoutput.json"
blob = bucket.blob(filename)
#Removing Internal memory
# os.remove("visionoutput.txt")
# os.remove("visionoutput.json")
| 5,353,986 |
def possibilities(q=0, *num):
"""
    :param q: Number of squares to consider
    :param num: In how many squares the sum of the number of bombs is 1
    :return:
    pos -> Possible distributions of the bombs
    tot -> Number of squares in which there is only one bomb
    i -> Start of the count of the squares where the sum of the bombs is 1
"""
lbn = []
lp = []
num = str(num).replace('(', '[').replace(')', ']')
num = json.loads(num)
for c4 in range(0, len(num)):
num[c4] += ['']
for c1 in range(0, 2 ** q):
pos = []
bn = str(bin(c1)).replace('0b', '') # bn = int(bn, base=2) -> Reverte o processo
bn = bn.rjust(q, '0')
pos += bn
ts = 0
for c2 in range(0, len(num)):
i = num[c2][0]
tot = num[c2][1] # print(bn, tot, pos)
s = 0
for c3 in range(i, tot + i):
if pos[c3] == '1':
s += 1
if num[c2][3] != '':
# print(num[c2], pos[num[c2][3]])
if pos[num[c2][3]] == '1':
s += 1
if s == num[c2][2]:
ts += 1
# print(bn, s)
if ts == len(num):
lbn += [bn]
for c5 in range(0, q):
lp += [0]
for item in lbn:
for c6 in range(0, q):
if item[c6] == '1':
lp[c6] += 1
return lp
| 5,353,987 |
def parse_page_options(text):
"""
Parses special fields in page header. The header is separated by a line
with 3 dashes. It contains lines of the "key: value" form, which define
page options.
Returns a dictionary with such options. Page text is available as option
named "text".
"""
if type(text) != unicode:
raise TypeError('parse_page_options() expects Unicode text, not "%s".' % text.__class__.__name__)
options = dict()
text = text.replace('\r\n', '\n') # fix different EOL types
parts = text.split(u'\n---\n', 1)
if len(parts) > 1:
for line in parts[0].split('\n'):
if not line.startswith('#'):
kv = line.split(':', 1)
if len(kv) == 2:
k = kv[0].strip()
v = kv[1].strip()
if k.endswith('s'):
v = re.split('[\s,]+', v)
options[k] = v
options['text'] = parts[-1]
return options
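# Usage sketch (Python 2, matching the snippet's unicode check): header keys that end
# in 's' are split into lists, and the page body is returned under 'text'.
# opts = parse_page_options(u"title: Home\ntags: news, misc\n---\nPage body.")
# opts['tags'] == [u'news', u'misc'] and opts['text'] == u'Page body.'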
| 5,353,988 |
def _read_16_bit_message(prefix, payload_base, prefix_type, is_time,
data, offset, eieio_header):
""" Return a packet containing 16 bit elements
"""
if payload_base is None:
if prefix is None:
return EIEIO16BitDataMessage(eieio_header.count, data, offset)
elif prefix_type == EIEIOPrefix.LOWER_HALF_WORD:
return EIEIO16BitLowerKeyPrefixDataMessage(
prefix, eieio_header.count, data, offset)
elif prefix_type == EIEIOPrefix.UPPER_HALF_WORD:
return EIEIO16BitUpperKeyPrefixDataMessage(
prefix, eieio_header.count, data, offset)
elif payload_base is not None and not is_time:
if prefix is None:
return EIEIO16BitPayloadPrefixDataMessage(
payload_base, eieio_header.count, data, offset)
elif prefix_type == EIEIOPrefix.LOWER_HALF_WORD:
return EIEIO16BitPayloadPrefixLowerKeyPrefixDataMessage(
prefix, payload_base, eieio_header.count, data, offset)
elif prefix_type == EIEIOPrefix.UPPER_HALF_WORD:
return EIEIO16BitPayloadPrefixUpperKeyPrefixDataMessage(
prefix, payload_base, eieio_header.count, data, offset)
elif payload_base is not None and is_time:
if prefix is None:
return EIEIO16BitTimedPayloadPrefixDataMessage(
payload_base, eieio_header.count, data, offset)
elif prefix_type == EIEIOPrefix.LOWER_HALF_WORD:
return EIEIO16BitTimedPayloadPrefixLowerKeyPrefixDataMessage(
prefix, payload_base, eieio_header.count, data, offset)
elif prefix_type == EIEIOPrefix.UPPER_HALF_WORD:
return EIEIO16BitTimedPayloadPrefixUpperKeyPrefixDataMessage(
prefix, payload_base, eieio_header.count, data, offset)
return EIEIOWithoutPayloadDataMessage(eieio_header, data, offset)
| 5,353,989 |
def test_linear():
""" Tests that KernelExplainer returns the correct result when the model is linear.
(as per corollary 1 of https://arxiv.org/abs/1705.07874)
"""
np.random.seed(2)
x = np.random.normal(size=(200, 3), scale=1)
# a linear model
def f(x):
return x[:, 0] + 2.0*x[:, 1]
phi = shap.KernelExplainer(f, x).shap_values(x, l1_reg="num_features(2)", silent=True)
assert phi.shape == x.shape
# corollary 1
expected = (x - x.mean(0)) * np.array([1.0, 2.0, 0.0])
np.testing.assert_allclose(expected, phi, rtol=1e-3)
| 5,353,990 |
def csm_shape(csm):
"""
Return the shape field of the sparse variable.
"""
return csm_properties(csm)[3]
| 5,353,991 |
def hyperlist_to_labellist(hyperlist):
"""
:param hyperlist:
:return: labellist, labels to use for plotting
"""
return [hyper_to_label(hyper) for hyper in hyperlist]
| 5,353,992 |
def _pretty_print_dict(dictionary):
"""Generates a pretty-print formatted version of the input JSON.
Args:
dictionary (dict): the JSON string to format.
Returns:
str: pretty-print formatted string.
"""
return json.dumps(_ascii_encode_dict(dictionary), indent=2, sort_keys=True)
| 5,353,993 |
def _set_version(obj, version):
"""
Set the given version on the passed object
This function should be used with 'raw' values, any type conversion should be managed in
VersionField._set_version_value(). This is needed for future enhancement of concurrency.
"""
obj._concurrencymeta.field._set_version_value(obj, version)
| 5,353,994 |
def _bivariate_kdeplot(x, y, filled, fill_lowest,
kernel, bw, gridsize, cut, clip,
axlabel, cbar, cbar_ax, cbar_kws, ax, **kwargs):
"""Plot a joint KDE estimate as a bivariate contour plot."""
# Determine the clipping
if clip is None:
clip = [(-np.inf, np.inf), (-np.inf, np.inf)]
elif np.ndim(clip) == 1:
clip = [clip, clip]
# Calculate the KDE
if _has_statsmodels:
xx, yy, z = _statsmodels_bivariate_kde(x, y, bw, gridsize, cut, clip)
else:
xx, yy, z = _scipy_bivariate_kde(x, y, bw, gridsize, cut, clip)
# Plot the contours
n_levels = kwargs.pop("n_levels", 10)
scout, = ax.plot([], [])
default_color = scout.get_color()
scout.remove()
cmap = kwargs.pop("cmap", None)
color = kwargs.pop("color", None)
if cmap is None and "colors" not in kwargs:
if color is None:
color = default_color
if filled:
cmap = light_palette(color, as_cmap=True)
else:
cmap = dark_palette(color, as_cmap=True)
if isinstance(cmap, str):
if cmap.endswith("_d"):
pal = ["#333333"]
pal.extend(color_palette(cmap.replace("_d", "_r"), 2))
cmap = blend_palette(pal, as_cmap=True)
else:
cmap = mpl.cm.get_cmap(cmap)
label = kwargs.pop("label", None)
kwargs["cmap"] = cmap
contour_func = ax.contourf if filled else ax.contour
cset = contour_func(xx, yy, z, n_levels, **kwargs)
if filled and not fill_lowest:
cset.collections[0].set_alpha(0)
kwargs["n_levels"] = n_levels
if cbar:
cbar_kws = {} if cbar_kws is None else cbar_kws
ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)
# Label the axes
if hasattr(x, "name") and axlabel:
ax.set_xlabel(x.name)
if hasattr(y, "name") and axlabel:
ax.set_ylabel(y.name)
if label is not None:
legend_color = cmap(.95) if color is None else color
if filled:
ax.fill_between([], [], color=legend_color, label=label)
else:
ax.plot([], [], color=legend_color, label=label)
return ax
| 5,353,995 |
def load_file(path):
"""
Load single cell dataset from file
"""
if os.path.exists(DATA_PATH+path+'.h5ad'):
adata = sc.read_h5ad(DATA_PATH+path+'.h5ad')
elif os.path.isdir(path): # mtx format
adata = read_mtx(path)
elif os.path.isfile(path):
if path.endswith(('.csv', '.csv.gz')):
adata = sc.read_csv(path).T
elif path.endswith(('.txt', '.txt.gz', '.tsv', '.tsv.gz')):
df = pd.read_csv(path, sep='\t', index_col=0).T
adata = AnnData(df.values, dict(obs_names=df.index.values), dict(var_names=df.columns.values))
elif path.endswith('.h5ad'):
adata = sc.read_h5ad(path)
else:
raise ValueError("File {} not exists".format(path))
if not issparse(adata.X):
adata.X = scipy.sparse.csr_matrix(adata.X)
adata.var_names_make_unique()
return adata
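# Usage sketch: DATA_PATH is a module-level constant in the original code, so the
# names below are assumptions.
# adata = load_file("my_dataset")        # resolves DATA_PATH + 'my_dataset.h5ad' if it exists
# adata = load_file("counts.tsv.gz")     # text matrices are read and transposed to cells x genes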
| 5,353,996 |
def unified_load(namespace, subclasses=None, recurse=False):
"""Provides a unified interface to both the module and class loaders,
finding modules by default or classes if given a ``subclasses`` parameter.
"""
if subclasses is not None:
return ClassLoader(recurse=recurse).load(namespace, subclasses=subclasses)
else:
return ModuleLoader(recurse=recurse).load(namespace)
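# Usage sketch with a hypothetical plugin package and base class:
# modules = unified_load("myapp.plugins", recurse=True)
# handlers = unified_load("myapp.plugins", subclasses=BaseHandler)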
| 5,353,997 |
def generate_free_rooms(room_times: dict) -> dict:
"""
Generates data structure for getting free rooms for each time.
"""
# create data format
free_rooms = {'M': {},
'Tu': {},
'W': {},
'Th': {},
'F': {}
}
# add empty lists for each time
for dotw in free_rooms:
for i in range(0, 144):
free_rooms[dotw][i] = []
# iterate through all the rooms. days, and times
for room in room_times:
for day in room_times[room]:
for time in room_times[room][day]:
# add the room to the corresponding time
free_rooms[day][time].append(room)
return free_rooms
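# Usage sketch: room_times maps room -> day -> free time-slot indices (0-143);
# the room name and slots below are made up.
rooms = generate_free_rooms({"PHYS 101": {"M": [18, 19], "Tu": [], "W": [18], "Th": [], "F": []}})
# rooms["M"][18] == ["PHYS 101"], rooms["M"][20] == [], rooms["W"][18] == ["PHYS 101"]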
| 5,353,998 |
def code_parse_line(li, pattern_type="import/import_externa"):
"""
External Packages
"""
### Import pattern
if pattern_type == "import":
if li.find("from") > -1:
l = li[li.find("from") + 4 : li.find("import")].strip().split(",")
else:
l = li.strip().split("import ")[1].strip().split(",")
l = [x for x in l if x != ""]
l = np_list_dropduplicate(l)
return l
# Only external
if pattern_type == "import_extern":
if li.find("from") > -1:
l = li[li.find("from") + 4 : li.find("import")].strip().split(",")
else:
l = li.strip().split("import ")[1].strip().split(",")
l = [x for x in l if x != ""]
l = [x for x in l if x[0] != "."]
l = [x.split(".")[0].split("as")[0].split("#")[0].strip() for x in l]
l = np_list_dropduplicate(l)
return l
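# Usage sketch (np_list_dropduplicate is a helper from the original module, so these
# calls are illustrative rather than standalone):
# code_parse_line("import os, sys", "import")                      -> ['os', ' sys']
# code_parse_line("from .utils import helper", "import_extern")    -> []
# code_parse_line("import numpy as np  # arrays", "import_extern") -> ['numpy']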
| 5,353,999 |