| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def cached_property_named(name, kls=_internal_jit_attr, use_cls_setattr=False):
"""
Variation of `cached_property`, with the ability to explicitly set the attribute name.
Primarily of use when the functor being wrapped has a generic name
(`functools.partial` instances, for example).
Example Usage:
>>> from snakeoil.klass import cached_property_named
>>> class foo:
...
... @cached_property_named("attr")
... def attr(self):
... print("invoked")
... return 1
>>>
>>> obj = foo()
>>> print(obj.attr)
invoked
1
>>> print(obj.attr)
1
"""
return post_curry(kls, name, use_singleton=False, use_cls_setattr=use_cls_setattr)
| 5,346,200 |
def remix(tracks, n_tracks=50, n_steps=60):
"""Return new tracks generated by remixing given tracks"""
time_step = int(
np.round(next(dt for dt in sorted(tracks["Time"].diff()) if dt > 0) * 60)
)
print(
"Generating {} steps from {} steps {}s apart.".format(
n_tracks * n_steps, len(tracks), time_step
)
)
velocities_only = tracks[tracks["Turning Angle"].isnull()]["Velocity"].dropna()
velo_and_turn = tracks[tracks["Plane Angle"].isnull()][
["Velocity", "Turning Angle"]
].dropna()
remaining_data = tracks[["Velocity", "Turning Angle", "Plane Angle"]].dropna()
new_tracks = pd.DataFrame()
for i in range(n_tracks):
track_data = velo_and_turn.sample()
track_data = track_data.append(
remaining_data.sample(n_steps - 2, replace=True)
)
track_data = track_data.append(
pd.DataFrame(
{
"Velocity": velocities_only.sample()
}
)
)
new_track = silly_steps(track_data, time_step)
new_track["Track_ID"] = i
new_tracks = new_tracks.append(new_track)
if "Condition" in tracks.columns:
new_tracks["Condition"] = tracks["Condition"].iloc[0] + " Remixed"
else:
new_tracks["Condition"] = "Remixed"
return new_tracks.reset_index()
| 5,346,201 |
def prohibition_served_recently(**args) -> tuple:
"""
Returns (True, args) if the prohibition was served within the previous
config.DAYS_TO_DELAY_FOR_VIPS_DATA_ENTRY days; otherwise returns (False, args)
"""
date_served_string = args.get('date_of_service')
config = args.get('config')
delay_days = int(config.DAYS_TO_DELAY_FOR_VIPS_DATA_ENTRY)
# Note: we have to rely on the date_served as submitted by the user -- not the date in VIPS
# Check to see if enough time has elapsed to enter the prohibition into VIPS
today = args.get('today_date')
date_served = helper.localize_timezone(datetime.strptime(date_served_string, '%Y-%m-%d'))
very_recently_served = (today - date_served).days < delay_days
if very_recently_served:
return True, args
error = 'prohibition not served within the past {} days'.format(delay_days)
args['error_string'] = error
logging.info(error)
print("date_served: {}, very_recently_served: {}".format(date_served, very_recently_served))
return False, args
| 5,346,202 |
def test_check_family_equal_unicode_encodings(mada_ttFonts):
""" Fonts have equal unicode encodings ? """
from fontbakery.profiles.cmap import com_google_fonts_check_family_equal_unicode_encodings as check
from fontbakery.constants import WindowsEncodingID
print('Test PASS with good family.')
# our reference Mada family is known to be good here.
status, message = list(check(mada_ttFonts))[-1]
assert status == PASS
bad_ttFonts = mada_ttFonts
# introduce mismatching encodings into the first 2 font files:
for i, encoding in enumerate([WindowsEncodingID.SYMBOL,
WindowsEncodingID.UNICODE_BMP]):
for table in bad_ttFonts[i]['cmap'].tables:
if table.format == 4:
table.platEncID = encoding
print('Test FAIL with fonts that diverge on unicode encoding.')
status, message = list(check(bad_ttFonts))[-1]
assert status == FAIL and message.code == "mismatch"
| 5,346,203 |
def project_api(request):
"""
Create-project API endpoint
"""
if not request.user.has_perm('home_application.can_add_project'):
return render(request, '403.html')
if request.method == 'POST':
groupId=request.POST.get('group-id','')
projectName=request.POST.get('project-name','')
domain=request.POST.get('domain','')
domainId=request.POST.get('domain-id',1)
if domainId == '1':
main_domain='.a.com.cn'
elif domainId == '2':
main_domain='.b.com'
else:
main_domain=''
new_domain=domain+main_domain
kwargs={
'groupId':groupId,
'projectName':projectName,
'domain':new_domain,
}
try:
create_project=QuerySet(**kwargs).creat_project()
msg={'resultCode':u'200','data':create_project,'info': u'创建成功'}
except Exception:
msg={'resultCode':'60001','data':'','info': u'数据库操作异常,注意不要重复'}
return HttpResponse(json.dumps(msg))
msg={'resultCode':u'60003','data':u'','info':u'不支持的请求'}
return HttpResponse(json.dumps(msg))
| 5,346,204 |
def impute_missing_values(model,
observed_time_series,
parameter_samples,
include_observation_noise=False):
"""Runs posterior inference to impute the missing values in a time series.
This method computes the posterior marginals `p(latent state | observations)`,
given the time series at observed timesteps (a missingness mask should
be specified using `tfp.sts.MaskedTimeSeries`). It pushes this posterior back
through the observation model to impute a predictive distribution on the
observed time series. At unobserved steps, this is an imputed value; at other
steps it is interpreted as the model's estimate of the underlying noise-free
series.
Args:
model: `tfp.sts.Sum` instance defining an additive STS model.
observed_time_series: `float` `Tensor` of shape
`concat([sample_shape, model.batch_shape, [num_timesteps, 1]])` where
`sample_shape` corresponds to i.i.d. observations, and the trailing `[1]`
dimension may (optionally) be omitted if `num_timesteps > 1`. May
optionally be an instance of `tfp.sts.MaskedTimeSeries` including a
mask `Tensor` to encode the locations of missing observations.
parameter_samples: Python `list` of `Tensors` representing posterior
samples of model parameters, with shapes `[concat([
[num_posterior_draws], param.prior.batch_shape,
param.prior.event_shape]) for param in model.parameters]`. This may
optionally also be a map (Python `dict`) of parameter names to
`Tensor` values.
include_observation_noise: If `False`, the imputed uncertainties
represent the model's estimate of the noise-free time series at each
timestep. If `True`, they represent the model's estimate of the range of
values that could be *observed* at each timestep, including any i.i.d.
observation noise.
Default value: `False`.
Returns:
imputed_series_dist: a `tfd.MixtureSameFamily` instance with event shape
[num_timesteps] and batch shape `concat([sample_shape,
model.batch_shape])`, with `num_posterior_draws` mixture components.
#### Example
To specify a time series with missing values, use `tfp.sts.MaskedTimeSeries`:
```python
time_series_with_nans = [-1., 1., np.nan, 2.4, np.nan, 5]
observed_time_series = tfp.sts.MaskedTimeSeries(
time_series=time_series_with_nans,
is_missing=tf.math.is_nan(time_series_with_nans))
```
Masked time series can be passed to `tfp.sts` methods in place of a
`observed_time_series` `Tensor`:
```python
# Build model using observed time series to set heuristic priors.
linear_trend_model = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series)
model = tfp.sts.Sum([linear_trend_model],
observed_time_series=observed_time_series)
# Fit model to data
parameter_samples, _ = tfp.sts.fit_with_hmc(model, observed_time_series)
```
After fitting a model, `impute_missing_values` will return a distribution
```python
# Impute missing values
imputed_series_distribution = tfp.sts.impute_missing_values(
model, observed_time_series)
print('imputed means and stddevs: ',
imputed_series_distribution.mean(),
imputed_series_distribution.stddev())
```
"""
with tf.name_scope('impute_missing_values'):
[
observed_time_series,
mask
] = sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series)
# Run smoothing over the training timesteps to extract the
# predictive means and variances.
num_timesteps = dist_util.prefer_static_value(
tf.shape(input=observed_time_series))[-2]
lgssm = model.make_state_space_model(
num_timesteps=num_timesteps, param_vals=parameter_samples)
posterior_means, posterior_covs = lgssm.posterior_marginals(
observed_time_series, mask=mask)
observation_means, observation_covs = lgssm.latents_to_observations(
latent_means=posterior_means,
latent_covs=posterior_covs)
if not include_observation_noise:
# Extract just the variance of observation noise by pushing forward
# zero-variance latents.
_, observation_noise_covs = lgssm.latents_to_observations(
latent_means=posterior_means,
latent_covs=tf.zeros_like(posterior_covs))
# Subtract out the observation noise that was added in the original
# pushforward. Note that this could cause numerical issues if the
# observation noise is very large. If this becomes an issue we could
# avoid the subtraction by plumbing `include_observation_noise` through
# `lgssm.latents_to_observations`.
observation_covs -= observation_noise_covs
# Squeeze dims to convert from LGSSM's event shape `[num_timesteps, 1]`
# to a scalar time series.
return sts_util.mix_over_posterior_draws(
means=observation_means[..., 0],
variances=observation_covs[..., 0, 0])
| 5,346,205 |
def grad_z(y, z, axis=0):
"""
Compute the vertical gradient
"z" can be an array same size as y, or vector along the first axis of "y"
Takes the derivative along the dimension specified by axis(=0)
"""
Nz = z.shape[0]
# Reshape the y variable
y = y.swapaxes(0, axis)
#assert y.shape[0] == Nz
z = z.swapaxes(0, axis)
#assert z.shape == (Nz,) or z.shape == y.shape
dy_dz = np.zeros_like(y)
# Second-order accurate for mid-points
ymid = 0.5*(y[1:,...]+y[0:-1,...])
zmid = 0.5*(z[1:,...]+z[0:-1,...])
dzmid = zmid[1:,...] - zmid[0:-1,...]
dzmidi = 1./dzmid
dy_dz[1:-1, ...] = (ymid[1:,...] - ymid[0:-1,...])*\
dzmidi[:,...]
# First-order accurate for top and bottom cells
dy_dz[0,...] = (y[1,...] - y[0,...])*dzmidi[0,...]
dy_dz[-1,...] = (y[-1,...] - y[-2,...])*dzmidi[-1,...]
return dy_dz.swapaxes(axis, 0)
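A minimal usage sketch, assuming `grad_z` above is in scope: for a quadratic profile the midpoint scheme reproduces the analytic derivative in the interior.

```python
import numpy as np

# Vertical gradient of y = z**2, whose analytic derivative is 2*z.
z = np.linspace(0.0, 1.0, 11)
y = z ** 2
dy_dz = grad_z(y, z)                             # second-order accurate interior
print(np.allclose(dy_dz[1:-1], 2.0 * z[1:-1]))   # True
```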
| 5,346,206 |
def wang_ryzin_reg(h, Xi, x):
"""
A version for the Wang-Ryzin kernel for nonparametric regression.
Suggested by Li and Racine in [1] ch.4
"""
return h ** abs(Xi - x)
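A minimal usage sketch, assuming `wang_ryzin_reg` above is in scope: for an ordered discrete regressor the kernel weight decays geometrically with the distance `|Xi - x|`, so the bandwidth `h` should lie in (0, 1).

```python
import numpy as np

Xi = np.array([0, 1, 2, 3, 4])
weights = wang_ryzin_reg(h=0.5, Xi=Xi, x=2)
print(weights)   # [0.25 0.5  1.   0.5  0.25]
```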
| 5,346,207 |
def _removeBackrefs( senderkey):
"""Remove all back-references to this senderkey"""
try:
signals = connections[senderkey]
except KeyError:
signals = None
else:
items = signals.items()
def allReceivers( ):
for signal,set in items:
for item in set:
yield item
for receiver in allReceivers():
_killBackref( receiver, senderkey )
| 5,346,208 |
def get_all_input_values(corpus_weights):
"""
Returns all relevant input values
"""
azerty = get_azerty()
letters = get_letters()
characters = get_characters()
keyslots = get_keyslots()
similarity_c_c = get_character_similarities()
similarity_c_l = get_character_letter_similarities()
distance_level_0, distance_level_1 = get_distances()
# read in single probabilities
p_single, p_bigram = get_probabilities(corpus_weights)
ergonomics = get_ergonomics()
performance = get_performance()
return azerty, \
characters, \
keyslots, \
letters, \
p_single, p_bigram, \
performance, \
similarity_c_c, similarity_c_l, \
distance_level_0, distance_level_1, \
ergonomics
| 5,346,209 |
def bulk_rename(doctype, rows=None, via_console = False):
"""Bulk rename documents
:param doctype: DocType to be renamed
:param rows: list of documents as `((oldname, newname), ..)`"""
if not rows:
frappe.throw(_("Please select a valid csv file with data"))
if not via_console:
max_rows = 500
if len(rows) > max_rows:
frappe.throw(_("Maximum {0} rows allowed").format(max_rows))
rename_log = []
for row in rows:
# if row has some content
if len(row) > 1 and row[0] and row[1]:
try:
if rename_doc(doctype, row[0], row[1], rebuild_search=False):
msg = _("Successful: {0} to {1}").format(row[0], row[1])
frappe.db.commit()
else:
msg = _("Ignored: {0} to {1}").format(row[0], row[1])
except Exception as e:
msg = _("** Failed: {0} to {1}: {2}").format(row[0], row[1], repr(e))
frappe.db.rollback()
if via_console:
print(msg)
else:
rename_log.append(msg)
frappe.enqueue('frappe.utils.global_search.rebuild_for_doctype', doctype=doctype)
if not via_console:
return rename_log
| 5,346,210 |
def parse_file(
input_file,
output_file,
xsd_file,
xpaths,
excludepaths,
delete_xml,
block_size,
file_info,
):
"""
:param input_file: input file
:param output_file: output file
:param xsd_file: xsd file
:param xpaths: whether to parse a specific xml path
:param excludepaths: paths to exclude
:param delete_xml: optional delete xml file after converting
:param block_size: memory needed to read xml
:param file_info: capture file information metadata in parquet file
"""
_logger.debug("Generating schema from " + xsd_file)
my_schema = xmlschema.XMLSchema(xsd_file, converter=NestedParqConverter)
_logger.debug("Parsing " + input_file)
xpaths_set = set()
xparents_set = set()
if xpaths:
xpaths = xpaths.split(",")
xpaths_list = [v.split("/")[1:] for v in xpaths]
xpaths_set = {tuple(v) for v in xpaths_list}
start = -1
while True:
x_set = {tuple(v[:start]) for v in xpaths_list if len(v[:start]) > 0}
xparents_set.update(x_set)
start = start - 1
if not x_set:
break
excludepaths_set = set()
excludeparents_set = set()
if excludepaths:
excludepaths = excludepaths.split(",")
excludepaths_list = [v.split("/")[1:] for v in excludepaths]
excludepaths_set = {tuple(v) for v in excludepaths_list}
excludeparents_set = {tuple(v[:-1]) for v in excludepaths_list}
if input_file.endswith(".tar.gz"):
zip_file = tarfile.open(input_file, "r")
zip_file_list = zip_file.getmembers()
for member in zip_file_list:
with zip_file.extractfile(member) as xml_file:
if file_info:
file_info_meta = member.get_info()
file_info_meta["tarfile"] = os.path.basename(input_file)
else:
file_info_meta = None
parse_xml(
xml_file,
output_file + "." + member.name + ".parquet",
my_schema,
xpaths_set,
xparents_set,
excludepaths_set,
excludeparents_set,
block_size,
file_info_meta,
)
elif input_file.endswith(".zip"):
zip_file = ZipFile(input_file, "r")
if zip_file.testzip():
logging.info("Zip File is Corrupt:" + input_file)
return
zip_file_list = zip_file.infolist()
for i in range(len(zip_file_list)):
with zip_file.open(zip_file_list[i].filename) as xml_file:
if file_info:
file_info_meta = {
"filename": zip_file_list[i].filename,
"date_time": zip_file_list[i].date_time,
"compress_size": zip_file_list[i].compress_size,
"zipfile": os.path.basename(input_file),
}
else:
file_info_meta = None
parse_xml(
xml_file,
output_file + "." + zip_file_list[i].filename + ".parquet",
my_schema,
xpaths_set,
xparents_set,
excludepaths_set,
excludeparents_set,
block_size,
file_info_meta,
)
elif input_file.endswith(".gz"):
with gzip.open(input_file) as xml_file:
if file_info:
file_info_meta = {
"filename": os.path.basename(input_file),
"modified": datetime.fromtimestamp(os.path.getmtime(input_file)),
"size": os.path.getsize(input_file),
}
else:
file_info_meta = None
parse_xml(
xml_file,
output_file + "." + input_file[:-3] + ".parquet",
my_schema,
xpaths_set,
xparents_set,
excludepaths_set,
excludeparents_set,
block_size,
file_info_meta,
)
else:
if file_info:
file_info_meta = {
"filename": os.path.basename(input_file),
"modified": datetime.fromtimestamp(os.path.getmtime(input_file)),
"size": os.path.getsize(input_file),
}
else:
file_info_meta = None
parse_xml(
input_file,
output_file + ".xml.parquet",
my_schema,
xpaths_set,
xparents_set,
excludepaths_set,
excludeparents_set,
block_size,
file_info_meta,
)
if delete_xml:
os.remove(input_file)
_logger.debug("Completed " + input_file)
| 5,346,211 |
def mag_to_flux_AB(mag, mag_err):
"""Calculate flux in erg s-1 cm-2 Hz-1."""
flux = 10 ** (-.4 * (mag + 48.6))
flux_err = abs(-.4 * flux * sp.log(10) * mag_err)
return flux, flux_err
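A minimal usage sketch, assuming `mag_to_flux_AB` above is in scope (it relies on a scipy import aliased as `sp` in its module): an AB magnitude of 0 corresponds by definition to 10**(-48.6/2.5) erg s-1 cm-2 Hz-1, roughly 3631 Jy.

```python
flux, flux_err = mag_to_flux_AB(mag=0.0, mag_err=0.1)
print(flux)              # ~3.63e-20 erg s-1 cm-2 Hz-1
print(flux_err / flux)   # ~0.0921, i.e. 0.4 * ln(10) * mag_err
```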
| 5,346,212 |
def add_count_records(df, count_type):
"""Add count records for the count type."""
log(f'Adding {DATASET_ID} count records for {count_type}')
has_count = pd.to_numeric(df[count_type], errors='coerce').notna()
df = df.loc[has_count, :].copy()
df[count_type] = df[count_type].astype(int)
df['count_id'] = db.create_ids(df, 'counts')
df['dataset_id'] = DATASET_ID
df['count'] = df[count_type]
df['count_type'] = count_type
df['count_json'] = util.json_object(df, COUNT_FIELDS)
df.loc[df['count'].notna(), db.COUNT_FIELDS].to_sql(
'counts', db.connect(), if_exists='append', index=False)
| 5,346,213 |
def rasterizeTimesliceMultipleDays(timeslices_range: dict, perform_rasterization):
"""
Rasterize timeslices over multiple days while keeping consistent color scheme across rasters
timeslices_range shall for each day contain a dictionary with keys:
- timeslices
- startTime
- endTime
- imagePath
- rasterPath
:param timeslices_range: dictionary containing timeslices and metadata for each day
:param perform_rasterization: whether to rasterize the GDAL layers or just create and store them
"""
xres = 1000
yres = 1000
multproc = True
min_agents_range = 4000
max_agents_range = 0
log.info(f"Calculating min and max agents over all timeslices")
for day in timeslices_range:
timeslices = timeslices_range[day]["timeslices"]
for hour in sorted(list(timeslices.keys())):
for minute in timeslices[hour]:
min_agents_range = min(min_agents_range, timeslices[hour][minute][timeslices[hour][minute].countReachable > 3].countReachable.min())
max_agents_range = max(max_agents_range, timeslices[hour][minute].countReachable.max())
log.info(f"min agents: {min_agents_range}, max agents: {max_agents_range}")
hours = range(0,24)
minutes = range(0, 60, 10)
log.info(f"Rasterizing timeslices from {timeslices_range[list(timeslices_range.keys())[0]]['startTime']} to {timeslices_range[list(timeslices_range.keys())[-1]]['startTime']}")
for day in timeslices_range:
timeslices = timeslices_range[day]["timeslices"]
rasterPath = timeslices_range[day]["rasterPath"]
imagePath = timeslices_range[day]["imagePath"]
slice_datetime = timeslices_range[day]["startTime"]
log.info(f"Rasterizing timeslices on day {day}")
if not os.path.isdir(rasterPath):
log.warning(f"{rasterPath} does not exist, attempting to create folder..")
os.mkdir(rasterPath)
if not os.path.isdir(imagePath):
log.warning(f"{imagePath} does not exist, attempting to create folder..")
os.mkdir(imagePath)
global parsed
parsed = 0
maxParsed = len(hours)*len(minutes)
steps = 10
iter = int(maxParsed / steps)
def callback(result):
dt = result
c_hour = dt.hour
c_minute = dt.minute
global parsed
parsed += 1
numBlocks = int(parsed / (iter + 1)) if parsed != maxParsed else steps
print(f"\rRendering timeslices [" + ''.join(['#' for _ in range(numBlocks)]).ljust(steps) + f"] ({str(c_hour).rjust(2)} {str(c_minute).rjust(2)})", end="", flush=True)
if multproc:
pool = multiprocessing.Pool()
for hour in hours:
for minute in minutes:
dt = datetime.datetime(year=slice_datetime.year, month=slice_datetime.month, day=slice_datetime.day, hour=hour, minute=minute)
pool.apply_async(_rasterizeTimesliceWorker,
(timeslices[hour][minute], rasterPath, imagePath, min_agents_range, max_agents_range, dt, xres, yres, perform_rasterization),
callback=callback)
pool.close()
pool.join()
else:
for hour in hours:
for minute in minutes:
dt = datetime.datetime(year=slice_datetime.year, month=slice_datetime.month, day=slice_datetime.day, hour=hour, minute=minute)
callback(_rasterizeTimesliceWorker(timeslices[hour][minute], rasterPath, imagePath, min_agents_range, max_agents_range, dt, xres, yres, perform_rasterization))
print()
shutil.rmtree(rasterPath)
| 5,346,214 |
def django_admin_navtree(request, context):
"""show menu"""
if request and request.user.is_staff:
coop_cms_navtrees = context.get('coop_cms_navtrees', None) or []
tree_class = get_navtree_class()
admin_tree_name = "{0}_{1}".format(get_model_app(tree_class), get_model_name(tree_class))
if len(coop_cms_navtrees) == 1:
tree = coop_cms_navtrees[0]
url = reverse('admin:{0}_change'.format(admin_tree_name), args=[tree.id])
label = _('Navigation tree')
else:
url = reverse('admin:{0}_changelist'.format(admin_tree_name))
label = _('Navigation trees')
return make_link(
url, label, 'leaf',
classes=['icon', 'alert_on_click']
)
| 5,346,215 |
def get_pos_tags(student_comment: str) -> pd.DataFrame:
"""Get the POS (part of speech) tags for each of the words in the student
comments
Keyword arguments
student_comment -- a pandas Series of spacy.tokens.doc.Doc objects
"""
# Count how many of each pos tags are in each comment
pos_tags = student_comment.apply(lambda x: Counter([token.pos_ for token in x]))
# Expand the list column into several columns
pos_tags_df = pos_tags.apply(pd.Series).fillna(0)
return pos_tags_df
| 5,346,216 |
def compute_descriptor_digest(fields, descriptors, entry, flavor):
"""
(details of the parser – private API)
Plugs into our consumer to compute extra "digest" fields that expose
the (micro-)descriptor's (micro-)digest, enabling us to easily fetch
associated entries within a consensus.
:param list fields: "fields" accumulator used by the consumer
:param bytes descriptors: remaining input to be parsed by the consumer
:param bytes entry: last line being parsed by the consumer
:param str flavor: flavor used by the consumer
:returns: updated (or not) fields accumulator
"""
if flavor == 'unflavored':
digest_name = 'digest'
pivot_field = 'router'
starts_hash = b'router '
ends_hasher = b'router-signature'
base_offset = 1
base_legacy = 0
shalgorithm = hashlib.sha1
# https://github.com/plcp/tor-scripts/blob/master/torspec/dir-spec-4d0d42f.txt#L602
else:
digest_name = 'micro-digest'
pivot_field = 'onion-key'
starts_hash = b'onion-key'
ends_hasher = b'id '
base_offset = 7 + 1 + 43 + 1 # 'ed25519 [identity]\n'
base_legacy = 7 + 1 + 27 + 1 # 'rsa1024 [identity]\n'
shalgorithm = hashlib.sha256
# https://github.com/plcp/tor-scripts/blob/master/torspec/dir-spec-4d0d42f.txt#L3202
# 1. check if we're starting to parse a fresh entry before computing digest
if digest_name not in fields[-1] or (
entry.startswith(starts_hash) and pivot_field in fields[-1]):
if pivot_field in fields[-1]:
fields.append(dict())
# 1.5 (extra sanity checks: double-check that we have what we need)
if not entry.startswith(starts_hash):
raise RuntimeError('Expecting {} to start the payload: {}'.format(
starts_hash, entry))
if not ends_hasher in descriptors:
raise RuntimeError(
'Expecting {} within: {}'.format(ends_hasher, descriptors))
try:
# 2. compute the offset to the ends what goes into the hash
sigoffset = descriptors.index(ends_hasher)
# TODO: better support?
sigoffset += len(ends_hasher) + base_offset
if b'rsa1024' in descriptors[:sigoffset]:
sigoffset -= base_offset
sigoffset += base_legacy
# 3. rebuild the original (including its first line being parsed)
full_desc = entry + b'\n' + descriptors[:sigoffset]
# 4. compute the base64-encoded hash with the right algorithm
digest = base64.b64encode(shalgorithm(full_desc).digest())
# 5. strips the trailing '=' as specified
fields[-1][digest_name] = str(digest.rstrip(b'='), 'utf8')
except ValueError:
pass
if not digest_name in fields[-1]:
raise RuntimeError('Was unable to generate proper sum.')
return fields
| 5,346,217 |
def main():
"""The Main Function."""
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--validation", type=int, default=1, help="turn on to cal the index")
parser.add_argument("-d", "--dataset", type=str, default="3T3", help="choose mode")
parser.add_argument("--n_model", type=int,
default=1, help="how many models you want to train")
parser.add_argument("--cyto_prefix", type=str,
default="2017-10-29_3T3_bn_feature_net_61x61_", help="the prefix of your cyto modle")
parser.add_argument("--nuclear_prefix", type=str,
default="2017-10-29_nuclei_bn_feature_net_61x61_", help="the prefix of your nuclear modle")
parser.add_argument("--win_cyto", type=int,
default=30, help="window size of cyto model")
parser.add_argument("--win_nuclear", type=int,
default=30, help="window size of nuclear model")
args = parser.parse_args()
root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if args.validation:
direc_name = os.path.join(root, "DATA/validation/" + args.dataset)
else:
direc_name = os.path.join(root, "DATA/test/" + args.dataset)
data_location = os.path.join(direc_name, 'RawImages')
cyto_location = os.path.join(direc_name, 'Cytoplasm')
nuclear_location = os.path.join(direc_name, 'Nuclear')
mask_location = os.path.join(direc_name, 'Masks')
cyto_channel_names = [x[:-4] for x in os.listdir(data_location)]
nuclear_channel_names = [x[:-4] for x in os.listdir(data_location) if 'hase' not in x]
trained_network_cyto_dir = os.path.join(root, "MODEL/cyto")
trained_network_nuclear_dir = os.path.join(root, "MODEL/nuclear")
n_model = args.n_model
cyto_prefix = args.cyto_prefix
nuclear_prefix = args.nuclear_prefix
win_cyto = args.win_cyto
win_nuclear = args.win_nuclear
image_size_x, image_size_y = get_image_sizes(data_location, cyto_channel_names)
"""
Define model
"""
list_of_cyto_weights = []
for j in xrange(n_model):
cyto_weights = os.path.join(trained_network_cyto_dir, cyto_prefix + str(j) + ".h5")
list_of_cyto_weights += [cyto_weights]
list_of_nuclear_weights = []
for j in xrange(n_model):
nuclear_weights = os.path.join(trained_network_nuclear_dir, nuclear_prefix + str(j) + ".h5")
list_of_nuclear_weights += [nuclear_weights]
"""
Run model on directory
"""
cytoplasm_predictions = run_models_on_directory(data_location, cyto_channel_names, cyto_location,
n_features=3, model_fn=network, list_of_weights=list_of_cyto_weights, image_size_x=image_size_x,
image_size_y=image_size_y, win_x=win_cyto, win_y=win_cyto, std=True, split=True)
run_models_on_directory(data_location, nuclear_channel_names, nuclear_location,
model_fn=network, list_of_weights=list_of_nuclear_weights,
image_size_x=image_size_x, image_size_y=image_size_y,
win_x=win_nuclear, win_y=win_nuclear, std=False, split=False)
"""
Refine segmentation with active contours
"""
nuclear_masks = segment_nuclei(img=None, color_image=True, load_from_direc=nuclear_location,
mask_location=mask_location, area_threshold=100, solidity_threshold=0, eccentricity_threshold=1)
cytoplasm_masks = segment_cytoplasm(img=None, load_from_direc=cyto_location, color_image=True,
nuclear_masks=nuclear_masks, mask_location=mask_location, smoothing=1, num_iters=120)
"""
Compute validation metrics (optional)
"""
if args.validation:
direc_val = os.path.join(direc_name, 'Validation')
imglist_val = nikon_getfiles(direc_val, 'interior')
val_name = os.path.join(direc_val, imglist_val[0])
print(val_name)
val = get_image(val_name)
val = val[win_cyto:-win_cyto, win_cyto:-win_cyto]
cyto = cytoplasm_masks[0, win_cyto:-win_cyto, win_cyto:-win_cyto]
nuc = nuclear_masks[0, win_cyto:-win_cyto, win_cyto:-win_cyto]
print(val.shape, cyto.shape, nuc.shape)
print(dice_jaccard_indices(cyto, val, nuc))
# Compute cell categorization prediction for each cell
interior1 = cytoplasm_predictions[0, 1, :, :]
interior2 = cytoplasm_predictions[0, 2, :, :]
seg = label(cytoplasm_masks[0, :, :])
num_of_cells = np.amax(seg)
prediction = np.zeros(interior1.shape, dtype=np.float32)
prediction_color = np.zeros((interior1.shape[0], interior1.shape[1], 3), dtype=np.float32)
bound = segmentation.find_boundaries(seg)
for cell_no in xrange(1, num_of_cells):
class_1_pred = interior1[seg == cell_no]
class_2_pred = interior2[seg == cell_no]
# class_1_score = np.sum(class_1_pred) / (np.sum(class_1_pred) + np.sum(class_2_pred))
class_2_score = np.sum(class_2_pred) / (np.sum(class_1_pred) + np.sum(class_2_pred))
prediction[seg == cell_no] = class_2_score
prediction_color[seg == cell_no, 0] = plt.cm.coolwarm(class_2_score)[0]
prediction_color[seg == cell_no, 1] = plt.cm.coolwarm(class_2_score)[1]
prediction_color[seg == cell_no, 2] = plt.cm.coolwarm(class_2_score)[2]
prediction_color[bound, 0] = 0
prediction_color[bound, 1] = 0
prediction_color[bound, 2] = 0
cnnout_name = os.path.join(mask_location, 'segmentation_rgb_new.tif')
scipy.misc.imsave(cnnout_name, np.float16(prediction_color))
| 5,346,218 |
def add_auth(instance):
"""Add authentication to VM."""
if not instance:
sys.exit(click.style("Need to provide an instance to before "
"we can add authentication.", fg="red"))
if instance.provider == 'vmware' and instance.vmx is None:
sys.exit(click.style("Need to provide vmx before we can add authentication.", fg="red"))
if instance.provider == 'virtualbox' and instance.vbox is None:
sys.exit(click.style("Need to provide vbox before we can add authentication.", fg="red"))
if instance.user is None or instance.user == '':
sys.exit(click.style("Need to provide user to add authentication.", fg="red"))
if instance.password is None or instance.password == '':
sys.exit(click.style("Need to provide password to add authentication.", fg="red"))
click.secho('Adding auth to instance:{}'.format(instance.name), fg="green")
vmrun = VMrun(instance.vmx, instance.user, instance.password)
# cannot run if vmware tools are not installed
if not vmrun.installed_tools():
sys.exit(click.style("Cannot add authentication if VMware Tools "
"are not installed.", fg="red"))
if instance.auth:
username = instance.auth.get('username', None)
pub_key = instance.auth.get('pub_key', None)
if username and pub_key:
pub_key_path = os.path.expanduser(pub_key)
LOGGER.debug("pub_key_path:%s pub_key:%s", pub_key_path, pub_key)
with open(pub_key_path, 'r') as the_file:
pub_key_contents = the_file.read().strip()
if pub_key_contents:
# set the password to some random string
# user should never need it (sudo should not prompt for a
# password)
password = random_string()
cmd = ('sudo useradd -m -s /bin/bash -p "{password}" {username};'
'sudo mkdir /home/{username}/.ssh;'
'sudo usermod -aG sudo {username};'
'echo "{username} ALL=(ALL) NOPASSWD: ALL" | '
'sudo tee -a /etc/sudoers;'
'echo "{pub_key_contents}" | '
'sudo tee -a /home/{username}/.ssh/authorized_keys;'
'sudo chmod 700 /home/{username}/.ssh;'
'sudo chown {username}:{username} /home/{username}/.ssh;'
'sudo chmod 600 /home/{username}/.ssh/authorized_keys;'
'sudo chown {username}:{username} /home/{username}/.ssh/authorized_keys'
).format(username=username, pub_key_contents=pub_key_contents,
password=password)
LOGGER.debug('cmd:%s', cmd)
results = vmrun.run_script_in_guest('/bin/sh', cmd, quiet=True)
LOGGER.debug('results:%s', results)
if results is None:
click.secho("Did not add auth", fg="red")
else:
click.secho("Added auth.", fg="red")
else:
click.secho("Could not read contents of the pub_key"
" file:{}".format(pub_key), fg="green")
else:
click.secho("Warning: Need a username and pub_key in auth.", fg="blue")
else:
click.secho("No auth to add.", fg="blue")
| 5,346,219 |
def test_docker_registry_htpasswd_list(
docker_registry_htpasswd_list: List[Path],
docker_registry_password_list: List[str],
docker_registry_username_list: List[str],
pdrf_scale_factor: int,
):
"""Test that a htpasswd can be provided."""
for i in range(pdrf_scale_factor):
assert docker_registry_htpasswd_list[i].exists()
content = docker_registry_htpasswd_list[i].read_text("utf-8")
assert docker_registry_username_list[i] in content
assert docker_registry_password_list[i] not in content
assert no_duplicates(docker_registry_htpasswd_list)
assert no_duplicates(docker_registry_password_list)
assert no_duplicates(docker_registry_username_list)
| 5,346,220 |
def get_axis(array, axis, slice_num):
"""Returns a fixed axis"""
slice_list = [slice(None)] * array.ndim
slice_list[axis] = slice_num
slice_data = array[tuple(slice_list)].T # transpose for proper orientation
return slice_data
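A minimal usage sketch, assuming `get_axis` above is in scope: the returned slice is the transpose of what plain indexing along that axis would give.

```python
import numpy as np

vol = np.arange(24).reshape(2, 3, 4)
s = get_axis(vol, axis=0, slice_num=1)
print(s.shape)                        # (4, 3) -- transposed (3, 4) slice
print(np.array_equal(s, vol[1].T))    # True
```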
| 5,346,221 |
def test_integrity(param_test):
"""
Test integrity of function
"""
# open result file
f = open(os.path.join(param_test.path_output, 'ernst_angle.txt'), 'r')
angle_result = float(f.read())
f.close()
# compare with GT
if abs(angle_result - param_test.angle_gt) < param_test.threshold:
param_test.output += '--> PASSED'
else:
param_test.output += '--> FAILED'
param_test.status = 99
return param_test
| 5,346,222 |
def expand_name_df(df,old_col,new_col):
"""Takes a dataframe df with an API JSON object with nested elements in old_col,
extracts the name, and saves it in a new dataframe column called new_col
Parameters
----------
df : dataframe
old_col : str
new_col : str
Returns
-------
df : dataframe
"""
import pandas as pd
def expand_name(nested_name):
"""Takes an API JSON object with nested elements and extracts the name
Parameters
----------
nested_name : JSON API object
Returns
-------
object_name : str
"""
if pd.isnull(nested_name):
object_name = 'Likely Missing'
else:
object_name = nested_name['name']
return object_name
df[new_col] = df[old_col].apply(expand_name)
return df
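A minimal usage sketch, assuming `expand_name_df` above is in scope: the nested column holds dict-like API objects carrying a "name" key, and missing entries fall back to "Likely Missing".

```python
import pandas as pd

df = pd.DataFrame({
    "team": [{"id": 1, "name": "Quality"}, {"id": 2, "name": "Platform"}, None],
})
df = expand_name_df(df, old_col="team", new_col="team_name")
print(df["team_name"].tolist())   # ['Quality', 'Platform', 'Likely Missing']
```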
| 5,346,223 |
def image_rpms_remove_if_exists(rpmlist):
"""
`image.rpms_remove_if_exists(["baz"])` removes `baz.rpm` if it exists.
Note that removals may only be applied against the parent layer -- if your
current layer includes features both removing and installing the same
package, this will cause a build failure.
"""
return _build_rpm_feature(
rpmlist,
"remove_if_exists",
needs_version_set = False,
)
| 5,346,224 |
def expand_set(mySet):
""" pass in a set of genome coords, and it will 'expand' the indels
within the set by adding +/- 3 bp copies for each one """
returnSet = []
for entry in mySet:
l0 = []
l1 = []
try:
sub0 = entry.split('-')[0] # split on `-`
sub1 = entry.split('-')[1] # this guy is good
sub00 = sub0.split(':')[1] # split on :, need to get rid of chrom
chrom = sub0.split(':')[0]
if sub00 != sub1: # got an indel
sub00_1 = int(sub00) + 1
sub00_2 = int(sub00) + 2
sub00_3 = int(sub00) + 3
sub00_4 = int(sub00) - 1
sub00_5 = int(sub00) - 2
sub00_6 = int(sub00) - 3
l0.extend((sub00_1, sub00_2, sub00_3, sub00_4, sub00_5, sub00_6))
try:
sub1_1 = int(sub1) + 1
sub1_2 = int(sub1) + 2
sub1_3 = int(sub1) + 3
sub1_4 = int(sub1) - 1
sub1_5 = int(sub1) - 2
sub1_6 = int(sub1) - 3
l1.extend((sub1_1, sub1_2, sub1_3, sub1_4, sub1_5, sub1_6))
except ValueError:
continue
coord_combos = list(itertools.product(l0, l1))
for pair in coord_combos:
toAdd = chrom + ':' + str(pair[0]) + '-' + str(pair[1])
returnSet.append(toAdd)
else:
returnSet.append(entry)
except IndexError:
continue
return returnSet
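A minimal sketch of the +/- 3 bp expansion that `expand_set` applies to each indel entry, shown for a single hypothetical coordinate string.

```python
import itertools

entry = "chr1:100-105"
chrom, coords = entry.split(":")
start, end = (int(x) for x in coords.split("-"))
starts = [start + d for d in (1, 2, 3, -1, -2, -3)]
ends = [end + d for d in (1, 2, 3, -1, -2, -3)]
expanded = ["{}:{}-{}".format(chrom, s, e)
            for s, e in itertools.product(starts, ends)]
print(len(expanded))   # 36 fuzzy variants of chr1:100-105
print(expanded[0])     # chr1:101-106
```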
| 5,346,225 |
def trapezoid(t, depth, bigT, littleT):
"""Trapezoid shape for model
INPUT:
t - [float] vector of independent values to evaluate
trapezoid model
depth - [float] depth of trapezoid
bigT - [float] full trapezoid duration
littleT - [float] 'ingress/egress' duration
OUTPUT:
output - [float] vector of trapezoid model values
"""
output = np.full_like(t, 1.0)
t = np.abs(t)
output = np.where(t <= bigT/2.0 - littleT/2.0, 1.0 - depth, output)
output = np.where(np.logical_and(t > bigT/2.0 - littleT/2.0, \
t < bigT/2.0 + littleT/2.0), \
1.0 - depth + ((depth/littleT)* \
(t-bigT/2.0 + littleT/2.0)), output)
return output
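A minimal usage sketch, assuming `trapezoid` above is in scope: a 2%-deep transit with a 4-hour full duration and 0.5-hour ingress/egress, evaluated on a grid centred on mid-transit.

```python
import numpy as np

t = np.linspace(-4.0, 4.0, 9)
flux = trapezoid(t, depth=0.02, bigT=4.0, littleT=0.5)
print(flux)   # 1.0 out of transit, 0.98 across the flat bottom
```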
| 5,346,226 |
def destroy(ctx, config, name, force):
"""Destroy a machine."""
name = drifter.commands.validate_name(ctx, name)
# Destroy the named machine only
if name:
_destroy(ctx, config, name, force)
return
# Destroy all machines
for machine in drifter.commands.list_machines(config):
_destroy(ctx, config, machine, force)
| 5,346,227 |
def clean_username(username=''):
""" Simple helper method to ensure a username is compatible with our system requirements. """
return ('_').join(re.findall(r'[a-zA-Z0-9\-]+', username))[:USERNAME_MAX_LENGTH]
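A minimal self-contained sketch of the same sanitisation; `USERNAME_MAX_LENGTH` is a module-level constant in the original, so a value of 30 is assumed here purely for illustration.

```python
import re

USERNAME_MAX_LENGTH = 30  # assumed value for the demo

def clean_username_demo(username=''):
    # keep alphanumeric/hyphen runs, join with underscores, then truncate
    return ('_').join(re.findall(r'[a-zA-Z0-9\-]+', username))[:USERNAME_MAX_LENGTH]

print(clean_username_demo("Ada Lovelace (admin)!"))   # Ada_Lovelace_admin
```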
| 5,346,228 |
def hy_compile(tree, module_name, root=ast.Module, get_expr=False):
"""
Compile a HyObject tree into a Python AST Module.
If `get_expr` is True, return a tuple (module, last_expression), where
`last_expression` is the module's final expression, wrapped in an `ast.Expression` node.
"""
body = []
expr = None
if not isinstance(tree, HyObject):
tree = wrap_value(tree)
if not isinstance(tree, HyObject):
raise HyCompileError("`tree` must be a HyObject or capable of "
"being promoted to one")
spoof_positions(tree)
compiler = HyASTCompiler(module_name)
result = compiler.compile(tree)
expr = result.force_expr
if not get_expr:
result += result.expr_as_stmt()
module_docstring = None
if (PY37 and result.stmts and
isinstance(result.stmts[0], ast.Expr) and
isinstance(result.stmts[0].value, ast.Str)):
module_docstring = result.stmts.pop(0).value.s
body = compiler.imports_as_stmts(tree) + result.stmts
ret = root(body=body, docstring=(
None if module_docstring is None else module_docstring))
if get_expr:
expr = ast.Expression(body=expr)
ret = (ret, expr)
return ret
| 5,346,229 |
def make_expt_parser():
"""
Parses arguments from the command line for running experiments
returns
args (argparse NameSpace)
"""
parser = argparse.ArgumentParser(
description='energy_py dict expt parser'
)
# required
parser.add_argument('expt_name', default=None, type=str)
# optional
parser.add_argument('--run_name', default=None, type=str)
parser.add_argument('--seed', default=None, type=int)
args = parser.parse_args()
return args
| 5,346,230 |
def user_dss_clients(dss_clients, dss_target):
"""
Fixture that narrows down the DSS clients to only the ones that are relevant for the current DSS target.
Args:
dss_clients (fixture): All the instantiated DSS clients for each user and DSS target
dss_target (fixture): The considered DSS target for the test to be executed
Returns:
A dict of dss client instances for the current DSS target and each of its specified users.
"""
return dss_clients[dss_target]
| 5,346,231 |
def build_log(x: np.ndarray) -> np.ndarray:
"""
Logarithmic expansion.
:param x: features
:return: augmented features
"""
expanded = np.ones((x.shape[0], 1))
expanded = np.hstack((expanded, np.nan_to_num(np.log(x))))
return expanded
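A minimal usage sketch, assuming `build_log` above is in scope: a bias column of ones is prepended to the element-wise log of the features.

```python
import numpy as np

x = np.array([[1.0, np.e],
              [np.e ** 2, np.e ** 3]])
print(build_log(x))
# [[1. 0. 1.]
#  [1. 2. 3.]]
```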
| 5,346,232 |
def print_pqr(args, pqr_lines, header_lines, missing_lines, is_cif):
"""Print PQR-format output to specified file
.. todo:: Move this to another module (io)
:param argparse.Namespace args: command-line arguments
:param [str] pqr_lines: output lines (records)
:param [str] header_lines: header lines
:param [str] missing_lines: lines describing missing atoms (should go
in header)
:param bool is_cif: flag indicating CIF format
"""
with open(args.output_pqr, "wt") as outfile:
# Adding whitespaces if --whitespace is in the options
if header_lines:
_LOGGER.warning(
f"Ignoring {len(header_lines)} header lines in output."
)
if missing_lines:
_LOGGER.warning(
f"Ignoring {len(missing_lines)} missing lines in output."
)
for line in pqr_lines:
if args.whitespace:
if line[0:4] == "ATOM" or line[0:6] == "HETATM":
newline = (
line[0:6]
+ " "
+ line[6:16]
+ " "
+ line[16:38]
+ " "
+ line[38:46]
+ " "
+ line[46:]
)
outfile.write(newline)
else:
if line[0:3] != "TER" or not is_cif:
outfile.write(line)
if is_cif:
outfile.write("#\n")
| 5,346,233 |
def all_multibert_finetune_glue(m:Manager, task_name:str='MRPC')->BertGlue:
""" Finetune milti-lingual base-BERT on GLUE dataset
Ref. https://github.com/google-research/bert/blob/master/multilingual.md
"""
refbert=all_fetch_multibert(m)
refglue=all_fetchglue(m)
vocab=mklens(refbert).bert_vocab.refpath
glueref=glue_tfrecords(m, task_name, bert_vocab=vocab,
lower_case=mklens(refbert).cased.val==False, refdataset=refglue)
return bert_finetune_glue(m,refbert,glueref)
| 5,346,234 |
def hash_eth2(data: Union[bytes, bytearray]) -> Hash32:
"""
Return SHA-256 hashed result.
Note: this API is currently under active research/development so is subject to change
without a major version bump.
Note: it's a placeholder and we aim to migrate to a S[T/N]ARK-friendly hash function in
a future Ethereum 2.0 deployment phase.
"""
return Hash32(hashlib.sha256(data).digest())
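A minimal sketch of the underlying operation using only the standard library; `Hash32` from `eth-typing` is just a 32-byte `bytes` alias, so plain `bytes` stand in for it here.

```python
import hashlib

digest = hashlib.sha256(b"hello").digest()
print(len(digest))    # 32
print(digest.hex())   # 2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824
```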
| 5,346,235 |
def read_data(data_dir="../main/datasets/", data_file=DATA_FILE):
"""Returns the data, in order infos, items, orders"""
with zipfile.ZipFile(data_dir+DATA_FILE) as z:
dfs = []
for name in ["infos", "items", "orders"]:
dfs.append(pd.read_csv(z.open(f"1.0v/{name}.csv"), sep="|"))
return dfs
| 5,346,236 |
def get_sprints(root_project_id, rally_number=None):
"""Get list of sprint projects.
Args:
root_project_id: Synapse Project ID with admin annotations,
including the sprint table ID.
rally_number: An integer rally number. If None, return sprints
from all rallies.
Returns:
A Pandas data frame of sprint information from the
Sprint Synapse table.
"""
syn = Synapse().client()
root_project = syn.get(root_project_id)
table_id = root_project.annotations.sprintTableId[0]
tbl = syn.tableQuery("select * from %s" % (table_id, ))
data_frame = tbl.asDataFrame()
if rally_number:
data_frame = data_frame[data_frame.rally == rally_number]
return data_frame
| 5,346,237 |
async def cmd_viewdb(self, message, scopes):
"""
`$!_viewdb [scopes...]` : Displays the current database state
If scopes are provided, then only show the requested scopes
"""
async with DBView() as db:
if scopes is None or not len(scopes):
await self.send_message(
message.channel,
json.dumps(
{key:DBView.serializable(db[key]) for key in iter(db)},
indent=2,
sort_keys=False
)
)
else:
await self.send_message(
message.channel,
json.dumps(
{key:DBView.serializable(db[key]) for key in scopes if key in db},
indent=2,
sort_keys=False
)
)
| 5,346,238 |
def prep_bbox(sess, logits_scalar, x, y, X_train, Y_train, X_test, Y_test,
img_rows, img_cols, channels, nb_epochs, batch_size, learning_rate,
rng, phase=None, binary=False, scale=False, nb_filters=64,
model_path=None, adv=0, delay=0, eps=0.3):
"""
Define and train a model that simulates the "remote"
black-box oracle described in the original paper.
:param sess: the TF session
:param x: the input placeholder for MNIST
:param y: the output placeholder for MNIST
:param X_train: the training data for the oracle
:param Y_train: the training labels for the oracle
:param X_test: the testing data for the oracle
:param Y_test: the testing labels for the oracle
:param nb_epochs: number of epochs to train model
:param batch_size: size of training batches
:param learning_rate: learning rate for training
:param rng: numpy.random.RandomState
:return:
"""
# Define TF model graph (for the black-box model)
save = False
train_from_scratch = False
if model_path is not None:
if os.path.exists(model_path):
# check for existing model in immediate subfolder
if any(f.endswith('.meta') for f in os.listdir(model_path)):
binary, scale, nb_filters, batch_size, learning_rate, nb_epochs, adv = parse_model_settings(
model_path)
train_from_scratch = False
else:
model_path = build_model_save_path(
model_path, binary, batch_size, nb_filters, learning_rate, nb_epochs, adv, delay, scale)
print(model_path)
save = True
train_from_scratch = True
else:
train_from_scratch = True # train from scratch, but don't save since no path given
if binary:
if scale:
#from cleverhans_tutorials.tutorial_models import make_scaled_binary_cnn
# model = make_scaled_binary_cnn(phase, 'bb_binsc_', input_shape=(
from cleverhans_tutorials.tutorial_models import make_scaled_binary_rand_cnn
model = make_scaled_binary_rand_cnn(phase, logits_scalar, 'bb_binsc_', input_shape=(
None, img_rows, img_cols, channels), nb_filters=nb_filters)
else:
from cleverhans_tutorials.tutorial_models import make_basic_binary_cnn
model = make_basic_binary_cnn(phase, logits_scalar, 'bb_bin_', input_shape=(
None, img_rows, img_cols, channels), nb_filters=nb_filters)
else:
from cleverhans_tutorials.tutorial_models import make_basic_cnn
model = make_basic_cnn(phase, logits_scalar, 'bb_fp_', input_shape=(
None, img_rows, img_cols, channels), nb_filters=nb_filters)
preds = model(x, reuse=False)
print("Defined TensorFlow model graph.")
def evaluate():
# Print out the accuracy on legitimate data
eval_params = {'batch_size': batch_size}
acc = model_eval(
sess, x, y, preds, X_test, Y_test, phase=phase, args=eval_params)
print('Test accuracy of black-box on legitimate test '
'examples: %.4f' % acc)
# Train an MNIST model
train_params = {
'binary': binary,
'nb_epochs': nb_epochs,
'batch_size': batch_size,
'learning_rate': learning_rate,
'loss_name': 'bb train loss',
'filename': 'bb_model',
'train_scope': 'bb_model',
'reuse_global_step': False,
'is_training': True
}
if adv != 0:
if adv == ADVERSARIAL_TRAINING_MADRYETAL:
from cleverhans.attacks import MadryEtAl
nb_iter = 20
train_attack_params = {'eps': MAX_EPS, 'eps_iter': 0.01,
'nb_iter': nb_iter}
train_attacker = MadryEtAl(model, sess=sess)
if adv == ADVERSARIAL_TRAINING_FGSM:
from cleverhans.attacks import FastGradientMethod
train_attacker = FastGradientMethod(model, back='tf', sess=sess)
# create the adversarial trainer
train_attack_params.update({'clip_min': 0., 'clip_max': 1.})
adv_x_train = train_attacker.generate(x, phase, **train_attack_params)
preds_adv = model.get_probs(adv_x_train)
if train_from_scratch:
if save:
train_params.update({'log_dir': model_path})
if adv and delay > 0:
train_params.update({'nb_epochs': delay})
# do clean training for 'nb_epochs' or 'delay' epochs
model_train(sess, x, y, preds, X_train, Y_train, phase=phase,
evaluate=evaluate, args=train_params, save=save, rng=rng)
# optionally do additional adversarial training
if adv:
print("Adversarial training for %d epochs" % (nb_epochs - delay))
train_params.update({'nb_epochs': nb_epochs - delay})
train_params.update({'reuse_global_step': True})
model_train(sess, x, y, preds, X_train, Y_train, phase=phase,
predictions_adv=preds_adv, evaluate=evaluate, args=train_params,
save=save, rng=rng)
else:
tf_model_load(sess, model_path)
print('Restored model from %s' % model_path)
accuracy = evaluate()
return model, preds, accuracy, model_path
| 5,346,239 |
def out_of_bounds(maze: Array, x: int, y: int):
""" Return true if x, y is out of bounds """
w, h = maze.shape
is_x_out = (x < 0) + (x >= w)
is_y_out = (y < 0) + (y >= h)
return is_x_out + is_y_out
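A minimal usage sketch, assuming `out_of_bounds` above is in scope: the boolean sums act as a logical OR, so any truthy result means (x, y) falls outside the maze.

```python
import numpy as np

maze = np.zeros((5, 4))
print(bool(out_of_bounds(maze, 2, 3)))    # False: inside a 5x4 maze
print(bool(out_of_bounds(maze, 5, 0)))    # True: x == width
print(bool(out_of_bounds(maze, 0, -1)))   # True: negative y
```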
| 5,346,240 |
def run_test(test_name, module_dict, print_test_case=False, display=None):
"""Run a given test."""
import test_parser
import test_classes
for module in module_dict:
setattr(sys.modules[__name__], module, module_dict[module])
test_dict = test_parser.TestParser(test_name + ".test").parse()
solution_dict = test_parser.TestParser(test_name + ".solution").parse()
test_out_file = os.path.join('%s.test_output' % test_name)
test_dict['test_out_file'] = test_out_file
test_class = getattr(project_test_classes, test_dict['class'])
question_class = getattr(test_classes, 'Question')
question = question_class({'max_points': 0}, display)
test_case = test_class(question, test_dict)
if print_test_case:
print_test(test_dict, solution_dict)
# This is a fragile hack to create a stub grades object
grades = grading.Grades(project_params.PROJECT_NAME, [(None, 0)])
test_case.execute(grades, module_dict, solution_dict)
| 5,346,241 |
def cache_set(apollo_client, name, val):
"""
保存数据到redis
:return:
"""
r = redis_handler(apollo_client)
try:
res = r.set(name=name, value=json.dumps(val))
except Exception as e:
logger.error("Storage {} to cache failed!{}".format(name, e.__str__()))
return None
else:
logger.info("{} values {}".format(name, val))
return res
| 5,346,242 |
def guess_pyramid(data):
"""If shape of arrays along first axis is strictly decreasing.
"""
# If the data has ndim and is not one-dimensional then cannot be pyramid
if hasattr(data, 'ndim') and data.ndim > 1:
return False
size = np.array([np.prod(d.shape, dtype=np.uint64) for d in data])
if len(size) > 1:
return np.all(size[:-1] > size[1:])
else:
return False
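A minimal usage sketch, assuming `guess_pyramid` above is in scope: a list of arrays whose total sizes strictly decrease is treated as a pyramid.

```python
import numpy as np

pyramid = [np.zeros((64, 64)), np.zeros((32, 32)), np.zeros((16, 16))]
flat = [np.zeros((64, 64)), np.zeros((64, 64))]
print(guess_pyramid(pyramid))   # True
print(guess_pyramid(flat))      # False
```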
| 5,346,243 |
def __clear_archive_file(model_context):
"""
Remove any binaries already in the archive file.
:param model_context: the model context
:raises DiscoverException: if an error occurs while removing the binaries
"""
_method_name = '__clear_archive_file'
__logger.entering(class_name=_class_name, method_name=_method_name)
archive_file = model_context.get_archive_file()
if archive_file is None:
de = exception_helper.create_discover_exception('WLSDPLY-06004', model_context.get_archive_file_name())
__logger.throwing(class_name=_class_name, method_name=_method_name, error=de)
raise de
try:
archive_file.removeAllBinaries()
except WLSDeployArchiveIOException, wioe:
de = exception_helper.create_discover_exception('WLSDPLY-06005', wioe.getLocalizedMessage())
__logger.throwing(class_name=_class_name, method_name=_method_name, error=de)
raise de
return
| 5,346,244 |
def test_object_properties_match_xml(person_xml, mini_mock):
"""
Test the object is modified to include attributes for elements
from the XML tree.
"""
tree = etree.fromstring(person_xml)
# Set up some blank object attributes.
person0 = mini_mock()
person0.nickname = None
person0.first_name = None
person1 = mini_mock()
person1.nickname = None
person1.first_name = None
# The mappings for XML -> Object translation. This will tell the function
# to get the firstName element and assign it to the first_name attribute on
# the object. It will do the same for nickname.
mapping = {
'@namespaces': {'x': 'http://example.com/namespace'},
'nickname': ['x:nickname'], 'first_name': ['x:firstName']
}
bagatom.updateObjectFromXML(tree, person1, mapping)
assert person0.nickname != person1.nickname
assert person0.first_name != person1.first_name
assert person1.nickname == 'Jim'
assert person1.first_name == 'James'
| 5,346,245 |
def test_interdependency_constrained():
"""
Test a model with interdependent components, and with constraints which
depend on the Model's output.
This is done in the MatrixSymbol formalism, using a Tikhonov
regularization as an example. In this, a matrix inverse has to be
calculated and is used multiple times. Therefore we split that term of
into a seperate component, so the inverse only has to be computed once
per model call.
See https://arxiv.org/abs/1901.05348 for a more detailed background.
"""
N = Symbol('N', integer=True)
M = MatrixSymbol('M', N, N)
W = MatrixSymbol('W', N, N)
I = MatrixSymbol('I', N, N)
y = MatrixSymbol('y', N, 1)
c = MatrixSymbol('c', N, 1)
a, = parameters('a')
z, = variables('z')
i = Idx('i')
model_dict = {
W: Inverse(I + M / a ** 2),
c: - W * y,
z: sqrt(c.T * c)
}
# Sympy currently does not support derivatives of matrix expressions,
# so we use CallableModel instead of Model.
model = CallableModel(model_dict)
# Generate data
iden = np.eye(2)
M_mat = np.array([[2, 1], [3, 4]])
y_vec = np.array([[3], [5]])
eval_model = model(I=iden, M=M_mat, y=y_vec, a=0.1)
# Calculate the answers 'manually' so I know it was done properly
W_manual = np.linalg.inv(iden + M_mat / 0.1 ** 2)
c_manual = - np.atleast_2d(W_manual.dot(y_vec))
z_manual = np.atleast_1d(np.sqrt(c_manual.T.dot(c_manual)))
assert y_vec.shape == (2, 1)
assert M_mat.shape == (2, 2)
assert iden.shape == (2, 2)
assert W_manual.shape == (2, 2)
assert c_manual.shape == (2, 1)
assert z_manual.shape == (1, 1)
assert W_manual == pytest.approx(eval_model.W)
assert c_manual == pytest.approx(eval_model.c)
assert z_manual == pytest.approx(eval_model.z)
fit = Fit(model, z=z_manual, I=iden, M=M_mat, y=y_vec)
fit_result = fit.execute()
# See if a == 0.1 was reconstructed properly. Since only a**2 features
# in the equations, we check for the absolute value. Setting a.min = 0.0
# is not appreciated by the Minimizer, it seems.
assert np.abs(fit_result.value(a)) == pytest.approx(0.1)
| 5,346,246 |
def create_menu(menu_items, parent=None):
"""
Create the navigation nodes based on a passed list of dicts
"""
nodes = []
for menu_dict in menu_items:
try:
label = menu_dict['label']
except KeyError:
raise ImproperlyConfigured(
"No label specified for menu item in dashboard")
children = menu_dict.get('children', [])
if children:
node = Node(label=label, icon=menu_dict.get('icon', None),
access_fn=menu_dict.get('access_fn', None))
create_menu(children, parent=node)
else:
node = Node(label=label, icon=menu_dict.get('icon', None),
url_name=menu_dict.get('url_name', None),
url_kwargs=menu_dict.get('url_kwargs', None),
url_args=menu_dict.get('url_args', None),
access_fn=menu_dict.get('access_fn', None))
if parent is None:
nodes.append(node)
else:
parent.add_child(node)
return nodes
| 5,346,247 |
def imsave(f, img):
"""Save an image to file.
:param string|file f: Filename or file-like object.
:param numpy.ndarray img: Image to save. Of shape (M,N) or (M,N,3) or (M,N,4).
"""
# Ensure we use PIL so we can guarantee that imsave will accept file-like object as well as filename
skio.imsave(f, img, plugin='pil')
| 5,346,248 |
def find_contact(name):
"""Selects existing contact details based on the provided name."""
# Uses the LIKE operator to perform case insensitive searching. Also allows
# for the use of the % and _ wildcards.
conn = sqlite3.connect('contacts.db')
c = conn.cursor()
c.execute('SELECT contact_id, name, address, phone, email '
'FROM contacts WHERE name LIKE ?', (name,))
results = c.fetchall()
conn.close()
display_contact_details(results)
| 5,346,249 |
def get_tg_ids(db):
"""Obtain a list of recognized Telegram user IDs.
Args:
db: Database connector
Returns:
Query results for later iteration
"""
return db.query(QUERY_TG_IDS)
| 5,346,250 |
async def _casino(
ctx: commands.Context,
bet: int = None, # type: ignore
number_of_games: int = 1
) -> None:
"""
Casino, generates lines with sectors depending
on <number of games> (last argument, default: 1).
:param ctx: commands.Context
:param bet: Bet of the user, but if it is None, then the command will fail.
:param number_of_games: (1 <= x <= 3) affects the number of lines
with sectors and multiplies the bet.
:return: None
"""
if bet is None or bet <= 0:
await ctx.send('Specify the correct bet you want to play!')
elif not 1 <= number_of_games <= 3:
await ctx.send('You cannot play more than three games and less than one!')
else:
data: Container = await Casino(number_of_games).start()
win: bool = True if data.multiplier > 0 else False
to_footer: str = "Emoji\tCount\tPoints\n"
for emoji, count in data.emoji_stats.items():
# creating emoji stats in footer
to_footer += f"{ScoreUnicode[emoji].value}\t\t:\t" \
f"{count}\t:\t" \
f"{int(ScoreChances[emoji]) // 2}\n"
embed: discord.Embed = discord.Embed(
title=f'Total points: {data.total_points}. ' + (
f'You have won `{bet * data.multiplier * number_of_games}` coins.' if win
else f'You have lose `{bet * number_of_games}` coins.'
),
description="\n".join(data.board),
color=discord.Color.green() if win else discord.Color.red()
).set_footer(
text=to_footer,
icon_url=ctx.author.avatar_url
).add_field(
name=f'For x2 odds you had to score `<= {data.required_points[1]}` '
f'points, and for lose `>= {data.required_points[0]}.`',
value=f'**You got:** `{data.total_points}`'
)
await ctx.send(embed=embed)
| 5,346,251 |
def solve_with_log(board, out_fname):
"""Wrapper for solve: write log to out_fname"""
log = []
ret = solve(board, log)
with open(out_fname, 'w') as f:
f.write(json.dumps({'model': log}, indent=4))
return ret
| 5,346,252 |
def discrepancy(sample, bounds=None):
"""Discrepancy.
Compute the centered discrepancy on a given sample.
It is a measure of the uniformity of the points in the parameter space.
The lower the value is, the better the coverage of the parameter space is.
Parameters
----------
sample : array_like (n_samples, k_vars)
The sample to compute the discrepancy from.
bounds : tuple or array_like ([min, k_vars], [max, k_vars])
Desired range of transformed data. The transformation apply the bounds
on the sample and not the theoretical space, unit cube. Thus min and
max values of the sample will coincide with the bounds.
Returns
-------
discrepancy : float
Centered discrepancy.
References
----------
[1] Fang et al. "Design and modeling for computer experiments",
Computer Science and Data Analysis Series Science and Data Analysis
Series, 2006.
"""
sample = np.asarray(sample)
n_sample, dim = sample.shape
# Sample scaling from bounds to unit hypercube
if bounds is not None:
min_ = bounds.min(axis=0)
max_ = bounds.max(axis=0)
sample = (sample - min_) / (max_ - min_)
abs_ = abs(sample - 0.5)
disc1 = np.sum(np.prod(1 + 0.5 * abs_ - 0.5 * abs_ ** 2, axis=1))
prod_arr = 1
for i in range(dim):
s0 = sample[:, i]
prod_arr *= (1 +
0.5 * abs(s0[:, None] - 0.5) + 0.5 * abs(s0 - 0.5) -
0.5 * abs(s0[:, None] - s0))
disc2 = prod_arr.sum()
c2 = ((13.0 / 12.0) ** dim - 2.0 / n_sample * disc1 +
1.0 / (n_sample ** 2) * disc2)
return c2
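A minimal usage sketch, assuming `discrepancy` above is in scope: a regular grid covers the unit square more evenly than a sample clumped into one corner, so its centered discrepancy is lower.

```python
import numpy as np

rng = np.random.default_rng(0)
grid = np.array([[x, y] for x in (0.125, 0.375, 0.625, 0.875)
                        for y in (0.125, 0.375, 0.625, 0.875)])
clumped = rng.uniform(0.0, 0.2, size=(16, 2))
print(discrepancy(grid) < discrepancy(clumped))   # True
```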
| 5,346,253 |
def rz(psi, r):
"""
Wrapper for ERFA function ``eraRz``.
Parameters
----------
psi : double array
r : double array
Returns
-------
r : double array
Notes
-----
The ERFA documentation is below.
- - - - - -
e r a R z
- - - - - -
Rotate an r-matrix about the z-axis.
Given:
psi double angle (radians)
Given and returned:
r double[3][3] r-matrix, rotated
Notes:
1) Calling this function with positive psi incorporates in the
supplied r-matrix r an additional rotation, about the z-axis,
anticlockwise as seen looking towards the origin from positive z.
2) The additional rotation can be represented by this matrix:
( + cos(psi) + sin(psi) 0 )
( )
( - sin(psi) + cos(psi) 0 )
( )
( 0 0 1 )
Copyright (C) 2013-2017, NumFOCUS Foundation.
Derived, with permission, from the SOFA library. See notes at end of file.
"""
r = ufunc.rz(psi, r, r)
return r
| 5,346,254 |
def VisualizeCollapsedGraph(trace, node_names, edges, fuzzy):
"""
Visualize the collapsed graph and save the output.
@params trace: the trace for which to visualize a collapsed graph
@params node_names: names for the collapsed nodes
@params edges: list of edges connecting the nodes
"""
# get relavent information for output filename
dataset = trace.dataset
base_id = trace.base_id
if not os.path.exists('dots'):
os.mkdir('dots')
if not os.path.exists('dots/{}'.format(dataset)):
os.mkdir('dots/{}'.format(dataset))
# create the digraph
graph = nx.DiGraph()
for index, name in enumerate(node_names):
graph.add_node(index, label=name)
# populate the edges in the graph
for (source_index, destination_index) in edges:
graph.add_edge(source_index, destination_index)
# get the output filename for this collapsed graph
if fuzzy: output_filename = 'dots/{}/{}-fuzzy-collapsed.dot'.format(dataset, base_id)
else: output_filename = 'dots/{}/{}-collapsed.dot'.format(dataset, base_id)
# save the graph into dot format
A = nx.nx_agraph.to_agraph(graph)
A.layout('dot')
A.draw(output_filename)
| 5,346,255 |
def _get_ticklabels(band_type, kHz, separator):
"""
Return a list with all tick labels for octave or third octave bands cases.
"""
if separator is None:
import locale
separator = locale.localeconv()['decimal_point']
if band_type == 'octave':
if kHz is True:
ticklabels = TICKS_OCTAVE_KHZ
else:
ticklabels = TICKS_OCTAVE
else:
if kHz is True:
ticklabels = TICKS_THIRD_OCTAVE_KHZ
else:
ticklabels = TICKS_THIRD_OCTAVE
ticklabels = _set_separator(ticklabels, separator)
return ticklabels
| 5,346,256 |
def test_scrapping():
"""
    Function that will test if the data was scraped successfully
"""
scrapper = Scrapper()
scrapped_data = scrapper.scrape_fb_page("nous.sommes.les.ingenieurs", 5)
assert len(scrapped_data) > 0
assert len(scrapped_data[0]["text"]) > 0
| 5,346,257 |
def SpearmanP(predicted, observed):
"""abstracts out p from stats.spearmanr"""
if np.isnan(np.min(predicted)) or np.isnan(np.min(observed)):
return np.asarray([np.nan])
coef, p = stats.spearmanr(np.squeeze(predicted).astype(float), np.squeeze(observed).astype(float))
return p
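# Quick usage sketch (assumes numpy as np and scipy.stats as stats, as used above):
predicted = np.array([0.1, 0.4, 0.35, 0.8])
observed = np.array([0.0, 0.3, 0.5, 0.9])
p_value = SpearmanP(predicted, observed)   # p-value of the Spearman rank correlation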
| 5,346,258 |
def get_zebra_route_type_by_name(route_type='BGP'):
"""
Returns the constant value for Zebra route type named "ZEBRA_ROUTE_*"
from its name.
See "ZEBRA_ROUTE_*" constants in "ryu.lib.packet.zebra" module.
:param route_type: Route type name (e.g., Kernel, BGP).
:return: Constant value for Zebra route type.
"""
return getattr(zebra, "ZEBRA_ROUTE_%s" % route_type.upper())
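# Usage sketch (requires ryu; `zebra` is ryu.lib.packet.zebra as referenced above).
# The name is upper-cased and mapped onto the matching ZEBRA_ROUTE_* constant.
assert get_zebra_route_type_by_name('bgp') == zebra.ZEBRA_ROUTE_BGP
assert get_zebra_route_type_by_name('Kernel') == zebra.ZEBRA_ROUTE_KERNEL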
| 5,346,259 |
def preprocess(arr):
"""Preprocess image array with simple normalization.
Arguments:
----------
arr (np.array): image array
Returns:
--------
arr (np.array): preprocessed image array
"""
arr = arr / 255.0
arr = arr * 2.0 - 1.0
return arr
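# Usage sketch (assumes numpy as np): pixel values in [0, 255] are mapped onto [-1, 1].
img = np.array([0.0, 127.5, 255.0])
print(preprocess(img))   # -> [-1.  0.  1.]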
| 5,346,260 |
def remove_const(type):
"""removes const from the type definition
If type is not const type, it will be returned as is
"""
nake_type = remove_alias(type)
if not is_const(nake_type):
return type
else:
return nake_type.base
| 5,346,261 |
def populate_institute_form(form, institute_obj):
"""Populate institute settings form
Args:
form(scout.server.blueprints.institutes.models.InstituteForm)
institute_obj(dict) An institute object
"""
# get all other institutes to populate the select of the possible collaborators
institutes_tuples = []
for inst in store.institutes():
if not inst["_id"] == institute_obj["_id"]:
institutes_tuples.append(((inst["_id"], inst["display_name"])))
form.display_name.default = institute_obj.get("display_name")
form.institutes.choices = institutes_tuples
form.coverage_cutoff.default = institute_obj.get("coverage_cutoff")
form.frequency_cutoff.default = institute_obj.get("frequency_cutoff")
# collect all available default HPO terms and populate the pheno_groups form select with these values
default_phenotypes = [choice[0].split(" ")[0] for choice in form.pheno_groups.choices]
if institute_obj.get("phenotype_groups"):
for key, value in institute_obj["phenotype_groups"].items():
if not key in default_phenotypes:
custom_group = " ".join(
[key, ",", value.get("name"), "( {} )".format(value.get("abbr"))]
)
form.pheno_groups.choices.append((custom_group, custom_group))
# populate gene panels multiselect with panels from institute
available_panels = list(store.latest_panels(institute_obj["_id"]))
# And from institute's collaborators
for collaborator in institute_obj.get("collaborators", []):
available_panels += list(store.latest_panels(collaborator))
panel_set = set()
for panel in available_panels:
panel_set.add((panel["panel_name"], panel["display_name"]))
form.gene_panels.choices = list(panel_set)
return default_phenotypes
| 5,346,262 |
def test_output_overview_path(output, tmpdir):
"""Testing if overview HTML file path is created correctly."""
expected_path = os.path.join(tmpdir, "overview.html")
actual_path = output.overview_file()
assert actual_path == expected_path
| 5,346,263 |
def closestMedioidI(active_site, medioids, distD):
"""
returns the index of the closest medioid in medioids to active_site
input: active_site, an ActiveSite instance
medioids, a list of ActiveSite instances
distD, a dictionary of distances
output: the index of the ActiveSite closest to active_site in medioids
"""
closest = (float('Inf'), None)
for i, medioid in enumerate(medioids):
thisDist = distD[frozenset([active_site, medioid])]
if thisDist < closest[0]:
closest = (thisDist, i)
return closest[1]
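# Minimal usage sketch: any hashable objects work in place of ActiveSite instances
# here; plain strings are used purely for illustration.
medioids = ['a', 'b', 'c']
distD = {frozenset(['x', 'a']): 2.0,
         frozenset(['x', 'b']): 0.5,
         frozenset(['x', 'c']): 1.0}
print(closestMedioidI('x', medioids, distD))   # -> 1 (index of 'b', the closest medioid)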
| 5,346,264 |
def watchPoint(filename, lineno, event="call"):
"""whenever we hit this line, print a stack trace. event='call'
for lines that are function definitions, like what a profiler
gives you.
Switch to 'line' to match lines inside functions. Execution speed
will be much slower."""
seenTraces: Dict[Any, int] = {} # trace contents : count
def trace(frame, ev, arg):
if ev == event:
if (frame.f_code.co_filename, frame.f_lineno) == (filename, lineno):
stack = ''.join(traceback.format_stack(frame))
if stack not in seenTraces:
print("watchPoint hit")
print(stack)
seenTraces[stack] = 1
else:
seenTraces[stack] += 1
return trace
sys.settrace(trace)
# atexit, print the frequencies?
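# Usage sketch ('example.py' and line 10 are placeholder values; the filename must match
# frame.f_code.co_filename exactly for hits to be reported):
watchPoint('example.py', 10, event='call')
sys.settrace(None)   # detach the trace function again when done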
| 5,346,265 |
def no_op_job():
"""
A no-op parsl.python_app to return a future for a job that already
has its outputs.
"""
return 0
| 5,346,266 |
def identity(dim, shape=None):
"""Return identity operator with appropriate shape.
Parameters
----------
dim : int
Dimension of real space.
shape : int (optional)
Size of the unitary part of the operator.
If not provided, U is set to None.
Returns
-------
id : PointGroupElement
"""
R = ta.identity(dim, int)
if shape is not None:
U = np.eye(shape)
else:
U = None
return PointGroupElement(R, False, False, U)
| 5,346,267 |
def configure_checkout_session(request):
"""
Configure the payment session for Stripe.
Return the Session ID.
Key attributes are:
- mode: payment (for one-time charge) or subscription
- line_items: including price_data because users configure the donation
price.
TODOs
- Standard amounts could use active Prices, rather than ad-hoc price_data.
- Tie Stripe customers to site User accounts.
- If a user is logged in, we can create the session for the correct
customer.
- Stripe's documented flows are VERY keen that we create the customer
first, although the session will do that if we don't.
- Allow selecting currency. (Smaller task.) Users receive an additional
charge making payments in foreign currencies. Stripe will convert all
payments without further charge.
"""
# Form data:
# - The interval: which determines the Product and the mode.
# - The amount: which goes to the Price data.
form = PaymentForm(request.POST)
if not form.is_valid():
data = {
'success': False,
'error': form.errors
}
return JsonResponse(data)
amount = form.cleaned_data["amount"]
interval = form.cleaned_data["interval"]
product_details = settings.PRODUCTS[interval]
is_subscription = product_details.get('recurring', True)
price_data = {
'currency': 'usd',
'unit_amount': amount * 100,
'product': product_details['product_id']
}
if is_subscription:
price_data['recurring'] = {
'interval': product_details['interval'],
"interval_count": product_details["interval_count"],
}
try:
session = stripe.checkout.Session.create(
payment_method_types=['card'],
line_items=[{'price_data': price_data, 'quantity': 1}],
mode='subscription' if is_subscription else 'payment',
success_url=request.build_absolute_uri(
reverse('fundraising:thank-you')
),
cancel_url=request.build_absolute_uri(
reverse('fundraising:index')
),
# TODO: Drop this when updating API.
stripe_version="2020-08-27",
)
return JsonResponse({'success': True, "sessionId": session["id"]})
except Exception as e:
logger.exception('Error configuring Stripe session.')
return JsonResponse({'success': False, "error": str(e)})
| 5,346,268 |
def load_schemas():
"""Return all of the schemas in this directory in a dictionary where
the keys are the filename (without the .json extension) and the values
are the JSON schemas (in dictionary format)
:raises jsonschema.exceptions.SchemaError if any of the JSON files in this
directory are not valid (Draft 7) JSON schemas"""
schemas = {}
for filename in os.listdir(THIS_DIR):
if (
os.path.isfile(os.path.join(THIS_DIR, filename))
and os.path.splitext(filename)[1].lower() == ".json"
):
key = os.path.splitext(filename)[0]
with open(os.path.join(THIS_DIR, filename)) as file_obj:
value = json.load(file_obj)
Draft7Validator.check_schema(value)
schemas[key] = value
return schemas
| 5,346,269 |
def load_separator(
model_str_or_path: str = "umxhq",
targets: Optional[list] = None,
niter: int = 1,
residual: bool = False,
wiener_win_len: Optional[int] = 300,
device: Union[str, torch.device] = "cpu",
pretrained: bool = True,
filterbank: str = "torch",
):
"""Separator loader
Args:
model_str_or_path (str): Model name or path to model _parent_ directory
            E.g. the following files are assumed to be present when
loading `model_str_or_path='mymodel', targets=['vocals']`
'mymodel/separator.json', mymodel/vocals.pth', 'mymodel/vocals.json'.
Defaults to `umxhq`.
targets (list of str or None): list of target names. When loading a
pre-trained model, all `targets` can be None as all targets
will be loaded
niter (int): Number of EM steps for refining initial estimates
in a post-processing stage. `--niter 0` skips this step altogether
(and thus makes separation significantly faster) More iterations
can get better interference reduction at the price of artifacts.
Defaults to `1`.
residual (bool): Computes a residual target, for custom separation
scenarios when not all targets are available (at the expense
of slightly less performance). E.g vocal/accompaniment
Defaults to `False`.
wiener_win_len (int): The size of the excerpts (number of frames) on
which to apply filtering independently. This means assuming
time varying stereo models and localization of sources.
None means not batching but using the whole signal. It comes at the
price of a much larger memory usage.
Defaults to `300`
device (str): torch device, defaults to `cpu`
pretrained (bool): determines if loading pre-trained weights
filterbank (str): filterbank implementation method.
Supported are `['torch', 'asteroid']`. `torch` is about 30% faster
compared to `asteroid` on large FFT sizes such as 4096. However,
            asteroid's stft can be exported to onnx, which makes it practical
for deployment.
"""
model_path = Path(model_str_or_path).expanduser()
# when path exists, we assume its a custom model saved locally
if model_path.exists():
if targets is None:
raise UserWarning("For custom models, please specify the targets")
target_models = load_target_models(
targets=targets, model_str_or_path=model_path, pretrained=pretrained
)
with open(Path(model_path, "separator.json"), "r") as stream:
enc_conf = json.load(stream)
separator = model.Separator(
target_models=target_models,
niter=niter,
residual=residual,
wiener_win_len=wiener_win_len,
sample_rate=enc_conf["sample_rate"],
n_fft=enc_conf["nfft"],
n_hop=enc_conf["nhop"],
nb_channels=enc_conf["nb_channels"],
filterbank=filterbank,
).to(device)
# otherwise we load the separator from torchhub
else:
hub_loader = getattr(openunmix, model_str_or_path)
separator = hub_loader(
targets=targets,
device=device,
pretrained=True,
niter=niter,
residual=residual,
filterbank=filterbank,
)
return separator
| 5,346,270 |
def transform_datetime(date_str, site):
"""
    Convert the raw date string into a normalized date format according to the site.
    :param date_str: raw date string
    :param site: site identifier
    :return: converted date string
"""
result = None
if site in SITE_MAP:
if SITE_MAP[site] in (SiteType.SINA, SiteType.HACKERNEWS):
try:
time_int = int(date_str)
result = datetime.fromtimestamp(time_int).strftime(DATE_FMT)
except Exception as e:
result = parse(date_str).strftime(DATE_FMT)
elif SITE_MAP[site] == SiteType.TENCENT:
result = date_str
elif SITE_MAP[site] == SiteType.TUICOOL:
result = date_str
elif SITE_MAP[site] == SiteType.HACKER:
result = date_str
elif SITE_MAP[site] == SiteType.DMZJ:
result = parse(date_str).strftime(DATE_FMT)
elif SITE_MAP[site] == SiteType.ACGMH:
result = parse(date_str).strftime(DATE_FMT)
elif SITE_MAP[site] == SiteType.CTOLIB:
result = parse(date_str).strftime(DATE_FMT)
elif date_str.strip() == '':
result = datetime.now().strftime(DATE_FMT)
else:
result = parse(date_str).strftime(DATE_FMT)
return result
| 5,346,271 |
def ParseEventsForTTLs(eventsFileName, TR = 2.0, onset = False, threshold = 5.0):
"""
Parses the events file from Avotec for TTLs. Use if history file is not available.
The events files does not contain save movie start/stops, so use the history file if possible
@param eventsFileName: name of events file from avotec
@param TR: TR duration in seconds
@param onset: use the TTL pulse onset instead of the offset for timestamps?
@param threshold: multiple of the TR interval to use as a threshold as a break between runs
@type eventsFileName: str
@type TR: float
@type onset: bool
@type threshold: float
@return: timestamps of TTLs in each run, each run is a list of TTL timestamps and the number of TTLs
@rtype: list<tuple<list<float>, int>>
"""
eventsFile = open(eventsFileName, 'r')
TTLtoken = 'S' if onset else 's'
TTLs = []
lastTime = (0, 0, 0, 0)
duplicates = 0
runs = []
thisRun = []
line = eventsFile.readline()
while line != '':
tokens = line.split()
if len(tokens) > 0 and tokens[-1] == TTLtoken:
time = []
for token in re.split('[:\.]', re.match('[0-9\. ]+:[0-9\. ]+:[0-9 ]+\.[0-9]+', line).group()):
if (len(token) > 2): # the milliseconds have rather high precision
time.append(int(numpy.round(float(token) * 0.001)))
else:
time.append(int(token))
time = tuple(time)
if (TimeToSeconds(time) - TimeToSeconds(lastTime) > 0.1): # long enough of an interval since last one such that it's not a duplicate
TTLs.append(time)
lastTime = time
else:
duplicates += 1
line = eventsFile.readline()
nTRs = 1
thisRun.append(TTLs[0])
for i in range(1, len(TTLs) - 1):
this = TTLs[i]
last = TTLs[i - 1]
dt = TimeToSeconds(this) - TimeToSeconds(last)
if dt > threshold * TR:
runs.append((thisRun, nTRs))
thisRun = [this]
nTRs = 1
else:
thisRun.append(this)
nTRs += 1
runs.append((thisRun, nTRs + 1)) # account for last run without a faraway TTL
eventsFile.close()
print('{} duplicated TTLs'.format(duplicates))
for i in range(len(runs)):
duration = TimeToSeconds(runs[i][0][-1]) - TimeToSeconds(runs[i][0][0])
expectedTRs = int(numpy.round(duration / TR))
if (i == len(runs) - 1):
expectedTRs += 1 # account for last run without a faraway TTL
print('Run {} expected {} TTLs from duration, actual recorded {} TTLs'.format(i + 1, expectedTRs, len(runs[i][0])))
return runs
| 5,346,272 |
def test_tile_valid_pan(landsat_get_mtl, monkeypatch):
"""
Should work as expected
"""
monkeypatch.setattr(landsat8, "LANDSAT_BUCKET", LANDSAT_BUCKET)
landsat_get_mtl.return_value = LANDSAT_METADATA
tile_z = 8
tile_x = 71
tile_y = 102
data, mask = landsat8.tile(LANDSAT_SCENE_C1, tile_x, tile_y, tile_z, pan=True)
assert data.shape == (3, 256, 256)
assert mask.shape == (256, 256)
| 5,346,273 |
def _to_native_string(string, encoding='ascii'):
"""Given a string object, regardless of type, returns a representation of
that string in the native string type, encoding and decoding where
necessary. This assumes ASCII unless told otherwise.
"""
if isinstance(string, str):
out = string
else:
out = string.decode(encoding)
return out
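# Usage sketch: bytes are decoded, native str instances pass through unchanged.
assert _to_native_string(b'hello') == 'hello'
assert _to_native_string('hello') == 'hello'
assert _to_native_string('héllo'.encode('utf-8'), encoding='utf-8') == 'héllo'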
| 5,346,274 |
def load(path: str) -> model_lib.Model:
"""Deserializes a TensorFlow SavedModel at `path` to a `tff.learning.Model`.
Args:
path: The `str` path pointing to a SavedModel.
Returns:
A `tff.learning.Model`.
"""
py_typecheck.check_type(path, str)
if not path:
raise ValueError('`path` must be a non-empty string, cannot deserialize '
'models without an output path.')
return _LoadedSavedModel(tf.saved_model.load(path))
| 5,346,275 |
def plot_feature_importance(obj, top_n=None, save_path=None):
"""
    Compute the feature importance of a LightGBM model and plot it as bar charts.
    Parameters
    ----------
    obj: lgbm object or DataFrame
        A trained LightGBM model, or a feature importance DataFrame that has already been computed
    top_n: int, default None
        Show only the top N features; if omitted, all features are shown. To keep the plot
        readable, limiting it is recommended when there are more than 30 features.
    save_path: str, default None
        Path where the figure is saved
    Returns
    -------
    df_fi: DataFrame
        Feature importance of the model, including the following metrics:
        'split': number of times the feature is selected as a split feature
        'total_gain': total gain in the loss function from splits on this feature
        'cover': average number of samples covered per split on this feature
        'avg_gain': average gain in the loss function per split on this feature
        'split_weight': share of this feature's split count in the total number of splits
        'gain_weight': share of this feature's gain in the total split gain
"""
if obj.__class__.__name__ == 'LGBMClassifier' or obj.__class__.__name__ == 'Booster':
if obj.__class__.__name__ == 'LGBMClassifier':
booster = obj.booster_
else:
booster = obj
df_fi = get_feature_importance(booster)
df_fi['avg_gain'] = df_fi['total_gain'] / df_fi['split']
df_fi['split_weight'] = df_fi['split'] / df_fi['split'].sum()
df_fi['gain_weight'] = df_fi['total_gain'] / df_fi['total_gain'].sum()
df_fi['split_rank'] = df_fi['split'].rank(method='first', ascending=False).values.reshape((-1,))
df_fi['gain_rank'] = df_fi['total_gain'].rank(method='first', ascending=False).values.reshape((-1,))
df_fi['avg_gain_rank'] = df_fi['avg_gain'].rank(method='first', ascending=False).values.reshape((-1,))
df_fi['cover_rank'] = df_fi['cover'].rank(method='first', ascending=False).values.reshape((-1,))
elif isinstance(obj, pd.DataFrame):
df_fi = obj
else:
raise ValueError('Unknown object type')
if top_n is not None:
df_gain_fi = df_fi.loc[df_fi['gain_rank'] <= top_n, :].copy().sort_values(by='gain_rank', ascending=False)
df_split_fi = df_fi.loc[df_fi['split_rank'] <= top_n, :].copy().sort_values(by='split_rank', ascending=False)
df_cover_fi = df_fi.loc[df_fi['cover_rank'] <= top_n, :].copy().sort_values(by='cover_rank', ascending=False)
title1 = 'Weight of Split Gain (Top {0})'.format(top_n)
title2 = 'Weight of Split Count (Top {0})'.format(top_n)
title3 = 'Sample Coverage across all splits (Top {0})'.format(top_n)
else:
df_gain_fi = df_fi.copy().sort_values(by='gain_rank', ascending=False)
df_split_fi = df_fi.copy().sort_values(by='split_rank', ascending=False)
df_cover_fi = df_fi.copy().sort_values(by='cover_rank', ascending=False)
title1 = 'Weight of Split Gain'
title2 = 'Weight of Split Count'
title3 = 'Sample coverage across all splits'
plt.figure(figsize=(4, 9), dpi=200)
plt.subplot(3, 1, 1)
plt.barh(np.arange(df_gain_fi.shape[0]), df_gain_fi['gain_weight'], height=0.6, color='lightskyblue')
for i, var in enumerate(df_gain_fi['var_name']):
plt.annotate(var, xy=(0.001, i), va='center', ha='left', fontsize=4, color='black', fontweight='normal')
ax = plt.gca()
for at in ['left', 'right', 'bottom', 'top']:
ax.spines[at].set_linewidth(0.7)
plt.xticks(fontsize=5)
plt.yticks([])
plt.xlabel('gain weight', fontsize=5)
plt.title(title1, fontsize=6)
plt.subplot(3, 1, 2)
plt.barh(np.arange(df_gain_fi.shape[0]), df_split_fi['split_weight'], height=0.6, color='lightgreen')
for i, var in enumerate(df_split_fi['var_name']):
plt.annotate(var, xy=(0.001, i), va='center', ha='left', fontsize=4, color='black', fontweight='normal')
ax = plt.gca()
for at in ['left', 'right', 'bottom', 'top']:
ax.spines[at].set_linewidth(0.7)
plt.xticks(fontsize=5)
plt.yticks([])
plt.xlabel('split weight', fontsize=5)
plt.title(title2, fontsize=6)
plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=2.0)
plt.subplot(3, 1, 3)
plt.barh(np.arange(df_gain_fi.shape[0]), df_cover_fi['cover'], height=0.6, color='Salmon')
for i, var in enumerate(df_cover_fi['var_name']):
plt.annotate(var, xy=(0.001, i), va='center', ha='left', fontsize=4, color='black', fontweight='normal')
ax = plt.gca()
for at in ['left', 'right', 'bottom', 'top']:
ax.spines[at].set_linewidth(0.7)
plt.xticks(fontsize=5)
plt.yticks([])
plt.xlabel('sample coverage', fontsize=5)
plt.title(title3, fontsize=6)
plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=2.0)
if save_path is not None:
if save_path.endswith('.png') or save_path.endswith('.jpg'):
plt.savefig(save_path, bbox_inches='tight')
elif os.path.isdir(save_path):
plt.savefig(os.path.join(save_path, 'lgbm_feature_importance.png'), bbox_inches='tight')
else:
raise ValueError('No such file or directory: {0}'.format(save_path))
plt.show()
plt.close()
return df_fi
| 5,346,276 |
def plot_pr(precision, recall, area, name, dst_dir=None):
"""Plotting ROC curve.
Arguments:
tpr {list} -- a list of numpy 1D array for true positive rate
fpr {list} -- a list of numpy 1D array for false positive rate
area {list} -- a list of floats for area under curve
name {str} -- text for the legend
dst_dir {str} -- output figure directory, file name `roc.pdf`
"""
if dst_dir is None:
dst_dir = ''
plt.clf()
plt.figure(figsize=(5,5))
plt.rcParams["font.family"] = "Times New Roman"
for (x_val, y_val, a_val, string) in zip(recall, precision, area, name):
label = string
plt.plot(x_val, y_val, label=label)
plt.legend(loc='lower right')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.grid()
plt.ylabel('Precision')
plt.xlabel('Recall')
plt.savefig(os.path.join(dst_dir, 'pr.pdf'), bbox_inches='tight')
plt.close()
| 5,346,277 |
def parameters_from_object_schema(schema, in_='formData'):
"""Convert object schema to parameters."""
# We can only extract parameters from schema
if schema['type'] != 'object':
return []
properties = schema.get('properties', {})
required = schema.get('required', [])
parameters = []
for name, property in properties.items():
parameter = {
'name': name,
'in_': in_,
'required': (name in required),
}
parameter.update(property)
parameter = Parameter(**parameter)
parameters.append(parameter)
parameters = sorted(parameters, key=lambda x: x['name'])
return parameters
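# Minimal usage sketch (the Parameter class instantiated above is assumed to accept
# the keyword arguments shown in the function body):
schema = {
    'type': 'object',
    'required': ['name'],
    'properties': {
        'name': {'type': 'string'},
        'age': {'type': 'integer'},
    },
}
params = parameters_from_object_schema(schema, in_='formData')
# -> [Parameter for 'age' (optional), Parameter for 'name' (required)], sorted by name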
| 5,346,278 |
def env_vars(request):
"""Sets environment variables to use .env and config.json files."""
os.environ["ENV"] = "TEST"
os.environ["DOTENV_FILE"] = str(DOTENV_FILE)
os.environ["CONFIG_FILE"] = str(CONFIG_FILE)
os.environ["DATABASE_URL"] = get_db_url()
return True
| 5,346,279 |
def any_toggle_enabled(*toggles):
"""
Return a view decorator for allowing access if any of the given toggles are
enabled. Example usage:
@toggles.any_toggle_enabled(REPORT_BUILDER, USER_CONFIGURABLE_REPORTS)
def delete_custom_report():
pass
"""
def decorator(view_func):
@wraps(view_func)
def wrapped_view(request, *args, **kwargs):
for t in toggles:
if (
(hasattr(request, 'user') and t.enabled(request.user.username))
or (hasattr(request, 'domain') and t.enabled(request.domain))
):
return view_func(request, *args, **kwargs)
raise Http404()
return wrapped_view
return decorator
| 5,346,280 |
def moguls(material, height, randomize, coverage, det, e0=20.0, withPoisson=True, nTraj=defaultNumTraj, dose=defaultDose, sf=True, bf=True, optimize=True, xtraParams=defaultXtraParams):
"""moguls(material, radius, randomize, det, [e0=20.0], [withPoisson=True], [nTraj=defaultNumTraj], [dose = 120.0], [sf=True], [bf=True], [optimize=True], [xtraParams={}])
Monte Carlo simulate a spectrum from a rough surface made up of close packed spheres.
+ material - Composition of material
+ height - mogul height = 0.5 * mogul radius
+ randomize - randomize the beam start position?
+ coverage - fractional likelihood of each bump existing (0.0 to 1.0)"""
tmp = u"MC simulation of a %0.2lg um %d%% %smogul bed of %s at %0.1f keV%s%s" % (1.0e6 * height, int(100.0*coverage), (" rand " if randomize else " "), material, e0, (" + CSF" if sf else ""), (" + BSF" if bf else ""))
return base(det, e0, withPoisson, nTraj, dose, sf, bf, tmp, buildMoguls, { "Coverage" : coverage, "Optimize": optimize, "Height" : height, "Material" : material, "Randomize" : randomize }, xtraParams)
| 5,346,281 |
def link_datasets(yelp_results, dj_df, df_type="wages"):
"""
(Assisted by Record Linkage Toolkit library and documentation)
    This function compares the Yelp query results to database results and
produces the best matches based on computing the qgram score. Depending
on the specific database table characteristics the qgram calculation
will be between the zip_code, business name, address strings, latitude,
    longitude, or a combination of those characteristics.
Inputs:
- yelp_results: a pandas dataframe of yelp business results based
on a user's input
- dj_df: a pandas dataframe of django results.
Ex. labour statistics, healthcode violations, Divvy, etc.
- df_type: a string of which specific dataframe is being passed to
be compared to the Yelp results
Outputs:
- link: a tuple containing the indices of Yelp query results dataframe
and the database dataframe AND the best matches qgram scores
"""
# set thresholds for comparing strings using qgram method
name_thresh = 0.55
addr_thresh = 0.55
strong_addr_thresh = 0.90
# initialize a Record Linkage comparison object
compare = rl.Compare()
# Labour & Food data comparisons to Yelp are made on zip, business name,
# and address
if df_type == "wages" or df_type == "food":
indexer = rl.BlockIndex(on='zip_code') # block on zip code
compare.numeric('zip_code', 'zip_code', method='linear',
scale=30.0, label='zip_score')
compare.string('name', 'name', method='qgram',
threshold=name_thresh, label='name_score')
compare.string('addr', 'addr', method='qgram',
threshold=addr_thresh, label='addr_score')
# Environmental data comparisons to Yelp are made on address
elif df_type == "enviro":
indexer = rl.FullIndex() # no blocking available
compare.string('addr', 'addr', method='qgram',
threshold=strong_addr_thresh, label='addr_score')
# all other data comparisons to Yelp
else:
indexer = rl.FullIndex()
pairs = indexer.index(yelp_results, dj_df)
# In addition to above comparisons, ALL data sets are also compared to
# Yelp based on latitude and longitude
compare.geo('latitude', 'longitude', 'latitude', 'longitude',
method='linear', scale=30.0, label='coord_score')
# compute record linkage scores
features = compare.compute(pairs, yelp_results, dj_df)
# set classification thresholds
zip_classif_thresh = 1.0
addr_classif_thresh = 1.0
coord_classif_thresh = 0.99
name_classif_thresh = 1.0
# Classification and final filtering
if df_type == "wages" or df_type == "food":
best_matches = features[(features['zip_score'] == zip_classif_thresh) &
(features['name_score'] == name_classif_thresh) &
(features['addr_score'] == addr_classif_thresh) &
(features['coord_score'] >= coord_classif_thresh)]
elif df_type == "enviro":
best_matches = features[(features['addr_score'] == addr_classif_thresh) &
(features['coord_score'] >= coord_classif_thresh)]
else:
best_matches = features[(features['coord_score'] >= coord_classif_thresh)]
# obtain the index values from best_matches
index_array = best_matches.index.values
# create tuple of indices and best matches df
link = (index_array, best_matches)
return link
| 5,346,282 |
def hello():
"""Say Hello, so that we can check shared code."""
return b"hello"
| 5,346,283 |
def copytree(src, dst, symlinks=False, ignore=None):
"""
Credit: http://stackoverflow.com/questions/1868714/how-do-i-copy-an-entire-directory-of-files-into-an-existing-directory-using-pyth
"""
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
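# Usage sketch: unlike shutil.copytree, the destination directory must already exist.
import tempfile
src, dst = tempfile.mkdtemp(), tempfile.mkdtemp()
open(os.path.join(src, 'a.txt'), 'w').close()
copytree(src, dst)
assert os.path.exists(os.path.join(dst, 'a.txt'))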
| 5,346,284 |
def loglog_mean_lines(x, ys, axis=0, label=None, alpha=0.1):
""" Log-log plot of lines and their mean. """
return _plot_mean_lines(partial(plt.loglog, x), ys, axis, label, alpha)
| 5,346,285 |
def read_lines_from_shapefile(fpath):
""" Read coordinates of cutting line segments from a ESRI Shapefile
containing line features.
Parameters
----------
fpath
Name of a file containing coordinates of cutting lines
Returns
--------
list
List of coordinates of cutting lines
"""
import osgeo.ogr
driver = osgeo.ogr.GetDriverByName('ESRI Shapefile')
datasource = driver.Open(fpath, 0)
if datasource is None:
print('Could not open ' + fpath)
raise RuntimeError()
layer = datasource.GetLayer()
feature = layer.GetNextFeature()
lines = list()
while feature:
geometry = feature.GetGeometryRef()
line = geometry.GetPoints()
lines.append(line)
feature = layer.GetNextFeature()
return lines
| 5,346,286 |
def generate_identifier(endpoint_description: str) -> str:
"""Generate ID for model."""
return (
Config.fdk_publishers_base_uri()
+ "/fdk-model-publisher/catalog/"
+ sha1(bytes(endpoint_description, encoding="utf-8")).hexdigest() # noqa
)
| 5,346,287 |
def test_instant_memory_statistics():
""" Test the instant memory statistics. """
stats = instant_memory_statistics()
# test bounds (percent)
assert type(stats) is float
assert stats >= 0
assert stats <= 100
| 5,346,288 |
def depthFirstSearch(problem):
"""Search the deepest nodes in the search tree first."""
stack = util.Stack() # Stack used as fringe list
stack.push((problem.getStartState(),[],0))
return genericSearch(problem,stack)
| 5,346,289 |
def parse_cli_args() -> argparse.Namespace:
"""
Parse arguments passed via Command Line Interface (CLI).
:return:
namespace with arguments
"""
parser = argparse.ArgumentParser(description='Algorithmic composition of dodecaphonic music.')
parser.add_argument(
'-c', '--config_path', type=str, default=None, help='path to configuration file'
)
cli_args = parser.parse_args()
return cli_args
| 5,346,290 |
def create_app():
"""
Method to init and set up the Flask application
"""
flask_app = MyFlask(import_name="dipp_app")
_init_config(flask_app)
_setup_context(flask_app)
_register_blueprint(flask_app)
_register_api_error(flask_app)
return flask_app
| 5,346,291 |
def read_file(filename=""):
"""reads text file"""
with open(filename, mode='r', encoding='utf-8') as a_file:
print(a_file.read(), end='')
a_file.close()
| 5,346,292 |
def find_consumes(method_type):
"""
Determine mediaType for input parameters in request body.
"""
if method_type in ('get', 'delete'):
return None
return ['application/json']
| 5,346,293 |
def preprocess(text):
""" Simple Arabic tokenizer and sentencizer. It is a space-based tokenizer. I use some rules to handle
    tokenization exceptions like words containing the preposition 'و'. For example 'ووالدته' is tokenized to 'و والدته'
:param text: Arabic text to handle
:return: list of tokenized sentences
"""
try:
text = text.decode('utf-8')
except(UnicodeDecodeError, AttributeError):
pass
text = text.strip()
tokenizer_exceptions = ["وظف", "وضعها", "وضعه", "وقفنا", "وصفوها", "وجهوا", "والدته", "والده", "وادي", "وضعية",
"واجهات", "وفرتها", "وقاية", "وفا", "وزيرنا", "وزارتي", "وجهاها", "واردة", "وضعته",
"وضعتها", "وجاهة", "وهمية", "واجهة", "واضعاً", "واقعي", "ودائع", "واعدا", "واع", "واسعا",
"ورائها", "وحدها", "وزارتي", "وزارتي", "والدة", "وزرائها", "وسطاء", "وليامز", "وافق",
"والدها", "وسم", "وافق", "وجهها", "واسعة", "واسع", "وزنها", "وزنه",
"وصلوا", "والدها", "وصولاً", "وضوحاً", "وجّهته", "وضعته", "ويكيليكس", "وحدها", "وزيراً",
"وقفات", "وعر", "واقيًا", "وقوف", "وصولهم", "وارسو", "واجهت", "وقائية", "وضعهم",
"وسطاء", "وظيفته", "ورائه", "واسع", "ورط", "وظفت", "وقوف", "وافقت", "وفدًا", "وصلتها",
"وثائقي", "ويليان", "وساط", "وُقّع", "وَقّع", "وخيمة", "ويست", "والتر", "وهران", "ولاعة",
"ولايت", "والي", "واجب", "وظيفتها", "ولايات", "واشنطن", "واصف",
"وقح", "وعد", "وقود", "وزن", "وقوع", "ورشة", "وقائع", "وتيرة", "وساطة", "وفود", "وفات",
"وصاية", "وشيك", "وثائق", "وطنية", "وجهات", "وجهت", "وعود", "وضعهم", "وون", "وسعها", "وسعه",
"ولاية", "واصفاً", "واصلت", "وليان", "وجدتها", "وجدته", "وديتي", "وطأت", "وطأ", "وعودها",
"وجوه", "وضوح", "وجيز", "ورثنا", "ورث", "واقع", "وهم", "واسعاً", "وراثية", "وراثي", "والاس",
"واجهنا", "وابل", "ويكيميديا", "واضحا", "واضح", "وصفته", "واتساب", "وحدات", "ون",
"وورلد", "والد", "وكلاء", "وتر", "وثيق", "وكالة", "وكالات", "و احدة", "واحد", "وصيته",
"وصيه", "ويلمينغتون", "ولد", "وزر", "وعي", "وفد", "وصول", "وقف", "وفاة", "ووتش", "وسط",
"وزراء", "وزارة", "ودي", "وصيف", "ويمبلدون", "وست", "وهج", "والد", "وليد", "وثار",
"وجد", "وجه", "وقت", "ويلز", "وجود", "وجيه", "وحد", "وحيد", "ودا", "وداد", "ودرو",
"ودى", "وديع", "وراء", "ورانس", "ورث", "ورَّث", "ورد", "وردة", "ورق", "ورم", "وزير",
"وسام", "وسائل", "وستون", "وسط", "وسن", "وسيط", "وسيلة", "وسيم", "وصاف", "وصف", "وصْفَ",
"وصل", "وضع", "وطن", "وعاء", "وفاء", "وفق", "وفيق", "وقت", "وقع", "وكال", "وكيل",
"ولاء", "ولف", "وهب", "وباء", "ونستون", "وضح", "وجب", "وقّع", "ولنغتون", "وحش",
"وفر", "ولادة", "ولي", "وفيات", "وزار", "وجّه", "وهماً", "وجَّه", "وظيفة", "وظائف", "وقائي"]
sentence_splitter_exceptions = ["د.", "كي.", "في.", "آر.", "بى.", "جى.", "دى.", "جيه.", "ان.", "ال.", "سى.", "اس.",
"اتش.", "اف."]
sentence_splitters = ['.', '!', '؟', '\n']
text = text.replace('،', ' ، ')
text = text.replace('*', ' * ')
text = text.replace('’', ' ’ ')
text = text.replace('‘', ' ‘ ')
text = text.replace(',', ' , ')
text = text.replace('(', ' ( ')
text = text.replace(')', ' ) ')
text = text.replace('/', ' / ')
text = text.replace('[', ' [ ')
text = text.replace(']', ' ] ')
text = text.replace('|', ' | ')
text = text.replace('؛', ' ؛ ')
text = text.replace('«', ' « ')
text = text.replace('»', ' » ')
text = text.replace('!', ' ! ')
text = text.replace('-', ' - ')
text = text.replace('“', ' “ ')
text = text.replace('”', ' ” ')
text = text.replace('"', ' " ')
text = text.replace('؟', ' ؟ ')
text = text.replace(':', ' : ')
text = text.replace('…', ' … ')
text = text.replace('..', ' .. ')
text = text.replace('...', ' ... ')
text = text.replace('\'', ' \' ')
text = text.replace('\n', ' \n ')
text = text.replace(' ', ' ')
tokens = text.split()
for i, token in enumerate(tokens):
if token[-1] in sentence_splitters:
is_exceptions = token in sentence_splitter_exceptions
if not is_exceptions:
tokens[i] = token[:-1] + ' ' + token[-1] + 'SENT_SPLITTER'
tokens = ' '.join(tokens).split()
for i, token in enumerate(tokens):
if token.startswith('و'):
is_exceptions = [token.startswith(exception) and len(token) <= len(exception) + 1 for exception in
tokenizer_exceptions]
if True not in is_exceptions:
tokens[i] = token[0] + ' ' + token[1:]
text = (' '.join(tokens))
text = text.replace(' وال', ' و ال')
text = text.replace(' لل', ' ل ل')
text = text.replace(' لإ', ' ل إ')
text = text.replace(' بالأ', ' ب الأ')
text = text.replace('وفقا ل', 'وفقا ل ')
text = text.replace('نسبة ل', 'نسبة ل ')
sentences = text.split('SENT_SPLITTER')
return sentences
| 5,346,294 |
def login(client, password="pass", ):
"""Helper function to log into our app.
Parameters
----------
client : test client object
Passed here is the flask test client used to send the request.
password : str
Dummy password for logging into the app.
Return
-------
post request object
The test client is instructed to send a post request to the /login
        route. The request contains the field values to be posted by the form.
"""
return client.post('/login',
data=dict(pass_field=password, remember_me=True),
follow_redirects=True)
| 5,346,295 |
def define_dagstermill_solid(
name,
notebook_path,
input_defs=None,
output_defs=None,
config_schema=None,
required_resource_keys=None,
output_notebook=None,
output_notebook_name=None,
asset_key_prefix=None,
description=None,
tags=None,
):
"""Wrap a Jupyter notebook in a solid.
Arguments:
name (str): The name of the solid.
notebook_path (str): Path to the backing notebook.
input_defs (Optional[List[InputDefinition]]): The solid's inputs.
output_defs (Optional[List[OutputDefinition]]): The solid's outputs. Your notebook should
call :py:func:`~dagstermill.yield_result` to yield each of these outputs.
required_resource_keys (Optional[Set[str]]): The string names of any required resources.
output_notebook (Optional[str]): If set, will be used as the name of an injected output of
type :py:class:`~dagster.FileHandle` that will point to the executed notebook (in
addition to the :py:class:`~dagster.AssetMaterialization` that is always created). This
respects the :py:class:`~dagster.core.storage.file_manager.FileManager` configured on
the pipeline resources via the "file_manager" resource key, so, e.g.,
if :py:class:`~dagster_aws.s3.s3_file_manager` is configured, the output will be a :
py:class:`~dagster_aws.s3.S3FileHandle`.
output_notebook_name: (Optional[str]): If set, will be used as the name of an injected output
of type of :py:class:`~dagster.BufferedIOBase` that is the file object of the executed
notebook (in addition to the :py:class:`~dagster.AssetMaterialization` that is always
created). It allows the downstream solids to access the executed notebook via a file
object.
asset_key_prefix (Optional[Union[List[str], str]]): If set, will be used to prefix the
asset keys for materialized notebooks.
description (Optional[str]): If set, description used for solid.
tags (Optional[Dict[str, str]]): If set, additional tags used to annotate solid.
Dagster uses the tag keys `notebook_path` and `kind`, which cannot be
overwritten by the user.
Returns:
:py:class:`~dagster.SolidDefinition`
"""
check.str_param(name, "name")
check.str_param(notebook_path, "notebook_path")
input_defs = check.opt_list_param(input_defs, "input_defs", of_type=InputDefinition)
output_defs = check.opt_list_param(output_defs, "output_defs", of_type=OutputDefinition)
required_resource_keys = check.opt_set_param(
required_resource_keys, "required_resource_keys", of_type=str
)
extra_output_defs = []
if output_notebook_name is not None:
required_resource_keys.add("output_notebook_io_manager")
extra_output_defs.append(
OutputDefinition(name=output_notebook_name, io_manager_key="output_notebook_io_manager")
)
# backcompact
if output_notebook is not None:
rename_warning(
new_name="output_notebook_name", old_name="output_notebook", breaking_version="0.14.0"
)
required_resource_keys.add("file_manager")
extra_output_defs.append(OutputDefinition(dagster_type=FileHandle, name=output_notebook))
if isinstance(asset_key_prefix, str):
asset_key_prefix = [asset_key_prefix]
asset_key_prefix = check.opt_list_param(asset_key_prefix, "asset_key_prefix", of_type=str)
default_description = f"This solid is backed by the notebook at {notebook_path}"
description = check.opt_str_param(description, "description", default=default_description)
user_tags = validate_tags(tags)
if tags is not None:
check.invariant(
"notebook_path" not in tags,
"user-defined solid tags contains the `notebook_path` key, but the `notebook_path` key is reserved for use by Dagster",
)
check.invariant(
"kind" not in tags,
"user-defined solid tags contains the `kind` key, but the `kind` key is reserved for use by Dagster",
)
default_tags = {"notebook_path": notebook_path, "kind": "ipynb"}
return SolidDefinition(
name=name,
input_defs=input_defs,
compute_fn=_dm_solid_compute(
name,
notebook_path,
output_notebook_name,
asset_key_prefix=asset_key_prefix,
output_notebook=output_notebook, # backcompact
),
output_defs=output_defs + extra_output_defs,
config_schema=config_schema,
required_resource_keys=required_resource_keys,
description=description,
tags={**user_tags, **default_tags},
)
| 5,346,296 |
def barycorr(eventfile,outfile,refframe,orbit_file,output_folder):
"""
General function to perform the barycenter corrections for a Swift event file
    eventfile - path to the event file. Will extract the ObsID from this.
outfile - path to the output event file with barycenter corrections applied
refframe - reference frame for barycenter corrections (usually ICRS)
orbit_file - path to the orbit file of the observation
output_folder - path to the folder where the outfile will be
"""
obsid = eventfile[2:13]
logfile = output_folder + 'barycorr_notes.txt'
ra,dec = get_ra_dec(eventfile)
with open(logfile,'w') as logtextfile:
output = subprocess.run(['barycorr',eventfile,'outfile='+outfile,'orbitfiles='+orbit_file,'ra='+str(ra),'dec='+str(dec),'refframe='+str(refframe),'clobber=YES'],capture_output=True,text=True)
logtextfile.write(output.stdout)
logtextfile.write('*------------------------------* \n')
logtextfile.write(output.stderr)
return
| 5,346,297 |
def denormalize_laf(LAF: torch.Tensor, images: torch.Tensor) -> torch.Tensor:
"""De-normalizes LAFs from scale to image scale.
B,N,H,W = images.size()
MIN_SIZE = min(H,W)
[a11 a21 x]
[a21 a22 y]
becomes
[a11*MIN_SIZE a21*MIN_SIZE x*W]
[a21*MIN_SIZE a22*MIN_SIZE y*H]
Args:
LAF:
images: images, LAFs are detected in.
Returns:
the denormalized lafs.
Shape:
- Input: :math:`(B, N, 2, 3)`
- Output: :math:`(B, N, 2, 3)`
"""
raise_error_if_laf_is_not_valid(LAF)
n, ch, h, w = images.size()
wf = float(w)
hf = float(h)
min_size = min(hf, wf)
coef = torch.ones(1, 1, 2, 3).to(LAF.dtype).to(LAF.device) * min_size
coef[0, 0, 0, 2] = wf
coef[0, 0, 1, 2] = hf
return coef.expand_as(LAF) * LAF
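# Usage sketch (assumes torch is available and LAFs shaped (B, N, 2, 3), as checked
# by raise_error_if_laf_is_not_valid): a unit-scale LAF centred at the normalized
# point (0.5, 0.5) is scaled by min(H, W) and re-centred in pixel coordinates.
laf = torch.tensor([[[[1.0, 0.0, 0.5],
                      [0.0, 1.0, 0.5]]]])   # (1, 1, 2, 3)
imgs = torch.zeros(1, 3, 32, 64)            # (B, CH, H, W)
print(denormalize_laf(laf, imgs))           # scale 32 = min(H, W), centre at (x=32, y=16)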
| 5,346,298 |
def project_main(GIS_files_path, topath):
""" This main function reads the GIS-layers in GIS_files_path and separates them by raster and vector data.
Projects the data to WGS84 UMT37S
Moves all files to ../Projected_files
Merges the files named 'kV' to two merged shape file of Transmission and Medium Voltage lines
Merges the files named 'MiniGrid' to one merged shape file
:param GIS_files_path:
:return:
"""
print(os.getcwd())
basedir = os.getcwd()
os.chdir(GIS_files_path)
current = os.getcwd()
print(os.getcwd())
#All shp-files in all folders in dir current
adm = project_vector(os.path.join(current,'gadm36_KEN_shp\gadm36_KEN_0.shp'))
adm.to_file(os.path.join(current,'gadm36_KEN_shp\gadm36_KEN_0_UMT37S.shp'))
shpFiles = [os.path.join(dp, f) for dp, dn, filenames in os.walk(current) for f in filenames if
os.path.splitext(f)[1] == '.shp']
for s in shpFiles:
path, filename = os.path.split(s)
projected = project_vector(s)
clip_vector(os.path.join(current,'gadm36_KEN_shp\gadm36_KEN_0_UMT37S.shp'), projected, s, os.path.join(path, "UMT37S_%s" % (filename)))
#All tif-files in all folders in dir current
tifFiles = [os.path.join(dp, f) for dp, dn, filenames in os.walk(current) for f in filenames if
os.path.splitext(f)[1] == '.tif']
for t in tifFiles:
path, filename = os.path.split(t)
masking(os.path.join(current,'gadm36_KEN_shp\gadm36_KEN_0.shp'), t)
project_raster(os.path.join(path,'%s' % (filename)), os.path.join(path,"masked_UMT37S_%s" % (filename)))
#All files containing "UMT37S" is copied to ../Projected_files dir
def create_dir(dir):
if not os.path.exists(dir):
os.makedirs(dir)
create_dir((topath))
allFiles = [os.path.join(dp, f) for dp, dn, filenames in os.walk(current) for f in filenames]
keyword = 'UMT37S'
for fname in allFiles:
if keyword in fname:
shutil.copy("\\\\?\\"+fname, os.path.join(current, topath)) #Due to really long name the \\\\?\\ can trick Windows accepting it
os.chdir(basedir)
merge_transmission(topath)
merge_minigrid(topath)
merge_mv(topath)
return ()
| 5,346,299 |