content | id
---|---
stringlengths 22 to 815k | int64 0 to 4.91M
def format_batch_request_last_fm(listens: List[Listen]) -> Request:
"""
Format a POST request to scrobble the given listens to Last.fm.
"""
assert len(listens) <= 50, 'Last.fm allows at most 50 scrobbles per batch.'
params = {
'method': 'track.scrobble',
'sk': LAST_FM_SESSION_KEY,
}
for i, listen in enumerate(listens):
params.update(listen.format_lastfm_scrobble(i))
return format_signed_request(http_method='POST', data=params)
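# Hedged usage sketch: chunk a longer listen history into batches of at most 50,
# the limit enforced by the assertion above. `all_listens` and the dispatching of
# the returned Request are hypothetical and not defined in this snippet.
# for start in range(0, len(all_listens), 50):
#     request = format_batch_request_last_fm(all_listens[start:start + 50])
#     # send the request with your HTTP client of choice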
| 5,350,500 |
def train_eval(
root_dir,
gpu=0,
env_load_fn=None,
model_ids=None,
reload_interval=None,
eval_env_mode='headless',
num_iterations=1000000,
conv_1d_layer_params=None,
conv_2d_layer_params=None,
encoder_fc_layers=[256],
actor_fc_layers=[256, 256],
critic_obs_fc_layers=None,
critic_action_fc_layers=None,
critic_joint_fc_layers=[256, 256],
# Params for collect
initial_collect_steps=10000,
collect_steps_per_iteration=1,
num_parallel_environments=1,
replay_buffer_capacity=1000000,
# Params for target update
target_update_tau=0.005,
target_update_period=1,
# Params for train
train_steps_per_iteration=1,
batch_size=256,
actor_learning_rate=3e-4,
critic_learning_rate=3e-4,
alpha_learning_rate=3e-4,
td_errors_loss_fn=tf.compat.v1.losses.mean_squared_error,
gamma=0.99,
reward_scale_factor=1.0,
gradient_clipping=None,
# Params for eval
num_eval_episodes=30,
eval_interval=10000,
eval_only=False,
eval_deterministic=False,
num_parallel_environments_eval=1,
model_ids_eval=None,
# Params for summaries and logging
train_checkpoint_interval=10000,
policy_checkpoint_interval=10000,
rb_checkpoint_interval=50000,
log_interval=100,
summary_interval=1000,
summaries_flush_secs=10,
debug_summaries=False,
summarize_grads_and_vars=False,
eval_metrics_callback=None):
"""A simple train and eval for SAC."""
root_dir = os.path.expanduser(root_dir)
train_dir = os.path.join(root_dir, 'train')
eval_dir = os.path.join(root_dir, 'eval')
train_summary_writer = tf.compat.v2.summary.create_file_writer(
train_dir, flush_millis=summaries_flush_secs * 1000)
train_summary_writer.set_as_default()
eval_summary_writer = tf.compat.v2.summary.create_file_writer(
eval_dir, flush_millis=summaries_flush_secs * 1000)
eval_metrics = [
batched_py_metric.BatchedPyMetric(
py_metrics.AverageReturnMetric,
metric_args={'buffer_size': num_eval_episodes},
batch_size=num_parallel_environments_eval),
batched_py_metric.BatchedPyMetric(
py_metrics.AverageEpisodeLengthMetric,
metric_args={'buffer_size': num_eval_episodes},
batch_size=num_parallel_environments_eval),
]
eval_summary_flush_op = eval_summary_writer.flush()
global_step = tf.compat.v1.train.get_or_create_global_step()
with tf.compat.v2.summary.record_if(
lambda: tf.math.equal(global_step % summary_interval, 0)):
if reload_interval is None:
if model_ids is None:
model_ids = [None] * num_parallel_environments
else:
assert len(model_ids) == num_parallel_environments, \
'model ids provided, but length not equal to num_parallel_environments'
else:
train_model_ids = [model['id'] for model in suite_gibson.get_train_models()]
model_ids = np.random.choice(train_model_ids, num_parallel_environments).tolist()
if model_ids_eval is None:
model_ids_eval = [None] * num_parallel_environments_eval
else:
assert len(model_ids_eval) == num_parallel_environments_eval, \
'model ids eval provided, but length not equal to num_parallel_environments_eval'
tf_py_env = [lambda model_id=model_ids[i]: env_load_fn(model_id, 'headless', gpu)
for i in range(num_parallel_environments)]
tf_env = tf_py_environment.TFPyEnvironment(parallel_py_environment.ParallelPyEnvironment(tf_py_env))
if eval_env_mode == 'gui':
assert num_parallel_environments_eval == 1, 'only one GUI env is allowed'
eval_py_env = [lambda model_id=model_ids_eval[i]: env_load_fn(model_id, eval_env_mode, gpu)
for i in range(num_parallel_environments_eval)]
eval_py_env = parallel_py_environment.ParallelPyEnvironment(eval_py_env)
# Get the data specs from the environment
time_step_spec = tf_env.time_step_spec()
observation_spec = time_step_spec.observation
action_spec = tf_env.action_spec()
print('observation_spec', observation_spec)
print('action_spec', action_spec)
glorot_uniform_initializer = tf.compat.v1.keras.initializers.glorot_uniform()
preprocessing_layers = {}
if 'rgb' in observation_spec:
preprocessing_layers['rgb'] = tf.keras.Sequential(mlp_layers(
conv_1d_layer_params=None,
conv_2d_layer_params=conv_2d_layer_params,
fc_layer_params=encoder_fc_layers,
kernel_initializer=glorot_uniform_initializer,
))
if 'depth' in observation_spec:
preprocessing_layers['depth'] = tf.keras.Sequential(mlp_layers(
conv_1d_layer_params=None,
conv_2d_layer_params=conv_2d_layer_params,
fc_layer_params=encoder_fc_layers,
kernel_initializer=glorot_uniform_initializer,
))
if 'sensor' in observation_spec:
preprocessing_layers['sensor'] = tf.keras.Sequential(mlp_layers(
conv_1d_layer_params=None,
conv_2d_layer_params=None,
fc_layer_params=encoder_fc_layers,
kernel_initializer=glorot_uniform_initializer,
))
if len(preprocessing_layers) <= 1:
preprocessing_combiner = None
else:
preprocessing_combiner = tf.keras.layers.Concatenate(axis=-1)
actor_net = actor_distribution_network.ActorDistributionNetwork(
observation_spec,
action_spec,
preprocessing_layers=preprocessing_layers,
preprocessing_combiner=preprocessing_combiner,
fc_layer_params=actor_fc_layers,
continuous_projection_net=normal_projection_net,
kernel_initializer=glorot_uniform_initializer,
)
critic_net = critic_network.CriticNetwork(
(observation_spec, action_spec),
preprocessing_layers=preprocessing_layers,
preprocessing_combiner=preprocessing_combiner,
observation_fc_layer_params=critic_obs_fc_layers,
action_fc_layer_params=critic_action_fc_layers,
joint_fc_layer_params=critic_joint_fc_layers,
kernel_initializer=glorot_uniform_initializer,
)
tf_agent = sac_agent.SacAgent(
time_step_spec,
action_spec,
actor_network=actor_net,
critic_network=critic_net,
actor_optimizer=tf.compat.v1.train.AdamOptimizer(
learning_rate=actor_learning_rate),
critic_optimizer=tf.compat.v1.train.AdamOptimizer(
learning_rate=critic_learning_rate),
alpha_optimizer=tf.compat.v1.train.AdamOptimizer(
learning_rate=alpha_learning_rate),
target_update_tau=target_update_tau,
target_update_period=target_update_period,
td_errors_loss_fn=td_errors_loss_fn,
gamma=gamma,
reward_scale_factor=reward_scale_factor,
gradient_clipping=gradient_clipping,
debug_summaries=debug_summaries,
summarize_grads_and_vars=summarize_grads_and_vars,
train_step_counter=global_step)
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.compat.v1.Session(config=config)
# Make the replay buffer.
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
data_spec=tf_agent.collect_data_spec,
batch_size=tf_env.batch_size,
max_length=replay_buffer_capacity)
replay_observer = [replay_buffer.add_batch]
if eval_deterministic:
eval_py_policy = py_tf_policy.PyTFPolicy(greedy_policy.GreedyPolicy(tf_agent.policy))
else:
eval_py_policy = py_tf_policy.PyTFPolicy(tf_agent.policy)
step_metrics = [
tf_metrics.NumberOfEpisodes(),
tf_metrics.EnvironmentSteps(),
]
train_metrics = step_metrics + [
tf_metrics.AverageReturnMetric(
buffer_size=100,
batch_size=num_parallel_environments),
tf_metrics.AverageEpisodeLengthMetric(
buffer_size=100,
batch_size=num_parallel_environments),
]
collect_policy = tf_agent.collect_policy
initial_collect_policy = random_tf_policy.RandomTFPolicy(time_step_spec, action_spec)
initial_collect_op = dynamic_step_driver.DynamicStepDriver(
tf_env,
initial_collect_policy,
observers=replay_observer + train_metrics,
num_steps=initial_collect_steps * num_parallel_environments).run()
collect_op = dynamic_step_driver.DynamicStepDriver(
tf_env,
collect_policy,
observers=replay_observer + train_metrics,
num_steps=collect_steps_per_iteration * num_parallel_environments).run()
# Prepare replay buffer as dataset with invalid transitions filtered.
def _filter_invalid_transition(trajectories, unused_arg1):
return ~trajectories.is_boundary()[0]
dataset = replay_buffer.as_dataset(
num_parallel_calls=5,
sample_batch_size=5 * batch_size,
num_steps=2).apply(tf.data.experimental.unbatch()).filter(
_filter_invalid_transition).batch(batch_size).prefetch(5)
dataset_iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
trajectories, unused_info = dataset_iterator.get_next()
train_op = tf_agent.train(trajectories)
summary_ops = []
for train_metric in train_metrics:
summary_ops.append(train_metric.tf_summaries(
train_step=global_step, step_metrics=step_metrics))
with eval_summary_writer.as_default(), tf.compat.v2.summary.record_if(True):
for eval_metric in eval_metrics:
eval_metric.tf_summaries(
train_step=global_step, step_metrics=step_metrics)
train_checkpointer = common.Checkpointer(
ckpt_dir=train_dir,
agent=tf_agent,
global_step=global_step,
metrics=metric_utils.MetricsGroup(train_metrics, 'train_metrics'))
policy_checkpointer = common.Checkpointer(
ckpt_dir=os.path.join(train_dir, 'policy'),
policy=tf_agent.policy,
global_step=global_step)
rb_checkpointer = common.Checkpointer(
ckpt_dir=os.path.join(train_dir, 'replay_buffer'),
max_to_keep=1,
replay_buffer=replay_buffer)
init_agent_op = tf_agent.initialize()
with sess.as_default():
# Initialize the graph.
train_checkpointer.initialize_or_restore(sess)
if eval_only:
metric_utils.compute_summaries(
eval_metrics,
eval_py_env,
eval_py_policy,
num_episodes=num_eval_episodes,
global_step=0,
callback=eval_metrics_callback,
tf_summaries=False,
log=True,
)
print('EVAL DONE')
return
# Initialize training.
rb_checkpointer.initialize_or_restore(sess)
sess.run(dataset_iterator.initializer)
common.initialize_uninitialized_variables(sess)
sess.run(init_agent_op)
sess.run(train_summary_writer.init())
sess.run(eval_summary_writer.init())
global_step_val = sess.run(global_step)
if global_step_val == 0:
# Initial eval of randomly initialized policy
metric_utils.compute_summaries(
eval_metrics,
eval_py_env,
eval_py_policy,
num_episodes=num_eval_episodes,
global_step=0,
callback=eval_metrics_callback,
tf_summaries=True,
log=True,
)
# Run initial collect.
logging.info('Global step %d: Running initial collect op.',
global_step_val)
sess.run(initial_collect_op)
# Checkpoint the initial replay buffer contents.
rb_checkpointer.save(global_step=global_step_val)
logging.info('Finished initial collect.')
else:
logging.info('Global step %d: Skipping initial collect op.',
global_step_val)
collect_call = sess.make_callable(collect_op)
train_step_call = sess.make_callable([train_op, summary_ops])
global_step_call = sess.make_callable(global_step)
timed_at_step = global_step_call()
time_acc = 0
steps_per_second_ph = tf.compat.v1.placeholder(
tf.float32, shape=(), name='steps_per_sec_ph')
steps_per_second_summary = tf.compat.v2.summary.scalar(
name='global_steps_per_sec', data=steps_per_second_ph,
step=global_step)
for _ in range(num_iterations):
start_time = time.time()
collect_call()
for _ in range(train_steps_per_iteration):
total_loss, _ = train_step_call()
time_acc += time.time() - start_time
global_step_val = global_step_call()
if global_step_val % log_interval == 0:
logging.info('step = %d, loss = %f', global_step_val, total_loss.loss)
steps_per_sec = (global_step_val - timed_at_step) / time_acc
logging.info('%.3f steps/sec', steps_per_sec)
sess.run(
steps_per_second_summary,
feed_dict={steps_per_second_ph: steps_per_sec})
timed_at_step = global_step_val
time_acc = 0
if global_step_val % train_checkpoint_interval == 0:
train_checkpointer.save(global_step=global_step_val)
if global_step_val % policy_checkpoint_interval == 0:
policy_checkpointer.save(global_step=global_step_val)
if global_step_val % rb_checkpoint_interval == 0:
rb_checkpointer.save(global_step=global_step_val)
if global_step_val % eval_interval == 0:
metric_utils.compute_summaries(
eval_metrics,
eval_py_env,
eval_py_policy,
num_episodes=num_eval_episodes,
global_step=global_step_val,
callback=eval_metrics_callback,
tf_summaries=True,
log=True,
)
if reload_interval is not None and global_step_val % reload_interval == 0:
model_ids = np.random.choice(train_model_ids, num_parallel_environments).tolist()
tf_env.reload_model(model_ids)
sess.close()
| 5,350,501 |
def file_mtime_ns(file):
"""Get the ``os.stat(file).st_mtime_ns`` value."""
return os.stat(file).st_mtime_ns
| 5,350,502 |
def get_entry_for_file_attachment(item_id, attachment):
"""
Creates a file entry for an attachment
:param item_id: item_id of the attachment
:param attachment: attachment dict
:return: file entry dict for attachment
"""
entry = fileResult(get_attachment_name(attachment.name), attachment.content)
entry["EntryContext"] = {
CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT
+ CONTEXT_UPDATE_FILE_ATTACHMENT: parse_attachment_as_dict(item_id, attachment)
}
return entry
| 5,350,503 |
def fix(args):
"""
%prog fix bedfile > newbedfile
Fix non-standard bed files. One typical problem is start > end.
"""
p = OptionParser(fix.__doc__)
p.add_option("--minspan", default=0, type="int",
help="Enforce minimum span [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
minspan = opts.minspan
fp = open(bedfile)
fw = must_open(opts.outfile, "w")
nfixed = nfiltered = ntotal = 0
for row in fp:
atoms = row.strip().split("\t")
assert len(atoms) >= 3, "Must be at least 3 columns"
seqid, start, end = atoms[:3]
start, end = int(start), int(end)
orientation = '+'
if start > end:
start, end = end, start
orientation = '-'
nfixed += 1
atoms[1:3] = [str(start), str(end)]
if len(atoms) > 6:
atoms[6] = orientation
line = "\t".join(atoms)
b = BedLine(line)
if b.span >= minspan:
print(b, file=fw)
nfiltered += 1
ntotal += 1
if nfixed:
logging.debug("Total fixed: {0}".format(percentage(nfixed, ntotal)))
if nfiltered:
logging.debug("Total filtered: {0}".format(percentage(nfiltered, ntotal)))
| 5,350,504 |
def test_root__FrontPage__3(address_book, browser):
"""`FrontPage` lists only address books.
When another object is in the root folder it is not listed.
"""
from zope.container.btree import BTreeContainer
address_book.__parent__['btree'] = BTreeContainer()
browser.login('mgr')
browser.open(browser.ROOT_URL)
assert 'test addressbook' == ''.join(
browser.etree.xpath('//ul/li/a[1]/text()')).strip()
| 5,350,505 |
def check_all_particles_present(partlist, gambit_pdg_codes):
"""
Checks all particles exist in the particle_database.yaml.
"""
absent = []
for i in range(len(partlist)):
if not partlist[i].pdg() in list(gambit_pdg_codes.values()):
absent.append(partlist[i])
absent_by_pdg = [x.pdg() for x in absent]
if len(absent) == 0:
print("All particles are in the GAMBIT database.")
else:
print(("\nThe following particles (by PDG code) are missing from the "
"particle database: {0}. GUM is now adding them to "
"../config/particle_database.yaml.\n").format(absent_by_pdg))
return absent
| 5,350,506 |
def binarize_tree(t):
"""Convert all n-nary nodes into left-branching subtrees
Returns a new tree. The original tree is intact.
"""
def recurs_binarize_tree(t):
if t.height() <= 2:
return t[0]
if len(t) == 1:
return recurs_binarize_tree(t[0])
elif len(t) == 2:
new_children = []
for i, child in enumerate(t):
new_children.append(recurs_binarize_tree(child))
return Tree(t.node, new_children)
#return Tree(-1, new_children)
else:
#left_child = recurs_binarize_tree(Tree(-1, t[0:-1]))
if t.node[-1] != '_':
new_node_name = t.node + '_'
else:
new_node_name = t.node
left_child = recurs_binarize_tree(Tree(new_node_name, t[0:-1]))
right_child = recurs_binarize_tree(t[-1])
#return Tree(-1, [left_child, right_child])
return Tree(t.node, [left_child, right_child])
return recurs_binarize_tree(t)
| 5,350,507 |
def calculate_prfs_using_rdd(y_actual, y_predicted, average='macro'):
"""
Determines the precision, recall, fscore, and support of the predictions.
With an average of 'macro', the algorithm calculates metrics for each label and finds their unweighted mean.
See http://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html for details
A better metric for recommender systems is precision at N (also in this package)
Args:
y_actual: actual ratings in the format of an RDD of [ (userId, itemId, actualRating) ]
y_predicted: predicted ratings in the format of an RDD of [ (userId, itemId, predictedRating) ]
Returns:
precision, recall, fbeta_score, and support values
"""
# build (user, item, prediction, rating) tuples by joining predictions with actuals on (user, item)
prediction_rating_pairs = y_predicted.map(lambda x: ((x[0], x[1]), x[2]))\
.join(y_actual.map(lambda x: ((x[0], x[1]), x[2])))\
.map(lambda kv: (kv[0][0], kv[0][1], kv[1][0], kv[1][1]))
true_vals = np.array(prediction_rating_pairs.map(lambda x: x[3]).collect())
pred_vals = np.array(prediction_rating_pairs.map(lambda x: x[2]).collect())
return precision_recall_fscore_support([int(np.round(x)) for x in true_vals],
[int(np.round(x)) for x in pred_vals], average=average)
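# Hedged usage sketch (assumes an existing SparkContext `sc`; the ratings below
# are made-up illustration data, not taken from the snippet above):
# actual = sc.parallelize([(1, 10, 4.0), (1, 11, 2.0), (2, 10, 5.0)])
# predicted = sc.parallelize([(1, 10, 3.6), (1, 11, 2.4), (2, 10, 4.8)])
# precision, recall, fscore, support = calculate_prfs_using_rdd(actual, predicted)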
| 5,350,508 |
def translation_from_matrix(M):
"""Returns the 3 values of translation from the matrix M.
Parameters
----------
M : list[list[float]]
A 4-by-4 transformation matrix.
Returns
-------
[float, float, float]
The translation vector.
"""
return [M[0][3], M[1][3], M[2][3]]
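# Worked example: the translation vector is the last column of the 4-by-4 matrix.
_M = [
[1.0, 0.0, 0.0, 5.0],
[0.0, 1.0, 0.0, -2.0],
[0.0, 0.0, 1.0, 3.0],
[0.0, 0.0, 0.0, 1.0],
]
assert translation_from_matrix(_M) == [5.0, -2.0, 3.0]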
| 5,350,509 |
def test_camera(make_test_viewer):
"""Test vispy camera creation in 2D."""
viewer = make_test_viewer()
vispy_camera = viewer.window.qt_viewer.camera
np.random.seed(0)
data = np.random.random((11, 11, 11))
viewer.add_image(data)
# Test default values camera values are used and vispy camera has been
# updated
assert viewer.dims.ndisplay == 2
assert viewer.camera.ndisplay == 2
np.testing.assert_almost_equal(viewer.camera.angles, (0, 0, 90))
np.testing.assert_almost_equal(viewer.camera.center, (5.0, 5.0))
np.testing.assert_almost_equal(viewer.camera.angles, vispy_camera.angles)
np.testing.assert_almost_equal(viewer.camera.center, vispy_camera.center)
np.testing.assert_almost_equal(viewer.camera.zoom, vispy_camera.zoom)
| 5,350,510 |
def saving_filename_boundary(save_location, close_up, beafort, wave_roughness):
""" Setting the filename of the figure """
if close_up is None:
return save_location + 'Boundary_comparison_Bft={}_roughness={}.png'.format(beafort, wave_roughness)
else:
ymax, ymin = close_up
return save_location + 'Boundary_comparison_Bft={}_max={}_min={}_roughness={}.png'.format(beafort, ymax, ymin,
wave_roughness)
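# Worked examples of the resulting filenames (the arguments are illustrative only):
# saving_filename_boundary('figures/', None, 4, True)
#   -> 'figures/Boundary_comparison_Bft=4_roughness=True.png'
# saving_filename_boundary('figures/', (1.0, -1.0), 4, True)
#   -> 'figures/Boundary_comparison_Bft=4_max=1.0_min=-1.0_roughness=True.png'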
| 5,350,511 |
def get_library() -> CDLL:
"""Return the CDLL instance, loading it if necessary."""
global LIB
if LIB is None:
LIB = _load_library("aries_askar")
_init_logger()
return LIB
| 5,350,512 |
def childs_page_return_right_login(response_page, smarsy_login):
"""
Receive the HTML page from the login function and check that we've got the expected source
"""
if smarsy_login in response_page:
return True
else:
raise ValueError('Invalid Smarsy Login')
| 5,350,513 |
def merge_files(intakes, outcomes):
"""
Merges the intakes and outcomes datasets to create a unique line for each animal in the shelter, capturing each animal's full story.
Takes the intakes file, then the outcomes file, as arguments.
Returns the merged dataset.
"""
# Merge intakes and outcomes on animal id and year
animal_shelter_df = pd.merge(intakes,
outcomes,
on=['animal_id', 'year'],
how='left',
suffixes=('_intake', '_outcome'))
# Filters out animals who have yet to have outcomes and keeps animals where the outcome date is later than the intake date
animal_shelter_df = animal_shelter_df[(~animal_shelter_df['date_o'].isna())
& (animal_shelter_df['date_o'] > animal_shelter_df['date_i'])]
# Creates new days_in_shelter variable
animal_shelter_df['days_in_shelter'] = (animal_shelter_df['date_o'] - animal_shelter_df['date_i']).dt.days
# Sorts the column names to be alphabetical
animal_shelter_df = animal_shelter_df[animal_shelter_df.columns.sort_values()]
return animal_shelter_df
| 5,350,514 |
def _ssepdpsolve_single_trajectory(data, Heff, dt, times, N_store, N_substeps, psi_t, dims, c_ops, e_ops):
"""
Internal function. See ssepdpsolve.
"""
states_list = []
phi_t = np.copy(psi_t)
prng = RandomState() # todo: seed it
r_jump, r_op = prng.rand(2)
jump_times = []
jump_op_idx = []
for t_idx, t in enumerate(times):
if e_ops:
for e_idx, e in enumerate(e_ops):
s = cy_expect_psi_csr(
e.data.data, e.data.indices, e.data.indptr, psi_t, 0)
data.expect[e_idx, t_idx] += s
data.ss[e_idx, t_idx] += s ** 2
else:
states_list.append(Qobj(psi_t, dims=dims))
for j in range(N_substeps):
if norm(phi_t) ** 2 < r_jump:
# jump occurs
p = np.array([norm(c.data * psi_t) ** 2 for c in c_ops])
p = np.cumsum(p / np.sum(p))
n = np.where(p >= r_op)[0][0]
# apply jump
psi_t = c_ops[n].data * psi_t
psi_t /= norm(psi_t)
phi_t = np.copy(psi_t)
# store info about jump
jump_times.append(times[t_idx] + dt * j)
jump_op_idx.append(n)
# get new random numbers for next jump
r_jump, r_op = prng.rand(2)
# deterministic evolution without correction for norm decay
dphi_t = (-1.0j * dt) * (Heff.data * phi_t)
# deterministic evolution with correction for norm decay
dpsi_t = (-1.0j * dt) * (Heff.data * psi_t)
A = 0.5 * np.sum([norm(c.data * psi_t) ** 2 for c in c_ops])
dpsi_t += dt * A * psi_t
# increment wavefunctions
phi_t += dphi_t
psi_t += dpsi_t
# ensure that normalized wavefunction remains normalized
# this allows larger time step than otherwise would be possible
psi_t /= norm(psi_t)
return states_list, jump_times, jump_op_idx
| 5,350,515 |
def construct_features_MH_1(data):
"""
Processes the provided pandas dataframe object by:
Deleting the original METER_ID, LOCATION_HASH, BILLING_CYCLE, COMMENTS, and DAYS_FROM_BILLDT columns
Constructing a time series index out of the year, month, day, hour, minute, second columns
Sorting by the time series index
"""
try:
del data['METER_ID']
del data['LOCATION_HASH']
del data['BILLING_CYCLE']
del data['COMMENTS']
del data['DAYS_FROM_BILLDT']
return data
except Exception as e:
logger.info(
'There was a problem constructing the feature vector for the provided data set: {}'.format(str(e)))
| 5,350,516 |
def sort_observations(observations):
"""
Method to sort observations to make sure that the "winner" is at index 0
"""
return sorted(observations, key=cmp_to_key(cmp_observation), reverse=True)
| 5,350,517 |
def coor_trans(point, theta):
"""
coordinate transformation
theta direction: clockwise rotation is taken as positive
"""
point = np.transpose(point)
k = np.array([[np.cos(theta), np.sin(theta)],
[-np.sin(theta), np.cos(theta)]])
print(point)
# return np.dot(k, point)
return np.round(np.dot(k, point),6)
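# Worked example: rotating the point (1, 0) by theta = pi/2 (clockwise taken as
# positive) maps it onto the negative y-axis; nothing outside this snippet is assumed.
assert np.allclose(coor_trans(np.array([1.0, 0.0]), np.pi / 2), [0.0, -1.0])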
| 5,350,518 |
async def test_default_state(hass):
"""Test light switch default state."""
await async_setup_component(
hass,
"light",
{
"light": {
"platform": "switch",
"entity_id": "switch.test",
"name": "Christmas Tree Lights",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.christmas_tree_lights")
assert state is not None
assert state.state == "unavailable"
assert state.attributes["supported_features"] == 0
assert state.attributes.get("brightness") is None
assert state.attributes.get("hs_color") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("white_value") is None
assert state.attributes.get("effect_list") is None
assert state.attributes.get("effect") is None
assert state.attributes.get(ATTR_SUPPORTED_COLOR_MODES) == [COLOR_MODE_ONOFF]
assert state.attributes.get(ATTR_COLOR_MODE) is None
| 5,350,519 |
def ppo(
env_fn,
actor_critic=core.MLPActorCritic2Heads,
ac_kwargs=dict(),
seed=0,
steps_per_epoch=4000,
epochs=100,
epochs_rnd_warmup=1,
gamma=0.99,
clip_ratio=0.2,
pi_lr=3e-4,
vf_lr=1e-3,
rnd_lr=1e-3,
train_pi_iters=80,
train_v_iters=80,
train_rnd_iters=80,
lam=0.97,
max_ep_len=200,
target_kl=0.01,
logger_kwargs=dict(),
save_freq=10,
scale_reward=100,
only_intr=False,
norm_intr=False,
alpha_std_est=0.05,
single_head=False,
):
"""
Proximal Policy Optimization (by clipping),
with early stopping based on approximate KL
Args:
env_fn : A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
actor_critic: The constructor method for a PyTorch Module with a
``step`` method, an ``act`` method, a ``pi`` module, and a ``v``
module. The ``step`` method should accept a batch of observations
and return:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``a`` (batch, act_dim) | Numpy array of actions for each
| observation.
``v`` (batch,) | Numpy array of value estimates
| for the provided observations.
``logp_a`` (batch,) | Numpy array of log probs for the
| actions in ``a``.
=========== ================ ======================================
The ``act`` method behaves the same as ``step`` but only returns ``a``.
The ``pi`` module's forward call should accept a batch of
observations and optionally a batch of actions, and return:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``pi`` N/A | Torch Distribution object, containing
| a batch of distributions describing
| the policy for the provided observations.
``logp_a`` (batch,) | Optional (only returned if batch of
| actions is given). Tensor containing
| the log probability, according to
| the policy, of the provided actions.
| If actions not given, will contain
| ``None``.
=========== ================ ======================================
The ``v`` module's forward call should accept a batch of observations
and return:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``v`` (batch,) | Tensor containing the value estimates
| for the provided observations. (Critical:
| make sure to flatten this!)
=========== ================ ======================================
ac_kwargs (dict): Any kwargs appropriate for the ActorCritic object
you provided to PPO.
seed (int): Seed for random number generators.
steps_per_epoch (int): Number of steps of interaction (state-action pairs)
for the agent and the environment in each epoch.
epochs (int): Number of epochs of interaction (equivalent to
number of policy updates) to perform.
epochs_rnd_warmup (int): Number of epochs of training RND before starting training agent.
gamma (float): Discount factor. (Always between 0 and 1.)
clip_ratio (float): Hyperparameter for clipping in the policy objective.
Roughly: how far can the new policy go from the old policy while
still profiting (improving the objective function)? The new policy
can still go farther than the clip_ratio says, but it doesn't help
on the objective anymore. (Usually small, 0.1 to 0.3.) Typically
denoted by :math:`\\epsilon`.
pi_lr (float): Learning rate for policy optimizer.
vf_lr (float): Learning rate for value function optimizer.
rnd_lr (float): Learning rate for RND optimizer.
train_pi_iters (int): Maximum number of gradient descent steps to take
on policy loss per epoch. (Early stopping may cause optimizer
to take fewer than this.)
train_v_iters (int): Number of gradient descent steps to take on
value function per epoch.
train_rnd_iters (int): Number of gradient descent steps to take on
RND per epoch.
lam (float): Lambda for GAE-Lambda. (Always between 0 and 1,
close to 1.)
max_ep_len (int): Maximum length of trajectory / episode / rollout.
target_kl (float): Roughly what KL divergence we think is appropriate
between new and old policies after an update. This will get used
for early stopping. (Usually small, 0.01 or 0.05.)
logger_kwargs (dict): Keyword args for EpochLogger.
save_freq (int): How often (in terms of gap between epochs) to save
the current policy and value function.
scale_reward (float): total_reward = extr_reward + scale_reward*intr_reward
"""
# Special function to avoid certain slowdowns from PyTorch + MPI combo.
setup_pytorch_for_mpi()
# Set up logger and save configuration
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
# Random seed
seed += 10000 * proc_id()
torch.manual_seed(seed)
np.random.seed(seed)
# Instantiate environment
env = env_fn()
obs_dim = env.observation_space.shape
act_dim = env.action_space.shape
# Create actor-critic module
ac = actor_critic(env.observation_space, env.action_space, **ac_kwargs)
# Sync params across processes
sync_params(ac)
# Create RND module and optimizer
rnd = RND(obs_dim[0], (32, 32), nn.Sigmoid)
sync_params(rnd)
rnd_optimizer = Adam(rnd.predictor_network.parameters(), lr=rnd_lr)
# Create running estimator for reward normalization
reward_std_estimator = core.running_exp_estimator(alpha_std_est)
# Count variables
var_counts = tuple(core.count_vars(module) for module in [ac.pi, ac.v_extr, ac.v_intr, rnd.predictor_network])
logger.log("\nNumber of parameters: \t pi: %d, \t v_extr: %d, \t v_intr: %d, \t rnd: %d\n" % var_counts)
local_steps_per_epoch = int(steps_per_epoch / num_procs())
o = env.reset()
# Train RND on random agent for 'epochs_rnd_warmup' epochs
for epoch in range(epochs_rnd_warmup):
for t in range(local_steps_per_epoch):
a, _, _, _ = ac.step(torch.as_tensor(o, dtype=torch.float32))
next_o, r, d, _ = env.step(a)
rnd_loss = rnd.loss(torch.as_tensor(next_o, dtype=torch.float32))
reward_std_estimator.update(rnd_loss.item())
rnd_optimizer.zero_grad()
rnd_loss.backward()
mpi_avg_grads(rnd.predictor_network) # average grads across MPI processes
rnd_optimizer.step()
# Set up experience buffer
buf = PPOBuffer(obs_dim, act_dim, local_steps_per_epoch, gamma, lam)
# Set up function for computing PPO policy loss
def compute_loss_pi(data):
obs, act, adv, logp_old = data["obs"], data["act"], data["adv"], data["logp"]
# Policy loss
pi, logp = ac.pi(obs, act)
ratio = torch.exp(logp - logp_old)
clip_adv = torch.clamp(ratio, 1 - clip_ratio, 1 + clip_ratio) * adv
loss_pi = -(torch.min(ratio * adv, clip_adv)).mean()
# Useful extra info
approx_kl = (logp_old - logp).mean().item()
ent = pi.entropy().mean().item()
clipped = ratio.gt(1 + clip_ratio) | ratio.lt(1 - clip_ratio)
clipfrac = torch.as_tensor(clipped, dtype=torch.float32).mean().item()
pi_info = dict(kl=approx_kl, ent=ent, cf=clipfrac)
return loss_pi, pi_info
# Set up functions for computing value loss
def compute_loss_v_extr(data):
obs, ret = data["obs"], data["ret_extr"]
return ((ac.v_extr(obs) - ret) ** 2).mean()
def compute_loss_v_intr(data):
obs, ret = data["obs"], data["ret_intr"]
return ((ac.v_intr(obs) - ret) ** 2).mean()
# Set up optimizers for policy and value function
pi_optimizer = Adam(ac.pi.parameters(), lr=pi_lr)
vf_extr_optimizer = Adam(ac.v_extr.parameters(), lr=vf_lr)
if not single_head:
vf_intr_optimizer = Adam(ac.v_intr.parameters(), lr=vf_lr)
# Set up model saving
logger.setup_pytorch_saver(ac)
def update(epoch):
data = buf.get()
pi_l_old, pi_info_old = compute_loss_pi(data)
pi_l_old = pi_l_old.item()
v_extr_l_old = compute_loss_v_extr(data).item()
if not single_head:
v_intr_l_old = compute_loss_v_intr(data).item()
loss_rnd_old = rnd.loss(data["obs"]).item()
# Train policy with multiple steps of gradient descent
for i in range(train_pi_iters):
pi_optimizer.zero_grad()
loss_pi, pi_info = compute_loss_pi(data)
kl = mpi_avg(pi_info["kl"])
if kl > 1.5 * target_kl:
logger.log("Early stopping at step %d due to reaching max kl." % i)
break
loss_pi.backward()
mpi_avg_grads(ac.pi) # average grads across MPI processes
pi_optimizer.step()
logger.store(StopIter=i)
# Value function learning
for i in range(train_v_iters):
vf_extr_optimizer.zero_grad()
loss_v_extr = compute_loss_v_extr(data)
loss_v_extr.backward()
mpi_avg_grads(ac.v_extr) # average grads across MPI processes
vf_extr_optimizer.step()
if not single_head:
for i in range(train_v_iters):
vf_intr_optimizer.zero_grad()
loss_v_intr = compute_loss_v_intr(data)
loss_v_intr.backward()
mpi_avg_grads(ac.v_intr) # average grads across MPI processes
vf_intr_optimizer.step()
for i in range(train_rnd_iters):
rnd_optimizer.zero_grad()
loss_rnd = rnd.loss(data["obs"])
loss_rnd.backward()
mpi_avg_grads(rnd.predictor_network) # average grads across MPI processes
rnd_optimizer.step()
# Log changes from update
kl, ent, cf = pi_info["kl"], pi_info_old["ent"], pi_info["cf"]
logger.store(
LossPi=pi_l_old,
LossV_extr=v_extr_l_old,
LossRND=loss_rnd_old,
KL=kl,
Entropy=ent,
ClipFrac=cf,
DeltaLossPi=(loss_pi.item() - pi_l_old),
DeltaLossV_extr=(loss_v_extr.item() - v_extr_l_old),
DeltaLossRND=(loss_rnd.item() - loss_rnd_old),
)
if not single_head:
logger.store(LossV_intr=v_intr_l_old, DeltaLossV_intr=(loss_v_intr.item() - v_intr_l_old))
# Prepare for interaction with environment
start_time = time.time()
o, ep_ret_extr, ep_ret_intr, ep_len = env.reset(), 0, 0, 0
# Main loop: collect experience in env and update/log each epoch
for epoch in range(epochs):
for t in range(local_steps_per_epoch):
a, v_extr, v_intr, logp = ac.step(torch.as_tensor(o, dtype=torch.float32))
next_o, r_extr, d, _ = env.step(a)
rnd_reward = rnd.reward(torch.as_tensor(next_o, dtype=torch.float32))
if norm_intr:
reward_std_estimator.update(rnd_reward)
r_intr = rnd_reward / reward_std_estimator.get_std()
logger.store(EpRet_exp_std=reward_std_estimator.get_std())
else:
r_intr = rnd_reward
# save and log
ep_ret_extr += r_extr
ep_ret_intr += r_intr
ep_len += 1
if only_intr:
r_extr = 0
if single_head:
buf.store(o, a, r_extr + scale_reward * r_intr, 0, v_extr, 0, logp)
else:
buf.store(o, a, r_extr, scale_reward * r_intr, v_extr, v_intr, logp)
logger.store(VVals_extr=v_extr, VVals_intr=v_intr)
# Update obs (critical!)
o = next_o
timeout = ep_len == max_ep_len
terminal = d or timeout
epoch_ended = t == local_steps_per_epoch - 1
if terminal or epoch_ended:
# if epoch_ended and not(terminal):
# print('Warning: trajectory cut off by epoch at %d steps.' % ep_len, flush=True)
# logger.log('Warning: trajectory cut off by epoch at %d steps.' % ep_len)
_, v_extr, v_intr, _ = ac.step(torch.as_tensor(o, dtype=torch.float32))
# if trajectory reached terminal state, value_extr target is zero, else bootstrap value target
if not (timeout or epoch_ended):
v_extr = 0
if single_head:
buf.finish_path(v_extr + v_intr, 0)
else:
buf.finish_path(v_extr, v_intr)
if terminal:
# only save EpRet / EpLen if trajectory finished
logger.store(EpRet_extr=ep_ret_extr, EpLen=ep_len, EpRet_intr=ep_ret_intr)
o, ep_ret_extr, ep_ret_intr, ep_len = env.reset(), 0, 0, 0
# Save model
if (epoch % save_freq == 0) or (epoch == epochs - 1):
logger.save_state({"env": env}, None)
# Perform PPO update!
update(epoch)
# Log info about epoch
logger.log_tabular("Epoch", epoch)
logger.log_tabular("EpRet_extr", with_min_and_max=True)
logger.log_tabular("EpRet_intr", average_only=True)
if norm_intr:
logger.log_tabular("EpRet_exp_std", average_only=True)
logger.log_tabular("EpLen", average_only=True)
logger.log_tabular("VVals_extr", average_only=True)
if not single_head:
logger.log_tabular("VVals_intr", average_only=True)
logger.log_tabular("LossPi", average_only=True)
logger.log_tabular("LossV_extr", average_only=True)
if not single_head:
logger.log_tabular("LossV_intr", average_only=True)
logger.log_tabular("LossRND", average_only=True)
logger.log_tabular("DeltaLossPi", average_only=True)
logger.log_tabular("DeltaLossV_extr", average_only=True)
if not single_head:
logger.log_tabular("DeltaLossV_intr", average_only=True)
logger.log_tabular("TotalEnvInteracts", (epoch + 1) * steps_per_epoch)
logger.log_tabular("Entropy", average_only=True)
logger.log_tabular("KL", average_only=True)
logger.log_tabular("ClipFrac", average_only=True)
logger.log_tabular("StopIter", average_only=True)
logger.log_tabular("Time", time.time() - start_time)
logger.dump_tabular()
| 5,350,520 |
def get_zz500_stocks():
"""
Fetch the CSI 500 (ZZ500) index constituent stocks
"""
# Log in to the system
lg = bs.login()
# Print the login response info
print('login respond error_code:'+lg.error_code)
print('login respond error_msg:'+lg.error_msg)
# Query the CSI 500 constituent stocks
rs = bs.query_zz500_stocks()
print('query_zz500 error_code:'+rs.error_code)
print('query_zz500 error_msg:'+rs.error_msg)
# Collect the result set
zz500_stocks = []
while (rs.error_code == '0') & rs.next():
# Fetch one record at a time and merge the records together
zz500_stocks.append(rs.get_row_data())
result = pd.DataFrame(zz500_stocks, columns=rs.fields)
dtype = {'updateDate': String(10), 'code': String(9), 'code_name': String(10)}
result.to_sql('odl_bs_zz500_stocks', engine, schema=CQ_Config.DB_SCHEMA, if_exists='replace', index=False, dtype=dtype)
# Log out of the system
bs.logout()
| 5,350,521 |
def ltopk(k, seq, key=None):
"""
>>> ltopk(2, [1, 100, 10, 1000])
[1000, 100]
>>> ltopk(2, ['Alice', 'Bob', 'Charlie', 'Dan'], key=len)
['Charlie', 'Alice']
"""
if key is not None and not callable(key):
key = getter(key)
return list(heapq.nlargest(k, seq, key=key))
| 5,350,522 |
def index():
""" Root URL response """
return (
jsonify(
name="Promotion REST API Service",
version="1.0",
),
status.HTTP_200_OK,
)
| 5,350,523 |
def test_insight_get_comments(requests_mock):
"""Tests sumologic-sec-insight-get-comments command function.
"""
from SumoLogicCloudSIEM import Client, insight_get_comments, DEFAULT_HEADERS
mock_response = util_load_json('test_data/insight_comments.json')
insight_id = 'INSIGHT-116'
comments = mock_response['data']['comments']
requests_mock.get('{}/sec/v1/insights/{}/comments'.format(MOCK_URL, insight_id), json=mock_response)
client = Client(
base_url=MOCK_URL,
verify=False,
headers=DEFAULT_HEADERS,
proxy=False,
auth=('access_id', 'access_key'),
ok_codes=[200])
args = {
'insight_id': insight_id
}
response = insight_get_comments(client, args)
assert response.outputs_prefix == 'SumoLogicSec.InsightComments'
assert response.outputs_key_field == 'Id'
assert response.outputs[0]['Id'] == comments[0]['id'] == '2'
assert response.outputs[0]['Author'] == comments[0]['author']['username'] == '[email protected]'
assert response.outputs[0]['Body'] == comments[0]['body'] == 'This is an example comment'
| 5,350,524 |
def dump_report_to_file(file: Union[TextIO, str],
etype: Optional[Type[BaseException]], value: Optional[BaseException],
tb: Optional[TracebackType], *, show_locals: bool = True,
show_globals: bool = True, show_main_globals: bool = True,
show_sys: bool = True, show_simple_tb: bool = True,
show_exception_vars: bool = True,
show_exc_vars_recur: bool = True,
custom_values: Dict[str, Union[Any, Callable[[], Any]]] = None
) -> None:
"""Dumps an exception dump to the specified file-like object or file name
Arguments
---------
file: Union[TextIO, str]
A file-like object or filename to dump the report to"""
if isinstance(file, str):
with open(file, 'w') as fp:
dump_report_to_file(fp, etype, value, tb,
show_locals=show_locals,
show_globals=show_globals,
show_main_globals=show_main_globals,
show_sys=show_sys,
show_simple_tb=show_simple_tb,
show_exception_vars=show_exception_vars,
show_exc_vars_recur=show_exc_vars_recur,
custom_values=custom_values)
return
if value is None:
value = sys.exc_info()[1]
if value is None:
return
import __main__
etype = type(value)
if tb is None:
tb = value.__traceback__
# Write name and date and additional info
file.write(f'"{__main__.__file__}" crashed at {time.strftime("%Y-%m-%dT%H:%M:%S%z")} ({time.strftime("%F %H:%M:%S %Z")})\n\n')
if custom_values is None:
custom_values = {
'os.getcwd()': os.getcwd,
'os.environ': (lambda: dict(os.environ)),
}
for (key, custom_value) in custom_values.items():
if callable(custom_value):
custom_values[key] = custom_value()
_variable_summary(file, custom_values)
_write_separator(file)
# Write traceback
if show_simple_tb:
tb_lines = traceback.format_exception(etype, value, tb)
file.write(''.join(tb_lines))
_write_separator(file)
# Write the contents of the exception
if show_exception_vars:
file.write('Summary of exception variables:\n')
if show_exc_vars_recur:
_recursive_exc_var_dump(file, value, set())
else:
_variable_summary(file, _exhaustive_vars(value))
_write_separator(file)
show_exhaustive = show_locals or show_globals
# Write the contents of sys
if show_sys:
file.write('Summary of sys variables:\n')
_variable_summary(file, _exhaustive_vars(sys))
_write_separator(file, 3 if show_exhaustive else 1)
# Write an exhaustive stack trace that shows all
# locals and globals (configurable) of the entire stack
if show_exhaustive:
_trace_exchaustive(file, value, tb, show_locals, show_globals, set())
# Write the main globals for the program
# This is included in the exhaustive stack trace,
# so we don't show it when we show an exhaustive stack trace.
elif show_main_globals:
file.write('Summary of __main__ globals:\n')
_variable_summary(file, _exhaustive_vars(__main__))
_write_separator(file)
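# Hedged usage sketch: install the dumper as sys.excepthook so uncaught
# exceptions produce a crash report (the file name is an arbitrary choice).
# def _crash_hook(etype, value, tb):
#     dump_report_to_file('crash_report.txt', etype, value, tb)
# sys.excepthook = _crash_hook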
| 5,350,525 |
def add_cameras_default(scene):
""" Make two camera (main/top) default setup for demo images."""
cam_main = create_camera_perspective(
location=(-33.3056, 24.1123, 26.0909),
rotation_quat=(0.42119, 0.21272, -0.39741, -0.78703),
)
scene.collection.objects.link(cam_main)
cam_top = create_camera_top_view_ortho()
scene.collection.objects.link(cam_top)
# make this the main scene camera
scene.camera = cam_main
return cam_main, cam_top
| 5,350,526 |
def distr_mean_stde(distribution: np.ndarray) -> tuple:
"""
Purpose:
Compute the mean and standard deviation for a distribution.
Args:
distribution (np.ndarray): distribution
Returns:
tuple (ie. distribution mean and standard deviation)
"""
# Compute and print the mean, stdev of the resample distribution of means
distribution_mean = np.mean(distribution)
standard_error = np.std(distribution)
print('Bootstrap Distribution: center={:0.2f}, spread={:0.2f}'.format(distribution_mean, standard_error))
print()
return distribution_mean, standard_error
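# Hedged usage sketch: build a bootstrap distribution of sample means from
# synthetic data and summarize it (the names and numbers are illustrative only).
# rng = np.random.default_rng(0)
# sample = rng.normal(loc=10.0, scale=2.0, size=500)
# boot_means = np.array([rng.choice(sample, size=sample.size).mean()
#                        for _ in range(1000)])
# center, spread = distr_mean_stde(boot_means)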
| 5,350,527 |
def release_waiting_requests_grouped_fifo(rse_id, count=None, direction='destination', deadline=1, volume=0, session=None):
"""
Release waiting requests. Transfer requests that were requested first get released first (FIFO).
Also, all requests to DIDs that are attached to the same dataset get released if one child of the dataset is chosen to be released (Grouped FIFO).
:param rse_id: The RSE id.
:param count: The count to be released. If None, release all waiting requests.
:param direction: Whether requests are grouped by source RSE or destination RSE.
:param deadline: Maximal waiting time in hours until a dataset gets released.
:param volume: The maximum volume in bytes that should be transferred.
:param session: The database session.
"""
amount_updated_requests = 0
# Release requests that exceeded waiting time
if deadline:
amount_updated_requests = release_waiting_requests_per_deadline(rse_id=rse_id, deadline=deadline, session=session)
count = count - amount_updated_requests
grouped_requests_subquery, filtered_requests_subquery = create_base_query_grouped_fifo(rse_id=rse_id, filter_by_rse=direction, session=session)
# cumulate the number of children per dataset, combine with each request, and only keep requests that don't exceed the limit
cumulated_children_subquery = session.query(grouped_requests_subquery.c.name,
grouped_requests_subquery.c.scope,
grouped_requests_subquery.c.amount_childs,
grouped_requests_subquery.c.oldest_requested_at,
func.sum(grouped_requests_subquery.c.amount_childs).over(order_by=(grouped_requests_subquery.c.oldest_requested_at)).label('cum_amount_childs'))\
.subquery()
cumulated_children_subquery = session.query(filtered_requests_subquery.c.id)\
.join(cumulated_children_subquery, and_(filtered_requests_subquery.c.dataset_name == cumulated_children_subquery.c.name, filtered_requests_subquery.c.dataset_scope == cumulated_children_subquery.c.scope))\
.filter(cumulated_children_subquery.c.cum_amount_childs - cumulated_children_subquery.c.amount_childs < count)\
.subquery()
# needed for mysql to update and select from the same table
cumulated_children_subquery = session.query(cumulated_children_subquery.c.id).subquery()
statement = update(models.Request).where(models.Request.id.in_(cumulated_children_subquery)).values(state=RequestState.QUEUED)
amount_updated_requests += session.execute(statement).rowcount
# release requests where the whole datasets volume fits in the available volume space
if volume:
amount_updated_requests += release_waiting_requests_per_free_volume(rse_id=rse_id, volume=volume, session=session)
return amount_updated_requests
| 5,350,528 |
def _get_gap_memory_pool_size_MB():
"""
Return the gap memory pool size suitable for usage on the GAP
command line.
The GAP 4.5.6 command line parser had issues with large numbers, so
we return it in megabytes.
OUTPUT:
String.
EXAMPLES:
sage: from sage.interfaces.gap import \
... _get_gap_memory_pool_size_MB
sage: _get_gap_memory_pool_size_MB() # random output
'1467m'
"""
pool = get_gap_memory_pool_size()
pool = (pool // (1024**2)) + 1
return str(pool)+'m'
| 5,350,529 |
async def order_book_l2(symbol: str) -> dict:
"""オーダーブックを取得"""
async with pybotters.Client(base_url=base_url, apis=apis) as client:
r = await client.get("/orderBook/L2", params={"symbol": symbol,},)
data = await r.json()
return data
| 5,350,530 |
def fold_generator(
number_of_folds,
data,
labels,
max_per_class,
transformer_class=Standardizer
):
"""generate class balanced splits of data and labels"""
for fold in range(number_of_folds):
if isinstance(data, dict):
data_type_labels = list(data.keys())
if labels is None:
X_train, X_test = get_learning_data_in_dict_mode(
data,
labels=labels,
data_types=data_type_labels,
max_per_class=max_per_class
)
y_train = None
y_test = None
else:
X_train, y_train, X_test, y_test = get_learning_data_in_dict_mode( # noqa
data,
labels=labels,
data_types=data_type_labels,
max_per_class=max_per_class
)
for data_type in data_type_labels:
# learn normalization only on train data
transformer = transformer_class()
X_train[data_type] = transformer.apply(X_train[data_type])
X_test[data_type] = transformer.reapply(X_test[data_type])
else:
if labels is None:
X_train, X_test = get_learning_data(
data, labels=labels, max_per_class=max_per_class
)
y_train = None
y_test = None
else:
X_train, y_train, X_test, y_test = get_learning_data(
data, labels=labels, max_per_class=max_per_class
)
transformer = transformer_class()  # learn normalization only on train data
X_train = transformer.apply(X_train)
X_test = transformer.reapply(X_test)
yield {
'X_train': X_train,
'y_train': y_train,
'X_test': X_test,
'y_test': y_test,
'fold': fold
}
| 5,350,531 |
def notify_by_email():
"""Sends out notifications via email"""
samples = db.session.query(Sample)\
.filter(Sample.result_code != None)\
.filter(Sample.email_notified == False).all()
notifier = NotificationService(app)
for sample in samples:
try:
notifier.send_result_email(sample)
sample.email_notified = True
except CommError as ce:
print("Error")
| 5,350,532 |
def _load_json(json_path):
"""Load JSON from a file with a given path."""
# Note: Binary so load can detect encoding (as in Section 3 of RFC 4627)
with open(json_path, 'rb') as json_file:
try:
return json.load(json_file)
except Exception as ex:
if sys.version_info[0] >= 3:
ex2 = Exception('Error loading ' + json_path)
exec('raise ex2 from ex') # nosec
else:
ex2 = Exception('Error loading ' + json_path + ': ' + str(ex))
ex2.__cause__ = ex
raise ex2
| 5,350,533 |
def add_ticket_to_package(ticket, package):
"""Add a ticket to the definition file for a package and commit that
change."""
project_repo = Repo.discover()
package_file = Path(
project_repo.path, 'deploy', 'packages', package, 'tickets.yml')
with package_file.open('a') as f:
f.write(f'- {ticket}\n')
git.stage_file(project_repo, package_file)
git.commit(project_repo, f'Add ticket {ticket} to package {package}')
logger.info(f'Added ticket {ticket} to package {package}')
| 5,350,534 |
def op_skip_to_output_sig(c: AF_Continuation) -> None:
"""
WordDefinition(Op_name) -> WordDefinition(Op_name), OutputTypeSignature(TypeSignature).
Used when a WordDefition is encountered that has no InputTypeSignature.
"""
sig = TypeSignature([],[])
c.stack.push(StackObject(sig,TOutputTypeSignature))
| 5,350,535 |
async def create_comment_in_post(*, post: models.Post = Depends(resolve_post), created_comment: CreateComment,
current_user: models.User = Depends(resolve_current_user),
db: Session = Depends(get_db)):
"""Create a comment in a post."""
return crud.create_comment(db, author_id=current_user.id, parent_resub_id=post.parent_resub_id,
parent_post_id=post.id, parent_comment_id=None, content=created_comment.content)
| 5,350,536 |
def sigma_bot(sigma_lc_bot, sigma_hc_bot, x_aver_bot_mass):
"""
Calculates the surface tension at the bottom of column.
Parameters
----------
sigma_lc_bot : float
The surface tension of the low-boiling component at the bottom of the column, [N / m]
sigma_hc_bot : float
The surface tension of the high-boiling component at the bottom of the column, [N / m]
x_aver_bot_mass : float
The average mass concentration at the bottom of the column, [kg/kg]
Returns
-------
sigma_bot : float
The surface tension at the bottom of column, [N / m]
References
----------
&&&&&
"""
return (sigma_lc_bot * x_aver_bot_mass + (1 - x_aver_bot_mass) * sigma_hc_bot)
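# Worked numeric example: with a low-boiling mass fraction of 0.4,
# 0.02 * 0.4 + 0.6 * 0.03 = 0.026 N/m (the input values are illustrative).
assert abs(sigma_bot(0.02, 0.03, 0.4) - 0.026) < 1e-12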
| 5,350,537 |
def usage():
"""usage information"""
print(r"""
%(EXEC)s--
Calculate volumes of a given image
Usage: %(EXEC)s [OPTIONS]
OPTIONS:
Basic:
[-i --input ] < file > Input image
[-M --mask ] < file > Mask image in the --input image space (optional)
[-s --subid ] < ID > Subject ID (optional)
[-o --output ] < file > Output csv file (optional)
[-n --nonzero ] Calculate volumes for the non-zero intensities (optional)
[-I --intensity ] < int > Calculate volumes for the given intensity (optional)
Derived:
[-d --derived ] Calculate derived volumes as well (optional)
[-v --ICV ] < file > File to calculate the ICV from (optional, valid only if calculating derived volumes)
[-m --map ] < file > Mapping for all derived ROIs (optional, valid only if calculating derived volumes)
default: '@DATA_DIR@/List/MUSE_DerivedROIs_Mappings.csv'
[-V --version ] Version information
""" % {'EXEC':EXEC_NAME})
| 5,350,538 |
def _normalize_zonal_lat_lon(ds: xr.Dataset) -> xr.Dataset:
"""
In case the dataset only contains latitude_centers and is a zonal mean dataset,
the longitude dimension is created and filled with the variable value of the corresponding latitude.
:param ds: some xarray dataset
:return: a normalized xarray dataset
"""
if 'latitude_centers' not in ds.coords or 'lon' in ds.coords:
return ds
ds_zonal = ds.copy()
resolution = (ds.latitude_centers[1].values - ds.latitude_centers[0].values)
ds_zonal = ds_zonal.assign_coords(
lon=[i + (resolution / 2) for i in np.arange(-180.0, 180.0, resolution)])
for var in ds_zonal.data_vars:
if 'latitude_centers' in ds_zonal[var].dims:
ds_zonal[var] = xr.concat([ds_zonal[var] for _ in ds_zonal.lon], 'lon')
ds_zonal[var]['lon'] = ds_zonal.lon
var_dims = ds_zonal[var].attrs.get('dimensions', [])
lat_center_index = var_dims.index('latitude_centers')
var_dims.remove('latitude_centers')
var_dims.append('lat')
var_dims.append('lon')
var_chunk_sizes = ds_zonal[var].attrs.get('chunk_sizes', [])
lat_chunk_size = var_chunk_sizes[lat_center_index]
del var_chunk_sizes[lat_center_index]
var_chunk_sizes.append(lat_chunk_size)
var_chunk_sizes.append(ds_zonal.lon.size)
ds_zonal = ds_zonal.rename_dims({'latitude_centers': 'lat'})
ds_zonal = ds_zonal.assign_coords(lat=ds.latitude_centers.values)
ds_zonal = ds_zonal.drop_vars('latitude_centers')
ds_zonal = ds_zonal.transpose(..., 'lat', 'lon')
has_lon_bnds = 'lon_bnds' in ds_zonal.coords or 'lon_bnds' in ds_zonal
if not has_lon_bnds:
lon_values = [[i - (resolution / 2), i + (resolution / 2)] for i in ds_zonal.lon.values]
ds_zonal = ds_zonal.assign_coords(lon_bnds=xr.DataArray(lon_values, dims=['lon', 'bnds']))
has_lat_bnds = 'lat_bnds' in ds_zonal.coords or 'lat_bnds' in ds_zonal
if not has_lat_bnds:
lat_values = [[i - (resolution / 2), i + (resolution / 2)] for i in ds_zonal.lat.values]
ds_zonal = ds_zonal.assign_coords(lat_bnds=xr.DataArray(lat_values, dims=['lat', 'bnds']))
ds_zonal.lon.attrs['bounds'] = 'lon_bnds'
ds_zonal.lon.attrs['long_name'] = 'longitude'
ds_zonal.lon.attrs['standard_name'] = 'longitude'
ds_zonal.lon.attrs['units'] = 'degrees_east'
ds_zonal.lat.attrs['bounds'] = 'lat_bnds'
ds_zonal.lat.attrs['long_name'] = 'latitude'
ds_zonal.lat.attrs['standard_name'] = 'latitude'
ds_zonal.lat.attrs['units'] = 'degrees_north'
return ds_zonal
| 5,350,539 |
def setup_audio(song_filename):
"""Setup audio file
and setup setup the output device.output is a lambda that will send data to
fm process or to the specified ALSA sound card
:param song_filename: path / filename to music file
:type song_filename: str
:return: output, fft_calc, music_file, light_delay
:rtype tuple: lambda, fft.FFT, decoder, int
"""
# Set up audio
force_header = False
if any([ax for ax in [".mp4", ".m4a", ".m4b"] if ax in song_filename]):
force_header = True
music_file = decoder.open(song_filename, force_header)
sample_rate = music_file.getframerate()
num_channels = music_file.getnchannels()
fft_calc = fft.FFT(CHUNK_SIZE,
sample_rate,
hc.GPIOLEN,
cm.audio_processing.min_frequency,
cm.audio_processing.max_frequency,
cm.audio_processing.custom_channel_mapping,
cm.audio_processing.custom_channel_frequencies)
# setup output device
output = set_audio_device(sample_rate, num_channels)
chunks_per_sec = ((16 * num_channels * sample_rate) / 8) / CHUNK_SIZE
light_delay = int(cm.audio_processing.light_delay * chunks_per_sec)
# Output a bit about what we're about to play to the logs
nframes = str(music_file.getnframes() / sample_rate)
log.info("Playing: " + song_filename + " (" + nframes + " sec)")
return output, fft_calc, music_file, light_delay
| 5,350,540 |
def _collect_package_prefixes(package_dir, packages):
"""
Collect the list of prefixes for all packages
The list is used to match paths in the install manifest to packages
specified in the setup.py script.
The list is sorted in decreasing order of prefix length so that paths are
matched with their immediate parent package, instead of any of that
package's ancestors.
For example, consider the project structure below. Assume that the
setup call was made with a package list featuring "top" and "top.bar", but
not "top.not_a_subpackage".
::
top/ -> top/
__init__.py -> top/__init__.py (parent: top)
foo.py -> top/foo.py (parent: top)
bar/ -> top/bar/ (parent: top)
__init__.py -> top/bar/__init__.py (parent: top.bar)
not_a_subpackage/ -> top/not_a_subpackage/ (parent: top)
data_0.txt -> top/not_a_subpackage/data_0.txt (parent: top)
data_1.txt -> top/not_a_subpackage/data_1.txt (parent: top)
The paths in the generated install manifest are matched to packages
according to the parents indicated on the right. Only packages that are
specified in the setup() call are considered. Because of the sort order,
the data files on the bottom would have been mapped to
"top.not_a_subpackage" instead of "top", proper -- had such a package been
specified.
"""
return list(
sorted(
((package_dir[package].replace(".", "/"), package) for package in packages),
key=lambda tup: len(tup[0]),
reverse=True,
)
)
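# Worked example mirroring the docstring layout, using a hypothetical package_dir
# mapping: the longer prefix sorts first, so files match their immediate parent
# package ("top.bar" before "top").
assert _collect_package_prefixes(
{"top": "top", "top.bar": "top/bar"}, ["top", "top.bar"]
) == [("top/bar", "top.bar"), ("top", "top")]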
| 5,350,541 |
def _grae_ymin_ ( graph ) :
"""Get minimal y for the points
>>> graph = ...
>>> ymin = graph.ymin ()
"""
ymn = None
np = len(graph)
for ip in range( np ) :
x , exl , exh , y , eyl , eyh = graph[ip]
y = y - abs( eyl )
if None == ymn or y <= ymn : ymn = y
return ymn
| 5,350,542 |
def get_working_id(id_: str, entry_id: str) -> str:
"""Sometimes new scanned files ID will be only a number. Should connect them with base64(MD5:_id).
Fixes bug in VirusTotal API.
Args:
entry_id: the entry id connected to the file
id_: id given from the API
Returns:
A working ID that we can use in other commands.
"""
if isinstance(id_, str) and id_.isnumeric() or (isinstance(id_, int)):
demisto.debug(f'Got an integer id from file-scan. {id_=}, {entry_id=}\n')
raise DemistoException(
f'Got an int {id_=} as analysis report. This is a bug in VirusTotal v3 API.\n'
f'While VirusTotal team is fixing the problem, try to resend the file.'
)
return id_
| 5,350,543 |
def cls():
"""nt (windows) = cls | unix = clear"""
os.system('cls' if os.name == 'nt' else 'clear')
| 5,350,544 |
def remove_empty_blocks(blocks: List[CodeBlock]) -> None:
"""
Removes empty blocks from given list, keeping the program correct
Ex:
[a=1]> > > [write(a)]>
turns into
[a=1]> [write(a)]>
"""
while isinstance(blocks[0], CodeBlockEmpty):
# First block is empty. Remove it from the list:
first_empty_block = blocks.pop(0)
# And move it's next block to the beginning:
blocks.remove(first_empty_block.next_block)
blocks.insert(0, first_empty_block.next_block)
for block in blocks:
if isinstance(block, CodeBlockEmpty):
# We'll remove empty blocks later, after removing their dependencies
continue
elif isinstance(block, CodeBlockStatement):
visited_blocks = []
while isinstance(block.next_block, CodeBlockEmpty):
block.next_block = block.next_block.next_block
if block.next_block in visited_blocks:
# We have cycle (Ex: [a=1]> > < )
raise CompilerError(block.next_block.line, block.next_block.column,
"Cycle in blocks with no content")
visited_blocks.append(block.next_block)
elif isinstance(block, CodeBlockCondition):
visited_blocks = []
while isinstance(block.true_block, CodeBlockEmpty):
block.true_block = block.true_block.next_block
if block.true_block in visited_blocks:
# We have cycle (Ex: [a=1]> > < )
raise CompilerError(block.true_block.line, block.true_block.column,
"Cycle in blocks with no content")
visited_blocks.append(block.true_block)
visited_blocks.clear()
while isinstance(block.false_block, CodeBlockEmpty):
block.false_block = block.false_block.next_block
if block.false_block in visited_blocks:
# We have cycle (Ex: [a=1]> > < )
raise CompilerError(block.false_block.line, block.false_block.column,
"Cycle in blocks with no content")
visited_blocks.append(block.false_block)
# And remove empty blocks from list
blocks[:] = [block for block in blocks if not isinstance(block, CodeBlockEmpty)]
| 5,350,545 |
def merck_net(input_shape=(128,)):
"""
# The recommended network presented in the paper: Junshui Ma et. al., Deep Neural Nets as a Method for Quantitative
# Structure Activity Relationships
# URL: http://www.cs.toronto.edu/~gdahl/papers/deepQSARJChemInfModel2015.pdf
# :param input_shape: dim of input features
# :return: a keras model
"""
from keras import models
from keras.layers import Dense
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.layers import Dropout
from keras.layers.noise import GaussianNoise
from keras.regularizers import l2
# TODO: is kernel_regularizer=l2(0.0001) the best way to add weight cost strength?
model = models.Sequential()
model.add(Dense(4000, activation='relu', input_shape=input_shape, kernel_regularizer=l2(0.0001)))
model.add(Dropout(0.25))
model.add(Dense(2000, activation='relu', input_shape=input_shape, kernel_regularizer=l2(0.0001)))
model.add(Dropout(0.25))
model.add(Dense(1000, activation='relu', input_shape=input_shape, kernel_regularizer=l2(0.0001)))
model.add(Dropout(0.25))
model.add(Dense(1000, activation='relu', input_shape=input_shape, kernel_regularizer=l2(0.0001)))
model.add(Dropout(0.10))
model.add(Dense(1, activation=None, use_bias=True, kernel_regularizer=l2(0.0001)))
# model.summary()
return model
| 5,350,546 |
def vec_sum(a, b):
"""Compute the sum of two vector given in lists."""
return [va + vb for va, vb in zip(a, b)]
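
# --- Hedged usage sketch (added for illustration, not part of the original source) ---
print(vec_sum([1, 2, 3], [10, 20, 30]))  # [11, 22, 33]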
| 5,350,547 |
def one_v_one_classifiers(x,y,lambd,max_iters,eps=.0001):
"""
Function for running a 1v1 classifier on many classes using the linearsvm function.
Inputs:
x: numpy matrix
a matrix of size nxd
y: numpy matrix
a matrix of size nx1
    lambd: float
          lambda, the penalization constant
    max_iters: int
          maximum number of iterations
    eps: float
          the stopping criterion for the normalized gradient. Default: .0001
    Returns:
    vals: numpy matrix
          beta values for each pair of classes
    i_vals: numpy matrix
          matrix of first class tested for 1v1 comparison of class i vs class j
    j_vals: numpy matrix
          matrix of second class tested for 1v1 comparison of class i vs class j
    objs: numpy array
          second value returned by run_svm (the objective value) for each class pair
"""
classified_vals = []
i_vals = []
j_vals = []
classes = len(np.unique(y))
t_init = 10**-1
t0 = time.time()
vals_to_run = []
k=3 # 3 fold CV
num_lambdas = 3 # num lambdas to try in CV
vals = []
vals_to_run = [] # group
for i in range(classes):
for j in range(i+1,classes):
features_to_test = x[(y==i)|(y==j)]
scaler = preprocessing.StandardScaler()
features_to_test = scaler.fit_transform(features_to_test)
labels_to_test = y[(y==i)|(y==j)]
labels_to_test = ((labels_to_test - min(labels_to_test)) / (max(labels_to_test)-min(labels_to_test)))*2-1
# save a list of parameters to call run_svm as a list
vals_to_run.append( (features_to_test,
labels_to_test,
k,
max_iters,
num_lambdas ,
t_init,
lambd ,
eps) )
#classified_vals.append(betas[-1])
i_vals.append(i)
j_vals.append(j)
print("setup complete. Time :",time.time()-t0, " " , time.strftime('%X %x %Z'))
t0 = time.time()
#do computation
pool = ThreadPool(35)
vals_temp = pool.starmap(run_svm,vals_to_run)
objs = np.asarray(vals_temp)[:,1]
vals_temp = np.asarray(vals_temp)[:,0]
vals = vals + list(vals_temp)
return np.asarray(vals), np.asarray(i_vals) , np.asarray(j_vals), objs
| 5,350,548 |
def rlsp(mdp, s_current, p_0, horizon, temp=1, epochs=1, learning_rate=0.2,
r_prior=None, r_vec=None, threshold=1e-3, check_grad_flag=False):
"""The RLSP algorithm"""
def compute_grad(r_vec):
# Compute the Boltzmann rational policy \pi_{s,a} = \exp(Q_{s,a} - V_s)
policy = value_iter(mdp, 1, mdp.f_matrix @ r_vec, horizon, temp)
d_last_step, d_last_step_list = compute_d_last_step(
mdp, policy, p_0, horizon, return_all=True)
if d_last_step[s_current] == 0:
print('Error in om_method: No feasible trajectories!')
return r_vec
expected_features, expected_features_list = compute_feature_expectations(
mdp, policy, p_0, horizon)
G = compute_g(mdp, policy, p_0, horizon, d_last_step_list, expected_features_list)
# Compute the gradient
dL_dr_vec = G[s_current] / d_last_step[s_current]
# Gradient of the prior
        if r_prior is not None: dL_dr_vec += r_prior.logdistr_grad(r_vec)
return dL_dr_vec
def compute_log_likelihood(r_vec):
policy = value_iter(mdp, 1, mdp.f_matrix @ r_vec, horizon, temp)
d_last_step = compute_d_last_step(mdp, policy, p_0, horizon)
log_likelihood = np.log(d_last_step[s_current])
        if r_prior is not None: log_likelihood += np.sum(r_prior.logpdf(r_vec))
return log_likelihood
def get_grad(_):
"""dummy function for use with check_grad()"""
return dL_dr_vec
if r_vec is None:
r_vec = 0.01*np.random.randn(mdp.f_matrix.shape[1])
print('Initial reward vector: {}'.format(r_vec))
if check_grad_flag: grad_error_list=[]
for i in range(epochs):
dL_dr_vec = compute_grad(r_vec)
if check_grad_flag:
grad_error_list.append(check_grad(compute_log_likelihood, get_grad, r_vec))
# Gradient ascent
r_vec = r_vec + learning_rate * dL_dr_vec
# with printoptions(precision=4, suppress=True):
# print('Epoch {}; Reward vector: {}'.format(i, r_vec))
# if check_grad_flag: print('grad error: {}'.format(grad_error_list[-1]))
if np.linalg.norm(dL_dr_vec) < threshold:
if check_grad_flag:
print()
print('Max grad error: {}'.format(np.amax(np.asarray(grad_error_list))))
print('Median grad error: {}'.format(np.median(np.asarray(grad_error_list))))
break
return r_vec
| 5,350,549 |
def rotate(img, angle=0, order=1):
"""Rotate image by a certain angle around its center.
Parameters
----------
img : ndarray(uint16 or uint8)
Input image.
    angle : int or float
Rotation angle in degrees in counter-clockwise direction.
Returns
-------
rotated : ndarray(uint16 or uint8)
Rotated version of the input.
Examples
--------
rotate(image, 30)
rotate(image, 180)
"""
if not _is_numpy_image(img):
raise TypeError('img should be numpy ndarray. Got {}'.format(type(img)))
    if not isinstance(angle, numbers.Number):
        raise TypeError('Angle should be a number. Got {}'.format(type(angle)))
img_new = transform.rotate(img, angle, order=order, preserve_range=True)
img_new = img_new.astype(img.dtype)
return img_new
| 5,350,550 |
def activate(request, uidb64, token):
"""Function that activates the user account."""
try:
uid = force_text(urlsafe_base64_decode(uidb64))
user = User.objects.get(pk=uid)
except(TypeError, ValueError, OverflowError, User.DoesNotExist):
user = None
if user is not None and account_activation_token.check_token(user, token):
user.is_active = True
user.save()
login(request, user)
        messages.success(request, 'Your account has been activated successfully. You can now log in.')
return redirect('login')
else:
        messages.warning(request, 'The activation link is invalid or the account has already been activated.')
return redirect('login')
| 5,350,551 |
def dfn(*args, **kwargs):
"""
The HTML Definition Element (<dfn>) represents the defining
instance of a term.
"""
return el('dfn', *args, **kwargs)
| 5,350,552 |
def _compile_rds_files_TRHP(array_codes, years_processed, filetypes_to_check, extensions_to_check,
subfolder_filestypes):
""" Get indexed information from server for
Hydrothermal Vent Fluid Temperature and Resistivity (RS03INT1-MJ03C-10-TRHPHA301)
Example where dat exists:
https://rawdata.oceanobservatories.org/files/RS03INT1/MJ03C/TRHPHA301/2017/08/
['TRHPHA301_10.31.8.10_2101_20171202T0211_UTC.dat',
'TRHPHA301_10.31.8.10_2101_20171203T0000_UTC.dat',
. . .]
Cache build out with actual reference designators:
{
"rds_TRHP": {
"RS03INT1-MJ03C-10-TRHPHA301": {
"2017": {
"08": {
"22": [
{
"date": "2017-08-22",
"datetime": "20170822T075800.000Z",
"ext": ".dat",
"filename": "TRHPHA301_10.31.8.10_2101_20170822T0758_UTC.dat",
"rd": "RS03INT1-MJ03C-10-TRHPHA301",
"url": "/RS03INT1/MJ03C/TRHPHA301/2017/08/"
},
{
"date": "2017-08-22",
"datetime": "20170822T103300.000Z",
"ext": ".dat",
"filename": "TRHPHA301_10.31.8.10_2101_20170822T1033_UTC.dat",
"rd": "RS03INT1-MJ03C-10-TRHPHA301",
"url": "/RS03INT1/MJ03C/TRHPHA301/2017/08/"
},
{
"date": "2017-08-22",
"datetime": "20170822T104900.000Z",
"ext": ".dat",
"filename": "TRHPHA301_10.31.8.10_2101_20170822T1049_UTC.dat",
"rd": "RS03INT1-MJ03C-10-TRHPHA301",
"url": "/RS03INT1/MJ03C/TRHPHA301/2017/08/"
},
{
"date": "2017-08-22",
"datetime": "20170822T105200.000Z",
"ext": ".dat",
"filename": "TRHPHA301_10.31.8.10_2101_20170822T1052_UTC.dat",
"rd": "RS03INT1-MJ03C-10-TRHPHA301",
"url": "/RS03INT1/MJ03C/TRHPHA301/2017/08/"
}
],
"""
debug = False
debug_trace = False
debug_details = False
time = True
try:
# Local variables.
rds_base_url = get_rds_base_url()
base_url = rds_base_url + '/'
timeout, timeout_read = get_uframe_timeout_info()
# Specific instruments processed.
actual_reference_designator = 'RS03INT1-MJ03C-10-TRHPHA301'
# Create rds navigation urls.
work_nav_urls = {}
work_nav_urls[actual_reference_designator] = None
# Verify sensor type requested in processed in this function, else return {}.
sensor_type = filetypes_to_check
if sensor_type != ['TRHP']:
return {}
# Determine cache destination.
cache_destination = get_target_cache_by_sensor_type(sensor_type)
if debug: print '\n debug -- Entered _compile_rds_files_TRHP...'
if time:
print '\n Compiling cache for ', filetypes_to_check[0].replace('-','')
print '\t-- Arrays processed: ', array_codes
print '\t-- Years processed: ', years_processed
print '\t-- Sensor types processed: ', filetypes_to_check
print '\t-- Extensions checked: ', extensions_to_check
print '\t-- Subfolder filetypes to check: ', subfolder_filestypes
# Time
start = datetime.now()
if time: print '\t-- Start time: ', start
# Get and process returned content for links.
r = requests.get(base_url, timeout=(timeout, timeout_read))
soup = BeautifulSoup(r.content, "html.parser")
ss = soup.findAll('a')
data_dict = {}
# Get root entry (either subsite or subsite-node).
ss_reduced = []
for s in ss:
if 'href' in s.attrs:
len_href = len(s.attrs['href'])
if len_href == 9 or len_href == 15 or len_href == 28:
ss_reduced.append(s)
if debug_trace: print '\n debug -- The root folder items: ', len(ss_reduced)
for s in ss_reduced:
# Limit to those arrays identified in array_codes, not processing platforms at this time
# Lock down to specific subsite for this sensor type.
rd = s.attrs['href']
            if rd and (len(rd) == 9 or len(rd) == 15):
if len(rd) == 9:
subsite = rd.rstrip('/')
if subsite != 'RS03INT1':
continue
else:
continue
#-----------------------------------------------
# Level 1 - subsite processing
d_url = base_url+s.attrs['href']
subfolders, file_list = _get_subfolder_list(d_url,
filetypes=subfolder_filestypes,
extensions=extensions_to_check)
if not subfolders or subfolders is None:
continue
# Level 2 - node processing
if debug_details: print '\n debug -- Now walking subfolders...'
for item in subfolders:
if len(item) != 6:
continue
# Determine if item is a folder link or file
if '/' in item:
subfolder_url = base_url + rd + item
node_subfolders, node_file_list = _get_subfolder_list(subfolder_url,
filetypes=subfolder_filestypes,
extensions=extensions_to_check)
if not node_subfolders or node_subfolders is None:
continue
# Level 3 - processing sensor information
if node_subfolders:
for node_item in node_subfolders:
#==================
ok_to_go = False
for check in filetypes_to_check:
if check in node_item:
ok_to_go = True
break
if not ok_to_go:
continue
#================
node_folder_url = subfolder_url + node_item
nav_url = '/' + node_folder_url.replace(base_url, '')
detail_subfolders, detail_file_list = _get_subfolder_list(node_folder_url,
filetypes=subfolder_filestypes,
extensions=extensions_to_check)
if detail_subfolders:
# Process years
for year in detail_subfolders:
#=======================================
# Remove to process all years
folder_year = year
year_tmp = folder_year.rstrip('/')
if year_tmp not in years_processed:
continue
#=======================================
year_url = node_folder_url + year
months_subfolders, months_file_list = _get_subfolder_list(year_url, None)
if months_subfolders:
for month in months_subfolders:
month_url = year_url + month
days_subfolders, days_file_list = \
_get_subfolder_list(month_url,
filetypes=filetypes_to_check,
extensions=extensions_to_check)
if not days_file_list:
continue
date_part = None
for filename in days_file_list:
if debug: print '\n debug ------------ Processing filename: ', filename
if '_UTC.dat' in filename:
tmp_filename = filename.replace('_UTC.dat', '')
junk_part, date_part = tmp_filename.rsplit('_', 1)
# Process date_part (i.e. 20170815T1927)
if date_part is None:
continue
_dt = date_part
# Process file datetime based on extension.
ext = None
for extension in extensions_to_check:
if extension in filename:
ext = extension
break
dt = _dt + '00.000Z'
_year = _dt[0:4]
_month = _dt[4:6]
_day = _dt[6:8]
_url = urllib.unquote(month_url).decode('utf8')
if rds_base_url in _url:
_url = _url.replace(rds_base_url, '')
_url = _url.replace(filename, '')
tmp_item = {'url': _url,
'filename': filename,
'datetime': dt,
'ext': ext}
# Update rds_nav_urls for sensor.
work_nav_urls[actual_reference_designator] = rds_base_url + nav_url
# Custom for instrument
ref_des = actual_reference_designator
# Build cache dictionary entry
if ref_des not in data_dict:
data_dict[str(ref_des)] = {}
if _year not in data_dict[ref_des]:
data_dict[ref_des][_year] = {}
if _month not in data_dict[ref_des][_year]:
data_dict[ref_des][_year][_month] = {}
if _day not in data_dict[ref_des][_year][_month]:
data_dict[ref_des][_year][_month][_day] = []
# Add date to item
_year = _year.rstrip('/')
_month = _month.rstrip('/')
_day = _day.rstrip('/')
tmp_item['date'] = '-'.join([_year, _month, _day])
tmp_item['rd'] = ref_des
# Add item for cache dictionary.
data_dict[ref_des][_year][_month][_day].append(tmp_item)
else:
# Item is not a folder
continue
end = datetime.now()
if time:
print '\t-- End time: ', end
print '\t-- Time to compile information for cache: %s' % str(end - start)
# Process navigation urls.
add_nav_urls_to_cache(work_nav_urls, 'TRHP')
# Populate cache for sensor type.
if data_dict and data_dict is not None:
cache.delete(cache_destination)
cache.set(cache_destination, data_dict, timeout=get_cache_timeout())
result_keys = data_dict.keys()
result_keys.sort()
print '\n\t-- Number of items in %s cache(%d): %s' % (cache_destination, len(result_keys), result_keys)
return data_dict
except Exception as err:
message = str(err)
current_app.logger.info(message)
raise Exception(message)
| 5,350,553 |
def handle_post_runs(project_id, deployment_id):
"""Handles POST requests to /."""
is_experiment_deployment = False
experiment_deployment = request.args.get('experimentDeploy')
if experiment_deployment and experiment_deployment == 'true':
is_experiment_deployment = True
run_id = create_deployment_run(project_id, deployment_id, is_experiment_deployment)
return jsonify({"message": "Pipeline running.", "runId": run_id})
| 5,350,554 |
def get_in_reply_to_user_id(tweet):
"""
Get the user id of the uesr whose Tweet is being replied to, and None
if this Tweet is not a reply. \n
Note that this is unavailable in activity-streams format
Args:
tweet (Tweet): A Tweet object (or a dictionary)
Returns:
str: the user id of the user whose Tweet is being replied to, None
(if not a reply), or for activity-streams raise a NotAvailableError
Example:
>>> from tweet_parser.getter_methods.tweet_reply import *
>>> original_format_dict = {
... "created_at": "Wed May 24 20:17:19 +0000 2017",
... "in_reply_to_user_id_str": "2382763597"
... }
>>> get_in_reply_to_user_id(original_format_dict)
'2382763597'
"""
if is_original_format(tweet):
return tweet["in_reply_to_user_id_str"]
else:
raise NotAvailableError("Gnip activity-streams format does not" +
" return the replied to user's id")
| 5,350,555 |
def cl35():
"""Cl35 ENDF data (contains RML resonance range)"""
endf_data = os.environ['OPENMC_ENDF_DATA']
filename = os.path.join(endf_data, 'neutrons', 'n-017_Cl_035.endf')
return openmc.data.IncidentNeutron.from_endf(filename)
| 5,350,556 |
def release_definition_show(definition_id=None, name=None, open_browser=False, team_instance=None, project=None,
detect=None):
"""Get the details of a release definition.
:param definition_id: ID of the definition.
:type definition_id: int
:param name: Name of the definition. Ignored if --id is supplied.
:type name: str
:param open_browser: Open the definition summary page in your web browser.
:type open_browser: bool
:param team_instance: VSTS account or TFS collection URL. Example: https://myaccount.visualstudio.com
:type team_instance: str
:param project: Name or ID of the team project.
:type project: str
:param detect: Automatically detect values for instance and project. Default is "on".
:type detect: str
:rtype: ReleaseDefinitionReference
"""
team_instance, project = resolve_instance_and_project(detect=detect,
team_instance=team_instance,
project=project)
client = get_release_client(team_instance)
if definition_id is None:
if name is not None:
definition_id = get_definition_id_from_name(name, client, project)
else:
raise ValueError("Either the --id argument or the --name argument must be supplied for this command.")
release_definition = client.get_release_definition(definition_id=definition_id, project=project)
if open_browser:
_open_definition(release_definition)
return release_definition
| 5,350,557 |
def GKtoUTM(ea, no=None, zone=32, gk=None, gkzone=None):
"""Transform any Gauss-Krueger to UTM autodetect GK zone from offset."""
if gk is None and gkzone is None:
if no is None:
rr = ea[0][0]
else:
            if isinstance(ea, (list, tuple)):
rr = ea[0]
else:
rr = ea
gkzone = int(floor(rr * 1e-6))
print(gkzone)
if gkzone <= 0 or gkzone >= 5:
print("cannot detect valid GK zone")
pyproj = opt_import('pyproj', 'coordinate transformations')
if pyproj is None:
return None
gk = pyproj.Proj(init="epsg:"+str(31464+gkzone))
wgs84 = pyproj.Proj(init="epsg:4326") # pure ellipsoid to doubel transform
utm = pyproj.Proj(proj='utm', zone=zone, ellps='WGS84') # UTM
if no is None: # two-column matrix
lon, lat = pyproj.transform(gk, wgs84, ea[0], ea[1])
else:
lon, lat = pyproj.transform(gk, wgs84, ea, no)
return utm(lon, lat)
| 5,350,558 |
def run_metric_image(metric_frontend_name, docker_client, common_labels,
prometheus_port, prom_config_path, log_config,
extra_container_kwargs):
"""
Run the prometheus image.
    :param metric_frontend_name: container name
    :param docker_client: The docker client object
    :param common_labels: Labels to pass in
    :param prometheus_port: Host port on which to expose Prometheus (container port 9090)
    :param prom_config_path: Where config file lives
    :param log_config: Docker logging configuration for the container
    :param extra_container_kwargs: Kwargs to pass in.
    :return: None
"""
# CMD comes from https://github.com/prometheus/prometheus/blob/release-2.1/Dockerfile
metric_cmd = [
"--config.file=/etc/prometheus/prometheus.yml",
"--storage.tsdb.path=/prometheus",
"--web.console.libraries=/etc/prometheus/console_libraries",
"--web.console.templates=/etc/prometheus/consoles",
"--web.enable-lifecycle"
]
metric_labels = common_labels.copy()
run_container(
docker_client=docker_client,
image="prom/prometheus:{}".format(PROM_VERSION),
cmd=metric_cmd,
name=metric_frontend_name,
ports={'9090/tcp': prometheus_port},
log_config=log_config,
volumes={
prom_config_path: {
'bind': '/etc/prometheus/prometheus.yml',
'mode': 'ro'
}
},
user='root', # prom use nobody by default but it can't access config.
labels=metric_labels,
extra_container_kwargs=extra_container_kwargs)
| 5,350,559 |
def robots(req):
"""
.. seealso:: http://www.sitemaps.org/protocol.html#submit_robots
"""
return Response(
"Sitemap: %s\n" % req.route_url('sitemapindex'), content_type="text/plain")
| 5,350,560 |
def bolling(asset:list, samples:int=20, alpha:float=0, width:float=2):
"""
According to MATLAB:
BOLLING(ASSET,SAMPLES,ALPHA,WIDTH) plots Bollinger bands for given ASSET
data vector. SAMPLES specifies the number of samples to use in computing
the moving average. ALPHA is an optional input that specifies the exponent
used to compute the element weights of the moving average. The default
ALPHA is 0 (simple moving average). WIDTH is an optional input that
specifies the number of standard deviations to include in the envelope. It
is a multiplicative factor specifying how tight the bounds should be made
around the simple moving average. The default WIDTH is 2. This calling
syntax plots the data only and does not return the data.
Note: The standard deviations are normalized by (N-1) where N is the
sequence length.
"""
    import numpy as np
    # main computation: build the weight vector
r = len(asset)
i = np.arange(1,samples+1) ** alpha
w = i / sum(i)
# build moving average vectors with for loops
a = np.zeros((r-samples, 1))
b = a.copy()
for i in range(samples, r):
a[i-samples] = np.sum( asset[i-samples:i] * w )
b[i-samples] = width * np.sum(np.std( asset[i-samples:i] * w ))
return a,a+b,a-b
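
# --- Hedged usage sketch (added for illustration, not part of the original source) ---
# Synthetic price series only; shows the call signature and the output shapes
# (len(asset) - samples rows for the middle, upper and lower bands).
import numpy as np
_prices = list(np.linspace(100.0, 110.0, 60))
_mid, _upper, _lower = bolling(_prices, samples=20)
print(_mid.shape, _upper.shape, _lower.shape)  # (40, 1) for each band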
| 5,350,561 |
def metrics():
"""
Expose metrics for the Prometheus collector
"""
collector = SensorsDataCollector(sensors_data=list(sensors.values()), prefix='airrohr_')
return Response(generate_latest(registry=collector), mimetype='text/plain')
| 5,350,562 |
def solar_energy_striking_earth_today() -> dict:
"""Get number of solar energy striking earth today."""
return get_metric_of(label='solar_energy_striking_earth_today')
| 5,350,563 |
def write_data_str(geoms, grads, hessians):
""" Writes a string containing the geometry, gradient, and Hessian
for either a single species or points along a reaction path
that is formatted appropriately for the ProjRot input file.
:param geoms: geometries
:type geoms: list
:param grads: gradients
:type grads: list
:param hessians: Hessians
:type hessians: list
:rtype: str
"""
# if not isinstance(geoms, list):
# geoms = [geoms]
# if not isinstance(grads, list):
# grads = [grads]
# if not isinstance(hessians, list):
# hessians = [hessians]
nsteps = len(geoms)
data_str = ''
for i, (geo, grad, hess) in enumerate(zip(geoms, grads, hessians)):
data_str += 'Step {0}\n'.format(str(i+1))
data_str += 'geometry\n'
data_str += _format_geom_str(geo)
data_str += 'gradient\n'
data_str += _format_grad_str(geo, grad)
data_str += 'Hessian\n'
data_str += _format_hessian_str(hess)
if i != nsteps-1:
data_str += '\n'
return remove_trail_whitespace(data_str)
| 5,350,564 |
def gen_chart_name(data: types.ChartAxis,
formatter: Dict[str, Any],
device: device_info.DrawerBackendInfo
) -> List[drawings.TextData]:
"""Generate the name of chart.
Stylesheets:
- The `axis_label` style is applied.
Args:
data: Chart axis data to draw.
formatter: Dictionary of stylesheet settings.
device: Backend configuration.
Returns:
List of `TextData` drawings.
"""
style = {'zorder': formatter['layer.axis_label'],
'color': formatter['color.axis_label'],
'size': formatter['text_size.axis_label'],
'va': 'center',
'ha': 'right'}
text = drawings.TextData(data_type=types.LabelType.CH_NAME,
channels=data.channels,
xvals=[types.AbstractCoordinate.LEFT],
yvals=[0],
text=data.name,
ignore_scaling=True,
styles=style)
return [text]
| 5,350,565 |
def _parse_whois_response(response):
"""
Dealing with the many many different interpretations of the whois response format.
If an empty line is encountered, start a new record
    If a line with a colon is encountered, treat everything before the first : as key and start a value
    If a line without a colon is encountered when a value is started, add it to the current value.
    If a line without a colon is encountered before a value is started, skip it.
:param response: the raw response to parse
    :return: a list of records containing (key, value) tuples
"""
    newkvre = re.compile(r"^(\s*)([^\>\%\s][^:]+):(\s*(.*))?$")
    commre = re.compile(r"^\s*[\%\>\@\;].*$")
records = []
currecord, curkey = {}, None
comment = False
for line in response.splitlines():
if line.strip() is "":
comment = False
if len(currecord):
records.append(currecord)
currecord, curkey = {}, None
continue
if comment:
continue
match = newkvre.match(line)
matchcomm = commre.match(line)
if match and matchcomm is None:
curkey = match.group(2)
val = match.group(4) if match.group(4) else ""
if curkey in currecord:
currecord[curkey] += "\n" + val
else:
currecord[curkey] = val
elif matchcomm: # part of comments
comment = True
continue
elif match is None and curkey: # this is likely part of multiline value
currecord[curkey] += "\n" + line.strip()
else:
comment = True
continue # this is likely start of comments
if len(currecord):
records.append(currecord)
_log.debug("Response parsed succesfully. %d records", len(records))
return records
| 5,350,566 |
def vector_cosine_similarity(docs: Sequence[spacy.tokens.Doc]) -> np.ndarray:
"""
Get the pairwise cosine similarity between each
document in docs.
"""
vectors = np.vstack([doc.vector for doc in docs])
return pairwise.cosine_similarity(vectors)
| 5,350,567 |
def test_ycbcr_interp(tmpdir):
"""A YCbCr TIFF has red, green, blue bands."""
with rasterio.open('tests/data/RGB.byte.tif') as src:
meta = src.meta
meta['photometric'] = 'ycbcr'
meta['compress'] = 'jpeg'
meta['count'] = 3
tiffname = str(tmpdir.join('foo.tif'))
with rasterio.open(tiffname, 'w', **meta) as dst:
assert dst.colorinterp == (
ColorInterp.red, ColorInterp.green, ColorInterp.blue)
| 5,350,568 |
def create_test_validation():
"""
Returns a constructor function for creating a Validation object.
"""
def _create_test_validation(db_session, resource, success=None, started_at=None, secret=None):
create_kwargs = {"resource": resource}
for kwarg in ['success', 'started_at', 'secret']:
if locals()[kwarg] is not None:
create_kwargs[kwarg] = locals()[kwarg]
(validation, _) = get_one_or_create(db_session, Validation, **create_kwargs)
return validation
return _create_test_validation
| 5,350,569 |
def nrmse(img, ref, axes = (0,1)):
""" Compute the normalized root mean squared error (nrmse)
:param img: input image (np.array)
:param ref: reference image (np.array)
:param axes: tuple of axes over which the nrmse is computed
:return: (mean) nrmse
"""
nominator = np.real(np.sum( (img - ref) * np.conj(img - ref), axis = axes))
denominator = np.real(np.sum( ref * np.conj(ref), axis = axes))
nrmse = np.sqrt(nominator / denominator)
return np.mean(nrmse)
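
# --- Hedged usage sketch (added for illustration, not part of the original source) ---
# Made-up reference/reconstruction pair; a uniform perturbation of 0.01 on a
# unit-valued reference yields an nrmse of about 0.01.
import numpy as np
_ref = np.ones((8, 8))
_img = _ref + 0.01
print(nrmse(_img, _ref))  # ~0.01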
| 5,350,570 |
def pairwise_comparison(column1,var1,column2,var2):
"""
Arg: column1 --> column name 1 in df
column2 --> column name 2 in df
var1---> 3 cases:
abbreviation in column 1 (seeking better model)
abbreviation in column 1 (seeking lesser value in column1 in comparison to column2)
                   empty string (seeking greater value in column2 in comparison to column1)
var2---> 3 cases:
abbreviation in column 2 (seeking better model)
abbreviation in column 2 (seeking greater value in column2 in comparison to column1)
                   empty string (seeking lesser value in column1 in comparison to column2)
    Return: abbreviation (var1 or var2) of whichever column holds the smaller/greater value
            for each row, depending on how the function is used
    Function: list comprehension over the two columns zipped together,
              used to find the data set with the smaller/greater value
"""
return [var1 if r < c else var2 for r,c in zip(column1,column2)]
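
# --- Hedged usage sketch (added for illustration, not part of the original source) ---
# Hypothetical per-row errors for two models labelled "rf" and "cnn": the label of the
# column with the strictly smaller value is kept; ties fall to var2.
print(pairwise_comparison([0.1, 0.5, 0.3], "rf", [0.2, 0.4, 0.3], "cnn"))  # ['rf', 'cnn', 'cnn']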
| 5,350,571 |
def test_is_valid_manifest_format_with_invalid_urls(caplog):
"""
Test that invalid urls are detected and error logged
Test that empty arrays and empty quote pairs are detected and error logged
"""
result = is_valid_manifest_format(
"tests/validate_manifest_format/manifests/manifest_with_invalid_urls.tsv"
)
error_log = caplog.text
assert '"wrong_protocol://test_bucket/test.txt"' in error_log
assert '"test/test.txt"' in error_log
assert '"testaws/aws/test.txt"' in error_log
assert '"://test_bucket/test.txt"' in error_log
assert '"s3://"' in error_log
assert '"gs://"' in error_log
assert '"s3://bucket_without_object"' in error_log
assert '"s3://bucket_without_object/"' in error_log
assert '"test_bucket/aws/test.txt"' in error_log
assert '"s3:/test_bucket/aws/test.txt"' in error_log
assert '"s3:test_bucket/aws/test.txt"' in error_log
assert '"://test_bucket/aws/test.txt"' in error_log
assert '"s3test_bucket/aws/test.txt"' in error_log
assert '"https://www.uchicago.edu"' in error_log
assert '"https://www.uchicago.edu/about"' in error_log
assert '"google.com/path"' in error_log
assert '""""' in error_log
assert "\"''\"" in error_log
assert '"[]"' in error_log
assert "\"['']\"" in error_log
assert '"[""]"' in error_log
assert '"["", ""]"' in error_log
assert '"["", \'\']"' in error_log
assert result == False
| 5,350,572 |
def copy_static_folder():
"""
Copies a static folder to output directory
"""
copy(TEMPLATES_DIR + "/static", OUTPUT_DIR+"/static")
| 5,350,573 |
def prep_data_CNN(documents):
"""
Prepare the padded docs and vocab_size for CNN training
"""
t = Tokenizer()
docs = list(filter(None, documents))
print("Size of the documents in prep_data {}".format(len(documents)))
t.fit_on_texts(docs)
vocab_size = len(t.word_counts)
print("Vocab size {}".format(vocab_size))
encoded_docs = t.texts_to_sequences(docs)
print("Size of the encoded documents {}".format(len(encoded_docs)))
e_lens = []
for i in range(len(encoded_docs)):
e_lens.append(len(encoded_docs[i]))
    lens_edocs = list(map(len, encoded_docs))
max_length = np.average(lens_edocs)
sequence_length = 1500 # Can use this instead of the above average max_length value
max_length = sequence_length
padded_docs = pad_sequences(
encoded_docs, maxlen=int(max_length), padding='post')
print("Length of a padded row {}".format(padded_docs.shape))
print("max_length {} and min_length {} and average {}".format(
max_length, min(lens_edocs), np.average(lens_edocs)))
return padded_docs, max_length, vocab_size, t.word_index
| 5,350,574 |
def report_error(exc, source, swallow, output=None):
"""
report_error(exc, source, swallow, output=None) -> None
Write a report about the given error to the given output stream. exc is
the exception being handled; source indicates the origin of the error
(in particular an ERRS_* constant); swallow indicates whether the report
should be short (swallow is true) or extensive (swallow is false); output
is a file-like object to write the report to, defaulting to standard error
if None.
This consults sys.exc_info() in order to retrieve a stack trace, but also
works if that is not available. In order to avoid interleaving, this
issues a single call to the output's write() method and one to its flush()
method.
"""
if output is None: output = sys.stderr
buf = ['ERROR at %r:' % (source,)]
info = sys.exc_info()
if info[1] is not exc or swallow:
buf.append(' %r' % (exc,))
buf.append('\n')
if info[1] is not None and not swallow:
buf.append('Handler traceback (likely to provide useful context):\n')
buf.extend(traceback.format_stack())
buf.extend(traceback.format_exception(*info))
output.write(''.join(buf))
output.flush()
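
# --- Hedged usage sketch (added for illustration, not part of the original source) ---
# "worker-loop" is a made-up source tag standing in for an ERRS_* constant; with
# swallow=True only the short one-line summary is written to stderr.
try:
    1 / 0
except ZeroDivisionError as _exc:
    report_error(_exc, source="worker-loop", swallow=True)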
| 5,350,575 |
def main() -> None:
"""Main function"""
args = worker.handle_args(parseargs())
actapi = worker.init_act(args)
ta_cards = worker.fetch_json(
args.thaicert_url, args.proxy_string, args.http_timeout
)
process(actapi, ta_cards["values"])
vocab = worker.fetch_json(STIX_VOCAB, args.proxy_string, args.http_timeout)
add_sectors(actapi, ta_cards["values"], vocab)
countries = worker.fetch_json(COUNTRY_REGIONS, args.proxy_string, args.http_timeout)
countries = [country["name"].lower() for country in countries]
add_countries(actapi, ta_cards["values"], countries)
tools = worker.fetch_json(THAICERT_TOOLS_URL, args.proxy_string, args.http_timeout)
add_tools(actapi, ta_cards["values"], tools["values"])
| 5,350,576 |
def gc2gd_lat(gc_lat):
"""Convert geocentric latitude to geodetic latitude using WGS84.
Parameters
-----------
gc_lat : (array_like or float)
Geocentric latitude in degrees N
Returns
---------
gd_lat : (same as input)
Geodetic latitude in degrees N
"""
wgs84_e2 = 0.006694379990141317 - 1.0
gd_lat = np.rad2deg(-np.arctan(np.tan(np.deg2rad(gc_lat)) / wgs84_e2))
return gd_lat
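
# --- Hedged usage sketch (added for illustration, not part of the original source) ---
# On WGS84 the geodetic latitude sits slightly poleward of the geocentric one.
print(gc2gd_lat(45.0))  # roughly 45.19 degrees N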
| 5,350,577 |
def _coroutine_format_stack(coro, complete=False):
"""Formats a traceback from a stack of coroutines/generators.
"""
dirname = os.path.dirname(__file__)
extracted_list = []
checked = set()
for f in _get_coroutine_stack(coro):
lineno = f.f_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
if not complete and os.path.dirname(filename) == dirname:
continue
if filename not in checked:
checked.add(filename)
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
extracted_list.append((filename, lineno, name, line))
if not extracted_list:
resp = 'No stack for %r' % coro
else:
resp = 'Stack for %r (most recent call last):\n' % coro
resp += ''.join(traceback.format_list(extracted_list))
return resp
| 5,350,578 |
def set_logging(level):
"""Convenience function to enable logging for the SDK
This function will enable logging for the SDK at the level
provided. It sends all logging information to stderr.
:param level: logging level to emit
:type level: int
"""
log = logging.getLogger(__name__)
log.setLevel(level)
for handler in log.handlers:
if handler.get_name() == __name__:
log.debug("logging handler {} is already configured".format(handler.get_name()))
break
else:
handler = logging.StreamHandler()
handler.set_name(__name__)
handler.setFormatter(logging.Formatter("%(asctime)s: %(name)s: %(message)s"))
log.addHandler(handler)
log.debug("Added stderr logging handler to logger: %s", __name__)
| 5,350,579 |
def polynomial_kernel(X, Y, c, p):
"""
Compute the polynomial kernel between two matrices X and Y::
K(x, y) = (<x, y> + c)^p
for each pair of rows x in X and y in Y.
Args:
X - (n, d) NumPy array (n datapoints each with d features)
Y - (m, d) NumPy array (m datapoints each with d features)
c - a coefficient to trade off high-order and low-order terms (scalar)
p - the degree of the polynomial kernel
Returns:
kernel_matrix - (n, m) Numpy array containing the kernel matrix
"""
# YOUR CODE HERE
# raise NotImplementedError
kernel_matrix = (np.matmul(X, Y.T) + c) ** p
return kernel_matrix
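
# --- Hedged usage sketch (added for illustration, not part of the original source) ---
# (<x, y> + c)^p with c=1, p=2: both rows of _X have inner product 1 with the single
# row of _Y, so every entry is (1 + 1)^2 = 4.
import numpy as np
_X = np.array([[1.0, 0.0], [0.0, 1.0]])
_Y = np.array([[1.0, 1.0]])
print(polynomial_kernel(_X, _Y, c=1, p=2))  # prints [[4.] [4.]] (one row per row of _X)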
| 5,350,580 |
def slug_from_iter(it, max_len=128, delim='-'):
"""Produce a slug (short URI-friendly string) from an iterable (list, tuple, dict)
>>> slug_from_iter(['.a.', '=b=', '--alpha--'])
'a-b-alpha'
"""
nonnull_values = [str(v) for v in it if v or ((isinstance(v, (int, float, Decimal)) and str(v)))]
return slugify(delim.join(shorten(v, max_len=int(float(max_len) / len(nonnull_values))) for v in nonnull_values), word_boundary=True)
| 5,350,581 |
def modulusOfRigidity(find="G", printEqs=True, **kwargs):
"""
Defines the slope of the stress-strain curve up to the elastic limit of the material.
    For most ductile materials it is the same in compression as in tension. Not true for cast irons, other brittle materials, or magnesium.
Where:
E = modulus of elasticity
v = poisson's ratio
Material v
Aluminum 0.34
Copper 0.35
Iron 0.28
Steel 0.28
Magnesium 0.33
Titanium 0.34
"""
eq = list()
eq.append("Eq(G, E / (2*(1+v))")
return solveEqs(eq, find=find, printEq=printEqs, **kwargs)
| 5,350,582 |
def _splitcmdline(cmdline):
"""
Parses the command-line and returns the tuple in the form
(command, [param1, param2, ...])
>>> splitcmdline('c:\\someexecutable.exe')
('c:\\\\someexecutable.exe', [])
>>> splitcmdline('C:\\Program Files\\Internet Explorer\\iexplore.exe')
('C:\\\\Program Files\\\\Internet Explorer\\\\iexplore.exe', [])
>>> splitcmdline('c:\\someexecutable.exe "param 1" param2')
('c:\\\\someexecutable.exe', ['param 1', 'param2'])
>>> splitcmdline(r'c:\\program files\\executable.exe')
('c:\\\\program', ['files\\\\executable.exe'])
>>> splitcmdline(r'"c:\\program files\\executable.exe" param1 param2 ')
('c:\\\\program files\\\\executable.exe', ['param1', 'param2'])
"""
from pyparsing import Combine, OneOrMore, Word, Optional, Literal, LineEnd, CharsNotIn, quotedString, delimitedList
# Replace tabs and newlines with spaces
cmdline = cmdline.strip(' \r\n\t').replace('\t', ' ').replace('\r', ' ').replace('\n', ' ')
"""
_nonspace = "".join( [ c for c in printables if c not in (" ", "\t") ] )
_spacesepitem = Combine(OneOrMore(CharsNotInWord(_nonspace) +
Optional( Word(" \t") +
~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
"""
spaceSeparatedList = delimitedList(
Optional( quotedString | CharsNotIn(" \t"), default=""), delim=" "
).setName("spaceSeparatedList")
    # Minimal completion consistent with the doctests above: drop empty tokens from
    # repeated spaces, strip the quotes that quotedString leaves on quoted params,
    # and return (command, [param1, param2, ...]).
    tokens = [tok for tok in spaceSeparatedList.parseString(cmdline) if tok]
    tokens = [tok[1:-1] if len(tok) > 1 and tok[0] == tok[-1] and tok[0] in '"\'' else tok
              for tok in tokens]
    if not tokens:
        return '', []
    return tokens[0], tokens[1:]
| 5,350,583 |
def read_xml_files(region, paths, writer):
"""Read all XML files for the specified region
"""
logger.info('Started reading XML files')
if region in ['belgium', 'brussels']:
read_region(
ET.parse(paths['BrusselsMunicipality']).getroot(),
ET.parse(paths['BrusselsPostalinfo']).getroot(),
ET.parse(paths['BrusselsStreetname']).getroot(),
ET.iterparse(paths['BrusselsAddress']),
'BE-BRU',
writer
)
logger.info('Read the Brussels addresses')
if region in ['belgium', 'flanders']:
read_region(
ET.parse(paths['FlandersMunicipality']).getroot(),
ET.parse(paths['FlandersPostalinfo']).getroot(),
ET.parse(paths['FlandersStreetname']).getroot(),
ET.iterparse(paths['FlandersAddress']),
'BE-VLG',
writer
)
logger.info('Read the Flanders addresses')
if region in ['belgium', 'wallonia']:
read_region(
ET.parse(paths['WalloniaMunicipality']).getroot(),
ET.parse(paths['WalloniaPostalinfo']).getroot(),
ET.parse(paths['WalloniaStreetname']).getroot(),
ET.iterparse(paths['WalloniaAddress']),
'BE-WAL',
writer
)
logger.info('Read the Wallonia addresses')
| 5,350,584 |
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigType) -> bool:
"""Unload FRITZ!Box Tools config entry."""
hass.services.async_remove(DOMAIN, SERVICE_RECONNECT)
for domain in SUPPORTED_DOMAINS:
await hass.config_entries.async_forward_entry_unload(entry, domain)
del hass.data[DOMAIN]
return True
| 5,350,585 |
def cli(ctx, configfile, configclass):
"""CattleDB Command Line Tool"""
ctx.ensure_object(dict)
if configfile:
_imported = import_config_file(configfile)
if configclass:
config = getattr(_imported, configclass)
else:
config = _imported
click.echo("Using Config: {}".format(configfile))
else:
config = _default_config
click.echo("Using Default Config")
con = CDBClient.from_config(config)
ctx.obj["client"] = con
ctx.obj["db"] = con.db
ctx.obj["config"] = config
| 5,350,586 |
def test_valid_targetapps():
"""
Tests that the install.rdf contains only valid entries for target
applications.
"""
results = _do_test("tests/resources/targetapplication/pass.xpi",
targetapp.test_targetedapplications,
False,
True)
supports = results.get_resource("supports")
print supports
assert "firefox" in supports and "mozilla" in supports
assert len(supports) == 2
supported_versions = results.supported_versions
print supported_versions
assert (supported_versions['{ec8030f7-c20a-464f-9b0e-13a3a9e97384}'] ==
['3.6', '3.6.4', '3.6.*'])
| 5,350,587 |
def queryMaxTransferOutAmount(asset, isolatedSymbol="", recvWindow=""):
"""# Query Max Transfer-Out Amount (USER_DATA)
#### `GET /sapi/v1/margin/maxTransferable (HMAC SHA256)`
### Weight:
5
### Parameters:
Name |Type |Mandatory |Description
--------|--------|--------|--------
asset |STRING |YES |
isolatedSymbol |STRING |NO |isolated symbol
    recvWindow |LONG |NO |The value cannot be greater than 60000
timestamp |LONG |YES |
"""
endpoint = '/sapi/v1/margin/maxTransferable'
params = {
"asset": asset
}
if isolatedSymbol: params["isolatedSymbol"] = isolatedSymbol
if recvWindow: params["recvWindow"] = recvWindow
return getbinancedata_sig(endpoint, params)
| 5,350,588 |
def IteratePriorityQueueEntry(root, element_type, field_name):
""" iterate over a priority queue as defined with struct priority_queue from osfmk/kern/priority_queue.h
root - value : Value object for the priority queue
element_type - str : Type of the link element
field_name - str : Name of the field in link element's structure
returns:
A generator does not return. It is used for iterating
        value : an object that is of type (element_type). Always a pointer object
"""
def _make_pqe(addr):
return value(root.GetSBValue().CreateValueFromExpression(None,'(struct priority_queue_entry *)'+str(addr)))
queue = [unsigned(root.pq_root_packed) & ~3]
while len(queue):
elt = _make_pqe(queue.pop())
while elt:
yield containerof(elt, element_type, field_name)
addr = unsigned(elt.child)
if addr: queue.append(addr)
elt = elt.next
| 5,350,589 |
def remove_layer(nn, del_idx, additional_edges, new_strides=None):
""" Deletes the layer indicated in del_idx and adds additional_edges specified
in additional_edges. """
layer_labels, num_units_in_each_layer, conn_mat, mandatory_child_attributes = \
get_copies_from_old_nn(nn)
# First add new edges to conn_mat and remove edges to and from del_idx
for add_edge in additional_edges:
conn_mat[add_edge[0], add_edge[1]] = 1
conn_mat[del_idx, :] = 0
conn_mat[:, del_idx] = 0
# Now reorder everything so that del_idx is at the end
all_idxs = list(range(len(layer_labels)))
new_order = all_idxs[:del_idx] + all_idxs[del_idx+1:] + [del_idx]
# Now reorder everything so that the layer to be remove is at the end
layer_labels = reorder_list_or_array(layer_labels, new_order)
num_units_in_each_layer = reorder_list_or_array(num_units_in_each_layer, new_order)
conn_mat = reorder_rows_and_cols_in_matrix(conn_mat, new_order)
# remove layer
layer_labels = layer_labels[:-1]
num_units_in_each_layer = num_units_in_each_layer[:-1]
conn_mat = conn_mat[:-1, :-1]
# Strides for a convolutional network
if nn.nn_class == 'cnn':
new_strides = new_strides if new_strides is not None else \
mandatory_child_attributes.strides
mandatory_child_attributes.strides = reorder_list_or_array(
new_strides, new_order)
mandatory_child_attributes.strides = mandatory_child_attributes.strides[:-1]
return get_new_nn(nn, layer_labels, num_units_in_each_layer, conn_mat,
mandatory_child_attributes)
| 5,350,590 |
def brutekeys(pinlength, keys="0123456789", randomorder=False):
"""
    Returns a list of all possibilities to try, based on the PIN length and keys given.
Yeah, lots of slow list copying here, but who cares, it's dwarfed by the actual guessing.
"""
allpossible = list(itertools.imap(lambda x: "".join(x),itertools.product(keys, repeat=pinlength)))
if randomorder:
random.shuffle(allpossible)
return allpossible
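
# --- Hedged usage sketch (added for illustration, not part of the original source) ---
# Python 2 only (matches itertools.imap above): every 2-digit PIN over a reduced keypad.
print(brutekeys(2, keys="012"))  # ['00', '01', '02', '10', '11', '12', '20', '21', '22']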
| 5,350,591 |
def return_figures():
"""Creates four plotly visualizations
Args:
None
Returns:
        list (dict): list containing the plotly visualization
"""
df = query_generation('DE', 14)
graph_one = []
x_val = df.index
for energy_source in df.columns:
y_val = df[energy_source].tolist()
graph_one.append(
go.Scatter(
x=x_val,
y=y_val,
mode='lines',
name=energy_source,
stackgroup = 'one'
)
)
layout_one = dict(title='Generation in Germany during the last 14 days',
xaxis=dict(title='Date'),
yaxis=dict(title='Net Generation (MW)'),
colorway = ['#008000', '#ffa500', '#ff0000', '#000080', '#008080', '#808080', '#a52a2a', '#1e90ff', '#ffc40c'],
plot_bgcolor = '#E8E8E8',
hovermode = 'closest',
hoverdistance = -1,
height = 500
)
# append all charts to the figures list
figures = []
figures.append(dict(data=graph_one, layout=layout_one))
return figures
| 5,350,592 |
def create_central_storage_strategy():
"""Create a CentralStorageStrategy, using a GPU if it is available."""
compute_devices = ['cpu:0', 'gpu:0'] if (
tf.config.list_logical_devices('GPU')) else ['cpu:0']
return tf.distribute.experimental.CentralStorageStrategy(
compute_devices, parameter_device='cpu:0')
| 5,350,593 |
def ToolStep(step_class, os, **kwargs):
"""Modify build step arguments to run the command with our custom tools."""
if os.startswith('win'):
command = kwargs.get('command')
env = kwargs.get('env')
if isinstance(command, list):
command = [WIN_BUILD_ENV_PATH] + command
else:
command = WIN_BUILD_ENV_PATH + ' ' + command
if env:
env = dict(env) # Copy
else:
env = {}
env['BOTTOOLS'] = WithProperties('%(workdir)s\\tools\\buildbot\\bot_tools')
kwargs['command'] = command
kwargs['env'] = env
return step_class(**kwargs)
| 5,350,594 |
def get_security_groups():
"""
Gets all available AWS security group names and ids associated with an AWS role.
Return:
sg_names (list): list of security group id, name, and description
"""
sg_groups = boto3.client('ec2', region_name='us-west-1').describe_security_groups()['SecurityGroups']
sg_names = []
for sg in sg_groups:
sg_names.append(sg['GroupId'] + ': ' + sg['GroupName'] + ': ' + sg['Description'])
return sg_names
| 5,350,595 |
def traverseTokens(tokens, lines, callback):
"""Traverses a list of tokens to identify functions. Then uses a callback
to perform some work on the functions. Each function seen gets a new State
object created from the given callback method; there is a single State for
global code which is given None in the constructor. Then, each token seen
is passed to the 'add' method of the State. This is used by the State to
either calculate sizes, print tokens, or detect dependencies. The 'build'
method is called at the end of the function to create a result object that
is returned as an array at the end.
Arguments:
tokens - An array of Tokens.
lines - An array of compiled code lines.
callback - A constructor that returns a state object. It takes a start
token or None if outside a function. It has two member
functions:
add - accepts the current token and the token's index.
build - returns an object to be added to the results.
Returns:
an array of State objects in a format controlled by the callback.
"""
ret = []
state = callback(None, None)
# Create a token iterator. This is used to read tokens from the array. We
# cannot use a for loop because the iterator is passed to readFunction.
tokenIter = enumerate(tokens)
try:
while True:
index, token = next(tokenIter)
if isFunction(token, lines):
ret += readFunction(tokenIter, token, index, lines, callback)
else:
state.add(token, index)
except StopIteration:
pass
temp = state.build()
if temp:
ret.append(temp)
return ret
| 5,350,596 |
def test_string_representation():
"""
Check unit string representation.
"""
pc = Unit("pc")
Myr = Unit("Myr")
speed = pc / Myr
dimensionless = Unit()
assert_true(str(pc) == "pc")
assert_true(str(Myr) == "Myr")
assert_true(str(speed) == "pc/Myr")
assert_true(repr(speed) == "pc/Myr")
assert_true(str(dimensionless) == "dimensionless")
| 5,350,597 |
def pollutant():
"""
Water treatment example from BHH2, Ch 5, Question 19.
Description
-----------
The data are from the first 8 rows of the pollutant water treatment example
n the book by Box, Hunter and Hunter, 2nd edition, Chapter 5, Question 19.
The 3 factors (C, T, and S) are in coded units where:
C = -1 is chemical brand A; C = +1 is chemical brand B
T = -1 is 72F for treatment temperature; T = +1 is 100F for the temperature
S = -1 is No stirring; S = +1 is with fast stirring
The outcome variable is:
y = the pollutant amount in the discharge [lb/day].
The aim is to find treatment conditions that MINIMIZE the amount of pollutant
discharged each day, where the limit is 10 lb/day.
Dimensions
----------
A data frame containing 8 observations of 4 variables (C, S, T and y).
Source
------
Box, G. E. P. and Hunter, J. S. and Hunter, W. G.r, Statistics for
Experimenters, Wiley, 2nd edition, Chapter 5, Question 19, page 232.
Example
-------
"""
| 5,350,598 |
def publications_by_country(papers: dict[str, Any]) -> dict[Location, int]:
"""returns number of published papers per country"""
countries_publications = {}
for paper in papers:
participant_countries = {Location(city=None, state=None, country=location.country) \
for location in paper.locations}
for country in participant_countries:
try:
countries_publications[country] += 1
except KeyError:
countries_publications[country] = 1
return (dict(sorted(countries_publications.items(), key=lambda x: x[1], reverse=True)))
| 5,350,599 |