content (stringlengths 22–815k) | id (int64 0–4.91M)
---|---|
def walk(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns the Walk task."""
# physics = Physics.from_xml_string(*get_model_and_assets())
physics = SuperballContactSimulation("tt_ntrt_on_ground.xml")
task = PlanarSuperball(move_speed=_WALK_SPEED, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(
physics, task, time_limit=time_limit, control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
| 5,347,400 |
def confound_isolating_sampling(y, z, random_seed=None, min_sample_size=None,
n_remove=None):
"""
Sampling method based on the 'Confound isolating cross-validation'
technique.
# TODO Reference to the paper
:param y: numpy.array, shape (n_samples), target
:param z: numpy.array, shape (n_samples), confound
:param random_seed: int
Random seed used to initialize the pseudo-random number generator.
Can be any integer between 0 and 2**32 - 1 inclusive. Default is None
:param min_sample_size: int
Minimum sample size (in samples) to be reached, default is 10% of the
data
:param n_remove: int,
number of the samples to be removed on each iteration of sampling,
default is 4
:return:
sampled_index,
mutual_information
correlation
"""
sampled_index = list(range(0, y.shape[0]))
mutual_information = []
correlation = []
index_to_remove = []
n_remove = _ensure_int_positive(n_remove, default=4)
min_sample_size = _ensure_int_positive(min_sample_size, default=10)
min_size = int(y.shape[0] * min_sample_size / 100)
while y.shape[0] > min_size:
# remove subject from the previous iteration
y = np.delete(y, index_to_remove, axis=0)
z = np.delete(z, index_to_remove, axis=0)
sampled_index = np.delete(sampled_index, index_to_remove, axis=0)
# control the pseudo random number generator
if random_seed is None:
prng = None
else:
prng = np.random.RandomState(seed=random_seed)
# return indexes
index_to_remove = confound_isolating_index_2remove(y, z,
n_remove=n_remove,
prng=prng)
# The case when target and confound are equal
if np.all(y == z):
mutual_information.append('NaN')
else:
mutual_information.append(mutual_kde(y.astype(float),
z.astype(float)))
correlation.append(np.corrcoef(y.astype(float), z.astype(float))[0, 1])
# sampled_set = {'sampled_index': array_data[:, 2],
# 'mutual_information': mi_list,
# 'correlation': corr_list}
# sampled_index = array_data[:, 2]
# return Bunch(**sampled_set)
return sampled_index, mutual_information, correlation
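# Hedged usage sketch, not part of the original module: it assumes the helpers
# referenced above (confound_isolating_index_2remove, mutual_kde,
# _ensure_int_positive) are importable and that numpy is available as np.
def _demo_confound_isolating_sampling():
    # Build a synthetic target and a correlated confound for illustration only.
    rng = np.random.RandomState(42)
    y_demo = rng.normal(size=200)
    z_demo = 0.5 * y_demo + rng.normal(size=200)
    kept_idx, mi_trace, corr_trace = confound_isolating_sampling(
        y_demo, z_demo, random_seed=42, min_sample_size=20, n_remove=4)
    # kept_idx holds the retained sample indices; mi_trace and corr_trace record
    # how the target/confound dependence shrinks over the iterations.
    return kept_idx, mi_trace, corr_trace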
| 5,347,401 |
def encode_direct(list_a: list):
"""Problem 13: Run-length encoding of a list (direct solution).
Parameters
----------
list_a : list
The input list
Returns
-------
list of list
A run-length encoded list
Raises
------
TypeError
If the given argument is not of `list` type
"""
if not isinstance(list_a, list):
raise TypeError('The argument given is not of `list` type.')
if len(list_a) <= 1:
# In case of empty or one-element list return.
return list_a
encoded, current, count = [], list_a[0], 1
for element in list_a[1:]:
if current != element:
# If current element does not match the recorded current
# append the count to the list
encoded.append(current if count == 1 else [count, current])
current, count = element, 1
else:
# If another same element is found, increase counter
count += 1
encoded.append(current if count == 1 else [count, current])
return encoded
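# Illustrative check (added, not from the original source): single elements are
# kept as-is while runs are emitted as [count, element] pairs.
assert encode_direct(['a', 'a', 'b', 'c', 'c', 'c']) == [[2, 'a'], 'b', [3, 'c']]
assert encode_direct([]) == []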
| 5,347,402 |
def main():
"""
This main function is used to run "Orteil Idle Game Maker Code Generator" tool.
:return: None
"""
print("Welcome to 'Orteil Idle Game Maker Code Generator' tool by GlobalCreativeCommunityFounder.")
print("This tool will quickly generate Orteil Idle Game Maker code to shorten your time taken in developing ")
print("an idle game using the tool 'Orteil Idle Game Maker'.")
print("Enter 'Y' for yes.")
print("Enter anything else for no.")
continue_using: str = input("Do you want to continue using the tool 'Orteil Idle Game Maker Code Generator'? ")
while continue_using == "Y":
clear()
game_script: str = "" # initial value
print("Welcome to 'Let's make a game!' section!")
game_name: str = input("Please enter name of game: ")
author_name: str = input("Please enter your name: ")
description: str = input("Please enter description of the game: ")
game_script += lets_make_a_game(game_name, author_name, description)
clear()
print("Welcome to 'Settings' section!")
background: str = input("Please enter link to background image file: ")
building_cost_increase: str = input("Please enter building cost increase in the game: ")
building_cost_refund: str = input("Please enter building cost refund in the game: ")
spritesheet: str = input("Please enter spritesheet details: ")
stylesheet: str = input("Please enter link to stylesheet file: ")
game_script += settings(background, building_cost_increase, building_cost_refund, spritesheet, stylesheet)
clear()
print("Welcome to 'Layout' section!")
game_script += """
Layout
"""
num_box_keys: int = int(input("How many box keys do you want? "))
if num_box_keys > 0:
for i in range(num_box_keys):
box_key_name: str = input("Please enter name of box key: ")
contains: str = input("What sections does this box key contain? ")
header: str = input("What would you put in the header of this box key? ")
game_script += """
*""" + str(box_key_name) + """
contains:""" + str(contains) + """
header:""" + str(header) + """
"""
else:
game_script += """
use default
"""
clear()
print("Welcome to 'Buttons' section!")
button_item_key: str = input("Please enter item key for the button: ")
button_name: str = input("Please enter name of button: ")
description: str = input("Please enter button description: ")
on_click_effects: list = [] # initial value
print("Enter 'Y' for yes.")
print("Enter anything else for no.")
add_on_click_effect: bool = input("Do you want to add an 'on click' effect to this button? ") == "Y"
while add_on_click_effect:
on_click_effect: str = input("What 'on click' effect do you want to add to this button? ")
on_click_effects.append(on_click_effect)
print("Enter 'Y' for yes.")
print("Enter anything else for no.")
add_on_click_effect = input("Do you want to add an 'on click' effect to this button? ") == "Y"
icon: str = input("Please enter link to this button's icon: ")
button_class: str = input("Please enter button class: ")
icon_class: str = input("Please enter button item class: ")
tooltip_origin: str = input("Please enter tooltip origin of this button: ")
tooltip_class: str = input("Please enter tooltip class of this button: ")
game_script += """
Buttons
*""" + str(button_item_key) + """
name:""" + str(button_name) + """
description:""" + str(description) + """
"""
for effect in on_click_effects:
game_script += """
on click:""" + str(effect) + """
"""
game_script += """
icon:""" + str(icon) + """
no text
class:""" + str(button_class) + """
icon class:""" + str(icon_class) + """
tooltip origin:""" + str(tooltip_origin) + """
tooltip class:""" + str(tooltip_class) + """
"""
clear()
print("Welcome to 'Resources' section!")
game_script += """
Resources
"""
num_resources: int = int(input("How many resources do you want in your game? "))
for i in range(num_resources):
print("You are required to enter information about the resource you want to add.")
resource_item_key: str = input("Please enter item key for the resource: ")
resource_name: str = input("Please enter name of resource: ")
description = input("Please enter description of resource: ")
icon = input("Please enter link to the icon of the resource: ")
resource_class: str = input("Please enter class of the resource: ")
additional_information: str = input("Enter additional information (e.g., show earned) about the "
"resource: ")
game_script += """
*""" + str(resource_item_key) + """
name:""" + str(resource_name) + """
desc:""" + str(description) + """
icon:""" + str(icon) + """
class:""" + str(resource_class) + """
""" + str(additional_information) + """
"""
game_script += """
Shinies
"""
clear()
print("Welcome to 'Shinies' section!")
num_shinies: int = int(input("How many shinies do you want in your game? "))
for i in range(num_shinies):
print("You are required to enter information about the shiny you want to add.")
shiny_item_key: str = input("Please enter item key for the shiny: ")
on_click_effects = [] # initial value
add_on_click_effect: bool = input("Do you want to add an 'on click' effect to this shiny? ") == "Y"
while add_on_click_effect:
on_click_effect: str = input("What 'on click' effect do you want to add to this shiny? ")
on_click_effects.append(on_click_effect)
print("Enter 'Y' for yes.")
print("Enter anything else for no.")
add_on_click_effect = input("Do you want to add an 'on click' effect to this shiny? ") == "Y"
movement: str = input("Please describe the movement of the shiny: ")
frequency: str = input("Please enter the frequency of this shiny: ")
frequency_variation: str = input("Please enter the frequency variation of this shiny: ")
icon = input("Please enter the link to the icon of this shiny: ")
shiny_class: str = input("Please enter the class of this shiny: ")
game_script += """
*""" + str(shiny_item_key) + """
movement:""" + str(movement) + """
frequency:""" + str(frequency) + """
frequency variation:""" + str(frequency_variation) + """
icon:""" + str(icon) + """
class:""" + str(shiny_class) + """
"""
for effect in on_click_effects:
game_script += """
on click:""" + str(effect) + """
"""
game_script += """
Buildings
*TEMPLATE
on click:anim glow
"""
clear()
print("Welcome to 'Buildings' section!")
num_buildings: int = int(input("How many buildings do you want to have in your game? "))
for i in range(num_buildings):
print("You are required to enter information about the building you want to add.")
building_item_key: str = input("Please enter item key for the building: ")
building_name: str = input("Please enter name of building: ")
description = input("Please enter building description: ")
icon = input("Please enter link to this building's icon: ")
cost: str = input("Please enter cost of the building: ")
on_tick_effects: list = [] # initial value
add_on_tick_effect: bool = input("Do you want to add an 'on tick' effect to this building? ") == "Y"
while add_on_tick_effect:
on_tick_effect: str = input("What 'on tick' effect do you want to add to this building? ")
on_tick_effects.append(on_tick_effect)
print("Enter 'Y' for yes.")
print("Enter anything else for no.")
add_on_tick_effect = input("Do you want to add an 'on tick' effect to this building? ") == "Y"
game_script += """
*""" + str(building_item_key) + """
name:""" + str(building_name) + """
desc:""" + str(description) + """
icon:""" + str(icon) + """
cost:""" + str(cost) + """
"""
for effect in on_tick_effects:
game_script += """
on tick:""" + str(effect) + """
"""
print("Enter 'Y' for yes.")
print("Enter anything else for no.")
has_requirements: bool = input("Does this building have requirements to unlock it? ") == "Y"
if not has_requirements:
game_script += """
unlocked
"""
else:
requirements: str = input("What requirements does this building have? ")
game_script += """
req:""" + str(requirements) + """
"""
game_script += """
Upgrades
*TEMPLATE
on click:anim glow
"""
clear()
print("Welcome to 'Upgrades' section!")
num_upgrades: int = int(input("How many upgrades do you want to have in your game? "))
for i in range(num_upgrades):
print("You are required to enter information about the upgrade you want to add.")
upgrade_item_key: str = input("Please enter item key for the upgrade: ")
upgrade_name: str = input("Please enter name of upgrade: ")
description = input("Please enter description for this upgrade: ")
icon = input("Please enter link to this upgrade's icon: ")
cost = input("Please enter this upgrade's cost: ")
passive_effects: list = [] # initial value
print("Enter 'Y' for yes.")
print("Enter anything else for no.")
add_passive_effect: bool = input("Do you want to add a passive effect to this upgrade? ") == "Y"
while add_passive_effect:
passive_effect: str = input("Please enter a passive effect you want to add to this upgrade:")
passive_effects.append(passive_effect)
print("Enter 'Y' for yes.")
print("Enter anything else for no.")
add_passive_effect = input("Do you want to add a passive effect to this upgrade? ") == "Y"
requirements: str = input("Please enter the requirements to unlock this upgrade: ")
game_script += """
*""" + str(upgrade_item_key) + """
name:""" + str(upgrade_name) + """
desc:""" + str(description) + """
icon:""" + str(icon) + """
cost:""" + str(cost) + """
req:""" + str(requirements) + """
"""
for effect in passive_effects:
game_script += """
passive:""" + str(effect) + """
"""
game_script += """
Achievements
*TEMPLATE
on click:anim glow
"""
clear()
print("Welcome to 'Achievements' section!")
num_achievements: int = int(input("How many achievements do you want to have in your game? "))
for i in range(num_achievements):
print("You are required to enter information about the achievement you want to add.")
achievement_item_key: str = input("Please enter item key for the achievement: ")
achievement_name: str = input("Please enter name of achievement: ")
description = input("Please enter description for this achievement: ")
icon = input("Please enter link to this achievement's icon: ")
requirements: str = input("Please enter the requirements to get this achievement: ")
game_script += """
*""" + str(achievement_item_key) + """
name:""" + str(achievement_name) + """
desc:""" + str(description) + """
icon:""" + str(icon) + """
req:""" + str(requirements) + """
"""
game_file = open(str(game_name) + ".txt", "w+")
game_file.write(game_script)
game_file.close()
print("'Orteil Idle Game Maker' code is successfully generated! The code is in the file '"
+ str(game_name) + ".txt'.")
print("Enter 'Y' for yes.")
print("Enter anything else for no.")
continue_using = input("Do you want to continue using the tool 'Orteil Idle Game Maker Code Generator'? ")
sys.exit()
| 5,347,403 |
def handler500(request):
"""
HTTP Error 500 Internal Server Error
"""
return HttpResponse('<h1>HTTP Error 500 Internal Server Error</h1>', status=500)
| 5,347,404 |
def increase_line_complexity(linestring, n_points):
"""
linestring (shapely.geometry.linestring.LineString):
n_points (int): target number of points
"""
# or to get the distances closest to the desired one:
# n = round(line.length / desired_distance_delta)
distances = np.linspace(0, linestring.length, n_points)
points = [linestring.interpolate(distance) for distance in distances]
return shapely.geometry.linestring.LineString(points)
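# Illustrative usage (added as a sketch): assumes shapely and numpy are imported
# as used above. A 2-point segment is resampled into 5 evenly spaced vertices.
_demo_line = shapely.geometry.LineString([(0.0, 0.0), (10.0, 0.0)])
_dense_line = increase_line_complexity(_demo_line, n_points=5)
# list(_dense_line.coords) -> [(0.0, 0.0), (2.5, 0.0), (5.0, 0.0), (7.5, 0.0), (10.0, 0.0)]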
| 5,347,405 |
def get_topic_prevelance(doc_topic_matrix, num_topics, total_num_docs):
"""Input: doc_topic_matrix, a numpy nd array where each row represents a doc, and each collumn is the assocication
of the doc with a topic. Num_topics and integer holding the number of topics. Total_num_docs is an int holding the
number of docs in the corpus.
Output: a list where index i represents the prevelance of topic i within the corpus."""
topic_prev = [0] * num_topics
for i in range(0, num_topics):
topic_doc = doc_topic_matrix[:,i]
for j in range(0, len(topic_doc)):
if topic_doc[j] > TOPIC_PRESSENCE_THRESHOLD:
topic_prev[i] +=1
topic_prev[i] = topic_prev[i]/total_num_docs
return topic_prev
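# Hedged example (added): TOPIC_PRESSENCE_THRESHOLD is a module-level constant
# not shown here; the walkthrough below assumes it is 0.1.
# doc_topic = np.array([[0.80, 0.05],
#                       [0.30, 0.60],
#                       [0.05, 0.90]])
# get_topic_prevelance(doc_topic, num_topics=2, total_num_docs=3)
# -> [0.666..., 0.666...]  (each topic exceeds 0.1 in two of the three docs)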
| 5,347,406 |
async def drones_byDroneId_delete(request, droneId):
"""
Remove a drone from the fleet
It is handler for DELETE /drones/<droneId>
"""
return handlers.drones_byDroneId_deleteHandler(request, droneId)
| 5,347,407 |
def generate_sql_integration_data(sql_test_backends):
"""Populate test data for SQL backends for integration testing."""
sql_schema_info = get_sqlalchemy_schema_info()
vertex_values, edge_values, uuid_to_class_name = get_integration_data()
# Represent all edges as foreign keys
uuid_to_foreign_key_values = {}
for edge_name, edge_values in six.iteritems(edge_values):
for edge_value in edge_values:
from_classname = uuid_to_class_name[edge_value["from_uuid"]]
edge_field_name = "out_{}".format(edge_name)
join_descriptor = sql_schema_info.join_descriptors[from_classname][edge_field_name]
is_from_uuid = join_descriptor.from_column == "uuid"
is_to_uuid = join_descriptor.to_column == "uuid"
if is_from_uuid == is_to_uuid:
raise NotImplementedError(
"Exactly one of the join columns was expected to"
"be uuid. found {}".format(join_descriptor)
)
if is_from_uuid:
existing_foreign_key_values = uuid_to_foreign_key_values.setdefault(
edge_value["to_uuid"], {}
)
if join_descriptor.to_column in existing_foreign_key_values:
raise NotImplementedError(
"The SQL backend does not support many-to-many "
"edges. Found multiple edges of class {} from "
"vertex {}.".format(edge_name, edge_value["to_uuid"])
)
existing_foreign_key_values[join_descriptor.to_column] = edge_value["from_uuid"]
elif is_to_uuid:
existing_foreign_key_values = uuid_to_foreign_key_values.setdefault(
edge_value["from_uuid"], {}
)
if join_descriptor.from_column in existing_foreign_key_values:
raise NotImplementedError(
"The SQL backend does not support many-to-many "
"edges. Found multiple edges of class {} to "
"vertex {}.".format(edge_name, edge_value["to_uuid"])
)
existing_foreign_key_values[join_descriptor.from_column] = edge_value["to_uuid"]
# Insert all the prepared data into the test database
for sql_test_backend in six.itervalues(sql_test_backends):
for vertex_name, insert_values in six.iteritems(vertex_values):
table = sql_schema_info.vertex_name_to_table[vertex_name]
table.delete(bind=sql_test_backend.engine)
table.create(bind=sql_test_backend.engine)
for insert_value in insert_values:
foreign_key_values = uuid_to_foreign_key_values.get(insert_value["uuid"], {})
all_values = merge_non_overlapping_dicts(insert_value, foreign_key_values)
sql_test_backend.engine.execute(table.insert().values(**all_values))
return sql_schema_info
| 5,347,408 |
def p4( command ):
"""
Run a perforce command line instance and marshal the
result as a list of dictionaries.
"""
commandline = 'p4 %s -G %s' % (P4_PORT_AND_USER, command)
logging.debug( '%s' % commandline )
stream = os.popen( commandline, 'rb' )
entries = []
try:
while 1:
entry = marshal.load(stream)
entries.append(entry)
except EOFError:
pass
code = stream.close()
if code is not None:
raise IOError( "Failed to execute %s: %d" % (commandline, int(code)) )
return entries
| 5,347,409 |
def restart_apps_or_services(app_or_service_names=None):
"""Restart any containers associated with Dusty, or associated with
the provided app_or_service_names."""
if app_or_service_names:
log_to_client("Restarting the following apps or services: {}".format(', '.join(app_or_service_names)))
else:
log_to_client("Restarting all active containers associated with Dusty")
if app_or_service_names:
specs = spec_assembler.get_assembled_specs()
specs_list = [specs['apps'][app_name] for app_name in app_or_service_names if app_name in specs['apps']]
repos = set()
for spec in specs_list:
if spec['repo']:
repos = repos.union(spec_assembler.get_same_container_repos_from_spec(spec))
nfs.update_nfs_with_repos(repos)
else:
nfs.update_nfs_with_repos(spec_assembler.get_all_repos(active_only=True, include_specs_repo=False))
compose.restart_running_services(app_or_service_names)
| 5,347,410 |
def install_script_job(function):
""" Adds a script job for file read and scene opened
"""
kill_script_job(function.__name__)
cmds.scriptJob(event=["NewSceneOpened", function])
cmds.scriptJob(conditionTrue=["readingFile", function])
| 5,347,411 |
def measure_shear_metacal_plus_mof(res, *, s2n_cut, t_ratio_cut):
"""Measure the shear parameters for metacal+MOF.
NOTE: Returns None if nothing can be measured.
Parameters
----------
res : dict
The metacal results.
s2n_cut : float
The cut on `mcal_s2n`. Typically 10.
t_ratio_cut : float
The cut on `mcal_T_ratio`. Typically 0.5.
Returns
-------
g1p : float
The mean 1-component shape for the plus metacal measurement.
g1m : float
The mean 1-component shape for the minus metacal measurement.
g1 : float
The mean 1-component shape for the zero-shear metacal measurement.
g2p : float
The mean 2-component shape for the plus metacal measurement.
g2m : float
The mean 2-component shape for the minus metacal measurement.
g2 : float
The mean 2-component shape for the zero-shear metacal measurement.
"""
def _mask(cat):
return (
(cat['flags'] == 0) &
(cat['mcal_flags'] == 0) &
(cat['mcal_s2n'] > s2n_cut) &
(cat['mcal_T_ratio'] > t_ratio_cut))
msks = {}
for sh in METACAL_TYPES:
logger.debug('%s: %s', sh, res[sh].dtype)
msks[sh] = _mask(res[sh])
if not np.any(msks[sh]):
return None
g1p = res['1p']['mcal_g'][msks['1p'], 0]
g1m = res['1m']['mcal_g'][msks['1m'], 0]
g2p = res['2p']['mcal_g'][msks['2p'], 1]
g2m = res['2m']['mcal_g'][msks['2m'], 1]
g1 = res['noshear']['mcal_g'][msks['noshear'], 0]
g2 = res['noshear']['mcal_g'][msks['noshear'], 1]
return (
np.mean(g1p), np.mean(g1m), np.mean(g1),
np.mean(g2p), np.mean(g2m), np.mean(g2))
| 5,347,412 |
def software_detail(request, context, task_id, vm_id):
""" render the detail of the user page: vm-stats, softwares, and runs """
softwares = model.get_software(task_id, vm_id)
runs = model.get_vm_runs_by_task(task_id, vm_id)
datasets = model.get_datasets_by_task(task_id)
# Construct a dictionary that has the software as a key and as value a list of runs with that software
# Note that we order the list in such a way, that evaluations of a run are right behind that run in the list
# (based on the input_run)
runs_with_input = {} # get the runs which have an input_run_id
for r in runs:
# if we loop once, might as well get the review-info here.
r['review'] = model.get_run_review(r.get("dataset"), vm_id, r.get("run_id"))
if r.get("input_run_id") == 'none':
continue
runs_with_input.setdefault(r.get("input_run_id"), []).append(r)
runs_without_input = [r for r in runs if r.get("input_run_id") == "none"]
runs_by_software = {}
for r in runs_without_input:
runs_by_software.setdefault(r.get("software"), []).append(r)
runs_by_software.setdefault(r.get("software"), []).extend(runs_with_input.pop(r.get("run_id"), []))
for k, v in runs_with_input.items(): # left-over runs_with_input, where the input-run does not exist anymore
for r in v:
runs_by_software.setdefault(r.get("software"), []).append(r)
software = [{
"software": sw,
"runs": runs_by_software.get(sw["id"])
} for sw in softwares]
vm = model.get_vm(vm_id)
context["task"] = model.get_task(task_id)
context["vm_id"] = vm_id
context["vm"] = {"host": vm.host, "user": vm.userName, "password": vm.userPw, "ssh": vm.portSsh, "rdp": vm.portRdp}
context["software"] = software
context["datasets"] = datasets
return render(request, 'tira/software.html', context)
| 5,347,413 |
def test_quota_namespace_count():
"""
Let's make sure we can violate the limit on the number of names in a directory
"""
cluster = mini_cluster.shared_cluster()
try:
fs = cluster.fs
fs.setuser(cluster.superuser)
if fs.exists('/tmp/foo2'):
fs.rmtree('/tmp/foo2')
fs.mkdir("/tmp/foo2", 0777)
# check the get_namespace_quota function
fs.set_namespace_quota("/tmp/foo2", 4)
assert_equals(fs.get_namespace_quota("/tmp/foo2"), 4)
# violate the namespace count
for i in range(3):
f = fs.open('/tmp/foo2/works' + str(i), 'w')
f.write('a')
f.close()
f = fs.open('/tmp/foo2/asdfsdc', 'w')
f.write('a')
assert_raises(IOError, f.close)
# Check summary stats
summary = fs.get_usage_and_quota('/tmp/foo2')
assert_equals(3, summary["file_count"])
assert_equals(4, summary["file_quota"])
assert_equals(None, summary["space_quota"])
assert_true(None is not summary["space_used"])
# make sure the clear works
fs.clear_namespace_quota("/tmp/foo2")
assert_equals(fs.get_namespace_quota("/tmp/foo2"), None)
f = fs.open('/tmp/foo2/asdfsdd', 'w')
f.write('a')
f.close()
finally:
if fs.exists('/tmp/foo2'):
fs.rmtree("/tmp/foo2")
cluster.shutdown()
| 5,347,414 |
def compile_error_curves(dfs, window_size = 60):
"""
takes a dict of track timeseries dfs and
returns a DataFrame in which each column is
the monotonically decreasing version of % error
for one of the dfs in the dict.
useful for summarizing how a bunch of timeseries converge on
some value after a certain point.
params
-----
dfs: (dict of pd.DataFrames)
each df should be a track timeseries, keyed by track id
window_size: (int or float)
size of bins (in seconds)
"""
error_series = []
for i, t in enumerate(dfs):
df = dfs[t]
df_window = df[df['t'] <= window_size].copy()
if df_window is None:
continue
if len(df_window) < 0.8 * window_size:
continue
end_time = df_window.iloc[len(df_window)-1]['t']
#print(t, len(df_window) / 60., end_time)
d = calculate_error_window(df_window).set_index('t')['error_window']
d = d.reindex(np.arange(0, window_size + 1))
d = d.fillna(method='bfill')
d = d.fillna(method='ffill')
d.name = t
error_series.append(d)
return pd.concat(error_series, axis=1)
| 5,347,415 |
def _get_cluster_medoids(idx_interval: np.ndarray, labels: np.ndarray,
pdist: np.ndarray, order_map: np.ndarray) \
-> np.ndarray:
"""
Get the indexes of the cluster medoids.
Parameters
----------
idx_interval : np.ndarray
Embedding indexes.
labels : np.ndarray
Cluster labels.
pdist : np.ndarray
Condensed pairwise distance matrix.
order_map : np.ndarray
Map to convert label indexes to pairwise distance matrix indexes.
Returns
-------
np.ndarray
Array with the indexes of the medoids for each cluster.
"""
medoids, m = [], len(idx_interval)
for start_i, stop_i in _get_cluster_group_idx(labels):
if stop_i - start_i > 1:
row_sum = np.zeros(stop_i - start_i, np.float32)
for row in range(stop_i - start_i):
for col in range(row + 1, stop_i - start_i):
i, j = order_map[start_i + row], order_map[start_i + col]
if i > j:
i, j = j, i
pdist_ij = pdist[m * i + j - ((i + 2) * (i + 1)) // 2]
row_sum[row] += pdist_ij
row_sum[col] += pdist_ij
medoids.append(idx_interval[start_i + np.argmin(row_sum)])
return np.asarray(medoids, dtype=np.int32)
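# Added sanity check (not from the original source) for the condensed-distance
# indexing used above: for m points, pair (i, j) with i < j lives at
# m*i + j - ((i + 2)*(i + 1))//2 in the condensed matrix. Assumes scipy is
# available; the helper and names below are illustrative only.
def _check_condensed_index(n_points: int = 5) -> None:
    from scipy.spatial.distance import pdist as _sp_pdist, squareform as _squareform
    rng = np.random.default_rng(0)
    pts = rng.random((n_points, 2))
    condensed = _sp_pdist(pts)
    square = _squareform(condensed)
    for i in range(n_points):
        for j in range(i + 1, n_points):
            flat = n_points * i + j - ((i + 2) * (i + 1)) // 2
            assert condensed[flat] == square[i, j]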
| 5,347,416 |
def printHexArray(hex_list):
"""
Print a list of hexadecimal byte arrays.
hex_list: a list of hexadecimal byte arrays
"""
for item in hex_list:
print(convertBytesToHexStr(item))
| 5,347,417 |
def cli(package_name):
"""Shows all releases for a package and some info about it.
PACKAGE_NAME Name of the package to fetch info about.
"""
main.main(package_name=package_name)
| 5,347,418 |
def perform_authorization_code_flow():
"""
Performs Spotify's Authorization Code Flow to retrieve an API token.
This uses the OAuth 2.0 protocol, which requires user input and consent.
Output
______
api_key: str
a user's api key with prompted permissions
refresh_token: str
A refresh token used to retrieve future api keys
expires_in: int
the time (in seconds) until the token expires
"""
# create server that runs at the redirect URI. This is used to catch the
# response sent from the OAuth authentication
server = OAuthServer(("127.0.0.1", 8080))
# generate a uri with the required Oauth headers and open it in a webbrowser
auth_uri, code_verifier, state_token = generate_client_PKCE()
webbrowser.open_new_tab(auth_uri)
# parse the spotify API's http response for the User's token
raw_http_response = server.handle_auth().decode("utf-8")
http_headers = parse_spotify_http_response(raw_http_response)
# verify that state tokens match to prevent CSRF
if state_token != http_headers["state"]:
raise StateTokenException
# exchange code for access token. The refresh token is automatically cached
access_token, refresh_token, expires_in = exchange_auth_code(
http_headers["code"], code_verifier
)
return access_token, refresh_token, expires_in
| 5,347,419 |
def fetch_papers(db_manager: DBManager,
base_url: str,
list_url: str,
conf_id: str,
conf_sub_id: str,
conf_name: str) -> None:
""" Fetches the data of all the papers found at list_url and add them to
the database, if the data is valid.
"""
print(conf_name)
print(conf_id, conf_sub_id)
print(list_url)
with urllib.request.urlopen(list_url) as url:
response = url.read()
soup = BeautifulSoup(response, 'html.parser')
papers_meta_list = soup.find('div', {'class', 'container-fluid'}).find_all('li')
page_urls_list = [base_url + m.find('a').get('href') for m in papers_meta_list]
titles_list = [str(m.find('a').string) for m in papers_meta_list]
authors_list = [format_authors(m.find('i').string) for m in papers_meta_list]
conf_date = dateutil.parser.parse(conf_id[-4:] + '-12')
if (len(page_urls_list) == len(titles_list) and
len(page_urls_list) == len(authors_list)):
for i, page_url in enumerate(tqdm(page_urls_list)):
pid = db_manager.create_paper_id(conf_id, conf_sub_id, titles_list[i])
if not db_manager.exists(pid):
try:
with urllib.request.urlopen(page_url) as url:
response2 = url.read()
soup2 = BeautifulSoup(response2, 'html.parser')
pdf_url = soup2.find('div', {'class', 'container-fluid'}).find_all('a')
pdf_url = [p.get('href') for p in pdf_url]
pdf_url = base_url + [p for p in pdf_url if p.lower().endswith('paper.pdf')][0]
summary = soup2.find('div', {'class', 'container-fluid'}).find_all('p')[-1]
summary = flatten_content_list(summary.contents)
print(titles_list[i])
print(authors_list[i])
print(page_urls_list[i])
print(pdf_url)
print(summary)
db_manager.add_paper(
conf_id, conf_sub_id, conf_sub_id.lower() != 'main',
conf_name, titles_list[i], authors_list[i],
page_urls_list[i], pdf_url, conf_date, summary)
except urllib.error.URLError:
print('Skipping {:} - URLError'.format(page_url))
else:
print('Skipping {:} - Exists'.format(page_url))
else:
print('SKIPPING!!! Wrong list sizes. ({:d}, {:d}, {:d})'.format(
len(page_urls_list), len(titles_list), len(authors_list)))
| 5,347,420 |
def calculate_empirical_cdf(variable_values):
"""Calculate numerical cumulative distribution function.
Output tuple can be used to plot empirical cdf of input variable.
Parameters
----------
variable_values : numpy array
Values of a given variable.
Returns
-------
numpy array
Ordered variable values.
numpy array
Accumulated percentages of relative variable values.
"""
# Sort array and calculate accumulated percentages.
values = np.sort(variable_values)
accum_percentages = np.arange(1, len(values) + 1) / float(len(values))
return values, accum_percentages
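# Illustrative usage (added): for three samples the accumulated percentages are
# simply 1/3, 2/3 and 1 over the sorted values.
_vals, _pcts = calculate_empirical_cdf(np.array([3.0, 1.0, 2.0]))
# _vals -> array([1., 2., 3.])
# _pcts -> array([0.333..., 0.666..., 1.])
# A typical plot would be: plt.step(_vals, _pcts, where='post')  # assuming matplotlib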
| 5,347,421 |
def import_reference(filename):
"""
Imports object from reference node filename
:param filename: str
"""
return maya.cmds.file(filename, importReference=True)
| 5,347,422 |
def product_mapping(name, setup, cleanup=True):
"""Obtain the kernel mapping.
:return: Kernel Mapping
:rtype: str
"""
kernel_list_file = (
setup.working_directory
+ os.sep
+ f"{setup.mission_acronym}_{setup.run_type}_"
f"{int(setup.release):02d}.kernel_list"
)
get_map = False
mapping = False
with open(kernel_list_file, 'r') as lst:
for line in lst:
if name in line:
get_map = True
if get_map and "MAPPING" in line:
mapping = line.split("=")[-1].strip()
get_map = False
if not cleanup:
setup = False
#
# If cleanup is not being performed this is an indication that if the kernel
# mapping does not exist, this can be intentional and therefore an error
# does not have to be reported.
#
if not mapping and cleanup:
error_message(
f"{name} does not have mapping on {kernel_list_file}.",
setup=setup,
)
return mapping
| 5,347,423 |
def test_connect():
"""
test disconnecting and then reconnecting
"""
cam = CCDCam("localhost", 7624)
v = cam.disconnect()
assert(v is not None)
v = cam.connect()
assert(v is not None)
t = cam.temperature
assert(t == 20.0)
cam.quit()
| 5,347,424 |
def override_setting(name, default=None, help=u'', category=_CATEGORY):
""" Override a setting.
"""
_attach_callback(
name,
default=default,
help=help,
category=category,
override=True
)
| 5,347,425 |
def generate_example(interactive, config, riotbase):
"""Generate the code of an example application."""
group = "application"
params = load_and_check_application_params(
group, interactive, config, riotbase, in_riot_dir="examples"
)
params["application"]["type"] = "example"
output_dir = get_output_dir(params, group, riotbase, "examples")
check_overwrite(output_dir)
render_application_source(params, group, output_dir)
click.echo(
click.style(
f"Example '{params[group]['name']}' generated "
f"in {output_dir} with success!",
bold=True,
)
)
| 5,347,426 |
def PlotExpectedGains(guess1=20000, guess2=40000):
"""Plots expected gains as a function of bid.
guess1: player1's estimate of the price of showcase 1
guess2: player2's estimate of the price of showcase 2
"""
player1, player2 = MakePlayers()
MakePlots(player1, player2)
player1.MakeBeliefs(guess1)
player2.MakeBeliefs(guess2)
print('Player 1 prior mle', player1.prior.MaximumLikelihood())
print('Player 2 prior mle', player2.prior.MaximumLikelihood())
print('Player 1 mean', player1.posterior.Mean())
print('Player 2 mean', player2.posterior.Mean())
print('Player 1 mle', player1.posterior.MaximumLikelihood())
print('Player 2 mle', player2.posterior.MaximumLikelihood())
player1.PlotBeliefs('price3')
player2.PlotBeliefs('price4')
calc1 = GainCalculator(player1, player2)
calc2 = GainCalculator(player2, player1)
thinkplot.Clf()
thinkplot.PrePlot(num=2)
bids, gains = calc1.ExpectedGains()
thinkplot.Plot(bids, gains, label='Player 1')
print('Player 1 optimal bid', max(zip(gains, bids)))
bids, gains = calc2.ExpectedGains()
thinkplot.Plot(bids, gains, label='Player 2')
print('Player 2 optimal bid', max(zip(gains, bids)))
thinkplot.Save(root='price5',
xlabel='bid ($)',
ylabel='expected gain ($)',
formats=FORMATS)
| 5,347,427 |
def remove_persons_with_few_joints(all_keypoints, min_total_joints=10, min_leg_joints=2, include_head=False):
"""Remove bad skeletons before sending to the tracker"""
good_keypoints = []
for keypoints in all_keypoints:
# include head point or not
total_keypoints = keypoints[5:, 1:] if not include_head else keypoints[:, 1:]
num_valid_joints = sum(total_keypoints!=0)[0] # number of valid joints
num_leg_joints = sum(total_keypoints[-7:-1]!=0)[0] # number of joints for legs
if num_valid_joints >= min_total_joints and num_leg_joints >= min_leg_joints:
good_keypoints.append(keypoints)
return np.array(good_keypoints)
| 5,347,428 |
def message_has_races(message):
"""
Checks to see if a message has a race kwarg.
"""
races = get_races_from_message(message)
return len(races) > 0 and races[0] != ""
| 5,347,429 |
def _find_word(input):
"""
_find_word - function to find words in the input sentence
Inputs:
- input : string
Input sentence
Outputs:
- outputs : list
List of words
"""
# lower case
input = input.lower()
# split by whitespace
input = re.split(pattern = '[\s]+', string = input)
# find words in WORD_POS pattern
valid_word = lambda x: True if re.findall(pattern = r'[a-z]*_[a-z]*', string = x) else False
outputs = []
for token in input:
if valid_word(token):
outputs.append(token.split('_')[0])
return outputs
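# Illustrative check (added): only whitespace-separated tokens matching the
# WORD_POS pattern are kept, and the word part before '_' is returned.
assert _find_word("Hello_NN big world_VB today") == ['hello', 'world']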
| 5,347,430 |
def logp1_r_squared_linreg(y_true, y_pred):
"""Compute custom logp1 r squared ((follows the scipy linear regression implementation of R2).
Parameters
----------
y_true
y_true.
y_pred
y_pred.
Returns
-------
r2
"""
y_pred, _ = tf.split(y_pred, num_or_size_splits=2, axis=2)
x = tf.math.log(y_true + 1.0)
y = tf.math.log(y_pred + 1.0)
# means
xmean = tnp.mean(x)
ymean = tnp.mean(y)
ssxm = tnp.mean(tnp.square(x - xmean))
ssym = tnp.mean(tnp.square(y - ymean))
ssxym = tnp.mean((x - xmean) * (y - ymean))
# R-value
r = ssxym / tnp.sqrt(ssxm * ssym)
return r ** 2
| 5,347,431 |
def to_legacy_data_type(data_type: Union[JsonDict, dt.DataType]) -> JsonDict:
"""
Convert to simple datatypes ("String", "Long", etc) instead of JSON objects,
if possible.
The frontend expects the "type" field for enums and arrays to be lowercase.
"""
if not isinstance(data_type, dt.DataType):
return json.loads(data_type)
if data_type.is_simple:
return data_type.into_simple()
data = data_type.to_dict()
if data["type"] == "Enum":
data["type"] = "enum"
if data["type"] == "Array":
data["type"] = "array"
return data
| 5,347,432 |
def command_list_all_users(context, args) -> None:
"""
List all users via portal/users
"""
client = __get_service_api_client(context, args)
response = client.navigate('portal', 'users').GET()
users = response.DATA.get('UserInfoList', [])
print(f'Found {len(users)} users')
for user in users:
pprint.pprint(user)
| 5,347,433 |
def cluster_build_trees(
identity, set_name, cluster_file=None, click_loguru=None
):
"""Calculate homology clusters, MSAs, trees."""
options = click_loguru.get_global_options()
user_options = click_loguru.get_user_global_options()
parallel = user_options["parallel"]
set_path = Path(set_name)
# read and possibly update proteomes
proteomes_path = set_path / PROTEOMES_FILE
proteomes_in = read_tsv_or_parquet(proteomes_path)
proteomes = sort_proteome_frame(proteomes_in)
if not proteomes_in.equals(proteomes):
logger.info("proteomes sort order changed, writing new proteomes file")
write_tsv_or_parquet(proteomes, proteomes_path)
n_proteomes = len(proteomes)
# read and update fragment ID's
frags = read_tsv_or_parquet(set_path / FRAGMENTS_FILE)
frags["frag.idx"] = pd.array(frags.index, dtype=pd.UInt32Dtype())
frag_frames = {}
for dotpath, subframe in frags.groupby(by=["path"]):
frag_frames[dotpath] = subframe.copy().set_index("frag.orig_id")
arg_list = []
concat_fasta_path = set_path / "proteins.fa"
for i, row in proteomes.iterrows():
arg_list.append((row, concat_fasta_path, frag_frames[row["path"]]))
file_idx = {}
stem_dict = {}
for i, row in proteomes.iterrows():
stem = row["path"]
file_idx[stem] = i
stem_dict[i] = stem
if cluster_file is None:
if concat_fasta_path.exists():
concat_fasta_path.unlink()
if not options.quiet:
logger.info(
f"Renaming fragments and concatenating sequences for {len(arg_list)}"
" proteomes:"
)
for args in arg_list:
write_protein_fasta(args)
del arg_list
cwd = Path.cwd()
os.chdir(set_path)
n_clusters, run_stats, cluster_hist = homology_cluster(
"proteins.fa",
identity,
write_ids=True,
delete=False,
cluster_stats=False,
outname="homology",
click_loguru=click_loguru,
)
log_path = Path("homology.log")
log_dir_path = Path("logs")
log_dir_path.mkdir(exist_ok=True)
shutil.copy2(log_path, "logs/homology.log")
log_path.unlink()
os.chdir(cwd)
logger.info(f"Number of clusters: {n_clusters}")
del cluster_hist
del run_stats
concat_fasta_path.unlink()
else: # use pre-existing clusters
homology_path = set_path / "homology"
if homology_path.exists():
shutil.rmtree(homology_path)
inclusts = pd.read_csv(cluster_file, sep="\t")
for col in ["cluster_id", "members"]:
if col not in inclusts.columns:
logger.error(
f'Column named "{col}" not found in external homology cluster file'
)
sys.exit(1)
cluster_counts = inclusts["cluster_id"].value_counts()
cluster_map = pd.Series(
range(len(cluster_counts)), index=cluster_counts.index
)
cluster_ids = inclusts["cluster_id"].map(cluster_map)
cluster_sizes = inclusts["cluster_id"].map(cluster_counts)
predef_clusters = pd.DataFrame(
{
"cluster_id": cluster_ids,
"size": cluster_sizes,
"members": inclusts["members"],
}
)
predef_clusters.sort_values(by=["cluster_id"], inplace=True)
predef_clusters.drop(
predef_clusters[predef_clusters["size"] < 2].index,
axis=0,
inplace=True,
)
n_clusters = predef_clusters["cluster_id"].max() + 1
predef_clusters.index = range(len(predef_clusters))
external_cluster_path = set_path / EXTERNAL_CLUSTERS_FILE
logger.info(
f"Writing {external_cluster_path} with {len(predef_clusters)} genes"
+ f" in {n_clusters} homology clusters"
)
predef_clusters.to_csv(external_cluster_path, sep="\t")
del cluster_counts, cluster_map, cluster_sizes, inclusts
homology_path = set_path / "homology"
homology_path.mkdir(exist_ok=True)
if not options.quiet:
logger.info(
f"Creating cluster files for for {len(arg_list)}" " proteomes:"
)
proteome_no = 0
for args in arg_list:
logger.info(f"doing proteome {proteome_no}")
write_protein_fasta(
args, fasta_dir=homology_path, clusters=predef_clusters
)
proteome_no += 1
del arg_list
logger.info(
"Checking that all cluster files are present (gene-id mismatch)"
)
missing_files = False
for i in range(n_clusters):
if not (homology_path / f"{i}.fa").exists():
logger.error(f"External cluster {i} is missing.")
missing_files = True
if missing_files:
sys.exit(1)
#
# Write homology info back into proteomes
#
click_loguru.elapsed_time("Alignment/tree-building")
hom_mb = DataMailboxes(
n_boxes=n_proteomes,
mb_dir_path=(set_path / "mailboxes" / "clusters2proteomes"),
file_extension="tsv",
)
hom_mb.write_tsv_headers(HOMOLOGY_COLS)
cluster_paths = [
set_path / "homology" / f"{i}.fa" for i in range(n_clusters)
]
bag = db.from_sequence(cluster_paths)
cluster_stats = []
if not options.quiet:
logger.info(
f"Calculating MSAs and trees for {len(cluster_paths)} homology"
" clusters:"
)
ProgressBar(dt=SPINNER_UPDATE_PERIOD).register()
if parallel:
cluster_stats = bag.map(
parse_cluster,
file_dict=file_idx,
file_writer=hom_mb.locked_open_for_write,
)
else:
for clust_fasta in cluster_paths:
cluster_stats.append(
parse_cluster(
clust_fasta,
file_dict=file_idx,
file_writer=hom_mb.locked_open_for_write,
)
)
n_clust_genes = 0
clusters_dict = {}
for cluster_id, cluster_dict in cluster_stats:
n_clust_genes += cluster_dict["size"]
clusters_dict[cluster_id] = cluster_dict
del cluster_stats
clusters = pd.DataFrame.from_dict(clusters_dict).transpose()
del clusters_dict
clusters.sort_index(inplace=True)
grouping_dict = {}
for i in range(n_proteomes): # keep numbering of single-file clusters
grouping_dict[f"[{i}]"] = i
grouping_dict[str(list(range(n_proteomes)))] = 0
for n_members, subframe in clusters.groupby(["n_memb"]):
if n_members == 1:
continue
if n_members == n_proteomes:
continue
member_counts = pd.DataFrame(subframe["n_members"].value_counts())
member_counts["key"] = range(len(member_counts))
for newcol in range(n_members):
member_counts[f"memb{newcol}"] = ""
for member_string, row in member_counts.iterrows():
grouping_dict[member_string] = row["key"]
member_list = json.loads(member_string)
for col in range(n_members):
member_counts.loc[member_string, f"memb{col}"] = stem_dict[
member_list[col]
]
member_counts = member_counts.set_index("key")
write_tsv_or_parquet(
member_counts, set_path / group_key_filename(n_members)
)
clusters["n_members"] = clusters["n_members"].map(grouping_dict)
clusters = clusters.rename(columns={"n_members": "group_key"})
n_adj = clusters["n_adj"].sum()
adj_pct = n_adj * 100.0 / n_clust_genes
n_adj_clust = sum(clusters["adj_groups"] != 0)
adj_clust_pct = n_adj_clust * 100.0 / len(clusters)
logger.info(
f"{n_adj} ({adj_pct:.1f}%) out of {n_clust_genes}"
+ " clustered genes are adjacent"
)
logger.info(
f"{n_adj_clust} ({adj_clust_pct:.1f}%) out of "
+ f"{len(clusters)} clusters contain adjacency"
)
write_tsv_or_parquet(clusters, set_path / CLUSTERS_FILE)
# join homology cluster info to proteome info
click_loguru.elapsed_time("Joining")
arg_list = []
for i, row in proteomes.iterrows():
arg_list.append(
(
i,
dotpath_to_path(row["path"]),
)
)
bag = db.from_sequence(arg_list)
hom_stats = []
if not options.quiet:
logger.info(f"Joining homology info to {n_proteomes} proteomes:")
ProgressBar(dt=SPINNER_UPDATE_PERIOD).register()
if parallel:
hom_stats = bag.map(
join_homology_to_proteome, mailbox_reader=hom_mb.open_then_delete
).compute()
else:
for args in arg_list:
hom_stats.append(
join_homology_to_proteome(
args, mailbox_reader=hom_mb.open_then_delete
)
)
hom_mb.delete()
hom_frame = pd.DataFrame.from_dict(hom_stats)
hom_frame.set_index(["prot.idx"], inplace=True)
hom_frame.sort_index(inplace=True)
logger.info("Homology cluster coverage:")
with pd.option_context(
"display.max_rows", None, "display.float_format", "{:,.2f}%".format
):
logger.info(hom_frame)
proteomes = pd.concat([proteomes, hom_frame], axis=1)
write_tsv_or_parquet(
proteomes, set_path / PROTEOMOLOGY_FILE, float_format="%5.2f"
)
click_loguru.elapsed_time(None)
| 5,347,434 |
def display_dictionary(dictionary, renormalize=False, reshaping=None,
groupings=None, label_inds=False, highlighting=None,
plot_title=""):
"""
Plot each of the dictionary elements side by side
Parameters
----------
dictionary : ndarray(float32, size=(s, n) OR (s, c, kh, kw))
If the size of dictionary is (s, n), this is a 'fully-connected'
dictionary where each basis element has the same dimensionality as the
image it is trying to represent. n is the size of the image and s the
number of basis functions. If the size of dictionary is (s, c, kh, kw),
this is a 'convolutional' dictionary where each basis element is
(potentially much) smaller than the image it is trying to represent. c
is the number of channels that in the input space, kh is the dictionary
kernel height, and kw is the dictionary kernel width.
renormalize : bool, optional
If present, display basis functions on their own color scale, using
standardize_for_imshow() to put values in the range [0, 1]. Will
accentuate the largest-magnitude values in the dictionary element.
Default False.
reshaping : tuple(int, int), optional
Should only be specified for a fully-connected dictionary (where
dictionary.ndim==2). The dimension of each patch before vectorization
to size n. We reshape the dictionary elements based on this. Default None
label_inds : bool, optional
Superimpose the index into the dictionary of each element in the displayed
grid--helps with quick lookup/selection of individual dictionary
elements. Default False.
highlighting : dictionary, optional
This is used to re-sort and color code the dictionary elements according
to scalar weights. Has three keys:
'weights' : ndarray(float, size=(s,))
The weights for each dictionary element
'color_range': tuple(float, float)
Values less than or equal to highlighting['color_range'][0] get mapped
to dark blue, and values greater than or equal to
highlighting['color_range'][1] get mapped to dark red.
'reorder' : bool
Use the highlighting weights to reorder the dictionary.
Default None.
plot_title : str, optional
The title of the plot. Default ""
Returns
-------
dictionary_figs : list
A list containing pyplot figures. Can be saved separately, or whatever
from the calling function
"""
if groupings is None:
t_ims, raw_val_mapping, lab_w_pix_coords = get_dictionary_tile_imgs(
dictionary, reshape_to_these_dims=reshaping, indv_renorm=renormalize,
highlights=highlighting)
else:
t_ims = get_dictionary_tile_imgs_arr_by_group(dictionary, groupings,
indv_renorm=renormalize, reshape_to_these_dims=reshaping,
highlights=highlighting)
fig_refs = []
for fig_idx in range(len(t_ims)):
fig = plt.figure(figsize=(10, 10))
ax = plt.axes([0.075, 0.075, 0.85, 0.85]) # [bottom, left, height, width]
fig.suptitle(plot_title + ', fig {} of {}'.format(
fig_idx+1, len(t_ims)), fontsize=20)
im_ref = ax.imshow(t_ims[fig_idx], interpolation='None')
if label_inds and groupings is None:
for lab_and_coord in lab_w_pix_coords[fig_idx]:
ax.text(lab_and_coord[2], lab_and_coord[1], lab_and_coord[0],
fontsize=6, verticalalignment='top',
horizontalalignment='left', color='w')
ax.axis('off')
if not renormalize and groupings is None:
# add a luminance colorbar. Because there isn't good rgb colorbar
# support in pyplot I hack this by adding another image subplot
cbar_ax = plt.axes([0.945, 0.4, 0.01, 0.2])
gradient = np.linspace(1.0, 0.0, 256)[:, None]
cbar_ax.imshow(gradient, cmap='gray')
cbar_ax.set_aspect('auto')
cbar_ax.yaxis.tick_right()
cbar_ax.xaxis.set_ticks([])
cbar_ax.yaxis.set_ticks([255, 128, 0])
cbar_ax.yaxis.set_ticklabels(['{:.2f}'.format(x)
for x in raw_val_mapping], fontsize=8)
fig_refs.append(fig)
return fig_refs
| 5,347,435 |
def example(a,b):
""" que onda
"""
| 5,347,436 |
def rbinary_search(arr, target, left=0, right=None):
"""Recursive implementation of binary search.
:param arr: input list
:param target: search item
:param left: left most item in the search sub-array
:param right: right most item in the search sub-array
:return: index of item if found `-1` otherwise
"""
right = len(arr) - 1 if right is None else right
#: base condition (search space is exhausted)
if left > right:
return UNSUCCESSFUL
mid = left + (right - left)//2
if arr[mid] < target:
#: focus on right subtree
result = rbinary_search(arr, target, mid+1, right)
elif arr[mid] > target:
#: focus on left subtree
result = rbinary_search(arr, target, left, mid-1)
else:
result = mid
return result
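# Illustrative checks (added): UNSUCCESSFUL is the module's sentinel (-1 per the
# docstring) returned when the target is absent.
assert rbinary_search([1, 3, 5, 7, 9], 7) == 3
assert rbinary_search([1, 3, 5, 7, 9], 4) == UNSUCCESSFUL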
| 5,347,437 |
def get_redis_posts(author: str) -> (str, str):
"""Return user's first and other post IDs
Retrieve the user's first and other post IDs from Redis,
then return them as a tuple in the form (first, extra)
:param author: The username to get posts for
:return: Tuple of the first and other post IDs
"""
return r.lindex(author, 0), r.lrange(author, 1, -1)
| 5,347,438 |
def template_localtime(value, use_tz=None):
"""
Checks if value is a datetime and converts it to local time if necessary.
If use_tz is provided and is not None, that will force the value to
be converted (or not), overriding the value of settings.USE_TZ.
This function is designed for use by the template engine.
"""
should_convert = (isinstance(value, datetime)
and (settings.USE_TZ if use_tz is None else use_tz)
and not is_naive(value)
and getattr(value, 'convert_to_local_time', True))
return localtime(value) if should_convert else value
| 5,347,439 |
def FilesBrowse(button_text='Browse', target=(ThisRow, -1), file_types=(("ALL Files", "*.*"),), disabled=False,
initial_folder=None, tooltip=None, size=(None, None), auto_size_button=None, button_color=None,
change_submits=False, enable_events=False,
font=None, pad=None, key=None):
"""
:param button_text: text in the button (Default value = 'Browse')
:param target: key or (row,col) target for the button (Default value = (ThisRow, -1))
:param file_types: (Default value = (("ALL Files", "*.*")))
:param disabled: set disable state for element (Default = False)
:param initial_folder: starting path for folders and files
:param tooltip: (str) text, that will appear when mouse hovers over the element
:param size: (w,h) w=characters-wide, h=rows-high
:param auto_size_button: True if button size is determined by button text
:param button_color: button color (foreground, background)
:param change_submits: If True, pressing Enter key submits window (Default = False)
:param enable_events: Turns on the element specific events.(Default = False)
:param font: Union[str, Tuple[str, int]] specifies the font family, size, etc
:param pad: Amount of padding to put around element
:param key: Used with window.FindElement and with return values to uniquely identify this element
"""
return Button(button_text=button_text, button_type=BUTTON_TYPE_BROWSE_FILES, target=target, file_types=file_types,
initial_folder=initial_folder, change_submits=change_submits, enable_events=enable_events,
tooltip=tooltip, size=size, auto_size_button=auto_size_button,
disabled=disabled, button_color=button_color, font=font, pad=pad, key=key)
| 5,347,440 |
def test_operation_to_channel_matrix(channel):
"""Verifies that cirq.channel_matrix correctly computes the channel matrix."""
actual = cirq.operation_to_channel_matrix(channel)
expected = compute_channel_matrix(channel)
assert np.all(actual == expected)
| 5,347,441 |
def ensure_sudo() -> str:
"""ensures user is root and SUDO_USER is in os.environ,
:returns: the real username (see real_username())
"""
# if we aren't root, or don't have access to host environment variables...
username = real_username()
uid = os.getuid() # pylint: disable=no-member
if username == "root":
# this could happen with sudo su, for example
raise EnvironmentError("Could not look up SUDO_USER")
if uid != 0:
raise PermissionError("this script needs sudo")
return username
| 5,347,442 |
def exibir_tarefas():
""" exibe a lista de tarefas cadastradas, com algumas formatações básicas """
for tarefa in db.get_tarefas():
# check = \u2713 é o caracter unicode que representa o concluido
check = u'\u2713' if tarefa[2] == 1 else ""
"""
the parameters passed to this format() are the following
{:>4} = 4 positions, right-aligned
{:<47} = 47 positions, left-aligned
{:^3} = 3 positions, centered
"""
t = "- [{:>4}] {:<47} {:^3}".format(tarefa[0], tarefa[1], check)
print (t)
print ("-" * 60)
| 5,347,443 |
def f2p(phrase, max_word_size=15, cutoff=3):
"""Convert a Finglish phrase to the most probable Persian phrase.
"""
results = f2p_list(phrase, max_word_size, cutoff)
return ' '.join(i[0][0] for i in results)
| 5,347,444 |
def updateProf(id):
""" update prof w/ id """
r = requests.get("http://www.ratemyprofessors.com/ShowRatings.jsp?tid="+str(id))
p = Professor.query.filter_by(id=id).first()
if p:
soup = BeautifulSoup(r.text, "html.parser")
# scrape the data from the page
ratings_title = ["helpfull", "clarity", "ease"]
ratings_number = [float(i.text) for i in soup.find_all("div") if i.get("class") == ["rating"]][:3]
ratings = dict(zip(ratings_title, ratings_number))
ratings["tags"] = [i.text for i in soup.find_all("span") if i.get("class") == ["tag-box-choosetags"]]
ratings["grade"] = [i.text for i in soup.find_all("div") if i.get("class") == ["grade"]][1]
# update our data
p.ease = ratings["ease"]
p.helpfull = ratings["helpfull"]
p.clarity = ratings["clarity"]
p.rating = (p.clarity + p.helpfull + p.ease) / 3
p.tags = ratings["tags"]
p.grade = ratings["grade"]
p.updated = str(datetime.utcnow().isoformat())
db.session.commit()
| 5,347,445 |
def read_sj_out_tab(filename):
"""Read an SJ.out.tab file as produced by the RNA-STAR aligner into a
pandas Dataframe.
Parameters
----------
filename : str of filename or file handle
Filename of the SJ.out.tab file you want to read in
Returns
-------
sj : pandas.DataFrame
Dataframe of splice junctions
"""
def int_to_intron_motif(n):
if n == 0:
return 'non-canonical'
if n == 1:
return 'GT/AG'
if n == 2:
return 'CT/AC'
if n == 3:
return 'GC/AG'
if n == 4:
return 'CT/GC'
if n == 5:
return 'AT/AC'
if n == 6:
return 'GT/AT'
sj = pd.read_table(filename, header=None, names=COLUMN_NAMES,
low_memory=False)
sj.intron_motif = sj.intron_motif.map(int_to_intron_motif)
sj.annotated = sj.annotated.map(bool)
sj.strand.astype('object')
sj.strand = sj.strand.apply(lambda x: ['unk','+','-'][x])
# See https://groups.google.com/d/msg/rna-star/B0Y4oH8ZSOY/NO4OJbbUU4cJ for
# definition of strand in SJout files.
sj = sj.sort_values(by=['chrom', 'start', 'end'])
return sj
| 5,347,446 |
def gc_subseq(seq, k=2000):
"""
Returns GC content of non-overlapping sub-sequences of size k.
The result is a list.
"""
res = []
for i in range(0, len(seq)-k+1, k):
subseq = seq[i:i+k]
gc = calculate_gc(subseq)
res.append(gc)
return res
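# Hedged usage sketch (added): calculate_gc is defined elsewhere in this module
# and is assumed to return the GC content of a sequence.
# seq = "ATGC" * 1500          # 6000 bases
# gc_subseq(seq, k=2000)       # -> three GC values, one per 2000-base window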
| 5,347,447 |
def get_links_from_page(text: str=None) -> set:
"""
extract the links from the HTML
:param text: the search term
:return: a set of links
:rtype: set
"""
links = set()
link_pattern = re.compile('img.src=.+') # todo expand this to get href's
# link_pattern = re.compile(r'href=*')
if text:
text = quote(text)
url = "https://www.flickr.com/search/?text=%s" % text
else:
url = "https://www.flickr.com/search/"
logger.info("url: %s", url)
try:
response = urlopen(url)
data = response.read().decode('utf-8')
except Exception:
logger.error('url: %s', url, exc_info=True)
return links
# logger.info("data: %s", data)
for line in data.splitlines():
# logger.info("line: %s", line)
img_data = link_pattern.search(line)
# seems best to step through the lines
# img_data = link_pattern.search(data)
if img_data:
# input('found something: %s' % img_data)
# logger.info("img_data: %s", img_data)
# logger.info("line: %s", line)
link = line.split('=')[1].replace("'", '').strip(';').lower()
ext = os.path.splitext(link)[1]
# logger.info('ext: %s', ext)
if ext in SUPPORTED_IMAGE_TYPES:
links.add(link)
logger.info("%s %s links: %s", len(links), text, links)
return links
| 5,347,448 |
def pcursor():
"""Database cursor."""
dbconn = get_dbconn("portfolio")
return dbconn.cursor()
| 5,347,449 |
def base_experiment_cmd(args, rest):
"""Execute experiment function"""
if not getattr(args, 'expfunc', None):
args.command.print_help()
else:
args.expfunc(args, rest)
| 5,347,450 |
def get_args_from_command_line():
"""Parse the command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument("--HITId", type=str)
parser.add_argument("--worker_id", type=str)
args = parser.parse_args()
return args
| 5,347,451 |
def truncate(f, n):
"""
Floors a float to n digits after the decimal point.
"""
return math.floor(f * 10 ** n) / 10 ** n
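# Illustrative checks (added): note that math.floor rounds toward negative
# infinity, so negative inputs are floored rather than truncated.
assert truncate(3.14159, 2) == 3.14
assert truncate(-3.14159, 2) == -3.15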
| 5,347,452 |
async def hacs(hass: HomeAssistant):
"""Fixture to provide a HACS object."""
hacs_obj = HacsBase()
hacs_obj.hass = hass
hacs_obj.tasks = HacsTaskManager(hacs=hacs_obj, hass=hass)
hacs_obj.validation = ValidationManager(hacs=hacs_obj, hass=hass)
hacs_obj.session = async_get_clientsession(hass)
hacs_obj.repositories = HacsRepositories()
hacs_obj.integration = Integration(
hass=hass,
pkg_path="custom_components.hacs",
file_path=Path(hass.config.path("custom_components/hacs")),
manifest={"domain": DOMAIN, "version": "0.0.0", "requirements": ["hacs_frontend==1"]},
)
hacs_obj.common = HacsCommon()
hacs_obj.data = AsyncMock()
hacs_obj.queue = QueueManager(hass=hass)
hacs_obj.core = HacsCore()
hacs_obj.system = HacsSystem()
hacs_obj.core.config_path = hass.config.path()
hacs_obj.core.ha_version = AwesomeVersion(HAVERSION)
hacs_obj.version = hacs_obj.integration.version
hacs_obj.configuration.token = TOKEN
## New GitHub client
hacs_obj.githubapi = GitHubAPI(
token=hacs_obj.configuration.token,
session=hacs_obj.session,
**{"client_name": "HACS/pytest"},
)
await hacs_obj.tasks.async_load()
hacs_obj.queue.clear()
hass.data[DOMAIN] = hacs_obj
yield hacs_obj
| 5,347,453 |
def process_simple_summary_csv(out_f, in_f, rundate):
"""Scan file and compute sums for 2 columns"""
df = panda.read_csv(in_f)
FORMATTING_FILE = "ColumnFormatting.json"
with open(FORMATTING_FILE) as json_data:
column_details = json.load(json_data)
# this dictionary will contain information about individual column data type
DAYS = 30
"""
Depending on the amount of detail in the report from PAI this dataframe may include more than one row
for each location. When only one row contains information on an individual location then the file lacks any
indication of WHEN the report covers. We can get the date of the report being created by PAI from the
filename but the time range covered by the report won't be included.
When the report has MULTIPLE lines for each LOCATION then each line contains a datestring. These datestrings
can be converted to datetimes and sorted to find the earliest and latest dates in the report.
print(dft2) # sample dataframe of timestamps
0 2019-12-01
1 2019-12-02
2 2019-12-03
3 2019-12-04
4 2019-12-05
5 2019-12-06
6 2019-12-07
7 2019-12-08
8 2019-12-09
9 2019-12-10
10 2019-12-11
11 2019-12-12
12 2019-12-13
13 2019-12-14
14 2019-12-15
15 2019-12-16
16 2019-12-17
17 2019-12-18
18 2019-12-19
19 2019-12-20
20 2019-12-21
21 2019-12-22
22 2019-12-23
23 2019-12-24
24 2019-12-25
25 2019-12-26
26 2019-12-27
27 2019-12-28
28 2019-12-29
29 2019-12-30
30 2019-12-31
Name: Settlement Date, dtype: datetime64[ns]
dft2.astype(str).max()
'2019-12-31'
dft2.astype(str).min()
'2019-12-01'
"""
try:
# TODO standardize the function that strips extra characters from a numeric string
# e.g. df[?] = strip2float(df[?])
# try to recognize as many standard strings as possible. $1 ($1) -$1 $-1,234.876 etc
df["Surch"].replace("[\$,)]", "", regex=True, inplace=True)
df["Surch"] = df["Surch"].astype(float)
except KeyError as e:
logger.error(f"KeyError in dataframe: {e}")
return False
try:
df["Settlement"].replace("[\$,)]", "", regex=True, inplace=True)
df["Settlement"] = df["Settlement"].astype(float)
except KeyError as e:
logger.error(f"KeyError in dataframe: {e}")
return False
try:
df["WD Trxs"] = df["WD Trxs"].astype(float)
except KeyError as e:
logger.error(f"KeyError in dataframe: {e}")
return False
def calc(row):
"""Calculate the surcharge earned per withdrawl."""
wd = row["WD Trxs"]
if wd > 0:
return round(row["Surch"] / wd, 2)
else:
return 0
try:
df["Surcharge amt"] = df.apply(lambda row: calc(row), axis=1)
except KeyError as e:
logger.error(f"KeyError in dataframe: {e}")
return False
def avgWD(row):
"""Calculate the average amount of withdrawls."""
wd = row["WD Trxs"]
if wd > 0:
return round(row["Settlement"] / wd, 2)
else:
return 0
try:
df["Average WD amount"] = df.apply(lambda row: avgWD(row), axis=1)
except KeyError as e:
logger.error(f"KeyError in dataframe: {e}")
return False
def DailyWD(row):
"""Assuming 30 days in report data calculate daily withdrawl total."""
return round(row["Settlement"] / DAYS, 2)
try:
df["Daily Vault AVG"] = df.apply(lambda row: DailyWD(row), axis=1)
except KeyError as e:
logger.error(f"KeyError in dataframe: {e}")
return False
# work is finished. Drop unneeded columns from output
# TODO expand this to drop all columns except those desired in the report
df = df.drop(["Settlement Date"], axis=1) # df.columns is zero-based panda.Index
# sort the data
df = df.sort_values("Surch", ascending=False)
indx = 0
return {f"Outputfile{indx}.xlsx": df}
| 5,347,454 |
def test_export_post_command_bad_command():
"""Test --post-command with bad command"""
import os.path
from osxphotos.cli import cli
runner = CliRunner()
cwd = os.getcwd()
# pylint: disable=not-context-manager
with runner.isolated_filesystem():
result = runner.invoke(
cli,
[
"export",
"--db",
os.path.join(cwd, PHOTOS_DB_15_7),
".",
"--post-command",
"exported",
"foobar {filepath.name|shell_quote} >> {export_dir}/exported.txt",
"--name",
"Park",
"--skip-original-if-edited",
],
)
assert result.exit_code == 0
assert 'Error running command "foobar' in result.output
| 5,347,455 |
def test_with_zip_concat():
"""
Feature: SentencePieceTokenizer
Description: test SentencePieceTokenizer with zip and concat operations
Expectation: output is equal to the expected value
"""
data = ds.TextFileDataset(VOCAB_FILE, shuffle=False)
vocab = text.SentencePieceVocab.from_dataset(data, ["text"], 100, 0.9995, SentencePieceModel.UNIGRAM, {})
tokenizer = text.SentencePieceTokenizer(vocab, out_type=SPieceTokenizerOutType.STRING)
dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
dataset = dataset.map(operations=tokenizer, num_parallel_workers=2)
zip_test(dataset)
concat_test(dataset)
| 5,347,456 |
def test_inspect_auto_flats(tmp_path, save_changes):
"""Test flat channel & segment detection."""
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('Agg')
plt.close('all')
bids_root = setup_bids_test_dir(tmp_path)
bids_path = _bids_path.copy().update(root=bids_root)
channels_tsv_fname = bids_path.copy().update(suffix='channels',
extension='.tsv')
raw = read_raw_bids(bids_path=bids_path, verbose='error')
# Inject an entirely flat channel.
raw.load_data()
raw._data[10] = np.zeros_like(raw._data[10], dtype=raw._data.dtype)
    # Add a flat time segment (approx. 100 ms) to another channel
raw._data[20, 500:500 + int(np.ceil(0.1 * raw.info['sfreq']))] = 0
raw.save(raw.filenames[0], overwrite=True)
old_bads = raw.info['bads'].copy()
inspect_dataset(bids_path)
raw_fig = mne_bids.inspect._global_vars['raw_fig']
# Closing the window should open a dialog box.
raw_fig.canvas.key_press_event(raw_fig.mne.close_key)
fig_dialog = mne_bids.inspect._global_vars['dialog_fig']
if save_changes:
key = 'return'
else:
key = 'escape'
fig_dialog.canvas.key_press_event(key)
raw = read_raw_bids(bids_path=bids_path, verbose='error')
if save_changes:
assert old_bads != raw.info['bads']
assert raw.ch_names[10] in raw.info['bads']
channels_tsv_data = _from_tsv(channels_tsv_fname)
assert (channels_tsv_data['status_description'][10] ==
'Flat channel, auto-detected via MNE-BIDS')
# This channel should not have been added to `bads`, but produced a
# flat annotation.
assert raw.ch_names[20] not in raw.info['bads']
assert 'BAD_flat' in raw.annotations.description
else:
assert old_bads == raw.info['bads']
assert 'BAD_flat' not in raw.annotations.description
| 5,347,457 |
def rotY(theta):
""" returns Rotation matrix such that R*v -> v', v' is rotated about y axis through theta_d.
theta is in radians.
rotY = Ry'
"""
st = math.sin(theta)
ct = math.cos(theta)
return np.matrix([[ ct, 0., st ],
[ 0., 1., 0. ],
[ -st, 0., ct ]])
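# Usage sketch: with this convention, rotating the x unit vector by 90 degrees
# about the y axis maps it onto -z (up to floating point error).
v = np.matrix([[1.0], [0.0], [0.0]])
print(rotY(math.pi / 2) * v)  # approximately [[0.], [0.], [-1.]]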
| 5,347,458 |
def get_core_blockdata(core_index, spltcore_index, core_bases):
"""
Get Core Offset and Length
:param core_index: Index of the Core
    :param spltcore_index: Index of last core before split
:param core_bases: Array with base offset and offset after split
:return: Array with core offset and core length
"""
core_base = int(core_bases[0])
core_len = int(core_bases[1])
core_split = 0
if len(core_bases) > 4:
core_split = int(core_bases[4])
core_offset = core_base + core_index * core_len
if core_split and core_index + 2 > spltcore_index:
core_offset = core_split + (core_index - spltcore_index + 1) * core_len
return [core_offset, core_len]
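# Usage sketch with made-up core_bases values: base offset 4096, core length
# 256 and no split entry, so core 2 starts at 4096 + 2 * 256.
print(get_core_blockdata(2, 4, ["4096", "256"]))  # [4608, 256]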
| 5,347,459 |
def make_bench_verify_token(alg):
""" Return function which will generate token for particular algorithm """
privk = priv_keys[alg].get('default', priv_key)
token = jwt.generate_jwt(payload, privk, alg, timedelta(days=1))
def f(_):
""" Verify token """
pubk = pub_keys[alg].get('default', pub_key)
jwt.verify_jwt(token, pubk, [alg])
return f
| 5,347,460 |
def test_dag_integrity(dag_path):
"""Import DAG files and check for a valid DAG instance."""
dag_name = path.basename(dag_path)
module = _import_file(dag_name, dag_path)
# Validate if there is at least 1 DAG object in the file
dag_objects = [
var for var in vars(module).values() if isinstance(var, airflow_models.DAG)
]
assert dag_objects
# For every DAG object, test for cycles
for dag in dag_objects:
dag.test_cycle()
| 5,347,461 |
def test_make_package(qipkg_action, qipy_action):
""" Test Make Package """
tmpdir = qipy_action.worktree.tmpdir
qipkg_action.add_test_project("a_cpp")
qipkg_action.add_test_project("b_py")
c_pkg_proj = qipkg_action.add_test_project("c_pkg")
# ipython 5 is the last version compatible with Python 2.7
qipy_action("bootstrap", "pip", "virtualenv", "ipython<=5")
pml = os.path.join(c_pkg_proj.path, "c_pkg.pml")
qipkg_action("configure", pml)
qipkg_action("build", pml)
pkg = qipkg_action("make-package", pml)
qipkg_action("extract-package", pkg)
expected_paths = [
"manifest.xml",
"lib/libfoo.so",
"lib/python/site-packages/b.py",
"c_behavior/behavior.xar",
]
for path in expected_paths:
full_path = tmpdir.join("c-0.1", path)
assert full_path.check(file=True)
| 5,347,462 |
def status():
""" Status of the API """
return jsonify({'status': 'OK'})
| 5,347,463 |
def calc_median(input_list):
"""sort the list and return median"""
new_list = sorted(input_list)
len_list = len(new_list)
    if len_list % 2 == 0:
        return (new_list[len_list // 2 - 1] + new_list[len_list // 2]) / 2
    else:
        return new_list[len_list // 2]
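# Usage sketch: middle value for an odd-length list, mean of the two middle
# values for an even-length list.
assert calc_median([3, 1, 2]) == 2
assert calc_median([4, 1, 3, 2]) == 2.5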
| 5,347,464 |
def _load_eigenvalue(h5_result, log):
"""Loads a RealEigenvalue"""
class_name = _cast(h5_result.get('class_name'))
table_name = '???'
title = ''
nmodes = _cast(h5_result.get('nmodes'))
if class_name == 'RealEigenvalues':
obj = RealEigenvalues(title, table_name, nmodes=nmodes)
elif class_name == 'ComplexEigenvalues':
obj = ComplexEigenvalues(title, table_name, nmodes)
elif class_name == 'BucklingEigenvalues':
obj = BucklingEigenvalues(title, table_name, nmodes=nmodes)
else:
log.warning(' %r is not supported...skipping' % class_name)
return None
assert obj.class_name == class_name, 'class_name=%r selected; should be %r' % (obj.class_name, class_name)
keys_to_skip = ['class_name', 'is_complex', 'is_real', 'table_name_str']
for key in h5_result.keys():
if key in keys_to_skip:
continue
else:
datai = _cast(h5_result.get(key))
if isinstance(datai, bytes):
pass
elif isinstance(datai, str):
datai = datai.encode('latin1')
else:
assert not isinstance(datai, bytes), key
setattr(obj, key, datai)
return obj
| 5,347,465 |
def _GenerateBaseResourcesAllowList(base_module_rtxt_path,
base_allowlist_rtxt_path):
"""Generate a allowlist of base master resource ids.
Args:
base_module_rtxt_path: Path to base module R.txt file.
base_allowlist_rtxt_path: Path to base allowlist R.txt file.
Returns:
list of resource ids.
"""
ids_map = resource_utils.GenerateStringResourcesAllowList(
base_module_rtxt_path, base_allowlist_rtxt_path)
return ids_map.keys()
| 5,347,466 |
def test_two_unrelated_w_a_wout_c(clean_db, unrelated_with_trials, capsys):
"""Test two unrelated experiments with --all."""
orion.core.cli.main(['status', '--all'])
captured = capsys.readouterr().out
expected = """\
test_double_exp
===============
id status
-------------------------------- -----------
a8f8122af9e5162e1e2328fdd5dd75db broken
ab82b1fa316de5accb4306656caa07d0 completed
c187684f7c7d9832ba953f246900462d interrupted
1497d4f27622520439c4bc132c6046b1 new
bd0999e1a3b00bf8658303b14867b30e reserved
b9f1506db880645a25ad9b5d2cfa0f37 suspended
test_single_exp
===============
id status min obj
-------------------------------- ----------- ---------
ec6ee7892275400a9acbf4f4d5cd530d broken
c4c44cb46d075546824e2a32f800fece completed 0
2b5059fa8fdcdc01f769c31e63d93f24 interrupted
7e8eade99d5fb1aa59a1985e614732bc new
507496236ff94d0f3ad332949dfea484 reserved
caf6afc856536f6d061676e63d14c948 suspended
"""
assert captured == expected
| 5,347,467 |
def finite_min_max(array_like):
""" Obtain finite (non-NaN, non-Inf) minimum and maximum of an array.
Parameters
----------
array_like : array_like
A numeric array of some kind, possibly containing NaN or Inf values.
Returns
-------
tuple
Two-valued tuple containing the finite minimum and maximum of *array_like*.
"""
array_like = np.asanyarray(array_like)
finite_values = array_like[np.isfinite(array_like)]
return finite_values.min(), finite_values.max()
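# Usage sketch: NaN and infinite entries are ignored when taking the extrema.
print(finite_min_max([1.0, np.nan, -2.5, np.inf, 7.0]))  # (-2.5, 7.0)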
| 5,347,468 |
def list_extend1(n):
"""
    using a list to build it up, then convert to a numpy array
"""
l = []
num_to_extend = 100
data = range(num_to_extend)
    for i in range(n // num_to_extend):
l.extend(data)
return np.array(l)
| 5,347,469 |
def ccf(tdm, tsuid_list_or_dataset, lag_max=None, tsuids_out=False, cut_ts=False):
"""
This function calculates the maximum of the cross correlation function matrix between all ts
in tsuid_list_or_dataset in a serial mode.
The result is normalized (between -1 and 1)
Cross correlation is a correlation between two timeseries whose one is delayed of successive lag
values. Result of CCF is a timeseries (correlation function of the lag between timeseries).
This function keep the maximum value of the CCF function generated and pull it in the matrix for
corresponding timeseries couple.
:returns: a string matrix (whose size is equal to the number of tsuids in tsuid_list_or_dataset
plus one line and one column for headers)
:rtype: np.ndarray
:param tdm: Temporal Data Manager client
:param tsuid_list_or_dataset: list of identifiers of the time series or dataset name
:param lag_max: maximum lag between timeseries (cf. _ccf function for more details)
:param tsuids_out: True to fill headers with tsuids
False to fill headers with functional ids
:param cut_ts: Cut the TS list to the min-length if set to True
:type tdm: TemporalDataMgr
:type tsuid_list_or_dataset: list of str or str
:type lag_max: positive int
:type tsuids_out: boolean
:type cut_ts: boolean
:raises TypeError: if tsuids_out is not a boolean
"""
if type(tsuids_out) is not bool:
raise TypeError("tsuids_out must be a boolean")
# retrieve data from temporal data manager
ts_data_list, tsuid_list = __retrieve_data(
tdm, tsuid_list_or_dataset)
if tsuids_out:
ts_list = tsuid_list
else:
ts_list = __retrieve_func_id(tdm, tsuid_list)
# number and size of time series
ts_nb = len(ts_data_list)
ts_size = len(ts_data_list[0])
if cut_ts:
for ts in ts_data_list:
ts_size = min(len(ts), ts_size)
else:
# check time series have same length
for ts in ts_data_list:
if len(ts) != ts_size:
raise ValueError('time series do not have same length')
# matrix initialization
matrix_corr = np.zeros([ts_nb, ts_nb])
for index1, _ in enumerate(ts_data_list):
matrix_corr[index1, index1] = 1
# Conversion ts1 data from list (keeping only value column) to an array
ts1 = np.asarray(ts_data_list[index1][:ts_size, 1])
for index2 in range(index1 + 1, len(ts_data_list)):
# Conversion ts2 data from list (keeping only value column) to an
# array
ts2 = np.asarray(ts_data_list[index2][:ts_size, 1])
# cross correlation calculation
# keeping the maximum absolute value between cross correlation with
# positive and with negative lag
ccf_fcn = _ccf(ts1, ts2, lag_max)
max_ccf = __get_max_abs_value(ccf_fcn)
# fill matrix with result (max of ccf is commutative)
matrix_corr[index1, index2] = max_ccf
matrix_corr[index2, index1] = max_ccf
# fill final matrix with headers
matrix = __fill_headers_to_final_matrix(matrix_corr, ts_list)
return matrix
| 5,347,470 |
def filter_zoau_installs(zoau_installs, build_info, minimum_zoau_version):
"""Sort and filter potential ZOAU installs based on build date
and version.
Args:
zoau_installs (list[dict]): A list of found ZOAU installation paths.
build_info (list[str]): A list of build info strings
minimum_zoau_version (str): The minimum version of ZOAU to accept.
Returns:
list[dict]: A sorted and filtered list of ZOAU installation paths.
"""
for index, zoau_install in enumerate(zoau_installs):
zoau_install["build"] = build_info[index]
for zoau_install in zoau_installs:
zoau_install["build"] = _get_version_from_build_string(zoau_install.get("build", ""))
zoau_installs.sort(key=lambda x: _version_to_tuple(x.get("build")), reverse=True)
min_version = _version_to_tuple(minimum_zoau_version)
valid_installs = []
for zoau_install in zoau_installs:
if min_version <= _version_to_tuple(
zoau_install.get("build")
):
valid_installs.append(zoau_install)
# account for the fact 1.1.0 may or may not require pip install depending on PTF
if "1.1.0" in zoau_install.get("build", ""):
backup_install = zoau_install.copy()
# set build to none so we do not treat it like a pip 1.1.0 install when testing
backup_install["build"] = ""
valid_installs.append(backup_install)
return valid_installs
| 5,347,471 |
def make_chain(node, address, privkeys, parent_txid, parent_value, n=0, parent_locking_script=None, fee=DEFAULT_FEE):
"""Build a transaction that spends parent_txid.vout[n] and produces one output with
amount = parent_value with a fee deducted.
Return tuple (CTransaction object, raw hex, nValue, scriptPubKey of the output created).
"""
inputs = [{"txid": parent_txid, "vout": n}]
my_value = parent_value - fee
outputs = {address : my_value}
rawtx = node.createrawtransaction(inputs, outputs)
prevtxs = [{
"txid": parent_txid,
"vout": n,
"scriptPubKey": parent_locking_script,
"amount": parent_value,
}] if parent_locking_script else None
signedtx = node.signrawtransactionwithkey(hexstring=rawtx, privkeys=privkeys, prevtxs=prevtxs)
assert signedtx["complete"]
tx = tx_from_hex(signedtx["hex"])
return (tx, signedtx["hex"], my_value, tx.vout[0].scriptPubKey.hex())
| 5,347,472 |
def parse_args():
"""
Parse input arguments
Returns
-------
args : object
Parsed args
"""
h = {
"program": "Simple Baselines training",
"train_folder": "Path to training data folder.",
"batch_size": "Number of images to load per batch. Set according to your PC GPU memory available. If you get "
"out-of-memory errors, lower the value. defaults to 64",
"epochs": "How many epochs to train for. Once every training image has been shown to the CNN once, an epoch "
"has passed. Defaults to 15",
"test_folder": "Path to test data folder",
"num_workers": "Number of workers to load in batches of data. Change according to GPU usage",
"test_only": "Set to true if you want to test a loaded model. Make sure to pass in model path",
"model_path": "Path to your model",
"learning_rate": "The learning rate of your model. Tune it if it's overfitting or not learning enough"}
parser = argparse.ArgumentParser(description=h['program'], formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--train_folder', help=h["train_folder"], type=str)
parser.add_argument('--batch_size', help=h['batch_size'], type=int, default=64)
parser.add_argument('--epochs', help=h["epochs"], type=int, default=15)
parser.add_argument('--test_folder', help=h["test_folder"], type=str)
parser.add_argument('--num_workers', help=h["num_workers"], type=int, default=5)
parser.add_argument('--test_only', help=h["test_only"], type=bool, default=False)
    parser.add_argument('--model_path', help=h["model_path"], type=str)
parser.add_argument('--learning_rate', help=h["learning_rate"], type=float, default=0.003)
args = parser.parse_args()
return args
| 5,347,473 |
def timestamp(date):
"""Get the timestamp of the `date`, python2/3 compatible
:param datetime.datetime date: the utc date.
:return: the timestamp of the date.
:rtype: float
"""
return (date - datetime(1970, 1, 1)).total_seconds()
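# Usage sketch: the Unix epoch maps to 0.0 and one hour later to 3600 seconds.
assert timestamp(datetime(1970, 1, 1)) == 0.0
assert timestamp(datetime(1970, 1, 1, 1)) == 3600.0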
| 5,347,474 |
def pratt_arrow_risk_aversion(t, c, theta, **params):
"""Assume constant relative risk aversion"""
return theta / c
| 5,347,475 |
def parse_content_type(content_type):
"""
Parse a content-type and its parameters into values.
RFC 2616 sec 14.17 and 3.7 are pertinent.
**Examples**::
'text/plain; charset=UTF-8' -> ('text/plain', [('charset, 'UTF-8')])
'text/plain; charset=UTF-8; level=1' ->
('text/plain', [('charset, 'UTF-8'), ('level', '1')])
:param content_type: content_type to parse
:returns: a tuple containing (content type, list of k, v parameter tuples)
"""
parm_list = []
if ';' in content_type:
content_type, parms = content_type.split(';', 1)
parms = ';' + parms
for m in _rfc_extension_pattern.findall(parms):
key = m[0].strip()
value = m[1].strip()
parm_list.append((key, value))
return content_type, parm_list
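# Usage sketch matching the docstring examples; assumes the module-level
# _rfc_extension_pattern regex used above is available.
print(parse_content_type('text/plain; charset=UTF-8; level=1'))
# ('text/plain', [('charset', 'UTF-8'), ('level', '1')])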
| 5,347,476 |
def mapplot(df, var, metric, ref_short, ref_grid_stepsize=None, plot_extent=None, colormap=None, projection=None,
add_cbar=True, figsize=globals.map_figsize, dpi=globals.dpi,
**style_kwargs):
"""
Create an overview map from df using df[var] as color.
Plots a scatterplot for ISMN and a image plot for other input values.
Parameters
----------
df : pandas.DataFrame
DataFrame with lat and lon in the multiindex and var as a column
var : str
variable to be plotted.
    metric: str
        name of the plotted metric (used to pick the value range and colormap)
ref_short: str
short name of the reference dataset (read from netCDF file)
    ref_grid_stepsize: float or None, optional (None by default)
        angular grid stepsize of the reference grid; needed only for gridded
        (non-scattered) reference datasets
plot_extent: tuple
(x_min, x_max, y_min, y_max) in Data coordinates. The default is None.
colormap: Colormap, optional
colormap to be used.
If None, defaults to globals._colormaps.
projection: cartopy.crs, optional
Projection to be used. If none, defaults to globals.map_projection.
The default is None.
add_cbar: bool, optional
Add a colorbar. The default is True.
figsize: tuple, optional
Figure size in inches. The default is globals.map_figsize.
dpi: int, optional
Resolution for raster graphic output. The default is globals.dpi.
**style_kwargs :
Keyword arguments for plotter.style_map().
Returns
-------
    fig : matplotlib.figure.Figure
        the created figure
    ax : matplotlib.axes.Axes
        the map axes
"""
# === value range ===
v_min, v_max = get_value_range(df[var], metric)
# === init plot ===
fig, ax, cax = init_plot(figsize, dpi, add_cbar, projection)
if not colormap:
# colormap = globals._colormaps[meta['metric']]
cmap = globals._colormaps[metric]
else:
cmap = colormap
# cmap = plt.cm.get_cmap(colormap)
# === scatter or mapplot ===
if ref_short in globals.scattered_datasets: # === scatterplot ===
        # === coordinate range ===
if not plot_extent:
plot_extent = get_plot_extent(df)
# === marker size ===
markersize = globals.markersize ** 2 # in points**2
# === plot ===
lat, lon = globals.index_names
im = ax.scatter(df.index.get_level_values(lon), df.index.get_level_values(lat),
c=df[var], cmap=cmap, s=markersize, vmin=v_min, vmax=v_max, edgecolors='black',
linewidths=0.1, zorder=2, transform=globals.data_crs)
else: # === mapplot ===
        # === coordinate range ===
if not plot_extent:
plot_extent = get_plot_extent(df, grid_stepsize=ref_grid_stepsize, grid=True)
# === prepare values ===
zz, zz_extent, origin = geotraj_to_geo2d(df, var, grid_stepsize=ref_grid_stepsize)
# === plot ===
im = ax.imshow(zz, cmap=cmap, vmin=v_min, vmax=v_max,
interpolation='nearest', origin=origin,
extent=zz_extent,
transform=globals.data_crs, zorder=2)
# === add colorbar ===
if add_cbar:
_make_cbar(fig, im, cax, ref_short, metric)
style_map(ax, plot_extent, **style_kwargs)
# === layout ===
fig.canvas.draw() # very slow. necessary bcs of a bug in cartopy: https://github.com/SciTools/cartopy/issues/1207
# plt.tight_layout() # pad=1) # pad=0.5,h_pad=1,w_pad=1,rect=(0, 0, 1, 1))
return fig, ax
| 5,347,477 |
def generate_athena(config):
"""Generate Athena Terraform.
Args:
config (dict): The loaded config from the 'conf/' directory
Returns:
dict: Athena dict to be marshalled to JSON
"""
result = infinitedict()
prefix = config['global']['account']['prefix']
athena_config = config['lambda']['athena_partitioner_config']
data_buckets = athena_partition_buckets_tf(config)
database = athena_config.get('database_name', '{}_streamalert'.format(prefix))
results_bucket_name = athena_query_results_bucket(config)
queue_name = athena_config.get(
'queue_name',
'{}_streamalert_athena_s3_notifications'.format(prefix)
).strip()
logging_bucket, _ = s3_access_logging_bucket(config)
# Set variables for the athena partitioner's IAM permissions
result['module']['athena_partitioner_iam'] = {
'source': './modules/tf_athena',
'account_id': config['global']['account']['aws_account_id'],
'prefix': prefix,
's3_logging_bucket': logging_bucket,
'database_name': database,
'queue_name': queue_name,
'athena_data_buckets': data_buckets,
'results_bucket': results_bucket_name,
'lambda_timeout': athena_config['timeout'],
'kms_key_id': '${aws_kms_key.server_side_encryption.key_id}',
'function_role_id': '${module.athena_partitioner_lambda.role_id}',
'function_name': '${module.athena_partitioner_lambda.function_name}',
'function_alias_arn': '${module.athena_partitioner_lambda.function_alias_arn}',
}
# Set variables for the Lambda module
result['module']['athena_partitioner_lambda'] = generate_lambda(
'{}_streamalert_{}'.format(prefix, ATHENA_PARTITIONER_NAME),
'streamalert.athena_partitioner.main.handler',
athena_config,
config,
tags={
'Subcomponent': 'AthenaPartitioner'
}
)
return result
| 5,347,478 |
def test_copying_custom_behvior():
"""Test copying a modification that was turned into a replacment.
"""
class Subclass(CopyModfifcationTurnedToReplacement):
pass
    # Check that the test class works
c = CopyModfifcationTurnedToReplacement()
assert c.counter == 0
c.feat
assert c.counter == 1
# Check the subclass works
s = Subclass()
assert s.counter == 0
s.feat
assert s.counter == 1
| 5,347,479 |
def _angular_rate_to_rotvec_dot_matrix(rotvecs):
"""Compute matrices to transform angular rates to rot. vector derivatives.
The matrices depend on the current attitude represented as a rotation
vector.
Parameters
----------
rotvecs : ndarray, shape (n, 3)
Set of rotation vectors.
Returns
-------
ndarray, shape (n, 3, 3)
"""
norm = np.linalg.norm(rotvecs, axis=1)
k = np.empty_like(norm)
mask = norm > 1e-4
nm = norm[mask]
k[mask] = (1 - 0.5 * nm / np.tan(0.5 * nm)) / nm**2
mask = ~mask
nm = norm[mask]
k[mask] = 1/12 + 1/720 * nm**2
skew = _create_skew_matrix(rotvecs)
result = np.empty((len(rotvecs), 3, 3))
result[:] = np.identity(3)
result[:] += 0.5 * skew
result[:] += k[:, None, None] * np.matmul(skew, skew)
return result
| 5,347,480 |
def get_all_learners() -> Any:
"""Get all learner configurations which are prepared."""
return {
"learner_types": sorted(
[
possible_dir.name
for possible_dir in LEARNERS_DIR.iterdir()
if possible_dir.is_dir()
]
)
}
| 5,347,481 |
def getdate(targetconnection, ymdstr, default=None):
"""Convert a string of the form 'yyyy-MM-dd' to a Date object.
The returned Date is in the given targetconnection's format.
Arguments:
- targetconnection: a ConnectionWrapper whose underlying module's
Date format is used
- ymdstr: the string to convert
- default: The value to return if the conversion fails
"""
try:
(year, month, day) = ymdstr.split('-')
modref = targetconnection.getunderlyingmodule()
return modref.Date(int(year), int(month), int(day))
except Exception:
return default
| 5,347,482 |
def _assert_unique_keys(variant_key_map):
"""Checks that the keys are unique across different submaps.
Other parts of the code make the assumption that they subkeys are unique.
I'm open to changing this requirement as long as we do it safely.
"""
field_name_sets = [(submap, set(variant_key_map[submap].keys()))
for submap in variant_key_map.keys()]
# Check all pairs for intersections.
    for i in range(len(field_name_sets)):
        for j in range(len(field_name_sets)):
submap_i = field_name_sets[i][0]
submap_j = field_name_sets[j][0]
if i <= j:
continue
            assert not (field_name_sets[i][1] & field_name_sets[j][1]), (
                'Duplicate SNP filter keys between submaps {submap_i}'
                ' and {submap_j}. Keys in common: {common_keys}'.format(
                    submap_i=submap_i,
                    submap_j=submap_j,
                    common_keys=' '.join(
                        field_name_sets[i][1] & field_name_sets[j][1])
                )
            )
| 5,347,483 |
def thermal_dm(n, u):
"""
return the thermal density matrix for a boson
n: integer
dimension of the Fock space
u: float
reduced temperature, omega/k_B T
"""
nlist = np.arange(n)
diags = exp(- nlist * u)
diags /= np.sum(diags)
    rho = lil_matrix((n, n))
rho.setdiag(diags)
return rho.tocsr()
| 5,347,484 |
def _get_confidence_bounds(confidence):
"""
Get the upper and lower confidence bounds given a desired confidence level.
Args:
        confidence (float): desired confidence level, given as a percentage
            (e.g. 95 for a 95% confidence interval).
Returns:
float, float:
- upper confidence bound
- lower confidence bound
"""
return [50 + 0.5 * confidence, 50 - 0.5 * confidence]
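# Usage sketch: a 95% confidence level corresponds to the 97.5th and 2.5th
# percentiles.
print(_get_confidence_bounds(95))  # [97.5, 2.5]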
| 5,347,485 |
def zcml_strings(dir, domain="zope", site_zcml=None):
"""Retrieve all ZCML messages from `dir` that are in the `domain`."""
from zope.configuration import xmlconfig, config
# Load server-independent site config
context = config.ConfigurationMachine()
xmlconfig.registerCommonDirectives(context)
context.provideFeature("devmode")
context = xmlconfig.file(site_zcml, context=context, execute=False)
return context.i18n_strings.get(domain, {})
| 5,347,486 |
def const_p(a: C) -> Projector[C]:
"""
Make a projector that always returns the same still frame
"""
return lambda _: a
| 5,347,487 |
async def handle_all_log_servers(server_url_list, executor, args):
"""
    Asynchronous function that handles each CT log server: it enqueues a list of
    awaitable tasks, one per CT log server, and gives them to asyncio so that
    they run concurrently (green threads).
"""
async with aiohttp.ClientSession() as httpClientSession:
tasks = []
for server_url in server_url_list:
task = handle_log_server(httpClientSession, server_url, executor,
args)
tasks.append(task)
await asyncio.gather(*tasks, return_exceptions=True)
| 5,347,488 |
def get_s3_bucket(bucket_name, s3):
""""
Takes the s3 and bucket_name and returns s3 bucket
If does not exist, it will create bucket with permissions
"""
bucket_name = bucket_name.lower().replace('/','-')
bucket = s3.Bucket(bucket_name)
exists = True
try:
s3.meta.client.head_bucket(Bucket=bucket_name)
except ClientError as e:
# If a client error is thrown, then check that it was a 404 error.
# If it was a 404 error, then the bucket does not exist.
error_code = int(e.response['Error']['Code'])
if error_code == 404:
exists = False
if exists is False:
s3.create_bucket(Bucket=bucket_name, ACL='public-read')
# We need to set an S3 policy for our bucket to
# allow anyone read access to our bucket and files.
# If we do not set this policy, people will not be
# able to view our S3 static web site.
bucket_policy = s3.BucketPolicy(bucket_name)
policy_payload = {
"Version": "2012-10-17",
"Statement": [{
"Sid": "Allow Public Access to All Objects",
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::%s/*" % (bucket_name)
}]
}
# Add the policy to the bucket
bucket_policy.put(Policy=json.dumps(policy_payload))
# Make our new S3 bucket a static website
bucket_website = s3.BucketWebsite(bucket_name)
# Create the configuration for the website
website_configuration = {
'ErrorDocument': {'Key': 'error.html'},
'IndexDocument': {'Suffix': 'index.html'},
}
bucket_website.put(
WebsiteConfiguration=website_configuration
)
bucket = s3.Bucket(bucket_name)
return bucket
| 5,347,489 |
def rf_render_ascii(tile_col):
"""Render ASCII art of tile"""
return _apply_column_function('rf_render_ascii', tile_col)
| 5,347,490 |
def index():
""" Display productpage with normal user and test user buttons"""
global productpage
table = json2html.convert(json = json.dumps(productpage),
table_attributes="class=\"table table-condensed table-bordered table-hover\"")
return render_template('index.html', serviceTable=table)
| 5,347,491 |
def student_classes(id):
"""
    Show students registered to the class
* display list of all students (GET)
"""
template = "admin/class_students.html"
if not valid_integer(id):
return (
render_template(
"errors/custom.html", title="400", message="Id must be integer"
),
400,
)
school_class = dict_sql_query(
f"SELECT * FROM school_classes WHERE id={id}", fetchone=True
)
if not school_class:
return (
render_template(
"errors/custom.html", title="400", message="Class does not exist."
),
400,
)
# show students with class defined as this one
students = []
for student in dict_sql_query(
f"SELECT * FROM students WHERE class_id={school_class['id']}"
):
students.append(
{
"student": student,
"activity_name": dict_sql_query(
f"SELECT name FROM activities WHERE id={student['chosen_activity']}",
fetchone=True,
)["name"]
if student["chosen_activity"]
else "Ej valt",
}
)
return render_template(template, school_class=school_class, students=students)
| 5,347,492 |
def _get_service_handler(request, service):
"""Add the service handler to the HttpSession.
We use the django session object to store the service handler's
    representation of the remote service between sequential logic steps.
    This is done in order to improve user experience, as we avoid making
    multiple Capabilities requests (this is a time saver on servers that
    feature many layers).
"""
service_handler = get_service_handler(
service.base_url, service.proxy_base, service.type)
request.session[service.base_url] = service_handler
logger.debug("Added handler to the session")
return service_handler
| 5,347,493 |
def game(agent1f, agent2f):
"""Play the game.
`agent1f` is 'X' and `agent2f` is 'O'. `agent1f` hence begins the
game.
"""
board = [[None, None, None],
[None, None, None],
[None, None, None]]
agent1 = agent1f('X')
next(agent1) # Prime.
agent2 = agent2f('O')
next(agent2) # Prime.
turn = False
while not is_finished(board):
agent_turn = [agent1, agent2][turn]
symbol_turn = ['X', 'O'][turn]
move = agent_turn.send(board)
while not is_legal(move, board):
move = agent_turn.throw(IllegalMove)
place_move(board, move, symbol_turn)
winner = find_winner(board)
if winner is not None:
inform_game_result([agent1, agent2], winner)
return
turn = not turn
else:
# No more legal moves but nobody won.
inform_game_result([agent1, agent2], None)
| 5,347,494 |
def move_by_blurry(src_dir, blurry_dir, clear_dir, blurry_thr):
"""
    Move images into the blurry or clear directory based on their blurriness.
:return:
"""
mv_tasks = [move_img_by_blurry(im_name, blurry_dir, clear_dir, blurry_thr)
for im_name in os.listdir(src_dir) if im_name.endswith('.jpg')]
print('len of mv_tasks={}'.format(len(mv_tasks)))
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait(mv_tasks))
loop.close()
| 5,347,495 |
def f_score(overlap_count, gold_count, guess_count, f=1):
"""Compute the f1 score.
:param overlap_count: `int` The number of true positives.
:param gold_count: `int` The number of gold positives (tp + fn)
:param guess_count: `int` The number of predicted positives (tp + fp)
:param f: `int` The beta term to weight precision vs recall.
:returns: `float` The f score
"""
beta_sq = f*f
if guess_count == 0: return 0.0
p = precision(overlap_count, guess_count)
r = recall(overlap_count, gold_count)
if p == 0.0 or r == 0.0:
return 0.0
f = (1. + beta_sq) * (p * r) / (beta_sq * p + r)
return f
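# Usage sketch (relies on the module's precision/recall helpers): 8 true
# positives out of 10 gold and 12 predicted gives P=2/3, R=0.8, F1 of ~0.727.
print(f_score(overlap_count=8, gold_count=10, guess_count=12))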
| 5,347,496 |
def phi(n):
"""Calculate phi using euler's product formula."""
    assert math.sqrt(n) < primes[-1], "Not enough primes to deal with " + str(n)
# For details, check:
# http://en.wikipedia.org/wiki/Euler's_totient_function#Euler.27s_product_formula
prod = n
for p in primes:
if p > n:
break
if n % p == 0:
prod *= 1 - (1 / p)
return int(prod)
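# Usage sketch, assuming the module-level `primes` list covers the needed
# range: phi(10) counts 1, 3, 7 and 9 as coprime to 10.
print(phi(10))  # 4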
| 5,347,497 |
def test_get_events_no_events(mocker):
"""Unit test
Given
- get_events command
- command args
- command raw response
When
- mock the Client's token generation.
- mock the Client's get_events_request response for no events.
Then
- Validate the human readable
"""
mocker.patch.object(FireEyeClient, '_get_token', return_value='token')
client = Client(base_url="https://fireeye.cm.com/", username='user', password='pass', verify=False, proxy=False)
mocker.patch.object(FireEyeClient, 'get_events_request',
return_value=util_load_json('test_data/get_events_none.json'))
command_results = get_events(client=client, args={'end_time': '2020-05-19T23:00:00.000-00:00',
'duration': '48_hours', 'limit': '3'})
assert command_results.readable_output == 'No events in the given timeframe were found.'
| 5,347,498 |
def run_bert_pretrain(strategy, custom_callbacks=None):
"""Runs BERT pre-training."""
bert_config = configs.BertConfig.from_json_file(FLAGS.bert_config_file)
if not strategy:
raise ValueError('Distribution strategy is not specified.')
# Runs customized training loop.
    logging.info('Training using customized training loop TF 2.0 with distributed '
                 'strategy.')
performance.set_mixed_precision_policy(common_flags.dtype(),
use_experimental_api=False)
# Only when explicit_allreduce = True, post_allreduce_callbacks and
# allreduce_bytes_per_pack will take effect. optimizer.apply_gradients() no
# longer implicitly allreduce gradients, users manually allreduce gradient and
# pass the allreduced grads_and_vars to apply_gradients().
# With explicit_allreduce = True, clip_by_global_norm is moved to after
# allreduce.
return run_customized_training(
strategy,
bert_config,
FLAGS.init_checkpoint, # Used to initialize only the BERT submodel.
FLAGS.max_seq_length,
FLAGS.max_predictions_per_seq,
FLAGS.model_dir,
FLAGS.num_steps_per_epoch,
FLAGS.steps_per_loop,
FLAGS.num_train_epochs,
FLAGS.learning_rate,
FLAGS.warmup_steps,
FLAGS.end_lr,
FLAGS.optimizer_type,
FLAGS.input_files,
FLAGS.train_batch_size,
FLAGS.use_next_sentence_label,
FLAGS.train_summary_interval,
custom_callbacks=custom_callbacks,
explicit_allreduce=FLAGS.explicit_allreduce,
pre_allreduce_callbacks=[
model_training_utils.clip_by_global_norm_callback
],
allreduce_bytes_per_pack=FLAGS.allreduce_bytes_per_pack)
| 5,347,499 |