content (stringlengths 22–815k) | id (int64 0–4.91M)
---|---|
def test_quickstart_removes_git_files():
"""Quickstart removes git files after cloning
Check for the existence of any file that starts with `.git`.
Any matching files found are reported.
"""
import os
runner = CliRunner()
with runner.isolated_filesystem():
git_files = []
runner.invoke(cli, ["quickstart"])
for _, d, f in os.walk("quickstart"):
for name in d:
if name.startswith(".git"):
git_files.append(name)
for file in f:
if file.startswith(".git"):
git_files.append(file)
assert ".git" not in git_files
| 5,351,400 |
def pre_process_string_data(item: dict):
"""
remove extra whitespace, line breaks, and quotes from strings
:param item: dictionary with data for analysis
:return: cleaned item
"""
try:
result_item = {key: item[key] for key in KEYS + ['_id']}
for prop in result_item:
if type(result_item[prop]) is str and prop != '_id':
result_item[prop] = re.sub(' +', ' ', result_item[prop])
result_item[prop] = re.sub('\n', ' ', result_item[prop])
result_item[prop] = result_item[prop].strip().strip('"').strip("'").lower().strip()
return result_item
except KeyError:
logging.warning("Malformed entity with id %s", item['_id'])
return None
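# Hedged usage sketch for pre_process_string_data above (assumes KEYS is a
# module-level list of field names, e.g. KEYS = ['title']):
#   pre_process_string_data({'_id': 1, 'title': '  "Data   Science" '})
#   -> {'title': 'data science', '_id': 1}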
| 5,351,401 |
def check_configs(config_wildcards: Sequence = ("config.*yaml",)):
"""
- Check if config files exist
- Offer to use the config files that match the wildcards
- For config.yaml, check its contents against the defaults to make sure nothing is missing/wrong
:param config_wildcards:
:return:
"""
path = pathlib.Path(__file__).parent.absolute()
for config_wildcard in config_wildcards:
config = config_wildcard.replace("*", "")
# use config defaults if configs do not exist?
if not (path / config).exists():
answer = questionary.select(
f"{config} does not exist, do you want to use one of the following"
" (not recommended without inspection)?",
choices=[p.name for p in path.glob(config_wildcard)],
).ask()
subprocess.run(["cp", f"{path / answer}", f"{path / config}"])
# check contents of config.yaml WRT config.defaults.yaml
if config == "config.yaml":
with open(path / config.replace(".yaml", ".defaults.yaml")) as config_yaml:
config_defaults = yaml.load(config_yaml, Loader=yaml.FullLoader)
with open(path / config) as config_yaml:
config_wildcard = yaml.load(config_yaml, Loader=yaml.FullLoader)
deep_diff = DeepDiff(config_defaults, config_wildcard, ignore_order=True)
difference = {
k: v for k, v in deep_diff.items() if k in ("dictionary_item_removed",)
}
if len(difference) > 0:
log("config.yaml structure differs from config.defaults.yaml")
pprint(difference)
raise KeyError("Fix config.yaml before proceeding")
| 5,351,402 |
def log_ks_statistic(y_true, y_pred, experiment=None, channel_name='metric_charts', prefix=''):
"""Creates and logs KS statistics curve and KS statistics score to Neptune.
The Kolmogorov-Smirnov statistic chart can be calculated from the true positive rates (TPR) and true negative rates (TNR)
at each threshold and plotted as a chart.
The maximum distance from TPR to TNR can be treated as a performance metric.
Args:
y_true (array-like, shape (n_samples)): Ground truth (correct) target values.
y_pred (array-like, shape (n_samples, 2)): Predictions for classes 0 and 1 with values from 0 to 1.
experiment(`neptune.experiments.Experiment`): Neptune experiment. Default is None.
channel_name(str): name of the neptune channel. Default is 'metric_charts'.
prefix(str): Prefix that will be added before metric name when logged to Neptune.
Examples:
Train the model and make predictions on test::
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
X, y = make_classification(n_samples=2000)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
model = RandomForestClassifier()
model.fit(X_train, y_train)
y_test_pred = model.predict_proba(X_test)
Create and log KS statistics curve and KS statistics score to Neptune::
import neptune
from neptunecontrib.monitoring.metrics import log_ks_statistic
neptune.init()
with neptune.create_experiment():
log_ks_statistic(y_test, y_test_pred)
Check out this experiment https://ui.neptune.ai/o/neptune-ai/org/binary-classification-metrics/e/BIN-101/logs.
"""
assert len(y_pred.shape) == 2, 'y_pred needs to be (n_samples, 2), use expand_prediction helper to format it'
_exp = experiment if experiment else neptune
expect_not_a_run(_exp)
res = binary_ks_curve(y_true, y_pred[:, 1])
ks_stat = res[3]
_exp.log_metric(prefix + 'ks_statistic', ks_stat)
fig, ax = plt.subplots()
plt_metrics.plot_ks_statistic(y_true, y_pred, ax=ax)
send_figure(fig, channel_name=prefix + channel_name, experiment=_exp)
plt.close()
| 5,351,403 |
def load_profile_update_output(contents, filename):
"""
Decode the uploaded load profile
Raises TypeError if not a csv file
Saves dataframe to txt file within create_load_txt function
"""
if contents is not None:
#GLOBAL_CONTENTS = contents
# decode output from file upload
_, content_string = contents.split(',')
decoded_b64 = base64.b64decode(content_string)
# check that type csv
if filename.endswith('.csv'):
decoded_csv = io.StringIO(decoded_b64.decode('utf-8'))
# convert to pandas dataframe
data = pd.read_csv(decoded_csv)
# convert to txt
convert_load_profile.create_load_txt(data)
else:
raise TypeError('Load profile must be a csv file')
else:
pass
| 5,351,404 |
def create_snapshot(parent,src,dst):
"""
:param parent: path of the parent directory that contains the subvolumes
:param src: name of the source subvolume (relative to parent)
:param dst: name of the snapshot to create (relative to parent)
:return: None
"""
import btrfsutil
subv_src=os.path.join(parent, src)
subv_dst=os.path.join(parent, dst)
btrfsutil.create_snapshot(subv_src,subv_dst)
return
| 5,351,405 |
def remove_cmds_from_title(title):
"""
Function that removes the commands placed in the titles,
purely to keep the title objective
"""
arr = title.split()
output = " ".join(list(filter(lambda x: x[0] != "!", arr)))
return output
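# Hedged usage sketch for remove_cmds_from_title above (the "!" command prefix
# is taken from the filter in the function; the title itself is illustrative):
assert remove_cmds_from_title("!promote Big announcement") == "Big announcement"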
| 5,351,406 |
async def test_sign_in_failed(hass, config_entry, controller, caplog):
"""Test sign-in service logs error when not connected."""
await setup_component(hass, config_entry)
controller.sign_in.side_effect = CommandFailedError("", "Invalid credentials", 6)
await hass.services.async_call(
DOMAIN,
SERVICE_SIGN_IN,
{ATTR_USERNAME: "[email protected]", ATTR_PASSWORD: "password"},
blocking=True,
)
controller.sign_in.assert_called_once_with("[email protected]", "password")
assert "Sign in failed: Invalid credentials (6)" in caplog.text
| 5,351,407 |
def test_annot_io():
"""Test I/O from and to *.annot files"""
# copy necessary files from fsaverage to tempdir
tempdir = _TempDir()
subject = 'fsaverage'
label_src = os.path.join(subjects_dir, 'fsaverage', 'label')
surf_src = os.path.join(subjects_dir, 'fsaverage', 'surf')
label_dir = os.path.join(tempdir, subject, 'label')
surf_dir = os.path.join(tempdir, subject, 'surf')
os.makedirs(label_dir)
os.mkdir(surf_dir)
shutil.copy(os.path.join(label_src, 'lh.PALS_B12_Lobes.annot'), label_dir)
shutil.copy(os.path.join(label_src, 'rh.PALS_B12_Lobes.annot'), label_dir)
shutil.copy(os.path.join(surf_src, 'lh.white'), surf_dir)
shutil.copy(os.path.join(surf_src, 'rh.white'), surf_dir)
# read original labels
assert_raises(IOError, read_labels_from_annot, subject, 'PALS_B12_Lobesey',
subjects_dir=tempdir)
labels = read_labels_from_annot(subject, 'PALS_B12_Lobes',
subjects_dir=tempdir)
# test saving parcellation only covering one hemisphere
parc = [l for l in labels if l.name == 'LOBE.TEMPORAL-lh']
write_labels_to_annot(parc, subject, 'myparc', subjects_dir=tempdir)
parc1 = read_labels_from_annot(subject, 'myparc', subjects_dir=tempdir)
parc1 = [l for l in parc1 if not l.name.startswith('unknown')]
assert_equal(len(parc1), len(parc))
for l1, l in zip(parc1, parc):
assert_labels_equal(l1, l)
# test saving only one hemisphere
parc = [l for l in labels if l.name.startswith('LOBE')]
write_labels_to_annot(parc, subject, 'myparc2', hemi='lh',
subjects_dir=tempdir)
annot_fname = os.path.join(tempdir, subject, 'label', '%sh.myparc2.annot')
assert_true(os.path.isfile(annot_fname % 'l'))
assert_false(os.path.isfile(annot_fname % 'r'))
parc1 = read_labels_from_annot(subject, 'myparc2',
annot_fname=annot_fname % 'l',
subjects_dir=tempdir)
parc_lh = [l for l in parc if l.name.endswith('lh')]
for l1, l in zip(parc1, parc_lh):
assert_labels_equal(l1, l)
| 5,351,408 |
def load_config(file_path):
"""Loads the config file into a config-namedtuple
Parameters:
file_path (pathlib.Path):
takes a Path object for the config file. It does not correct any
relative path issues.
Returns:
(namedtuple -- config):
Contains two sub-structures (run, plot) that will return a
dictionary of configuration options. You can get your desired
config-dictionary via `config.run` or `config.plot`.
"""
with open(file_path) as f:
return config(**loads(f.read()))
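# Hedged usage sketch for load_config above (assumes `loads` is json.loads and
# `config` is a namedtuple with `run` and `plot` fields, and that a config.json
# file with matching top-level keys exists):
#   cfg = load_config(pathlib.Path("config.json"))
#   run_options = cfg.run    # dictionary of run configuration options
#   plot_options = cfg.plot  # dictionary of plot configuration options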
| 5,351,409 |
def main():
""" A simple example """
import torchvision.transforms as transforms
root = '../data/'
split = 'train'
dataset = Talk2Car(root, split, './utils/vocabulary.txt', transforms.ToTensor())
print('=> Load a sample')
sample = dataset.__getitem__(15)
img = np.transpose(sample['image'].numpy(), (1,2,0))
command = dataset.convert_command_to_text(sample['command'])
print('Command in human readable text: %s' %(command))
import matplotlib.pyplot as plt
import matplotlib.patches as patches
print('=> Plot image with bounding box around referred object')
fig, ax = plt.subplots(1)
ax.imshow(img)
xl, yb, xr, yt = sample['gt_bbox_lbrt'].tolist()
w, h = xr - xl, yt - yb
rect = patches.Rectangle((xl, yb), w, h, fill = False, edgecolor = 'r')
ax.add_patch(rect)
plt.axis('off')
plt.show()
print('=> Plot image with region proposals (red), gt bbox (blue)')
fig, ax = plt.subplots(1)
ax.imshow(img)
for i in range(sample['rpn_bbox_lbrt'].size(0)):
bbox = sample['rpn_bbox_lbrt'][i].tolist()
xl, yb, xr, yt = bbox
w, h = xr - xl, yt - yb
rect = patches.Rectangle((xl, yb), w, h, fill = False, edgecolor = 'r')
ax.add_patch(rect)
gt_box = (sample['rpn_bbox_lbrt'][sample['rpn_gt'].item()]).tolist()
xl, yb, xr, yt = gt_box
w, h = xr - xl, yt - yb
rect = patches.Rectangle((xl, yb), w, h, fill = False, edgecolor = 'b')
ax.add_patch(rect)
plt.axis('off')
plt.tight_layout()
plt.show()
| 5,351,410 |
def describe_maintenance_windows(Filters=None, MaxResults=None, NextToken=None):
"""
Retrieves the maintenance windows in an AWS account.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_maintenance_windows(
Filters=[
{
'Key': 'string',
'Values': [
'string',
]
},
],
MaxResults=123,
NextToken='string'
)
:type Filters: list
:param Filters: Optional filters used to narrow down the scope of the returned maintenance windows. Supported filter keys are Name and Enabled .\n\n(dict) --Filter used in the request. Supported filter keys are Name and Enabled.\n\nKey (string) --The name of the filter.\n\nValues (list) --The filter values.\n\n(string) --\n\n\n\n\n\n
:type MaxResults: integer
:param MaxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.
:type NextToken: string
:param NextToken: The token for the next set of items to return. (You received this token from a previous call.)
:rtype: dict
ReturnsResponse Syntax
{
'WindowIdentities': [
{
'WindowId': 'string',
'Name': 'string',
'Description': 'string',
'Enabled': True|False,
'Duration': 123,
'Cutoff': 123,
'Schedule': 'string',
'ScheduleTimezone': 'string',
'EndDate': 'string',
'StartDate': 'string',
'NextExecutionTime': 'string'
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
WindowIdentities (list) --
Information about the maintenance windows.
(dict) --
Information about the maintenance window.
WindowId (string) --
The ID of the maintenance window.
Name (string) --
The name of the maintenance window.
Description (string) --
A description of the maintenance window.
Enabled (boolean) --
Indicates whether the maintenance window is enabled.
Duration (integer) --
The duration of the maintenance window in hours.
Cutoff (integer) --
The number of hours before the end of the maintenance window that Systems Manager stops scheduling new tasks for execution.
Schedule (string) --
The schedule of the maintenance window in the form of a cron or rate expression.
ScheduleTimezone (string) --
The time zone that the scheduled maintenance window executions are based on, in Internet Assigned Numbers Authority (IANA) format.
EndDate (string) --
The date and time, in ISO-8601 Extended format, for when the maintenance window is scheduled to become inactive.
StartDate (string) --
The date and time, in ISO-8601 Extended format, for when the maintenance window is scheduled to become active.
NextExecutionTime (string) --
The next time the maintenance window will actually run, taking into account any specified times for the maintenance window to become active or inactive.
NextToken (string) --
The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.
Exceptions
SSM.Client.exceptions.InternalServerError
:return: {
'WindowIdentities': [
{
'WindowId': 'string',
'Name': 'string',
'Description': 'string',
'Enabled': True|False,
'Duration': 123,
'Cutoff': 123,
'Schedule': 'string',
'ScheduleTimezone': 'string',
'EndDate': 'string',
'StartDate': 'string',
'NextExecutionTime': 'string'
},
],
'NextToken': 'string'
}
:returns:
SSM.Client.exceptions.InternalServerError
"""
pass
| 5,351,411 |
def parse_version_number(raw_version_number):
# type: (str) -> Tuple[int, int, int]
"""
Parse a valid "INT.INT.INT" string, or raise an
Exception. Exceptions are handled by caller and
mean invalid version number.
"""
converted_version_number = [int(part) for part in raw_version_number.split(".")]
if len(converted_version_number) != 3:
raise ValueError(
"Invalid version number %r, parsed as %r",
raw_version_number,
converted_version_number,
)
# Make mypy happy
version_number = (
converted_version_number[0],
converted_version_number[1],
converted_version_number[2],
)
return version_number
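# Hedged usage sketch for parse_version_number above:
assert parse_version_number("1.12.3") == (1, 12, 3)
try:
    parse_version_number("1.12")  # not "INT.INT.INT"
except ValueError:
    pass  # callers treat the exception as "invalid version number"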
| 5,351,412 |
def get_error_directory_does_not_exists(dir_kind):
"""dir kind = [dir, file ,url]"""
return f"Error: Directory with {dir_kind} does not exist:"
| 5,351,413 |
def field_interact(compute, style=None, title="", figsize=(1.5, 1), **kwargs):
"""Field computed on-the-fly controlled by interactive sliders."""
kw = lambda k: pop_style_with_fallback(k, style, kwargs)
title = dash(kw("title"), title)
ctrls = compute.controls.copy() # gets modified
output = wg.Output()
@captured_fig(output, title, figsize=figsize, rel=True)
def plot(fig, ax, newfig, **kw):
# Ignore warnings due to computing and plotting contour/nan
with warnings.catch_warnings(), \
np.errstate(divide="ignore", invalid="ignore"):
warnings.filterwarnings(
"ignore", category=UserWarning, module="matplotlib.contour")
Z = compute(**kw)
field(ax, Z, style, colorbar=newfig, **kwargs)
if newfig:
fig.tight_layout()
# Add crosshairs
if "x" in kw and "y" in kw:
x, y = model.sub2xy_stretched(kw["x"], kw["y"])
d = dict(c="k", ls="--", lw=1)
ax.axhline(y, **d)
ax.axvline(x, **d)
# Make widget/interactive plot
linked = wg.interactive(plot, **ctrls)
*ww, _ = linked.children
# Adjust individual controls -- use border="solid" to debug
for w in ww:
if "Slider" in str(type(w)):
w.layout.width = "16em"
w.continuous_update = False # => faster
w.style.description_width = "2em"
if w.description == "y":
w.orientation = "vertical"
w.layout.width = "2em"
elif "Dropdown" in str(type(w)):
w.layout.width = 'max-content'
w.style.description_width = "max-content"
# Make layout -- Use flexboxes to scale automatically
# (which I did not seem to get from AppLayout or TwoByTwoLayout)
V, H = wg.VBox, wg.HBox
try:
# Fancy layout
c12, c34, cX, cY = V(ww[:2]), V(ww[2:4]), ww[4], ww[5]
except IndexError:
# Fallback layout -- works for any number of controls
cpanel = V(ww, layout=dict(align_items='center'))
layout = H([output, cpanel])
else:
# Complete the fancy layout
cY = V([cY], layout={"justify_content": "flex-end"})
layout = V([H([output, cY]), H([c12, c34, cX])])
# Display
display(layout)
plot(**{w.description: w.value for w in ww})
| 5,351,414 |
def var_text(vname, iotype, variable):
"""
Extract info from variable for vname of iotype
and return info as HTML string.
"""
if iotype == 'read':
txt = '<p><i>Input Variable Name:</i> <b>{}</b>'.format(vname)
if 'required' in variable:
txt += '<br><b><i>Required Input Variable</i></b>'
else:
txt = '<p><i>Output Variable Name:</i> <b>{}</b>'.format(vname)
txt += '<br><i>Description:</i> {}'.format(variable['desc'])
txt += '<br><i>Datatype:</i> {}'.format(variable['type'])
if iotype == 'read':
txt += '<br><i>Availability:</i> {}'.format(variable['availability'])
txt += '<br><i>IRS Form Location:</i>'
formdict = variable['form']
for yrange in sorted(formdict.keys()):
txt += '<br>{}: {}'.format(yrange, formdict[yrange])
txt += '</p>'
return txt
| 5,351,415 |
def test_logout_view_request_timeout(hass, cloud_client):
"""Test timeout while logging out."""
cloud = hass.data['cloud'] = MagicMock()
cloud.logout.side_effect = asyncio.TimeoutError
req = yield from cloud_client.post('/api/cloud/logout')
assert req.status == 502
| 5,351,416 |
def line_search_reset(binary_img, left_lane, right_line):
"""
#---------------------
# After applying calibration, thresholding, and a perspective transform to a road image,
# I have a binary image where the lane lines stand out clearly.
# However, I still need to decide explicitly which pixels are part of the lines
# and which belong to the left line and which belong to the right line.
#
# This lane line search is done using histogram and sliding window
#
# The sliding window implementation is based on lecture videos.
#
# This function searches lines from scratch, i.e. without using info from previous lines.
# However, the search is not entirely a blind search, since I am using histogram information.
#
# Use Cases:
# - Use this function on the first frame
# - Use when lines are lost or not detected in previous frames
#
"""
# I first take a histogram along all the columns in the lower half of the image
histogram = np.sum(binary_img[int(binary_img.shape[0] / 2):, :], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_img, binary_img, binary_img)) * 255
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0] / 2)
leftX_base = np.argmax(histogram[:midpoint])
rightX_base = np.argmax(histogram[midpoint:]) + midpoint
# Choose the number of sliding windows
num_windows = 9
# Set height of windows
window_height = np.int(binary_img.shape[0] / num_windows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_img.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
current_leftX = leftX_base
current_rightX = rightX_base
# Set minimum number of pixels found to recenter window
min_num_pixel = 50
# Create empty lists to receive left and right lane pixel indices
win_left_lane = []
win_right_lane = []
window_margin = left_lane.window_margin
# Step through the windows one by one
for window in range(num_windows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_img.shape[0] - (window + 1) * window_height
win_y_high = binary_img.shape[0] - window * window_height
win_leftx_min = current_leftX - window_margin
win_leftx_max = current_leftX + window_margin
win_rightx_min = current_rightX - window_margin
win_rightx_max = current_rightX + window_margin
# Draw the windows on the visualization image
cv2.rectangle(out_img, (win_leftx_min, win_y_low), (win_leftx_max, win_y_high), (0, 255, 0), 2)
cv2.rectangle(out_img, (win_rightx_min, win_y_low), (win_rightx_max, win_y_high), (0, 255, 0), 2)
# Identify the nonzero pixels in x and y within the window
left_window_inds = ((nonzeroy >= win_y_low) & (nonzeroy <= win_y_high) & (nonzerox >= win_leftx_min) & (
nonzerox <= win_leftx_max)).nonzero()[0]
right_window_inds = ((nonzeroy >= win_y_low) & (nonzeroy <= win_y_high) & (nonzerox >= win_rightx_min) & (
nonzerox <= win_rightx_max)).nonzero()[0]
# Append these indices to the lists
win_left_lane.append(left_window_inds)
win_right_lane.append(right_window_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(left_window_inds) > min_num_pixel:
current_leftX = np.int(np.mean(nonzerox[left_window_inds]))
if len(right_window_inds) > min_num_pixel:
current_rightX = np.int(np.mean(nonzerox[right_window_inds]))
# Concatenate the arrays of indices
win_left_lane = np.concatenate(win_left_lane)
win_right_lane = np.concatenate(win_right_lane)
# Extract left and right line pixel positions
leftx= nonzerox[win_left_lane]
lefty = nonzeroy[win_left_lane]
rightx = nonzerox[win_right_lane]
righty = nonzeroy[win_right_lane]
out_img[lefty, leftx] = [255, 0, 0]
out_img[righty, rightx] = [0, 0, 255]
# Fit a second order polynomial to each
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
left_lane.current_fit = left_fit
right_line.current_fit = right_fit
# Generate x and y values for plotting
ploty = np.linspace(0, binary_img.shape[0] - 1, binary_img.shape[0])
# ax^2 + bx + c
left_plotx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
right_plotx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
left_lane.prevx.append(left_plotx)
right_line.prevx.append(right_plotx)
if len(left_lane.prevx) > 10:
left_avg_line = smoothing(left_lane.prevx, 10)
left_avg_fit = np.polyfit(ploty, left_avg_line, 2)
left_fit_plotx = left_avg_fit[0] * ploty ** 2 + left_avg_fit[1] * ploty + left_avg_fit[2]
left_lane.current_fit = left_avg_fit
left_lane.allx, left_lane.ally = left_fit_plotx, ploty
else:
left_lane.current_fit = left_fit
left_lane.allx, left_lane.ally = left_plotx, ploty
if len(right_line.prevx) > 10:
right_avg_line = smoothing(right_line.prevx, 10)
right_avg_fit = np.polyfit(ploty, right_avg_line, 2)
right_fit_plotx = right_avg_fit[0] * ploty ** 2 + right_avg_fit[1] * ploty + right_avg_fit[2]
right_line.current_fit = right_avg_fit
right_line.allx, right_line.ally = right_fit_plotx, ploty
else:
right_line.current_fit = right_fit
right_line.allx, right_line.ally = right_plotx, ploty
left_lane.startx, right_line.startx = left_lane.allx[len(left_lane.allx)-1], right_line.allx[len(right_line.allx)-1]
left_lane.endx, right_line.endx = left_lane.allx[0], right_line.allx[0]
# Set detected=True for both lines
left_lane.detected, right_line.detected = True, True
measure_curvature(left_lane, right_line)
return out_img
| 5,351,417 |
def get_primary_language(current_site=None):
"""Fetch the first language of the current site settings."""
current_site = current_site or Site.objects.get_current()
return get_languages()[current_site.id][0]['code']
| 5,351,418 |
def test_atoms_read(batch=50):
"""test on randomly sampled entries"""
shuffle(raw_entries)
for e in raw_entries[:batch]:
print(e["aurl"])
entry = Entry(**e)
# Read the CONTCAR.relax, which should always present
atoms = entry.atoms()
assert atoms is not None
| 5,351,419 |
def plot_Xy(X_in, y_in, title="2D Xs with y color coding",
s=25):
"""Plot the Xs in x0,x1 space and color-code by the ys.
"""
# set figure limits
xmin = -1.0
xmax = 1.0
ymin = -1.0
ymax = 1.0
plt.title(title)
axes = plt.gca()
axes.set_xlim([xmin, xmax])
axes.set_ylim([ymin, ymax])
plt.scatter(X_in[:, 0], X_in[:, 1],
c=y_in.squeeze(), s=s, cmap=plt.cm.Spectral)
| 5,351,420 |
def download_s3_file(bucket_name, file_path, file_name):
"""
Download an S3 file; if the bucket does not exist, create it first.
"""
bucket = s3.Bucket(bucket_name)
try:
s3.meta.client.head_bucket(Bucket=bucket_name)
except ClientError:
print("The log state file bucket "+ bucket_name +" does not exist.")
s3.BucketVersioning(bucket_name).enable()
try:
bucket.download_file(file_name, file_path + file_name)
except ClientError:
print("The log state file "+ file_name +" does not exist.")
| 5,351,421 |
def get_Theta_ref_cnd_H(Theta_sur_f_hex_H):
"""(23)
Args:
Theta_sur_f_hex_H: surface temperature of the indoor unit heat exchanger during heating (℃)
Returns:
condensing temperature of the refrigerant during heating (℃)
"""
Theta_ref_cnd_H = Theta_sur_f_hex_H
if Theta_ref_cnd_H > 65:
Theta_ref_cnd_H = 65
return Theta_ref_cnd_H
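# Hedged usage sketch for get_Theta_ref_cnd_H above (temperatures are illustrative):
assert get_Theta_ref_cnd_H(42.0) == 42.0  # below the cap: the surface temperature is used directly
assert get_Theta_ref_cnd_H(70.0) == 65.0  # above the cap: the condensing temperature is clipped to 65 ℃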
| 5,351,422 |
def test_bad_refund_amount(amount):
""" Test that an invalid refund amount raises an exception"""
enrollment = BootcampRunEnrollmentFactory.create()
with pytest.raises(EcommerceException) as exc:
create_refund_order(
user=enrollment.user, bootcamp_run=enrollment.bootcamp_run, amount=amount
)
assert exc.value.args[0] == "Amount to refund must be greater than zero"
| 5,351,423 |
def get_tool_info(arduino_info, tool_name):
"""."""
tool_info = {}
has_tool = False
sel_pkg = arduino_info['selected'].get('package')
inst_pkgs_info = arduino_info.get('installed_packages', {})
inst_pkg_names = inst_pkgs_info.get('names', [])
pkg_info = get_package_info(inst_pkgs_info, sel_pkg)
pkg_path = pkg_info.get('path', '')
tools_path = os.path.join(pkg_path, 'tools')
tool_path = os.path.join(tools_path, tool_name)
if os.path.isdir(tool_path):
has_tool = True
else:
for pkg_name in inst_pkg_names:
pkg_info = get_package_info(inst_pkgs_info, pkg_name)
pkg_path = pkg_info.get('path', '')
tools_path = os.path.join(pkg_path, 'tools')
tool_path = os.path.join(tools_path, tool_name)
if os.path.isdir(tool_path):
has_tool = True
break
has_bin = False
if has_tool:
sub_paths = glob.glob(tool_path + '/*')[::-1]
for sub_path in sub_paths:
if os.path.isfile(sub_path):
has_bin = True
break
if not has_bin:
for sub_path in sub_paths:
bin_path = os.path.join(sub_path, 'bin')
if os.path.isdir(bin_path):
has_bin = True
else:
s_sub_paths = glob.glob(sub_path + '/*')
for s_sub_path in s_sub_paths:
if os.path.isfile(s_sub_path):
has_bin = True
break
if has_bin:
tool_path = sub_path
break
if not has_bin:
tool_path = ''
avial_pkgs_info = arduino_info.get('packages', {})
avail_pkg_names = avial_pkgs_info.get('names', [])
for pkg_name in avail_pkg_names:
pkg_info = get_package_info(avial_pkgs_info, pkg_name)
tools_info = pkg_info.get('tools', {})
tool_names = tools_info.get('names', [])
if tool_name in tool_names:
tool_info = tools_info.get(tool_name)
break
tool_info['name'] = tool_name
tool_info['path'] = tool_path
return tool_info
| 5,351,424 |
def hpat_pandas_series_shape(self):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.shape
Examples
--------
.. literalinclude:: ../../../examples/series/series_shape.py
:language: python
:lines: 27-
:caption: Return a tuple of the shape of the underlying data.
:name: ex_series_shape
.. command-output:: python ./series/series_shape.py
:cwd: ../../../examples
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series attribute :attr:`pandas.Series.shape` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shape1
"""
_func_name = 'Attribute shape.'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
def hpat_pandas_series_shape_impl(self):
return self._data.shape
return hpat_pandas_series_shape_impl
| 5,351,425 |
def WarnAndCopyFlag(old_name, new_name):
"""Copy a value from an old flag to a new one, warning the user.
Args:
old_name: old name of flag.
new_name: new name of flag.
"""
if FLAGS[old_name].present:
logging.warning(
'Flag --%s is deprecated and will be removed. Please '
'switch to --%s.', old_name, new_name)
if not FLAGS[new_name].present:
FLAGS[new_name].value = FLAGS[old_name].value
# Mark the new flag as present so we'll print it out in our list
# of flag values.
FLAGS[new_name].present = True
else:
logging.warning('Ignoring legacy flag %s because new flag %s is present.',
old_name, new_name)
# We keep the old flag around so that providers that haven't been
# updated yet will continue to work.
| 5,351,426 |
def download_apps(artifacts_url, appstack):
"""
Downloads applications artifacts from specified source. Url should include '{name}'
parameter like here: http://artifacts.com/download/{name}
"""
if not validators.url(artifacts_url):
_log.error('Value %s is not valid Url.', artifacts_url)
raise ApployerArgumentError('Provided invalid url')
_download_artifacts_from_url(artifacts_url, appstack)
| 5,351,427 |
async def port_utilization_range(
port_id: str, direction: str, limit: int, start: str, granularity: int, end=None,
):
"""Get port utilization by date range."""
async with Influx("telegraf", granularity=granularity) as db:
q = (
db.SELECT(f"derivative(max(bytes{direction.title()}), 1s) * 8")
.FROM("interfaces")
.BETWEEN(start, end)
.WHERE(port_id=port_id)
.GROUP("port_id", "participant_id")
.FILL("none")
.LIMIT(limit)
)
return await q.query()
| 5,351,428 |
def test_md018_bad_multiple_within_paragraph_separated_collapsed_image_multi():
"""
Test to make sure we get the expected behavior after scanning a good file from the
test/resources/rules/md018 directory that has multiple possible atx headings within
a single paragraph.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"--disable-rules",
"md020",
"scan",
"test/resources/rules/md018/multiple_within_paragraph_separated_collapsed_image_multi.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md018/multiple_within_paragraph_separated_collapsed_image_multi.md:4:3: "
+ "MD018: No space present after the hash character on a possible Atx Heading. (no-missing-space-atx)\n"
)
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
| 5,351,429 |
def composer_includes(context):
"""
Include the composer JS and CSS files in a page if the user has permission.
"""
if context.get('can_compose_permission', False):
url = settings.STATIC_URL
url += '' if url[-1] == '/' else '/'
js = '<script type="text/javascript" src="%sjs/composer.min.js"></script>' % url
css = '<link rel="stylesheet" type="text/css" href="%scss/composer.css">' % url
return js + css
return ''
| 5,351,430 |
def craft(crafter, recipe_name, *inputs, raise_exception=False, **kwargs):
"""
Access function. Craft a given recipe from a source recipe module. A
recipe module is a Python module containing recipe classes. Note that this
requires `settings.CRAFT_RECIPE_MODULES` to be added to a list of one or
more python-paths to modules holding Recipe-classes.
Args:
crafter (Object): The one doing the crafting.
recipe_name (str): The `CraftRecipe.name` to use. This uses fuzzy-matching
if the result is unique.
*inputs: Suitable ingredients and/or tools (Objects) to use in the crafting.
raise_exception (bool, optional): If crafting failed for whatever
reason, raise `CraftingError`. The user will still be informed by the
recipe.
**kwargs: Optional kwargs to pass into the recipe (will passed into
recipe.craft).
Returns:
list: Crafted objects, if any.
Raises:
CraftingError: If `raise_exception` is True and crafting failed to
produce an output. KeyError: If `recipe_name` failed to find a
matching recipe class (or the hit was not precise enough.)
Notes:
If no recipe_module is given, will look for a list `settings.CRAFT_RECIPE_MODULES` and
lastly fall back to the example module `"evennia.contrib."`
"""
# delayed loading/caching of recipes
_load_recipes()
RecipeClass = _RECIPE_CLASSES.get(recipe_name, None)
if not RecipeClass:
# try a startswith fuzzy match
matches = [key for key in _RECIPE_CLASSES if key.startswith(recipe_name)]
if not matches:
# try in-match
matches = [key for key in _RECIPE_CLASSES if recipe_name in key]
if len(matches) == 1:
RecipeClass = _RECIPE_CLASSES[matches[0]]
if not RecipeClass:
raise KeyError(
f"No recipe in settings.CRAFT_RECIPE_MODULES has a name matching {recipe_name}"
)
recipe = RecipeClass(crafter, *inputs, **kwargs)
return recipe.craft(raise_exception=raise_exception)
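# Hedged usage sketch for craft above (the crafter, the ingredient and the
# "snowball" recipe name are hypothetical; a matching recipe class must be
# registered via settings.CRAFT_RECIPE_MODULES):
#   objects = craft(crafter, "snowball", snow)
#   `objects` is a list of crafted objects, or [] if the recipe produced nothing.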
| 5,351,431 |
def load_data(
data,
*,
keys: Iterable[Union[str, int]] = (0,),
unique_keys: bool = False,
multiple_values: bool = False,
unique_values: bool = False,
**kwargs,
) -> Union[List[Any], Dict[Any, Union[Any, List[Any]]]]:
"""Load data.
If no values are provided, then return a list from keys.
If values are provided, then return a dictionary of keys/values.
Args:
data (str): File or buffer.
See Pandas 'filepath_or_buffer' option from 'read_csv()'.
Kwargs:
keys (Iterable[str|int]): Columns to use as dictionary keys.
Multiple keys are stored as tuples in same order as given.
If str, then it corresponds to 'headers' names.
If int, then it corresponds to column indices.
unique_keys (bool): Control if keys can be repeated or not.
Only applies if 'values' is None.
multiple_values (bool): Specify if values consist of single or multiple
elements. For multi-value case, values are placed in an iterable
container. For single-value case, the value is used as-is.
Only applies if 'values' is not None.
unique_values (bool): Control if values can be repeated or not.
Only applies if 'multiple_values' is True.
Kwargs: Options forwarded to 'iload_data()'.
"""
if kwargs.get('values') is None:
if unique_keys:
# NOTE: Convert to a list because JSON does not serializes sets.
_data = list(set(iload_data(data, keys=keys, **kwargs)))
else:
_data = list(iload_data(data, keys=keys, **kwargs))
elif multiple_values:
if unique_values:
_data = collections.defaultdict(list)
for k, v in iload_data(data, keys=keys, **kwargs):
if v not in _data[k]:
_data[k].append(v)
else:
_data = collections.defaultdict(list)
for k, v in iload_data(data, keys=keys, **kwargs):
_data[k].append(v)
else:
# Consider the value of the first appearance of a key.
_data = {}
for k, v in iload_data(data, keys=keys, **kwargs):
if k not in _data:
_data[k] = v
return _data
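# Hedged usage sketch for load_data above (the CSV layout is hypothetical and the
# `values` option is forwarded to iload_data through **kwargs):
#   Given items.csv with rows "1,foo" and "2,foo":
#   load_data("items.csv", keys=(0,))                                    -> [1, 2]
#   load_data("items.csv", keys=(1,), values=(0,))                       -> {'foo': 1}
#   load_data("items.csv", keys=(1,), values=(0,), multiple_values=True) -> {'foo': [1, 2]}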
| 5,351,432 |
async def test_async_state_transit():
"""Test async state transit contextmanager."""
state = AsyncState()
state.set(None)
with state.transit(initial=1, success=2, fail=3):
assert state.get() == 1
assert state.get() == 2
state.set(None)
with suppress(ValueError):
with state.transit(initial=1, success=2, fail=3):
assert state.get() == 1
raise ValueError()
assert state.get() == 3
| 5,351,433 |
def get_avg_wind_speed(data):
"""this function gets the average wind speeds for each point in the fetched data"""
wind_speed_history = []
for point in data:
this_point_wind_speed = []
for year_reading in point:
hourly = []
for hour in year_reading['weather'][0]['hourly']:
hourly.append(float(hour['windspeedKmph']))
this_point_wind_speed.append(float(np.average(hourly)))
wind_speed_history.append(np.flip(this_point_wind_speed))
return wind_speed_history
| 5,351,434 |
def publish_collection(collection_path, api, wait, timeout):
"""Publish an Ansible collection tarball into an Ansible Galaxy server.
:param collection_path: The path to the collection tarball to publish.
:param api: A GalaxyAPI to publish the collection to.
:param wait: Whether to wait until the import process is complete.
:param timeout: The time in seconds to wait for the import process to finish, 0 is indefinite.
"""
import_uri = api.publish_collection(collection_path)
if wait:
# Galaxy returns a url fragment which differs between v2 and v3. The second to last entry is
# always the task_id, though.
# v2: {"task": "https://galaxy-dev.ansible.com/api/v2/collection-imports/35573/"}
# v3: {"task": "/api/automation-hub/v3/imports/collections/838d1308-a8f4-402c-95cb-7823f3806cd8/"}
task_id = None
for path_segment in reversed(import_uri.split('/')):
if path_segment:
task_id = path_segment
break
if not task_id:
raise AnsibleError("Publishing the collection did not return valid task info. Cannot wait for task status. Returned task info: '%s'" % import_uri)
display.display("Collection has been published to the Galaxy server %s %s" % (api.name, api.api_server))
with _display_progress():
api.wait_import_task(task_id, timeout)
display.display("Collection has been successfully published and imported to the Galaxy server %s %s"
% (api.name, api.api_server))
else:
display.display("Collection has been pushed to the Galaxy server %s %s, not waiting until import has "
"completed due to --no-wait being set. Import task results can be found at %s"
% (api.name, api.api_server, import_uri))
| 5,351,435 |
def get_uris_of_class(repository: str, endpoint: str, sparql_file: str, class_name: str, endpoint_type: str,
limit: int = 1000) -> List[URIRef]:
"""
Returns the list of uris of type class_name
:param repository: The repository containing the RDF data
:param endpoint: The SPARQL endpoint
:param sparql_file: The file containing the SPARQL query
:param class_name: The class_name to search
:param endpoint_type: GRAPHDB or VIRTUOSO (to change the way the endpoint is called)
:param limit: The sparql query limit
:return: The list of uris of type class_name
"""
uri_list = []
uris_of_class_sparql_query = open(sparql_file).read()
uris_of_class_template = Template(uris_of_class_sparql_query).substitute(class_name=class_name)
uris_of_class_template = Template(uris_of_class_template + " limit $limit offset $offset ")
for uri in get_sparql_results(uris_of_class_template, "uri", endpoint, repository,
endpoint_type, limit):
uri_list.append(uri)
if len(uri_list) % 1000 == 0:
print(len(uri_list))
return uri_list
| 5,351,436 |
def nspath_eval(xpath: str) -> str:
"""
Return an etree friendly xpath based expanding namespace
into namespace URIs
:param xpath: xpath string with namespace prefixes
:returns: etree friendly xpath
"""
out = []
for chunks in xpath.split('/'):
namespace, element = chunks.split(':')
out.append('{{{}}}{}'.format(NAMESPACES[namespace], element))
return '/'.join(out)
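# Hedged usage sketch for nspath_eval above (NAMESPACES is assumed to be a
# module-level dict mapping prefixes to namespace URIs):
#   With NAMESPACES = {'csw': 'http://www.opengis.net/cat/csw/2.0.2'}:
#   nspath_eval('csw:Record/csw:title')
#   -> '{http://www.opengis.net/cat/csw/2.0.2}Record/{http://www.opengis.net/cat/csw/2.0.2}title'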
| 5,351,437 |
def basic_streamalert_config():
"""Generate basic StreamAlert configuration dictionary."""
return {
'global': {
'account': {
'aws_account_id': '123456789123',
'kms_key_alias': 'stream_alert_secrets',
'prefix': 'unit-testing',
'region': 'us-west-2'
},
'terraform': {
'tfstate_bucket': 'unit-testing.streamalert.terraform.state',
'tfstate_s3_key': 'stream_alert_state/terraform.tfstate',
'tfvars': 'terraform.tfvars'
},
'infrastructure': {
'monitoring': {
'create_sns_topic': True,
'metric_alarms': {
'rule_processor': {
'Aggregate Unit Testing Failed Parses Alarm': {
'alarm_description': '',
'comparison_operator': 'GreaterThanOrEqualToThreshold',
'evaluation_periods': 1,
'metric_name': 'RuleProcessor-FailedParses',
'period': 300,
'statistic': 'Sum',
'threshold': 1.0
}
}
}
}
}
},
'lambda': {
'alert_processor_config': {
'handler': 'stream_alert.alert_processor.main.handler',
'source_bucket': 'unit-testing.streamalert.source',
'source_current_hash': '<auto_generated>',
'source_object_key': '<auto_generated>',
'third_party_libraries': []
},
'rule_processor_config': {
'handler': 'stream_alert.rule_processor.main.handler',
'source_bucket': 'unit-testing.streamalert.source',
'source_current_hash': '<auto_generated>',
'source_object_key': '<auto_generated>',
'third_party_libraries': [
'jsonpath_rw',
'netaddr'
]
},
'athena_partition_refresh_config': {
'current_version': '$LATEST',
'enable_metrics': False,
'enabled': True,
'handler': 'main.handler',
'memory': 128,
'partitioning': {
'firehose': {},
'normal': {
'unit-testing.streamalerts': 'alerts'
}
},
'source_bucket': 'unit-testing.streamalert.source',
'source_current_hash': '<auto_generated>',
'source_object_key': '<auto_generated>',
'third_party_libraries': [
'backoff'
],
'timeout': 60
},
},
'clusters': {
'prod': {
'id': 'prod',
'modules': {
'cloudwatch_monitoring': {
'enabled': True
},
'kinesis': {
'firehose': {
'enabled': True,
's3_bucket_suffix': 'streamalert.results'
},
'streams': {
'retention': 24,
'shards': 1
}
},
'kinesis_events': {
'enabled': True
},
'stream_alert': {
'alert_processor': {
'current_version': '$LATEST',
'memory': 128,
'timeout': 10
},
'rule_processor': {
'current_version': '$LATEST',
"enable_metrics": True,
'memory': 128,
'metric_alarms': {
'Prod Unit Testing Failed Parses Alarm': {
'alarm_description': '',
'comparison_operator': 'GreaterThanOrEqualToThreshold',
'evaluation_periods': 1,
'metric_name': 'RuleProcessor-FailedParses-PROD',
'period': 300,
'statistic': 'Sum',
'threshold': 1.0
}
},
'timeout': 10
}
}
},
'outputs': {
'kinesis': [
'username',
'access_key_id',
'secret_key'
]
},
'region': 'us-east-1'
}
}
}
| 5,351,438 |
def _read_elastic_moduli(outfilename):
"""
Read elastic modulus matrix from a completed GULP job
:param outfilename: Path of the stdout from the GULP job
:type outfilename: str
:returns: 6x6 Elastic modulus matrix in GPa
"""
outfile = open(outfilename,'r')
moduli_array = []
while True:
oneline = outfile.readline()
if not oneline: # break at EOF
break
if 'Elastic Constant Matrix' in oneline:
moduli = np.zeros((6,6))
dummyline = outfile.readline()
dummyline = outfile.readline()
dummyline = outfile.readline()
dummyline = outfile.readline()
for i in range(6):
modline = outfile.readline().strip()
e1, e2, e3, e4, e5, e6 = modline[3:13], modline[13:23], modline[23:33], modline[33:43], modline[43:53], modline[53:63]
modarray = [e1,e2,e3,e4,e5,e6]
float_modarray = []
# Handle errors
for element in modarray:
if element[0] == "*":
float_modarray.append(0.0)
else:
float_modarray.append(float(element))
moduli[i,:] = float_modarray
moduli_array.append(moduli)
outfile.close()
return moduli_array
| 5,351,439 |
def test_process_tags_directive_and(tmpdir):
"""Test AND
"""
source_dir = tmpdir.mkdir('source')
source_dir.mkdir('ns1')
target_dir = tmpdir.mkdir('target')
create_markdown_file(source_dir.join('ns1', 'file1.md'),
{'title': 'Page One',
'tags': '[abc]'},
'{{abc +def}}')
create_markdown_file(source_dir.join('ns1', 'file2.md'),
{'title': 'Page Two',
'tags': '[abc, def]'},
'Text')
create_markdown_file(source_dir.join('ns1', 'file3.md'),
{'title': 'Page Three',
'tags': '[xyz]'},
'{{xyz}}')
create_wiki_config(str(source_dir.join('test.cfg')),
None,
{'name': 'ns1',
'path': f'{source_dir.join("ns1")}',
'target': str(target_dir)})
wiki = Wiki(source_dir.join('test.cfg'))
wiki.process_namespaces()
# assert page 3 not in list as it is not tagged 'abc'
expect1 = create_markdown_string({'title': 'Page One',
'tags': '[abc]'},
'''[Page Two](page_two.html)
''')
assert os.path.exists(target_dir.join('ns1', 'page_one.md'))
with open(target_dir.join('ns1', 'page_one.md'), 'r', encoding='utf8') as fh:
actual1 = fh.read()
assert compare_markdown_content(expect1, actual1)
| 5,351,440 |
def test_update_db(client):
"""
Test that database status correctly reports that the schema in the DB and
in the code match.
"""
response = client.get('/status/db')
print(response.data)
expected = 'DB schema at %s, code schema at %s, no action taken' % (SCHEMA_VERSION, SCHEMA_VERSION)
assert response.data == expected.encode('utf-8')
assert response.status_code == 200
| 5,351,441 |
def predict_koopman(lam, w, v, x0, ncp, g, h, u=None):
"""Predict the future dynamics of the system given an initial value `x0`. Result is returned
as a matrix where rows correspond to states and columns to time.
Args:
lam (tf.Tensor): Koopman eigenvalues.
w (tf.Tensor): Left eigenvectors.
v (tf.Tensor): Right eigenvectors.
x0 (tf.Tensor): Initial value of the system.
N (int): Number of time steps to predict.
g (Net): Encoder network.
h (Net): Decoder network.
u (tf.Tensor): Input signal.
Returns:
tuple: Prediction of the states of the system for N time steps into the future,
prediction of the observables of the system for N time steps into the future.
"""
# Precompute some constants for more efficient computations
wH = tf.linalg.adjoint(w)
norm_vec = 1/tf.math.reduce_sum(tf.math.multiply(tf.math.conj(w),v), axis=0)
# Store each time step in a list
res_x = tf.TensorArray(x0.dtype,size=ncp+1)
res_gx = tf.TensorArray(w.dtype,size=ncp+1)
res_x = res_x.write(0,x0)
res_gx = res_gx.write(0,tf.cast(tf.squeeze(g(tf.expand_dims(x0,0)),axis=[0]), w.dtype))
# Initiate time stepping
xk = x0
if u is not None:
for k in range(1,ncp+1):
xk = tf.concat([tf.expand_dims(xk[:-1],0),tf.reshape(u[k-1],[1,-1])],axis=1)
xk, gxk = one_step_pred(lam, wH, v, norm_vec, xk, g, h)
res_x = res_x.write(k,xk)
res_gx = res_gx.write(k,gxk)
else:
for k in range(1,ncp+1):
xk = tf.expand_dims(xk,0)
xk, gxk = one_step_pred(lam, wH, v, norm_vec, xk, g, h)
res_x = res_x.write(k,xk)
res_gx = res_gx.write(k,gxk)
return res_x.stack(), res_gx.stack()
| 5,351,442 |
def test_sparse_balance(): # pylint: disable=too-many-locals
"""Test sparse balance"""
for n_features in range(1, 4):
no_of_samples = 100
config_no_penalty = (
LimeConfig()
.withPerturbationContext(
PerturbationContext(jrandom, DEFAULT_NO_OF_PERTURBATIONS)
)
.withSamples(no_of_samples)
.withPenalizeBalanceSparse(False)
)
lime_explainer_no_penalty = LimeExplainer(config_no_penalty)
features = [mock_feature(i) for i in range(n_features)]
input_ = PredictionInput(features)
model = TestUtils.getSumSkipModel(0)
output = model.predictAsync([input_]).get().get(0)
prediction = SimplePrediction(input_, output)
saliency_map_no_penalty = lime_explainer_no_penalty.explain(
prediction, model
)
assert saliency_map_no_penalty is not None
decision_name = "sum-but0"
saliency_no_penalty = saliency_map_no_penalty.get(decision_name)
config = (
LimeConfig().withSamples(no_of_samples).withPenalizeBalanceSparse(True)
)
lime_explainer = LimeExplainer(config)
saliency_map = lime_explainer.explain(prediction, model)
assert saliency_map is not None
saliency = saliency_map.get(decision_name)
for i in range(len(features)):
score = saliency.getPerFeatureImportance().get(i).getScore()
score_no_penalty = (
saliency_no_penalty.getPerFeatureImportance().get(i).getScore()
)
assert abs(score) <= abs(score_no_penalty)
| 5,351,443 |
def scree_plot(analysis_actors_dict, dir_path, pcs_on_scree_plot=50, variance_ratio_line=0.75):
"""
Creates a plot with the scree plots for each ligand and saves it on the specified ``dir_path``. With blue color is
class 1 and with orange color class 2.
Args:
analysis_actors_dict: ``{ "Agonists": List[AnalysisActor.class], "Antagonists": List[AnalysisActor.class] }``
dir_path (str): The path of the directory the plot will be saved (must end with a ``/``)
pcs_on_scree_plot(int): The number of the first PCs that will be used on the scree plots
variance_ratio_line(float): Float from 0.0 to 1.0 which specifies the variance ratio that a vertical line will
be plotted
"""
# Get the dimensions of the final plot
plot_cols = 3
plot_rows = math.ceil((len(analysis_actors_dict['Agonists']) + len(analysis_actors_dict['Antagonists'])) / plot_cols)
fig = plt.figure(figsize=(18, 6 * plot_rows))
plot_index = 1
# Agonists Iteration
for which_ligand in analysis_actors_dict['Agonists']:
ax = fig.add_subplot(plot_rows, plot_cols, plot_index)
plt.axvline(x=np.where(np.cumsum(which_ligand.pca_res.explained_variance_ratio_) > variance_ratio_line)[0][0],
ls='--', c='grey', label=f"Reached {int(variance_ratio_line * 100)}% variance")
plt.plot(np.arange(len(which_ligand.pca_res.explained_variance_[:pcs_on_scree_plot])),
which_ligand.pca_res.explained_variance_[:pcs_on_scree_plot], label="Variance Ratio")
plt.ylabel("Variance")
plt.xlabel("#PC")
plt.title(which_ligand.drug_name)
plt.legend()
plot_index += 1
# Antagonists Iteration
for which_ligand in analysis_actors_dict['Antagonists']:
ax = fig.add_subplot(plot_rows, plot_cols, plot_index)
plt.axvline(x=np.where(np.cumsum(which_ligand.pca_res.explained_variance_ratio_) > variance_ratio_line)[0][0],
ls='--', c='grey', label=f"Reached {int(variance_ratio_line * 100)}% variance")
plt.plot(np.arange(len(which_ligand.pca_res.explained_variance_[:pcs_on_scree_plot])),
which_ligand.pca_res.explained_variance_[:pcs_on_scree_plot], label="Variance", color='orange')
plt.ylabel("Variance")
plt.xlabel("#PC")
plt.title(which_ligand.drug_name)
plt.legend()
plot_index += 1
fig.suptitle('PCA Scree Plots\nAgonists: Blue\nAntagonists: Orange', fontsize=26, y=0.93)
plt.savefig(f'{dir_path}pca_scree_plots.png', format='png')
| 5,351,444 |
def test_raise_cmp_2():
"""
Show that not raising has no effect on the passing of results to the last action.
"""
add1 = Add1()
result = list_x.UntilFailure(
actions=[add1, add1, list_x.RaiseCmp(cmp=0, value=1), add1, add1]
).execute()
assert result.result == 4
| 5,351,445 |
def handle_registration():
""" Show the registration form or handles the registration
of a user, if the email or username is taken, take them back to the
registration form
- Upon successful login, take to the homepage
"""
form = RegisterForm()
email = form.email.data
username = form.username.data
# If there is a user with this email already
if User.query.filter_by(email=email).first():
form.email.errors = ["This email is already being used"]
# Check if there is a user with this username already
if User.query.filter_by(username=username).first():
form.username.errors = ["This username is already being used"]
if form.email.errors or form.username.errors:
return render_template('login_register/register.html', form=form)
if form.validate_on_submit():
pwd = form.password.data
f_name = form.first_name.data
l_name = form.last_name.data
user = User.register(username=username,
pwd=pwd,
email=email,
f_name=f_name,
l_name=l_name)
db.session.add(user)
db.session.commit()
login_user(user)
flash('Sucessfully logged in!', "success")
# on successful login, redirect to user detail page
return redirect(url_for("homepage.index"))
else:
return render_template("login_register/register.html", form=form)
| 5,351,446 |
def test_Lam_Hole_51_0N2(machine):
"""Test machine plot hole 51 with no magnet_1
"""
machine.rotor.hole[0].magnet_0 = Magnet()
machine.rotor.hole[0].magnet_1 = None
machine.rotor.hole[0].magnet_2 = Magnet()
machine.rotor.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Hole_s51_3-Rotor_0N2.png"))
# 2 for lam + 5*8 for holes
assert len(fig.axes[0].patches) == 42
| 5,351,447 |
def conll_to_brat(file_name: str, output_directory: str, format: str) -> None:
"""
Converts the content of the CoNLL file file_name into Brat files containing
5 sentences each. The CoNLL format here is CoNLL05, with columns for SRL
data. It also writes the corresponding Brat text and annotation files.
Parameters:
file_name (str): the filename containing CoNLL data
output_directory (str): the directory where to writ outputs
format (str): the input CoNLL format
Returns:
None
"""
docnum = 1
sentences = []
entity = None
# id of the next predicate to encounter
current_pred = 0
with open(file_name) as f:
# store (token, BIO-tag, type) triples for sentence
current = Sentence()
current_pred = 0
lines = f.readlines()
for line_num, line in enumerate(lines):
line = line.strip()
if line.startswith("#"):
continue
if re.match(r'^\s*$', line):
# blank lines separate sentences
if len(current) > 0:
sentences.append(current)
if len(sentences) >= 5:
output_brat(file_name, output_directory, docnum, sentences, format)
sentences = []
docnum += 1
new_sentence = Sentence()
current = new_sentence
current_pred = 0
continue
elif (re.match(r'^===*\s+O\s*$', line) or
re.match(r'^-DOCSTART-', line)):
# special character sequence separating documents
if len(sentences) > 0:
output_brat(file_name, output_directory, docnum, sentences, format)
sentences = []
docnum += 1
continue
if (line_num + 2 < len(lines) and
re.match(r'^\s*$', lines[line_num + 1]) and
re.match(r'^-+\s+O\s*$', lines[line_num + 2])):
# heuristic match for likely doc before current line
if len(sentences) > 0:
output_brat(file_name, output_directory, docnum, sentences, format)
sentences = []
docnum += 1
# go on to process current normally
if format == "conll05":
parse_conll05_line(line_num, line, current_pred, current)
elif format == "conllu":
parse_conllu_line(line_num, line, current_pred, current)
else:
raise ValueError(f"Unhandled CoNLL format {format}")
# process leftovers, if any
if len(current) > 0:
sentences.append(current)
if len(sentences) > 0:
output_brat(file_name, output_directory, docnum, sentences, format)
| 5,351,448 |
def func_xy_args_kwargs_annotate(
x: "0", y, *args: "2", **kwargs: "4"
) -> typing.Tuple:
"""func.
Parameters
----------
x, y: float
args: tuple
kwargs: dict
Returns
-------
x, y: float
args: tuple
kwargs: dict
"""
return x, y, None, None, args, None, None, kwargs
| 5,351,449 |
def mp_nerf_torch(a, b, c, l, theta, chi):
""" Custom Natural extension of Reference Frame.
Inputs:
* a: (batch, 3) or (3,). point(s) of the plane, not connected to d
* b: (batch, 3) or (3,). point(s) of the plane, not connected to d
* c: (batch, 3) or (3,). point(s) of the plane, connected to d
* theta: (batch,) or (float). angle(s) between b-c-d
* chi: (batch,) or float. dihedral angle(s) between the a-b-c and b-c-d planes
Outputs: d (batch, 3) or (float). the next point in the sequence, linked to c
"""
# safety check
if not ( (-np.pi <= theta) * (theta <= np.pi) ).all().item():
raise ValueError(f"theta(s) must be in radians and in [-pi, pi]. theta(s) = {theta}")
# calc vecs
ba = b-a
cb = c-b
# calc rotation matrix. based on plane normals and normalized
n_plane = torch.cross(ba, cb, dim=-1)
n_plane_ = torch.cross(n_plane, cb, dim=-1)
rotate = torch.stack([cb, n_plane_, n_plane], dim=-1)
rotate /= torch.norm(rotate, dim=-2, keepdim=True)
# calc proto point, rotate. add (-1 for sidechainnet convention)
# https://github.com/jonathanking/sidechainnet/issues/14
d = torch.stack([-torch.cos(theta),
torch.sin(theta) * torch.cos(chi),
torch.sin(theta) * torch.sin(chi)], dim=-1).unsqueeze(-1)
# extend base point, set length
return c + l.unsqueeze(-1) * torch.matmul(rotate, d).squeeze()
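# Hedged usage sketch for mp_nerf_torch above (the coordinates, bond length,
# angle and dihedral are illustrative, not taken from any real structure):
import numpy as np
import torch

a = torch.tensor([0.0, 0.0, 0.0])
b = torch.tensor([1.5, 0.0, 0.0])
c = torch.tensor([2.0, 1.4, 0.0])
l = torch.tensor(1.5)                    # length of the new c-d bond
theta = torch.tensor(np.deg2rad(109.5))  # b-c-d bond angle, in radians
chi = torch.tensor(np.deg2rad(-60.0))    # a-b-c-d dihedral, in radians
d = mp_nerf_torch(a, b, c, l, theta, chi)  # -> tensor of shape (3,): the placed point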
| 5,351,450 |
def makemarkers(nb):
""" Give a list of cycling markers. See http://matplotlib.org/api/markers_api.html
.. note:: This what I consider the *optimal* sequence of markers, they are clearly differentiable one from another and all are pretty.
Examples:
>>> makemarkers(7)
['o', 'D', 'v', 'p', '<', 's', '^']
>>> makemarkers(12)
['o', 'D', 'v', 'p', '<', 's', '^', '*', 'h', '>', 'o', 'D']
"""
allmarkers = ['o', 'D', 'v', 'p', '<', 's', '^', '*', 'h', '>']
longlist = allmarkers * (1 + int(nb / float(len(allmarkers)))) # Cycle the good number of time
return longlist[:nb]
| 5,351,451 |
def test_grad_diffoutdim(eval_data_loader, model, num_classes,
output_dir='pred', has_gt=True, save_vis=False, downsize_scale=1, args=None):
"""
Evaluates the effect of increasing output dimension on the norm of the gradient.
Monte Carlo sampling will be used and the result would be averaged.
First choose the number of pixels to calculate the loss for (output dimension) --> select_num.
For each select_num, we do the following MC_times(as Monte Carlo sampling):
Calculate the loss for select_num pixels chosen, backpropagate and get the input gradient.
Average all these.
:param eval_data_loader:
:param model:
:param num_classes:
:param output_dir:
:param has_gt:
:param save_vis:
:param downsize_scale:
:param args:
:return:
"""
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
hist = np.zeros((num_classes, num_classes))
# exit(0)
if torch.cuda.is_available():
GPU_flag = True
else:
GPU_flag = False
# Number of points to be selected for masking - analogous to number of output dimensions. Only these many pixels will be considered to calculate the loss.
select_num_list = [1] + [i * 4 for i in range(1, 100)] + [400 + i*200 for i in range(100)]
result_list = []
for select_num in select_num_list:
print("********")
print("selecting {} of output".format(select_num))
import random
grad_sample_avg_sum = 0
if select_num < 400:
MCtimes = 20
else:
MCtimes = 5
MCtimes = 1
# Monte Carlo Sampling - MCTimes is the number of times that we sample
for inner_i in range(MCtimes):
grad_sum = 0
cnt = 0
print("MC time {}".format(inner_i))
for iter, (image, label, name) in enumerate(eval_data_loader):
# break if 50 images (batches) done
if cnt > 1 and args.debug:
break
elif cnt > 200:
break
data_time.update(time.time() - end)
if torch.cuda.is_available():
image_var = Variable(image.cuda(), requires_grad=True)
else:
image_var = Variable(image, requires_grad=True)
# print("__shape of image var__", image_var.shape) # [1,3,1024,2048]
final = model(image_var)[0]
# print("__shape of final__", final.shape) # [1, 19, 1024,2048]
_, pred = torch.max(final, 1)
# print("__shape of pred__", pred.shape) # [1,1024,2048]
# for this image, sample select_num number of pixels
temp = [i for i in range(image_var.size(2) * image_var.size(3))]
selected = random.sample(temp, select_num)
# Build mask for image -
mask = np.zeros((image_var.size(2) * image_var.size(3)), dtype=np.uint8)
for iii in range(select_num):
mask[selected[iii]] = 1
mask = mask.reshape(1, 1, image_var.size(2), image_var.size(3))
mask = torch.from_numpy(mask)
mask = mask.float()
mask_target = mask.long()
# print('label', label)
label = label.long()
if GPU_flag:
# image.cuda()
# image_var.cuda() # BUG: too late
mask = mask.cuda()
mask_target = mask_target.cuda()
label = label.cuda()
target, mask = Variable(label), Variable(mask)
loss = cross_entropy2d(final * mask, target * mask_target, size_average=False)
loss.backward()
data_grad = image_var.grad
np_data_grad = data_grad.cpu().numpy()
# print(np_data_grad.shape)
L2_grad_norm = np.linalg.norm(np_data_grad) / select_num # the 1/M \sum_M \partial{Loss_i}/\partial{input}
grad_sum += L2_grad_norm
# increment the batch # counter
cnt += 1
pred = pred.cpu().data.numpy()
batch_time.update(time.time() - end)
end = time.time()
grad_avg = grad_sum / cnt # Represents the gradient average for batch. cnt is the number of samples in a batch.
grad_sample_avg_sum += grad_avg # For each sampling this is the sum of avg gradients in that sample.
grad_sample_avg_sum /= MCtimes
result_list.append(grad_sample_avg_sum)
print(select_num, 'middle result', result_list)
np.save('{}_{}_graph_more.npy'.format(args.dataset, args.arch), result_list)
print('Final', result_list)
np.save('{}_{}_graph_more.npy'.format(args.dataset, args.arch), result_list)
# not sure if has to be moved
if has_gt: # val
ious = per_class_iu(hist) * 100
logger.info(' '.join('{:.03f}'.format(i) for i in ious))
return round(np.nanmean(ious), 2)
| 5,351,452 |
def org_facility_onvalidation(form):
"""
Default the name to the Street Address
"""
form_vars = form.vars
name = form_vars.get("name", None)
if name:
return
address = form_vars.get("address", None)
if address:
form_vars.name = address
else:
# We need a default
form_vars.name = current.db.org_facility.location_id.represent(form_vars.location_id)
| 5,351,453 |
def uselist(*, schema: types.Schema, schemas: types.Schemas) -> typing.Optional[bool]:
"""
Retrieve the x-uselist of the schema.
Raises MalformedSchemaError if the x-uselist value is not a boolean.
Args:
schema: The schema to get x-uselist from.
schemas: The schemas for $ref lookup.
Returns:
The x-uselist or None.
"""
value = peek_key(
schema=schema, schemas=schemas, key=types.ExtensionProperties.USELIST
)
if value is None:
return None
if not isinstance(value, bool):
raise exceptions.MalformedSchemaError(
"The x-uselist property must be of type boolean."
)
return value
| 5,351,454 |
def getIPRules():
"""
Fetches a json representation of the Iptables rules on the server
    GET: json object with all the iptables rules on the system
"""
return jsonify({"result" : True, "rules" : hl.getIptablesRules()})
| 5,351,455 |
def process_indices(symbols, start_date, end_date, model_dir):
"""
:param symbols: list of index symbols to fetch, predict and plot
:param start_date: earliest date to fetch
:param end_date: latest date to fetch
:param model_dir: location to save model
:return:
"""
data_reader = StooqDataReader()
plotter = PAPlot(c.CHART_TRAIN_DIR)
for symbol in symbols:
symbol_name = symbol[1:] # strip off the ^ character
# load data
df = data_reader.load(symbol, start_date, end_date, symbol_name)
# use 'Close' (no adjusted close for indices) as our close price
df = talib.copy_column(df, "Close", c.CLOSE)
# train and plot all
train_all(df, symbol, symbol_name, model_dir, plotter)
| 5,351,456 |
def _get_only_relevant_data(video_data):
"""
Method to build ES document with only the relevant information
"""
return {
"kind": video_data["kind"],
"id": video_data["id"],
"published_at": video_data["snippet"]["publishedAt"],
"title": video_data["snippet"]["title"],
"description": video_data["snippet"]["description"],
"thumbnail_url": video_data["snippet"]["thumbnails"]["default"]["url"],
"channel_title": video_data["snippet"]["channelTitle"],
}
| 5,351,457 |
def get_mask(img):
"""
Convert an image to a mask array.
"""
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)
return mask
| 5,351,458 |
def b32encode(hex_values, pad_left=True):
"""
Base32 encoder algorithm for Nano.
Transforms the given hex_value into a base-32 representation. The allowed letters are:
"13456789abcdefghijkmnopqrstuwxyz"
:param hex_values:
Hexadecimal values (string) or byte array containing the data to be encoded.
:param pad_left:
True if a byte of 0s should be prepended to the input. False otherwise.
This padding is required when generating a nanoblocks address with this algorithm.
"""
if type(hex_values) is str:
data_bytes = int(hex_values, 16).to_bytes(32, "big")
else:
data_bytes = hex_values
data_binary = ("0000" if pad_left else "") + "".join([f'{p:08b}' for p in data_bytes])
data_encoded = [int(split, 2) for split in chunkize(data_binary, 5)]
return "".join(pub_key_map.iloc[data_encoded].tolist())
| 5,351,459 |
def get_mfp(g, gv):
"""Calculate mean free path from inverse lifetime and group velocity."""
g = np.where(g > 0, g, -1)
gv_norm = np.sqrt((gv**2).sum(axis=2))
mean_freepath = np.where(g > 0, gv_norm / (2 * 2 * np.pi * g), 0)
return mean_freepath
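# Hedged usage sketch (not from the original source): g holds inverse lifetimes,
# e.g. shape (nqpoints, nbands), and gv the matching group velocities with a
# trailing Cartesian axis (nqpoints, nbands, 3); non-positive g entries give a
# mean free path of 0.
import numpy as np
g_demo = np.array([[0.1, 0.0], [0.2, 0.05]])   # inverse lifetimes, one zero entry
gv_demo = np.ones((2, 2, 3))                   # unit group velocities
print(get_mfp(g_demo, gv_demo))                # (2, 2) array; the zero entry maps to 0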
| 5,351,460 |
def collate_participant_tables(subject_ids, base_dir):
"""
Generate a pandas dataframe across all subjects
Parameters
----------
subject_ids: list
a list of subject identifiers in
base_dir: str
path to a mindboggle output base directory (mindboggled)
Returns
-------
collated_table : pandas DataFrame
rows of subject_ids, and columns of shape measures
Examples
--------
>>> from mindboggle.mio.tables import collate_participant_tables
>>> subject_ids = ['arno', 'arno'] # normally two different subjects
>>> base_dir = os.environ['MINDBOGGLE_DATA'] # doctest: +SKIP
>>> dft = collate_participant_tables(subject_ids, base_dir) # doctest: +SKIP
>>> dft['lcsfs-sylvian fissure-area'] # doctest: +SKIP
arno 4.641015
arno 4.641015
Name: lcsfs-sylvian fissure-area, dtype: float64
"""
from glob import glob
import os
import pandas as pd
out = None
for id in subject_ids:
fl = glob(os.path.join(base_dir, id, 'tables', '*.csv')) + \
glob(os.path.join(base_dir, id, 'tables', '*', '*.csv'))
# skip vertices outputs
dft = pd.concat([fname2df(val) for val in sorted(fl)
if 'vertices' not in val], axis=1)
dft.index = [id]
out = dft if out is None else pd.concat((out, dft), axis=0)
return out
| 5,351,461 |
def convert_to_format(file: str, output: str, output_format: str):
"""
Converts a HOCON file to another format
Parameters
----------
file : str
hocon file to convert
output : str
output file to produce
output_format : str
format of the output file
Returns
-------
str
the output file
"""
(pyhocon
.converter
.HOCONConverter
.convert_from_file(file, output_file=output,
output_format=output_format))
os.remove(file)
return output
| 5,351,462 |
def test_stop(transfer, ts):
"""Intersect at end"""
transfer(0, 1, 100)
transfer(0, 2, 100)
transfer(0, 3, 42)
transfer(3, 2, 19)
transfer(3, 2, 22)
transfer(3, 2, 1)
ts(4)
| 5,351,463 |
def summarizeByDate(dict1):
"""
    Takes dictionary of RSS Items per media outlet and draws a Seaborn
    boxplot of article sizes grouped by date
Parameters
----------
dict1 : dict
DESCRIPTION key values of Feed Names and FeedParserDicts
Returns
-------
    None
        DESCRIPTION. Draws a boxplot grouped by date, colours are per Source
"""
articleDate=[]
articleSize=[]
feedNames=[]
sns.set(rc={'figure.figsize':(14,17)})
for uid, val in tqdm(dict1.items(), desc="Summarizing by date"):
aDate=getRssArticleDate(val)
if aDate!=None:
articleDate.append(aDate)
feedNames.append(val.feed_name)
articleSize.append(getRssArticleSize(val))
outDict={"Source":feedNames, "Article Size (words)":articleSize, "Date":articleDate, "labels":feedNames}
df = pd.DataFrame(outDict)
df = df.sort_values('Date',ascending=True).reset_index()
paired50=sns.color_palette("hls", n_colors=50)
sns.boxplot(y="Date", x="Article Size (words)", hue="Source", data=df, width=20, palette=paired50)
plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left', fontsize= "16",ncol=1)
#Tooltips
fig = plt.gcf()
tooltip = mpld3.plugins.PointLabelTooltip(fig, labels=list(outDict["labels"]))
mpld3.plugins.connect(fig, tooltip)
#swarm=sns.catplot(x="Date", y="Article Size (words)", hue="Source", orient = "h", kind="swarm", data=df, height=4, aspect= 1.5);
#sns.set_yticklabels(sns.get_yticklabels(), fontsize=7)
# swarm=sns.catplot(x="Date", y="Article Size (words)", hue="Source", kind="swarm", data=df);
plt.xticks(rotation = 45, horizontalalignment="right" )
plt.tight_layout()
return
| 5,351,464 |
def find_prime_root(l, blum=True, n=1):
"""Find smallest prime of bit length l satisfying given constraints.
Default is to return Blum primes (primes p with p % 4 == 3).
Also, a primitive root w is returned of prime order at least n.
"""
if l == 1:
assert not blum
assert n == 1
p = 2
w = 1
elif n <= 2:
n = 2
w = -1
p = gmpy2.next_prime(2**(l - 1))
if blum:
while p % 4 != 3:
p = gmpy2.next_prime(p)
p = int(p)
else:
assert blum
if not gmpy2.is_prime(n):
n = int(gmpy2.next_prime(n))
p = 1 + n * (1 + (n**2) % 4 + 4 * ((2**(l - 2)) // n))
while not gmpy2.is_prime(p):
p += 4 * n
a = 1
w = 1
while w == 1:
a += 1
w = gmpy2.powmod(a, (p - 1) // n, p)
p, w = int(p), int(w)
return p, n, w
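# Hedged usage sketch (not from the original source, requires gmpy2): for a 5-bit
# Blum prime the search starts at 2**4 = 16, skips 17 (17 % 4 == 1) and stops at
# 19, with -1 as a primitive root of order n=2.
# p, n, w = find_prime_root(5)
# print(p, n, w)   # expected: 19 2 -1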
| 5,351,465 |
async def test_agreement_already_set_up(
hass, aiohttp_client, aioclient_mock, current_request
):
"""Test showing display form again if display already exists."""
await setup_component(hass)
MockConfigEntry(domain=DOMAIN, unique_id=123).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
# pylint: disable=protected-access
state = config_entry_oauth2_flow._encode_jwt(hass, {"flow_id": result["flow_id"]})
await hass.config_entries.flow.async_configure(
result["flow_id"], {"implementation": "eneco"}
)
client = await aiohttp_client(hass.http.app)
await client.get(f"/auth/external/callback?code=abcd&state={state}")
aioclient_mock.post(
"https://api.toon.eu/token",
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch("toonapi.Toon.agreements", return_value=[Agreement(agreement_id=123)]):
result3 = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result3["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result3["reason"] == "already_configured"
| 5,351,466 |
def set_parameters(_configs, new=False):
"""
Sets configuration parameters
Parameters
----------
_configs :
Dictionary containing configuration options from the config file (config.json)
new : bool
Do you want to start from a new file?
Returns
-------
_configs :
Updated dictionary containing configuration options from the config file (config.json)
"""
if new:
_configs = {x: "NA" for x in _configs}
print('*Do not include single or double quotes*\n')
if _configs['eye_mask_path'] == 'NA':
_eye_mask_path = input('Add the full eye mask filepath: ')
_configs['eye_mask_path'] = _eye_mask_path
if _configs['train_file'] == 'NA':
_train_file = input('Add the name of the file used for training [peer1.nii.gz]: ')
if not _train_file:
_configs['train_file'] = 'peer1.nii.gz'
else:
_configs['train_file'] = _train_file
if _configs['test_file'] == 'NA':
_test_file = input('Which file would you like to predict eye movements from? [movie.nii.gz]: ')
if not _test_file:
_configs['test_file'] = 'movie.nii.gz'
else:
_configs['test_file'] = _test_file
if _configs['use_gsr'] == 'NA':
_use_gsr = input('Use global signal regression? (y/n) [n]: ')
if (not _use_gsr) or (_use_gsr == 'n'):
_configs['use_gsr'] = "0"
else:
_configs['use_gsr'] = "1"
if _configs['motion_scrub'] == 'NA':
_use_ms = input('Use motion scrubbing? (y/n) [n]: ')
if (not _use_ms) or (_use_ms == 'n'):
_configs['use_ms'] = "0"
_configs['motion_threshold'] = "0"
_configs['motion_scrub'] = "Not implemented"
elif _use_ms == 'y':
_configs['use_ms'] = "1"
            _motion_scrub_filename = input('Add the filename of the CSV that contains the framewise '
                                           'displacement time series [motion_ts.csv]: ')
if not _motion_scrub_filename:
_configs['motion_scrub'] = 'motion_ts.csv'
else:
_configs['motion_scrub'] = _motion_scrub_filename
_motion_threshold = input('Add a motion threshold for motion scrubbing [.2]: ')
if not _motion_threshold:
_configs['motion_threshold'] = ".2"
else:
_configs['motion_threshold'] = _motion_threshold
with open('peer/config.json', 'w') as f:
json.dump(_configs, f)
return _configs
| 5,351,467 |
def _commandDisown(args):
"""
Disown a User
:param args: System Arguments Passed
"""
vaultFile = args.vault_file
privateKeyFile = args.private_key_file
publicKeyFile = args.public_key_file
_debugMessage('Bootstrapping Identity, this will take some time ...')
identity = Identity(privateKeyFile, publicKeyFile)
_debugMessage('Opening or Bootstrapping Vault ...')
vault = Vault(identity=identity, vaultFile=vaultFile)
identityToDisown = _valueOrContentsOf(args.identity)
confirm = args.confirm
_debugMessage('Disowning Identity ...')
assert (identityToDisown or confirm), 'Must Confirm when Removing Self'
vault.disown(identityToDisown)
_debugMessage('Saving Vault ...')
vault.save()
_debugMessage('Vault Saved.')
_printSuccess('Disowned.')
| 5,351,468 |
def create_all_files(sizes):
"""Create all files.
Parameters
----------
sizes : a list of lists of the form [(filesize,[block_size_1, block_size_2,...])]
Returns
-------
List of file names, a dictionary of measurements
"""
Stats=[]; files=[]
try:
for file_size,block_sizes in sizes:
for block_size in block_sizes:
n=block_size
m=int(file_size/block_size)
assert n*m==file_size , 'file_size=%d is not a multiple of block_size=%d'%(file_size,n)
filename='BlockData'+str(file_size)
(t_mem,t_disk) = create_file(n,m,filename=filename)
Stats.append({'n':n,
'm':m,
't_mem':t_mem,
't_disk':t_disk})
files.append(filename)
except:
traceback.print_exc(file=sys.stdout)
return files, Stats
| 5,351,469 |
def read_requirements(filename='requirements.txt'):
"""Reads the list of requirements from given file.
:param filename: Filename to read the requirements from.
Uses ``'requirements.txt'`` by default.
:return: Requirements as list of strings
"""
# allow for some leeway with the argument
if not filename.startswith('requirements'):
filename = 'requirements-' + filename
if not os.path.splitext(filename)[1]:
filename += '.txt' # no extension, add default
def valid_line(line):
line = line.strip()
return line and not any(line.startswith(p) for p in ('#', '-'))
def extract_requirement(line):
egg_eq = '#egg='
if egg_eq in line:
_, requirement = line.split(egg_eq, 1)
return requirement
return line
with open(filename) as f:
lines = f.readlines()
return list(map(extract_requirement, filter(valid_line, lines)))
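# Hedged usage sketch (not from the original source): the filename argument is
# normalised, so these calls read requirements.txt, requirements-dev.txt and
# requirements-test.txt respectively (assuming those files exist).
# install_requires = read_requirements()
# dev_requires = read_requirements('dev')
# test_requires = read_requirements('test.txt')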
| 5,351,470 |
def vgg16(mask_init='1s', mask_scale=1e-2, threshold_fn='binarizer', **kwargs):
"""VGG 16-layer model (configuration "D")."""
model = VGG(make_layers(cfg['D'], mask_init, mask_scale, threshold_fn),
mask_init, mask_scale, threshold_fn, **kwargs)
return model
| 5,351,471 |
def sort(X):
"""
Return sorted elements of :param:`X` and array of corresponding
sorted indices.
:param X: Target vector.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil,
dia or :class:`numpy.matrix`
"""
assert 1 in X.shape, "X should be vector."
X = X.flatten().tolist()[0]
return sorted(X), sorted(list(range(len(X))), key=X.__getitem__)
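# Hedged usage sketch (not from the original source) with a 1-x-3 numpy matrix;
# scipy sparse row/column vectors work the same way.
import numpy as np
vals, idx = sort(np.matrix([[3, 1, 2]]))
print(vals)   # [1, 2, 3]
print(idx)    # [1, 2, 0]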
| 5,351,472 |
def get_query(sf, query_text, verbose=True):
"""
Returns a list of lists based on a SOQL query with the fields as
the header column in the first list/row
"""
# execute query for up to 2,000 records
gc = sf.query(query_text)
records = gc['records']
if verbose:
print('Reading from %s object' % records[0]['attributes']['type'],
flush=True)
headers = list(records[0].keys())[1:] # get the headers
return_table = [ [record[heading] for heading in headers]
for record in records]
return_table.insert(0, headers)
# the above is complete unless there are >2,000 records
total_read_so_far = len(records)
while not gc['done']:
if verbose:
print('Progress: {} records out of {}'.format(
total_read_so_far, gc['totalSize']), flush=True)
gc = sf.query_more(gc['nextRecordsUrl'], True)
records = gc['records']
total_read_so_far += len(records)
next_table = [ [record[heading] for heading in headers]
for record in records]
return_table.extend(next_table)
return return_table
| 5,351,473 |
def trf(args):
"""
%prog trf outdir
Run TRF on FASTA files.
"""
from jcvi.apps.base import iglob
cparams = "1 1 2 80 5 200 2000"
p = OptionParser(trf.__doc__)
p.add_option("--mismatch", default=31, type="int",
help="Mismatch and gap penalty")
p.add_option("--minscore", default=MINSCORE, type="int",
help="Minimum score to report")
p.add_option("--period", default=6, type="int",
help="Maximum period to report")
p.add_option("--lobstr", default=False, action="store_true",
help="Generate output for lobSTR")
p.add_option("--telomeres", default=False, action="store_true",
help="Run telomere search: minscore=140 period=7")
p.add_option("--centromeres", default=False, action="store_true",
help="Run centromere search: {}".format(cparams))
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
outdir, = args
minlength = opts.minscore / 2
mm = MakeManager()
if opts.telomeres:
opts.minscore, opts.period = 140, 7
params = "2 {0} {0} 80 10 {1} {2}".\
format(opts.mismatch, opts.minscore, opts.period).split()
if opts.centromeres:
params = cparams.split()
bedfiles = []
for fastafile in natsorted(iglob(outdir, "*.fa,*.fasta")):
pf = op.basename(fastafile).split(".")[0]
cmd1 = "trf {0} {1} -d -h".format(fastafile, " ".join(params))
datfile = op.basename(fastafile) + "." + ".".join(params) + ".dat"
bedfile = "{0}.trf.bed".format(pf)
cmd2 = "cat {} | grep -v ^Parameters".format(datfile)
if opts.lobstr:
cmd2 += " | awk '($8 >= {} && $8 <= {})'".\
format(minlength, READLEN - minlength)
else:
cmd2 += " | awk '($8 >= 0)'"
cmd2 += " | sed 's/ /\\t/g'"
cmd2 += " | awk '{{print \"{0}\\t\" $0}}' > {1}".format(pf, bedfile)
mm.add(fastafile, datfile, cmd1)
mm.add(datfile, bedfile, cmd2)
bedfiles.append(bedfile)
bedfile = "trf.bed"
cmd = "cat {0} > {1}".format(" ".join(natsorted(bedfiles)), bedfile)
mm.add(bedfiles, bedfile, cmd)
mm.write()
| 5,351,474 |
def strip_blank(contents):
"""
strip the redundant blank in file contents.
"""
with io.StringIO(contents) as csvfile:
csvreader = csv.reader(csvfile, delimiter=",", quotechar='"')
rows = []
for row in csvreader:
rows.append(",".join(['"{}"'.format(x.strip()) for x in row]))
return "\n".join(rows)
| 5,351,475 |
def generate_id() -> str:
"""Generates an uuid v4.
:return: Hexadecimal string representation of the uuid.
"""
return uuid4().hex
| 5,351,476 |
def perms_of_length(n, length):
"""Return all permutations in :math:`S_n` of the given length (i.e., with the specified number of inversion).
This uses the algorithm in `<http://webhome.cs.uvic.ca/~ruskey/Publications/Inversion/InversionCAT.pdf>`_.
:param n: specifies the permutation group :math:`S_n`.
:param length: number of inversions.
:rtype: list of :class:`sage.Permutation`
"""
result = []
def gen(S, l, suffix=[]):
if l == 0:
result.append(Permutation(S + suffix))
return
n = len(S)
bin = (n - 1) * (n - 2) / 2
for i in range(n):
if n - (i + 1) <= l <= bin + n - (i + 1):
x = S[i]
gen(S[0:i] + S[i + 1 :], l - n + (i + 1), [x] + suffix)
gen(S=list(range(1, n + 1)), l=length)
return result
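# Hedged usage sketch (not from the original source, requires Sage's Permutation):
# the two permutations of S_3 with exactly one inversion are [1, 3, 2] and
# [2, 1, 3] (returned as Permutation objects; order may vary).
# perms_of_length(3, 1)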
| 5,351,477 |
def test_rename_model_name():
"""Assert that an error is raised when name is a model's acronym."""
atom = ATOMClassifier(X_bin, y_bin, random_state=1)
with pytest.raises(ValueError, match=r".*model's acronym.*"):
atom.branch.rename("Lda")
| 5,351,478 |
def compare_sequence():
"""
This function is a common test used by all insert row functions. This
simply compares the contents of the original and resulting file.
"""
original = openpyxl.load_workbook('test_multiplication_table.xlsx')
original_sheet = original.active
result = openpyxl.load_workbook('result_test_multiplication_table.xlsx')
result_sheet = result.active
# verifies expected length
assert original_sheet.max_row + offset == result_sheet.max_row
# compares unmoved rows
for row in range(1, index):
for col in range(1, original_sheet.max_column):
assert original_sheet.cell(row=row, column=col).value == \
result_sheet.cell(row=row, column=col).value
# compares moved rows
for row in range(index, original_sheet.max_row + 1):
for col in range(1, original_sheet.max_column):
assert original_sheet.cell(row=row, column=col).value == \
result_sheet.cell(row=row + offset, column=col).value
| 5,351,479 |
def rank_by_entropy(pq, kl=True):
""" evaluate kl divergence, wasserstein distance
wasserstein: http://pythonhosted.org/pyriemann/_modules/pyriemann/utils/distance.html
"""
# to avoid Inf cases
pq = pq + 0.0000001
pq = pq/pq.sum(axis=0)
if kl: # entropy actually can calculate KL divergence
final=pq.iloc[:, :-1].apply(
lambda x: stats.entropy(x, pq.iloc[:, -1], base=2), axis=0)
label = 'KL'
else: # JS divergence
final=pq.iloc[:, :-1].apply(
lambda x: JSD(x, pq.iloc[:, -1]), axis=0)
label = 'JSD'
final.sort_values(ascending=False, inplace=True)
rank = final.rank(ascending=False)
final = pd.concat([final, rank], axis=1)
final.columns = [label, 'rank']
return final
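# Hedged usage sketch (not from the original source): each column is normalised to
# a probability distribution and compared against the last column (the reference);
# the most divergent column gets rank 1.
import pandas as pd
pq_demo = pd.DataFrame({'a': [1, 1, 8], 'b': [3, 3, 4], 'ref': [1, 1, 1]})
print(rank_by_entropy(pq_demo, kl=True))   # column 'a' should rank above 'b'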
| 5,351,480 |
def test_dynamic_partitions_multiple_indices(store):
"""
Do not specify partitions in metadata, but read them dynamically from store
"""
suffix = "suffix"
dataset_uuid = "uuid+namespace-attribute12_underscored"
partition0_core = create_partition_key(
dataset_uuid,
"core",
[("location", "L-0"), ("product", "P-0")],
"{}.parquet".format(suffix),
)
partition1_core = create_partition_key(
dataset_uuid,
"core",
[("location", "L-1"), ("product", "P-0")],
"{}.parquet".format(suffix),
)
metadata = {"dataset_metadata_version": 4, "dataset_uuid": dataset_uuid}
expected_partitions = {
"location=L-0/product=P-0/{}".format(suffix): {
"files": {"core": partition0_core}
},
"location=L-1/product=P-0/{}".format(suffix): {
"files": {"core": partition1_core}
},
}
expected_indices = {
"location": {
"L-0": ["location=L-0/product=P-0/{}".format(suffix)],
"L-1": ["location=L-1/product=P-0/{}".format(suffix)],
},
"product": {
"P-0": [
"location=L-0/product=P-0/{}".format(suffix),
"location=L-1/product=P-0/{}".format(suffix),
]
},
}
store.put(partition0_core, b"test")
store.put(partition1_core, b"test")
store_schema_metadata(
make_meta(pd.DataFrame({"location": ["L-0"], "product": ["P-0"]}), origin="1"),
dataset_uuid,
store,
"core",
)
dmd = DatasetMetadata.load_from_dict(metadata, store)
dmd = dmd.load_partition_indices()
dmd_dict = dmd.to_dict()
assert dmd_dict["partitions"] == expected_partitions
# Sorting may differ in the index list. This is ok for runtime
# but does produce flaky tests thus sort them.
sorted_result = {
column: {label: sorted(x) for label, x in index.items()}
for column, index in dmd_dict["indices"].items()
}
assert sorted_result == expected_indices
| 5,351,481 |
def generate_graph_properties(networks):
"""
This function constructs lists with centrality rankings of nodes in multiple networks.
Instead of using the absolute degree or betweenness centrality, this takes metric bias into account.
If the graph is not connected, the values are calculated for the largest connected component.
:param networks: List of input networks
:return: Pandas dataframe with rankings
"""
properties = dict()
property_names = ['Assortativity', 'Connectivity', 'Diameter', 'Radius', 'Average shortest path length']
for property in property_names:
properties[property] = list()
for network in networks:
if len(network[1].nodes) > 0:
properties['Assortativity'].append((network[0],
nx.degree_pearson_correlation_coefficient(network[1])))
properties['Connectivity'].append((network[0],
nx.average_node_connectivity(network[1])))
if nx.is_connected(network[1]):
properties['Diameter'].append((network[0], nx.diameter(network[1])))
properties['Radius'].append((network[0], nx.radius(network[1])))
properties['Average shortest path length'].append((network[0],
nx.average_shortest_path_length(network[1])))
else:
components = list(nx.connected_components(network[1]))
sizes = []
for component in components:
sizes.append(len(component))
subnetwork = nx.subgraph(network[1], components[np.where(np.max(sizes) == sizes)[0][0]])
properties['Diameter'].append((network[0], nx.diameter(subnetwork)))
properties['Radius'].append((network[0], nx.radius(subnetwork)))
properties['Average shortest path length'].append((network[0],
nx.average_shortest_path_length(subnetwork)))
else:
properties['Assortativity'].append(None)
properties['Connectivity'].append(None)
properties['Diameter'].append(None)
properties['Radius'].append(None)
properties['Average shortest path length'].append(None)
return properties
| 5,351,482 |
def anova_old(
expression, gene_id, photoperiod_set, strain_set, time_point_set, num_replicates
):
"""One-way analysis of variance (ANOVA) using F-test."""
num_groups = len(photoperiod_set) * len(strain_set) * len(time_point_set)
group_size = num_replicates
total_expression = 0
# First scan: calculate overall average.
for pp in photoperiod_set:
for ss in strain_set:
for tt in time_point_set:
total_expression += sum(expression[(gene_id, pp, ss, tt)])
overall_avg = total_expression / num_groups / group_size
# Second scan: calculate variances.
in_group_var = 0
bt_group_var = 0
for pp in photoperiod_set:
for ss in strain_set:
for tt in time_point_set:
group = expression[(gene_id, pp, ss, tt)]
group_avg = sum(group) / group_size
in_group_var += group_size * (group_avg - overall_avg) ** 2
for element in group:
bt_group_var += (element - group_avg) ** 2
dof = (num_groups - 1, group_size * num_groups - num_groups)
f_stat = bt_group_var / dof[0] / in_group_var * dof[1]
return f_stat, dof
| 5,351,483 |
def _make_ext_reader(ext_bits, ext_mask):
"""Helper for Stroke and ControlPoint parsing.
Returns:
- function reader(file) -> list<extension values>
- function writer(file, values)
- dict mapping extension_name -> extension_index
"""
# Make struct packing strings from the extension details
infos = []
while ext_mask:
bit = ext_mask & ~(ext_mask-1)
ext_mask = ext_mask ^ bit
try: info = ext_bits[bit]
except KeyError: info = ext_bits['unknown'](bit)
infos.append(info)
print(infos)
if len(infos) == 0:
print("[_make_ext_reader lambda] f:", f)
return (lambda f: [], lambda f, vs: None, {})
fmt = '<' + ''.join(info[1] for info in infos)
names = [info[0] for info in infos]
if '@' in fmt:
# struct.unpack isn't general enough to do the job
fmts = ['<'+info[1] for info in infos]
def reader(f, fmts=fmts):
print("[_make_ext_reader reader 1] f:", f, "fmt:", fmt)
values = [None] * len(fmts)
for i,fmt in enumerate(fmts):
if fmt == '<@':
nbytes, = struct.unpack('<I', f.read(4))
values[i] = f.read(nbytes)
else:
                    values[i], = struct.unpack(fmt, f.read(4))
            return values
else:
def reader(f, fmt=fmt, nbytes=len(infos)*4):
print("[_make_ext_reader reader 2] f:", f, "fmt:", fmt, "nbytes:", nbytes)
values = list(struct.unpack(fmt, f.read(nbytes)))
print("values", values)
return values
def writer(f, values, fmt=fmt):
print("[_make_ext_reader writer] f:", f, "values:", values, "fmt:", fmt)
return f.write(struct.pack(fmt, *values))
lookup = dict( (name,i) for (i,name) in enumerate(names) )
return reader, writer, lookup
| 5,351,484 |
def resolve_covariant(n_total, covariant=None):
"""Resolves a covariant in the following cases:
- If a covariant is not provided a diagonal matrix of 1s is generated, and symmetry is checked via a comparison with the datasets transpose
- If a covariant is provided, the symmetry is checked
args:
n_total {int} -- total number of informative features
covariant {[type]} -- [description] (default: {None})
returns:
covariant {np_array}
"""
if covariant is None:
print("No covariant provided, generating one.")
covariant = np.diag(np.ones(n_total))
# test for symmetry on covariance matrix by comparing the matrix to its transpose
try:
assert np.all(covariant == covariant.T)
except AssertionError:
print("Assertion error - please check covariance matrix is symmetric.")
return covariant
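# Hedged usage sketch (not from the original source): with no covariant supplied a
# diagonal matrix of ones is generated; a symmetric matrix is passed through after
# the symmetry check.
import numpy as np
print(resolve_covariant(3))   # 3x3 diagonal of ones
print(resolve_covariant(2, covariant=np.array([[1.0, 0.2],
                                                [0.2, 1.0]])))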
| 5,351,485 |
def create_generic_constant(
type_spec: Optional[computation_types.Type],
scalar_value: Union[int,
float]) -> building_blocks.ComputationBuildingBlock:
"""Creates constant for a combination of federated, tuple and tensor types.
Args:
type_spec: A `computation_types.Type` containing only federated, tuple or
tensor types, or `None` to use to construct a generic constant.
scalar_value: The scalar value we wish this constant to have.
Returns:
Instance of `building_blocks.ComputationBuildingBlock`
representing `scalar_value` packed into `type_spec`.
Raises:
TypeError: If types don't match their specification in the args section.
Notice validation of consistency of `type_spec` with `scalar_value` is not
  the responsibility of this function.
"""
if type_spec is None:
return create_tensorflow_constant(type_spec, scalar_value)
py_typecheck.check_type(type_spec, computation_types.Type)
inferred_scalar_value_type = type_conversions.infer_type(scalar_value)
if (not inferred_scalar_value_type.is_tensor() or
inferred_scalar_value_type.shape != tf.TensorShape(())):
raise TypeError(
'Must pass a scalar value to `create_generic_constant`; encountered a '
'value {}'.format(scalar_value))
if not type_analysis.contains_only(
type_spec, lambda t: t.is_federated() or t.is_struct() or t.is_tensor()):
raise TypeError
if type_analysis.contains_only(type_spec,
lambda t: t.is_struct() or t.is_tensor()):
return create_tensorflow_constant(type_spec, scalar_value)
elif type_spec.is_federated():
unplaced_zero = create_tensorflow_constant(type_spec.member, scalar_value)
if type_spec.placement == placements.CLIENTS:
placement_federated_type = computation_types.FederatedType(
type_spec.member, type_spec.placement, all_equal=True)
placement_fn_type = computation_types.FunctionType(
type_spec.member, placement_federated_type)
placement_function = building_blocks.Intrinsic(
intrinsic_defs.FEDERATED_VALUE_AT_CLIENTS.uri, placement_fn_type)
elif type_spec.placement == placements.SERVER:
placement_federated_type = computation_types.FederatedType(
type_spec.member, type_spec.placement, all_equal=True)
placement_fn_type = computation_types.FunctionType(
type_spec.member, placement_federated_type)
placement_function = building_blocks.Intrinsic(
intrinsic_defs.FEDERATED_VALUE_AT_SERVER.uri, placement_fn_type)
return building_blocks.Call(placement_function, unplaced_zero)
elif type_spec.is_struct():
elements = []
for k in range(len(type_spec)):
elements.append(create_generic_constant(type_spec[k], scalar_value))
names = [name for name, _ in structure.iter_elements(type_spec)]
packed_elements = building_blocks.Struct(elements)
named_tuple = create_named_tuple(packed_elements, names,
type_spec.python_container)
return named_tuple
else:
raise ValueError(
'The type_spec {} has slipped through all our '
'generic constant cases, and failed to raise.'.format(type_spec))
| 5,351,486 |
def all_same(lst: list) -> bool:
"""test if all list entries are the same"""
return lst[1:] == lst[:-1]
| 5,351,487 |
def binary_find(N, x, array):
"""
Binary search
:param N: size of the array
:param x: value
:param array: array
:return: position where it is found. -1 if it is not found
"""
lower = 0
upper = N
while (lower + 1) < upper:
mid = int((lower + upper) / 2)
if x < array[mid]:
upper = mid
else:
lower = mid
if array[lower] <= x:
return lower
return -1
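# Hedged usage sketch (not from the original source) on a sorted array: the index
# of the rightmost element <= x is returned, or -1 when x is below every element.
arr = [1, 3, 5, 7, 9]
print(binary_find(len(arr), 6, arr))   # 2, since arr[2] == 5 <= 6
print(binary_find(len(arr), 0, arr))   # -1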
| 5,351,488 |
def _add_data_entity(app_context, entity_type, data):
"""Insert new entity into a given namespace."""
old_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace(app_context.get_namespace_name())
new_object = entity_type()
new_object.data = data
new_object.put()
return new_object
finally:
namespace_manager.set_namespace(old_namespace)
| 5,351,489 |
def scalarmat(*q):
"""multiplies every object in q with each object in q. Should return a unity matrix for an orthonormal system"""
ret=[]
for a in q:
toa=[]
for b in q:
toa.append(a*b)
ret.append(toa)
return ret
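# Hedged usage sketch (not from the original source): works with any objects whose
# "*" operator is a scalar product. With this minimal vector wrapper the canonical
# 2-D basis gives the 2x2 identity matrix.
class _DemoVec:
    def __init__(self, *xs):
        self.xs = xs
    def __mul__(self, other):
        return sum(a * b for a, b in zip(self.xs, other.xs))
print(scalarmat(_DemoVec(1, 0), _DemoVec(0, 1)))   # [[1, 0], [0, 1]]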
| 5,351,490 |
def frames_per_second():
""" Return the estimated frames per second
Returns the current estimate for frames-per-second (FPS).
FPS is estimated by measured the amount of time that has elapsed since
this function was previously called. The FPS estimate is low-pass filtered
to reduce noise.
This function is intended to be called one time for every iteration of
the program's main loop.
Returns
-------
fps : float
Estimated frames-per-second. This value is low-pass filtered
to reduce noise.
"""
global _time_prev, _fps
time_now = time.time() * 1000.0
dt = time_now - _time_prev
_time_prev = time_now
if dt == 0.0:
return _fps.value
return _fps.update(1000.0 / dt)
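# Hedged usage sketch (not from the original source): call once per iteration of
# the main loop; the module-level _time_prev / _fps filter state defined elsewhere
# in this module is updated on each call.
# while running:                              # hypothetical main loop
#     render_frame()                          # hypothetical per-frame work
#     print('FPS: {:.1f}'.format(frames_per_second()))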
| 5,351,491 |
def _absolute_flat_glob(pattern):
"""
Glob function for a pattern that do not contain wildcards.
:pattern: File or directory path
:return: Iterator that yields at most one valid file or dir name
"""
dirname, basename = os.path.split(pattern)
if basename:
if os.path.exists(pattern):
yield pattern
else:
# Patterns ending with a slash should match only directories.
if os.path.isdir(dirname):
yield pattern
return
| 5,351,492 |
def modularity(partition, graph, weight='weight'):
"""Compute the modularity of a partition of a graph
Parameters
----------
partition : dict
the partition of the nodes, i.e a dictionary where keys are their nodes
and values the communities
graph : networkx.Graph
the networkx graph which is decomposed
weight : str, optional
the key in graph to use as weight. Default to 'weight'
Returns
-------
modularity : float
The modularity
Raises
------
KeyError
If the partition is not a partition of all graph nodes
ValueError
If the graph has no link
TypeError
If graph is not a networkx.Graph
References
----------
.. 1. Newman, M.E.J. & Girvan, M. Finding and evaluating community
structure in networks. Physical Review E 69, 26113(2004).
Examples
--------
>>> G=nx.erdos_renyi_graph(100, 0.01)
>>> part = best_partition(G)
>>> modularity(part, G)
    """
    if type(graph) != nx.Graph:
        raise TypeError("Bad graph type, use only non directed graph")
inc = dict([])
deg = dict([])
links = graph.size(weight=weight)
if links == 0:
raise ValueError("A graph without link has an undefined modularity")
for node in graph:
com = partition[node]
deg[com] = deg.get(com, 0.) + graph.degree(node, weight=weight)
for neighbor, datas in graph[node].items():
edge_weight = datas.get(weight, 1)
if partition[neighbor] == com:
if neighbor == node:
inc[com] = inc.get(com, 0.) + float(edge_weight)
else:
inc[com] = inc.get(com, 0.) + float(edge_weight) / 2.
res = 0.
for com in set(partition.values()):
res += inc.get(com, 0.) - \
((deg.get(com, 0.) ** 2) / (4. * links))
return (1.0 / links) * res
| 5,351,493 |
def config_lst_bin_files(data_files, dlst=None, atol=1e-10, lst_start=0.0, fixed_lst_start=False, verbose=True,
ntimes_per_file=60):
"""
Configure lst grid, starting LST and output files given input data files and LSTbin params.
Parameters
----------
data_files : type=list of lists: nested set of lists, with each nested list containing
paths to miriad files from a particular night. These files should be sorted
by ascending Julian Date. Frequency axis of each file must be identical.
dlst : type=float, LST bin width. If None, will get this from the first file in data_files.
lst_start : type=float, starting LST for binner as it sweeps from lst_start to lst_start + 2pi.
fixed_lst_start : type=bool, if True, LST grid starts at lst_start, regardless of LST of first data
record. Otherwise, LST grid starts at LST of first data record.
ntimes_per_file : type=int, number of LST bins in a single output file
Returns (lst_grid, dlst, file_lsts, start_lst)
-------
lst_grid : float ndarray holding LST bin centers
dlst : float, LST bin width of output lst_grid
file_lsts : list, contains the lst grid of each output file
start_lst : float, starting lst for LST binner
"""
# get dlst from first data file if None
if dlst is None:
start, stop, int_time = utils.get_miriad_times(data_files[0][0])
dlst = int_time
# get start and stop times for each list of files in data_files.
# add_int_buffer adds an integration to the end time of df[:-1] files,
# and the %(2pi) ensures everything is within a 2pi LST grid.
data_times = []
for df in data_files:
data_times.append(np.array(utils.get_miriad_times(df, add_int_buffer=True))[:2, :].T % (2 * np.pi))
# unwrap data_times less than lst_start, get starting and ending lst
start_lst = 100
end_lst = -1
for dt in data_times:
# unwrap starts below lst_start
dt[:, 0][dt[:, 0] < lst_start - atol] += 2 * np.pi
# unwrap ends below starts
dt[:, 1][dt[:, 1] < dt[:, 0] - atol] += 2 * np.pi
# get start and end lst
start_lst = np.min(np.append(start_lst, dt[:, 0]))
end_lst = np.max(np.append(end_lst, dt.ravel()))
# ensure start_lst isn't beyond 2pi
if start_lst >= (2 * np.pi):
start_lst -= 2 * np.pi
end_lst -= 2 * np.pi
for dt in data_times:
dt -= 2 * np.pi
# create lst_grid
if fixed_lst_start:
start_lst = lst_start
lst_grid = make_lst_grid(dlst, lst_start=start_lst, verbose=verbose)
dlst = np.median(np.diff(lst_grid))
# get starting and stopping lst_grid indices
start_diff = lst_grid - start_lst
start_diff[start_diff < -dlst / 2 - atol] = 100
start_index = np.argmin(start_diff)
end_diff = lst_grid - end_lst
end_diff[end_diff > dlst / 2 + atol] = -100
end_index = np.argmax(end_diff)
# get number of output files
nfiles = int(np.ceil(float(end_index - start_index) / ntimes_per_file))
# get output file lsts
file_lsts = [lst_grid[start_index:end_index][ntimes_per_file * i:ntimes_per_file * (i + 1)] for i in range(nfiles)]
return data_times, lst_grid, dlst, file_lsts, start_lst
| 5,351,494 |
def list_members(t):
"""
List members
"""
owner, slug = get_slug()
# Get members
rel = {}
next_cursor = -1
while next_cursor != 0:
m = t.lists.members(
slug=slug,
owner_screen_name=owner,
cursor=next_cursor,
include_entities=False)
for u in m['users']:
rel[u['name']] = '@' + u['screen_name']
next_cursor = m['next_cursor']
printNicely('All: ' + str(len(rel)) + ' members.')
for name in rel:
user = ' ' + cycle_color(name)
user += color_func(c['TWEET']['nick'])(' ' + rel[name] + ' ')
printNicely(user)
| 5,351,495 |
def get_str_arr_info(val):
""" Find type of string in array val, and also the min and max length. Return
None if val does not contain strings."""
fval = np.array(val).flatten()
num_el = len(fval)
max_length = 0
total_length = 0
for sval in fval:
len_sval = len(sval)
if len_sval > max_length:
max_length = len_sval
total_length += len_sval
return (num_el, max_length, total_length)
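# Hedged usage sketch (not from the original source): a nested list of strings is
# flattened and summarised as (number of elements, longest string, total length).
print(get_str_arr_info([['ab', 'cde'], ['f', 'ghij']]))   # (4, 4, 10)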
| 5,351,496 |
def generate_priors(image_size=300,
layer_sizes=None,
pool_ratios=None,
min_sizes=None,
max_sizes=None,
aspect_ratios=None):
# TODO update feature maps, min_sizes, max_sizes for inputs size 5xx
"""
This method generate prior boxes for SSD Model. In total, there will be 8732 prior boxes
:param image_size: input image size for SSD Model
:param layer_sizes: Layer sizes for each feature map
:param pool_ratios: pooling ratio for each feature map.
layer_size*pool_ratio = image_size
:param min_sizes: minimum prior box size
:param max_sizes: maximum prior box size
:param aspect_ratios: ratio for prior box height and width
:return: tensor of prior boxes
"""
if aspect_ratios is None:
aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]
if min_sizes is None:
min_sizes = [30, 60, 111, 162, 213, 264]
if max_sizes is None:
max_sizes = [60, 111, 162, 213, 264, 315]
if pool_ratios is None:
pool_ratios = [8, 16, 32, 64, 100, 300]
if layer_sizes is None:
layer_sizes = [38, 19, 10, 5, 3, 1]
boxes = []
for layer_size_idx, layer_size in enumerate(layer_sizes):
min_size = min_sizes[layer_size_idx]
max_size = max_sizes[layer_size_idx]
pool_ratio = pool_ratios[layer_size_idx]
for layer_height in range(layer_size):
for layer_width in range(layer_size):
layer_image_size = image_size / pool_ratio
center_dim_x = (layer_width + 0.5) / layer_image_size
center_dim_y = (layer_height + 0.5) / layer_image_size
layer_min_size = min_size / image_size
boxes += [center_dim_x, center_dim_y, layer_min_size, layer_min_size]
diagonal = sqrt(layer_min_size * (max_size/image_size))
boxes += [center_dim_x, center_dim_y, diagonal, diagonal]
for ar in aspect_ratios[layer_size_idx]:
boxes += [center_dim_x, center_dim_y, layer_min_size * sqrt(ar), layer_min_size / sqrt(ar)]
boxes += [center_dim_x, center_dim_y, layer_min_size / sqrt(ar), layer_min_size * sqrt(ar)]
output = torch.Tensor(boxes).view(-1, 4).clamp_(min=0, max=1)
output.clamp_(max=1, min=0)
return output
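# Hedged usage sketch (not from the original source, requires torch): the default
# SSD300 configuration yields 8732 prior boxes
# (38^2*4 + 19^2*6 + 10^2*6 + 5^2*6 + 3^2*4 + 1*4), each stored as a normalised
# (cx, cy, w, h) row.
# priors = generate_priors()
# print(priors.shape)   # torch.Size([8732, 4])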
| 5,351,497 |
def delete_properties_file(id):
""" Deletes a property file by its id
Parameters:
id (): a special number identifying the material system, as an int.
Returns: None
"""
os.remove("property_calculations/properties_"+str(id)+".txt")
return
| 5,351,498 |
def listFiles(dir):
"""
Walks the path and subdirectories to return a list of files.
Parameters
----------
dir : str
the top directory to search
subdirectories are also searched
Returns
-------
listname: list
a list of files in dir and subdirectories
Notes
-----
This can be replaced by functions in `os.path`, as if 3.4, pathlib is probably better.
It is not clear that this function is used anywhere in ChiantiPy
"""
alist = os.walk(dir)
listname = []
for (dirpath,dirnames,filenames) in alist:
if len(dirnames) == 0:
for f in filenames:
file = os.path.join(dirpath,f)
if os.path.isfile(file):
listname.append(file)
else:
for f in filenames:
file = os.path.join(dirpath,f)
if os.path.isfile(file):
listname.append(file)
return listname
| 5,351,499 |