content (string, 22-815k chars) | id (int64, 0-4.91M)
---|---
def test_analyze(request, hash, db_name):
"""
Get features of a sequence, using the sequence's sha-1 hash as the
identifier.
"""
db = blat.models.Feature_Database.objects.get(name=db_name)
sequence = blat.models.Sequence.objects.get(db=db,hash=hash)
ts = int(time.mktime(sequence.modified.timetuple()))
return render_to_response(
'test/analyze.html', { "hash" : hash, "mtime" : ts },
context_instance=RequestContext(request)
)
| 5,350,100 |
def _collect_scaling_groups(owner: str) -> List:
"""Collect autoscaling groups that contain key `ES_role` and belong to the specified owner"""
client = boto3.client("autoscaling")
print("Collecting scaling groups")
resp = client.describe_auto_scaling_groups()
assert "NextToken" not in resp, "did not program to handle pagination"
groups = resp['AutoScalingGroups']
result = []
for group in groups:
if _get_tag_val(group['Tags'], 'Owner') == owner and \
any([tag['Key'] == ES_ROLE_KEY for tag in group['Tags']]):
result.append(group)
return result
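# The helper `_get_tag_val` and the constant `ES_ROLE_KEY` are referenced above but not
# defined in this snippet; a minimal sketch of what they might look like is given below.
# The names and behaviour are assumptions, not taken from the original source (the tag key
# "ES_role" is the one named in the docstring).
ES_ROLE_KEY = "ES_role"  # assumed tag key

def _get_tag_val(tags, key):
    """Return the value of the tag named `key` from a list of {'Key': ..., 'Value': ...} dicts, or None."""
    for tag in tags:
        if tag.get("Key") == key:
            return tag.get("Value")
    return None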
| 5,350,101 |
def check_for_updates():
"""
Check for the existence of newer versions of the client, reporting both current release version and development
version.
For help installing development versions of the client, see the docs for
:py:mod:`synapseclient` or the `README.md <https://github.com/Sage-Bionetworks/synapsePythonClient>`_.
"""
sys.stderr.write('Python Synapse Client\n')
sys.stderr.write('currently running version: %s\n' % synapseclient.__version__)
release_version_info = _get_version_info(_VERSION_URL)
sys.stderr.write('latest release version: %s\n' % release_version_info['latestVersion'])
if _version_tuple(synapseclient.__version__, levels=3) < _version_tuple(release_version_info['latestVersion'],
levels=3):
print(("\nUPGRADE AVAILABLE\n\nA more recent version of the Synapse Client (%s) is available. "
"Your version (%s) can be upgraded by typing:\n"
" pip install --upgrade synapseclient\n\n") % (release_version_info['latestVersion'],
synapseclient.__version__,))
else:
sys.stderr.write('\nYour Synapse client is up to date!\n')
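# `_version_tuple` and `_get_version_info` are helpers not shown in this snippet. A minimal
# sketch of a comparable `_version_tuple` is below; it is an assumption, not the synapseclient
# implementation, and it ignores pre-release suffixes such as '1.2.3.dev1'.
def _version_tuple(version, levels=3):
    """Split a dotted version string into a fixed-length tuple of ints, e.g. '2.7.1' -> (2, 7, 1)."""
    parts = (str(version).split(".") + ["0"] * levels)[:levels]
    return tuple(int(part) for part in parts)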
| 5,350,102 |
def generate(env):
# NOTE: SCons requires the use of this name, which fails gpylint.
"""SCons entry point for this tool."""
env = env # Silence gpylint
__builtin__.AddTargetGroup = AddTargetGroup
__builtin__.AddTargetHelp = AddTargetHelp
__builtin__.GetTargetGroups = GetTargetGroups
__builtin__.GetTargetModes = GetTargetModes
__builtin__.GetTargets = GetTargets
env.AddMethod(SetTargetDescription)
env.AddMethod(SetTargetProperty)
# Defer per-mode setup
env.Defer(AddTargetMode)
| 5,350,103 |
def cross_entropy_loss(logits, labels, label_smoothing=0., dtype=jnp.float32):
"""Compute cross entropy for logits and labels w/ label smoothing
Args:
logits: [batch, length, num_classes] float array.
labels: categorical labels [batch, length] int array.
label_smoothing: label smoothing constant, used to determine the on and off values.
dtype: dtype to perform loss calcs in, including log_softmax
"""
num_classes = logits.shape[-1]
labels = jax.nn.one_hot(labels, num_classes, dtype=dtype)
if label_smoothing > 0:
labels = labels * (1 - label_smoothing) + label_smoothing / num_classes
logp = jax.nn.log_softmax(logits.astype(dtype))
return -jnp.mean(jnp.sum(logp * labels, axis=-1))
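# A minimal usage sketch (shapes and values are made up, not from the original source):
# three classes, a batch of 2 sequences of length 4, with a little label smoothing.
import jax
import jax.numpy as jnp

example_logits = jax.random.normal(jax.random.PRNGKey(0), (2, 4, 3))  # [batch, length, num_classes]
example_labels = jnp.array([[0, 1, 2, 1], [2, 2, 0, 1]])              # [batch, length]
loss = cross_entropy_loss(example_logits, example_labels, label_smoothing=0.1)
print(loss)  # scalar mean cross entropy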
| 5,350,104 |
def set_optimizer(name, model, learning_rate):
"""
Specify which optimizer to use during training.
Initialize a torch.optim optimizer for the given model based on the specified name and learning rate.
Parameters
----------
name : string or None, default = 'adam'
The name of the torch.optim optimizer to be used. The following
strings are accepted as arguments: 'adagrad', 'adam', 'adamax', 'adamw', 'rmsprop', or 'sgd'
model : utils.models.EncoderDecoder
The model which is to be optimized
learning_rate : float or None
The learning rate to be used by the optimizer. If set to None, the default value as defined in
torch.optim is used
Returns
-------
torch.optim optimizer class
A torch.optim optimizer that implements one of the following algorithms:
Adagrad, Adam, Adamax, AdamW, RMSprop, or SGD (stochastic gradient descent)
SGD is set to use a momentum of 0.5.
"""
if name == "adam":
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
if name == "sgd":
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.5)
if name == "adamw":
optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
if name == "adagrad":
optimizer = torch.optim.Adagrad(model.parameters(), lr=learning_rate)
if name == "adamax":
optimizer = torch.optim.Adamax(model.parameters(), lr=learning_rate)
if name == "rmsprop":
optimizer = torch.optim.RMSprop(model.parameters(), lr=learning_rate)
return optimizer
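# Usage sketch: a stand-in torch model is used here instead of utils.models.EncoderDecoder,
# purely for illustration.
import torch

toy_model = torch.nn.Linear(10, 2)
opt = set_optimizer("sgd", toy_model, learning_rate=0.01)  # SGD with momentum 0.5
print(type(opt).__name__)  # SGD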
| 5,350,105 |
def get_http_url(server_path, get_path):
"""
    Variant that uses httplib directly; no better than urllib2.
    server_path = "example.com"
    get_path = "/some_path"
    """
    # urllib is a higher-level library that, for http, uses httplib under the hood;
    # httplib is used here for better error detection
direct_http = 1
if direct_http:
import httplib
conn = httplib.HTTPConnection(server_path)
try:
conn.request("GET", get_path)
        except Exception:
            raise RuntimeError("Can't connect to: " + server_path)
response = conn.getresponse()
if response.reason != 'OK':
raise RuntimeError("Error getting data from: " + get_path)
#print response.status, response.reason, response.msg
return response
else:
import urllib
f = urllib.urlopen("http://" + server_path + get_path)
#print f.info()
return f
| 5,350,106 |
def not_found_view(request):
"""Not Found view.
"""
model = request.context
return render_main_template(model, request, contenttile='not_found')
| 5,350,107 |
def top_filtering(
logits, top_k=0, top_p=0.0, threshold=-float("Inf"), filter_value=-float("Inf")
):
""" Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.
top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset
whose total probability mass is greater than or equal to the threshold top_p.
In practice, we select the highest probability tokens whose cumulative probability mass exceeds
the threshold top_p.
threshold: a minimal threshold to keep logits
Taken from `interact.py`
"""
assert (
logits.dim() == 1
) # Only work for batch size 1 for now - could update but it would obfuscate a bit the code
top_k = min(top_k, logits.size(-1))
if top_k > 0:
# Remove all tokens with a probability less than the last token in the top-k tokens
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
# Compute cumulative probabilities of sorted tokens
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probabilities = torch.cumsum(
F.softmax(sorted_logits, dim=-1), dim=-1
)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probabilities > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# Back to unsorted indices and set them to -infinity
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
indices_to_remove = logits < threshold
logits[indices_to_remove] = filter_value
return logits
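# Usage sketch (hypothetical logits, not from the original source): keep the top 5 tokens
# and the 0.9 nucleus, then sample one token from the filtered distribution.
import torch
import torch.nn.functional as F

example_logits = torch.randn(50)                    # vocabulary of 50 tokens, batch size 1
filtered = top_filtering(example_logits.clone(), top_k=5, top_p=0.9)  # clone: filtering is in-place
probs = F.softmax(filtered, dim=-1)                 # removed entries get ~zero probability
next_token = torch.multinomial(probs, num_samples=1)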
| 5,350,108 |
def get_physical_type(obj):
"""
Return the physical type that corresponds to a unit (or another
physical type representation).
Parameters
----------
obj : quantity-like or `~astropy.units.PhysicalType`-like
An object that (implicitly or explicitly) has a corresponding
physical type. This object may be a unit, a
`~astropy.units.Quantity`, an object that can be converted to a
`~astropy.units.Quantity` (such as a number or array), a string
that contains a name of a physical type, or a
`~astropy.units.PhysicalType` instance.
Returns
-------
`~astropy.units.PhysicalType`
A representation of the physical type(s) of the unit.
Examples
--------
The physical type may be retrieved from a unit or a
`~astropy.units.Quantity`.
>>> import astropy.units as u
>>> u.get_physical_type(u.meter ** -2)
PhysicalType('column density')
>>> u.get_physical_type(0.62 * u.barn * u.Mpc)
PhysicalType('volume')
The physical type may also be retrieved by providing a `str` that
contains the name of a physical type.
>>> u.get_physical_type("energy")
PhysicalType({'energy', 'torque', 'work'})
Numbers and arrays of numbers correspond to a dimensionless physical
type.
>>> u.get_physical_type(1)
PhysicalType('dimensionless')
"""
if isinstance(obj, PhysicalType):
return obj
if isinstance(obj, str):
return _physical_type_from_str(obj)
try:
unit = obj if isinstance(obj, core.UnitBase) else quantity.Quantity(obj, copy=False).unit
except TypeError as exc:
raise TypeError(f"{obj} does not correspond to a physical type.") from exc
unit = _replace_temperatures_with_kelvin(unit)
physical_type_id = unit._get_physical_type_id()
unit_has_known_physical_type = physical_type_id in _physical_unit_mapping
if unit_has_known_physical_type:
return _physical_unit_mapping[physical_type_id]
else:
return PhysicalType(unit, "unknown")
| 5,350,109 |
def reverse(list):
"""Returns a new list or string with the elements or characters in reverse
order"""
if isinstance(list, str):
return "".join(reversed(list))
return _list(reversed(list))
| 5,350,110 |
def PubMedDiabetes(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/linqs",
version: str = "latest",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the PubMedDiabetes graph.
The graph is automatically retrieved from the LINQS repository. The Pubmed Diabetes dataset consists of 19717 scientific publications from
PubMed database pertaining to diabetes classified into one of three classes.
The citation network consists of 44338 links. Each publication in the dataset
is described by a TF/IDF weighted word vector from a dictionary which consists
of 500 unique words.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "latest"
The version of the graph to retrieve.
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of PubMedDiabetes graph.
References
---------------------
Please cite the following if you use the data:
```bib
@inproceedings{namata2012query,
title={Query-driven active surveying for collective classification},
author={Namata, Galileo and London, Ben and Getoor, Lise and Huang, Bert and EDU, UMD},
booktitle={10th International Workshop on Mining and Learning with Graphs},
volume={8},
year={2012}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="PubMedDiabetes",
repository="linqs",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs,
callbacks=[
parse_linqs_pubmed_incidence_matrix
],
callbacks_arguments=[
{
"cites_path": "Pubmed-Diabetes/Pubmed-Diabetes/data/Pubmed-Diabetes.DIRECTED.cites.tab",
"content_path": "Pubmed-Diabetes/Pubmed-Diabetes/data/Pubmed-Diabetes.NODE.paper.tab",
"node_path": "nodes.tsv",
"edge_path": "edges.tsv"
}
]
)()
| 5,350,111 |
def run_dti_artifact_correction(subject_list, session_list):
""" Attach the FSL-based diffusion MRI artifact detection and correction
workflow to the `main_wf`.
Parameters
----------
main_wf: nipype Workflow
wf_name: str
Name of the preprocessing workflow
params: dict with parameter values
atlas_file: str
Path to the anatomical atlas to be transformed to diffusion MRI space.
Nipype Inputs for `main_wf`
---------------------------
Note: The `main_wf` workflow is expected to have an `input_files` and a `datasink` nodes.
input_files.select.diff: input node
datasink: nipype Node
Returns
-------
main_wf: nipype Workflow
"""
# name of output folder
output_dir = opj(DATA, 'processed')
working_dir = opj(DATA, 'interim')
# Infosource - a function free node to iterate over the list of subject names
infosource = pe.Node(IdentityInterface(fields=['subject_id',
'session_id']),
name="infosource")
infosource.iterables = [('subject_id', subject_list),
('session_id', session_list)]
# SelectFiles
templates = {'diff': 'raw/bids/{subject_id}/{session_id}/dwi/{subject_id}_{session_id}_dwi.nii.gz',
'bval': 'raw/bids/{subject_id}/{session_id}/dwi/{subject_id}_{session_id}_dwi.bval',
'bvec': 'raw/bids/{subject_id}/{session_id}/dwi/{subject_id}_{session_id}_dwi.bvec'}
selectfiles = pe.Node(SelectFiles(templates,
base_directory=DATA),
name="selectfiles")
# Datasink
datasink = pe.Node(DataSink(base_directory=DATA,
container=output_dir),
name="datasink")
# The workflow box
art_dti_wf = dti_artifact_correction()
# dataSink output substitutions
## The base name of the 'diff' file for the substitutions
diff_fbasename = remove_ext(op.basename(get_input_file_name(selectfiles, 'diff')))
regexp_subst = [
(r"/brain_mask_{diff}_space\.nii$", "/brain_mask.nii"),
(r"/eddy_corrected\.nii$", "/{diff}_eddycor.nii"),
]
regexp_subst = format_pair_list(regexp_subst, diff=diff_fbasename)
regexp_subst += extension_duplicates(regexp_subst)
datasink.inputs.regexp_substitutions = extend_trait_list(datasink.inputs.regexp_substitutions,
regexp_subst)
wf = pe.Workflow(name='artifact')
wf.base_dir = working_dir
# input and output diffusion MRI workflow to main workflow connections
wf.connect([(infosource, selectfiles, [('subject_id', 'subject_id'),
('session_id', 'session_id')]),
(selectfiles, art_dti_wf, [("diff", "dti_art_input.diff"),
("bval", "dti_art_input.bval"),
("bvec", "dti_art_input.bvec"),
]),
(art_dti_wf, datasink, [("dti_art_output.eddy_corr_file", "diff.@eddy_corr_file"),
("dti_art_output.bvec_rotated", "diff.@bvec_rotated"),
("dti_art_output.brain_mask_1", "diff.@brain_mask_1"),
("dti_art_output.brain_mask_2", "diff.@brain_mask_2"),
("dti_art_output.acqp", "diff.@acquisition_pars"),
("dti_art_output.index", "diff.@acquisition_idx"),
("dti_art_output.avg_b0", "diff.@avg_b0"),
("dti_art_output.hmc_corr_file", "diff.artifact_stats.@hmc_corr_file"),
("dti_art_output.hmc_corr_bvec", "diff.artifact_stats.@hmc_rot_bvec"),
("dti_art_output.hmc_corr_xfms", "diff.artifact_stats.@hmc_corr_xfms"),
# ("dti_art_output.art_displacement_files", "diff.artifact_stats.@art_disp_files"),
# ("dti_art_output.art_intensity_files", "diff.artifact_stats.@art_ints_files"),
# ("dti_art_output.art_norm_files", "diff.artifact_stats.@art_norm_files"),
# ("dti_art_output.art_outlier_files", "diff.artifact_stats.@art_outliers"),
# ("dti_art_output.art_plot_files", "diff.artifact_stats.@art_plots"),
# ("dti_art_output.art_statistic_files", "diff.artifact_stats.@art_stats"),
]),
])
wf.run()
return
| 5,350,112 |
def find_neighbor_indices(atoms, probe, k):
"""
Returns list of indices of atoms within probe distance to atom k.
"""
neighbor_indices = []
atom_k = atoms[k]
radius = atom_k.radius + probe + probe
indices = list(range(k))
indices = indices + list(range(k+1, len(atoms)))
for i in indices:
atom_i = atoms[i]
dist = pos_distance(atom_k.pos, atom_i.pos)
if dist < radius + atom_i.radius:
neighbor_indices.append(i)
return neighbor_indices
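# `pos_distance` and the atom objects are not defined in this snippet; below is a minimal
# sketch of a compatible Euclidean-distance helper. The interface (positions exposing
# .x/.y/.z attributes) is an assumption for illustration only.
import math

def pos_distance(p1, p2):
    """Euclidean distance between two 3D positions with x, y and z attributes."""
    return math.sqrt((p1.x - p2.x) ** 2 + (p1.y - p2.y) ** 2 + (p1.z - p2.z) ** 2)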
| 5,350,113 |
def genDir(EAs):
"""
Generate the projection direction given the euler angles. Since the image
is in the x-y plane, the projection direction is given by R(EA)*z where
z = (0,0,1)
"""
dir_vec = np.array([rotmat3D_EA(*EA)[:, 2] for EA in EAs])
return dir_vec
| 5,350,114 |
def plainica(x, reducedim=0.99, backend=None, random_state=None):
""" Source decomposition with ICA.
Apply ICA to the data x, with optional PCA dimensionality reduction.
Parameters
----------
x : array, shape (n_trials, n_channels, n_samples) or (n_channels, n_samples)
data set
    reducedim : {int, float, 'no pca'}, optional
        A number less than 1 is interpreted as the fraction of variance that should remain in the data. All
        components that describe in total less than `1-reducedim` of the variance are removed by the PCA step.
        An integer number of 1 or greater is interpreted as the number of components to keep after applying the PCA.
        If set to 'no pca' the PCA step is skipped.
backend : dict-like, optional
Specify backend to use. When set to None the backend configured in config.backend is used.
Returns
-------
result : ResultICA
Source decomposition
"""
x = atleast_3d(x)
t, m, l = np.shape(x)
if backend is None:
backend = scotbackend
# pre-transform the data with PCA
if reducedim == 'no pca':
c = np.eye(m)
d = np.eye(m)
xpca = x
else:
c, d, xpca = backend['pca'](x, reducedim)
# run on residuals ICA to estimate volume conduction
mx, ux = backend['ica'](cat_trials(xpca), random_state=random_state)
    # correct (un)mixing matrix estimates
mx = mx.dot(d)
ux = c.dot(ux)
class Result:
unmixing = ux
mixing = mx
return Result
| 5,350,115 |
def make_spectrum_layout(obj, spectra, user, device, width, smoothing, smooth_number):
"""
Helper function that takes the object, spectra and user info,
as well as the total width of the figure,
and produces one layout for a spectrum plot.
This can be used once for each tab on the spectrum plot,
if using different spectrum types.
Parameters
----------
obj : dict
The underlying object that is associated with all these spectra.
spectra : dict
The different spectra to be plotted. This can be a subset of
e.g., all the spectra of one type.
user : dict
info about the user, used to get the individual user plot preferences.
device: string
name of the device used ("browser", "mobile", "mobile_portrait", "tablet", etc).
width: int
width of the external frame of the plot, including the buttons/sliders.
smoothing: bool
        whether to start the display with the smoothed plot or the full-resolution spectrum.
smooth_number: int
number of data points to use in the moving average when displaying the smoothed spectrum.
Returns
-------
dict
Bokeh JSON embedding of one layout that can be tabbed or
used as the plot specifications on its own.
"""
rainbow = cm.get_cmap('rainbow', len(spectra))
palette = list(map(rgb2hex, rainbow(range(len(spectra)))))
color_map = dict(zip([s.id for s in spectra], palette))
data = []
for i, s in enumerate(spectra):
# normalize spectra to a median flux of 1 for easy comparison
normfac = np.nanmedian(np.abs(s.fluxes))
normfac = normfac if normfac != 0.0 else 1e-20
altdata = json.dumps(s.altdata) if s.altdata is not None else ""
annotations = (
AnnotationOnSpectrum.query_records_accessible_by(user)
.filter(AnnotationOnSpectrum.spectrum_id == s.id)
.all()
)
annotations = (
json.dumps([{a.origin: a.data} for a in annotations])
if len(annotations)
else ""
)
df = pd.DataFrame(
{
'wavelength': s.wavelengths,
'flux': s.fluxes / normfac,
'flux_original': s.fluxes / normfac,
'id': s.id,
'telescope': s.instrument.telescope.name,
'instrument': s.instrument.name,
'date_observed': s.observed_at.isoformat(sep=' ', timespec='seconds'),
'pi': (
s.assignment.run.pi
if s.assignment is not None
else (
s.followup_request.allocation.pi
if s.followup_request is not None
else ""
)
),
'origin': s.origin,
'altdata': altdata[:20] + "..." if len(altdata) > 20 else altdata,
'annotations': annotations,
}
)
data.append(df)
data = pd.concat(data)
data.sort_values(by=['date_observed', 'wavelength'], inplace=True)
split = data.groupby('id', sort=False)
(
frame_width,
aspect_ratio,
legend_row_height,
legend_items_per_row,
) = get_dimensions_by_device(device, width)
plot_height = (
math.floor(width / aspect_ratio)
if device == "browser"
else math.floor(width / aspect_ratio)
+ legend_row_height * int(len(split) / legend_items_per_row)
+ 30 # 30 is the height of the toolbar
)
# Add some height for the checkboxes and sliders
if device == "mobile_portrait":
height = plot_height + 440
elif device == "mobile_landscape":
height = plot_height + 370
else:
height = plot_height + 220
# check browser plot_height for legend overflow
if device == "browser":
plot_height_of_legend = (
legend_row_height * int(len(split) / legend_items_per_row)
+ 90 # 90 is height of toolbar plus legend offset
)
if plot_height_of_legend > plot_height:
plot_height = plot_height_of_legend
hover = HoverTool(
tooltips=[
('wavelength', '@wavelength{0,0.000}'),
('flux', '@flux'),
            ('telescope', '@telescope'),
('instrument', '@instrument'),
('UTC date observed', '@date_observed'),
('PI', '@pi'),
('origin', '@origin'),
('altdata', '@altdata{safe}'),
('annotations', '@annotations{safe}'),
],
)
flux_max = np.max(data['flux'])
flux_min = np.min(data['flux'])
ymax = flux_max * 1.05
ymin = flux_min - 0.05 * (flux_max - flux_min)
xmin = np.min(data['wavelength']) - 100
xmax = np.max(data['wavelength']) + 100
if obj.redshift is not None and obj.redshift > 0:
xmin_rest = xmin / (1.0 + obj.redshift)
xmax_rest = xmax / (1.0 + obj.redshift)
active_drag = None if "mobile" in device or "tablet" in device else "box_zoom"
tools = (
"box_zoom, pan, reset"
if "mobile" in device or "tablet" in device
else "box_zoom,wheel_zoom,pan,reset"
)
plot = figure(
frame_width=frame_width,
height=plot_height,
y_range=(ymin, ymax),
x_range=(xmin, xmax),
tools=tools,
toolbar_location="above",
active_drag=active_drag,
)
model_dict = {}
legend_items = []
for i, (key, df) in enumerate(split):
renderers = []
s = next(spec for spec in spectra if spec.id == key)
if s.label is not None and len(s.label) > 0:
label = s.label
else:
label = f'{s.instrument.name} ({s.observed_at.date().strftime("%m/%d/%y")})'
model_dict['s' + str(i)] = plot.step(
x='wavelength',
y='flux',
color=color_map[key],
source=ColumnDataSource(df),
)
renderers.append(model_dict[f's{i}'])
# this starts out the same as the previous plot, but can be binned/smoothed later in JS
dfs = copy.deepcopy(df)
if smoothing:
dfs['flux'] = smoothing_function(dfs['flux_original'], smooth_number)
model_dict[f'bin{i}'] = plot.step(
x='wavelength', y='flux', color=color_map[key], source=ColumnDataSource(dfs)
)
renderers.append(model_dict[f'bin{i}'])
# add this line plot to be able to show tooltip at hover
model_dict['l' + str(i)] = plot.line(
x='wavelength',
y='flux',
color=color_map[key],
source=ColumnDataSource(df),
line_alpha=0.0,
)
renderers.append(model_dict[f'l{i}'])
legend_items.append(LegendItem(label=label, renderers=renderers))
plot.xaxis.axis_label = 'Wavelength (Å)'
plot.yaxis.axis_label = 'Flux'
plot.toolbar.logo = None
if obj.redshift is not None and obj.redshift > 0:
plot.extra_x_ranges = {"rest_wave": Range1d(start=xmin_rest, end=xmax_rest)}
plot.add_layout(
LinearAxis(x_range_name="rest_wave", axis_label="Rest Wavelength (Å)"),
'above',
)
# TODO how to choose a good default?
plot.y_range = Range1d(0, 1.03 * data.flux.max())
legend_loc = "below" if "mobile" in device or "tablet" in device else "right"
legend_orientation = (
"vertical" if device in ["browser", "mobile_portrait"] else "horizontal"
)
add_plot_legend(plot, legend_items, width, legend_orientation, legend_loc)
# only show this tooltip for spectra, not elemental lines
hover.renderers = list(model_dict.values())
plot.add_tools(hover)
smooth_checkbox = CheckboxGroup(
labels=["smoothing"],
active=[0] if smoothing else [],
)
smooth_slider = Slider(
start=0.0,
end=100.0,
value=0.0,
step=1.0,
show_value=False,
max_width=350,
# margin=(4, 10, 0, 10),
)
smooth_input = NumericInput(value=smooth_number)
smooth_callback = CustomJS(
args=dict(
model_dict=model_dict,
n_labels=len(split),
checkbox=smooth_checkbox,
input=smooth_input,
slider=smooth_slider,
),
code=open(
os.path.join(
os.path.dirname(__file__), '../static/js/plotjs', 'smooth_spectra.js'
)
).read(),
)
smooth_checkbox.js_on_click(smooth_callback)
smooth_input.js_on_change('value', smooth_callback)
smooth_slider.js_on_change(
'value',
CustomJS(
args={'slider': smooth_slider, 'input': smooth_input},
code="""
input.value = slider.value;
input.change.emit();
""",
),
)
smooth_column = column(
smooth_checkbox,
smooth_slider,
smooth_input,
width=width if "mobile" in device else int(width * 1 / 5) - 20,
margin=(4, 10, 0, 10),
)
# 20 is for padding
slider_width = width if "mobile" in device else int(width * 2 / 5) - 20
z_title = Div(text="Redshift (<i>z</i>): ")
z_slider = Slider(
value=obj.redshift if obj.redshift is not None else 0.0,
start=0.0,
end=3.0,
step=0.00001,
show_value=False,
format="0[.]0000",
)
z_input = NumericInput(
value=obj.redshift if obj.redshift is not None else 0.0,
mode='float',
)
z_slider.js_on_change(
'value',
CustomJS(
args={'slider': z_slider, 'input': z_input},
code="""
input.value = slider.value;
input.change.emit();
""",
),
)
z = column(
z_title,
z_slider,
z_input,
width=slider_width,
margin=(4, 10, 0, 10),
)
v_title = Div(text="<i>V</i><sub>expansion</sub> (km/s): ")
v_exp_slider = Slider(
value=0.0,
start=0.0,
end=3e4,
step=10.0,
show_value=False,
)
v_exp_input = NumericInput(value=0, mode='int')
v_exp_slider.js_on_change(
'value',
CustomJS(
args={'slider': v_exp_slider, 'input': v_exp_input},
code="""
input.value = slider.value;
input.change.emit();
""",
),
)
v_exp = column(
v_title,
v_exp_slider,
v_exp_input,
width=slider_width,
margin=(0, 10, 0, 10),
)
# Track elements that need to be shifted with change in z / v
shifting_elements = []
renderers = []
obj_redshift = 0 if obj.redshift is None else obj.redshift
for i, (name, (wavelengths, color)) in enumerate(SPEC_LINES.items()):
if name in ('Tellurics-1', 'Tellurics-2'):
el_data = pd.DataFrame(
{
'name': name,
'wavelength': [(wavelengths[0] + wavelengths[1]) / 2],
'bandwidth': [wavelengths[1] - wavelengths[0]],
}
)
new_line = plot.vbar(
x='wavelength',
width='bandwidth',
top=ymax,
color=color,
source=ColumnDataSource(el_data),
alpha=0.3,
)
else:
flux_values = list(np.linspace(ymin, ymax, 100))
flux_values[-1] = np.nan
wavelength_values = [
w for w in wavelengths for _ in flux_values
] # repeat each wavelength 100 times
el_data = pd.DataFrame(
{
'name': name,
'x': wavelength_values,
'wavelength': wavelength_values,
'flux': [f for _ in wavelengths for f in flux_values],
}
)
if name != 'Sky Lines':
el_data['x'] = el_data['wavelength'] * (1.0 + obj_redshift)
new_line = plot.line(
x='x',
y='flux',
color=color,
line_alpha=0.3,
source=ColumnDataSource(el_data),
)
new_line.visible = False
model_dict[f'element_{i}'] = new_line
renderers.append(new_line)
if name not in ('Sky Lines', 'Tellurics-1', 'Tellurics-2'):
shifting_elements.append(new_line)
new_line.glyph.line_alpha = 1.0
# add the elemental lines to hover tool
hover_lines = HoverTool(
tooltips=[
('name', '@name'),
('wavelength', '@wavelength{0,0}'),
],
renderers=renderers,
)
plot.add_tools(hover_lines)
# Split spectral line legend into columns
if device == "mobile_portrait":
columns = 3
elif device == "mobile_landscape":
columns = 5
else:
columns = 7
# Create columns from a list.
#
# `list(zip_longest(a, b, c, ...))` returns a tuple where the i-th
# element comes from the i-th iterable argument.
#
# The trick here is to pass in the same iterable `column` times.
# This gives us rows.
rows = itertools.zip_longest(*[iter(SPEC_LINES.items())] * columns)
# To form columns from the rows, zip the rows together.
element_dicts = zip(*rows)
all_column_checkboxes = []
for column_idx, element_dict in enumerate(element_dicts):
element_dict = [e for e in element_dict if e is not None]
labels = [name for name, _ in element_dict]
colors = [color for name, (wavelengths, color) in element_dict]
column_checkboxes = CheckboxWithLegendGroup(
labels=labels, active=[], colors=colors, width=width // (columns + 1)
)
all_column_checkboxes.append(column_checkboxes)
callback_toggle_lines = CustomJS(
args={'column_checkboxes': column_checkboxes, **model_dict},
code=f"""
for (let i = 0; i < {len(labels)}; i = i + 1) {{
let el_idx = i * {columns} + {column_idx};
let el = eval("element_" + el_idx);
el.visible = (column_checkboxes.active.includes(i))
}}
""",
)
column_checkboxes.js_on_click(callback_toggle_lines)
# Move spectral lines when redshift or velocity changes
speclines = {f'specline_{i}': line for i, line in enumerate(shifting_elements)}
callback_zvs = CustomJS(
args={'z': z_input, 'v_exp': v_exp_input, **speclines},
code=f"""
const c = 299792.458; // speed of light in km / s
for (let i = 0; i < {len(speclines)}; i = i + 1) {{
let el = eval("specline_" + i);
el.data_source.data.x = el.data_source.data.wavelength.map(
x_i => ( x_i * (1 + z.value) /
(1 + v_exp.value / c) )
);
el.data_source.change.emit();
}}
""",
)
# Hook up callback that shifts spectral lines when z or v changes
z_input.js_on_change('value', callback_zvs)
v_exp_input.js_on_change('value', callback_zvs)
z_input.js_on_change(
'value',
CustomJS(
args={'z': z_input, 'slider': z_slider},
code="""
// Update slider value to match text input
slider.value = z.value;
""",
),
)
v_exp_input.js_on_change(
'value',
CustomJS(
args={'slider': v_exp_slider, 'v_exp': v_exp_input},
code="""
// Update slider value to match text input
slider.value = v_exp.value;
""",
),
)
row2 = row(all_column_checkboxes)
row3 = (
column(z, v_exp, smooth_column)
if "mobile" in device
else row(z, v_exp, smooth_column)
)
return column(
plot,
row2,
row3,
sizing_mode='stretch_width',
width=width,
height=height,
)
| 5,350,116 |
def all_equal(values: list):
"""Check that all values in given list are equal"""
return all(values[0] == v for v in values)
| 5,350,117 |
def _tester(func, *args):
"""
Tests function ``func`` on arguments and returns first positive.
>>> _tester(lambda x: x%3 == 0, 1, 2, 3, 4, 5, 6)
3
    >>> _tester(lambda x: x%3 == 0, 1, 2) is None
    True
:param func: function(arg)->boolean
:param args: other arguments
:return: something or none
"""
for arg in args:
if arg is not None and func(arg):
return arg
return None
| 5,350,118 |
def test_deadlock(config):
"""test when two backends try to extract state of each other"""
acon1, acon2 = n_async_connect(config, 2)
acurs1 = acon1.cursor()
acurs2 = acon2.cursor()
while True:
acurs1.callproc('pg_query_state', (acon2.get_backend_pid(),))
acurs2.callproc('pg_query_state', (acon1.get_backend_pid(),))
# listen acon1, acon2 with timeout = 10 sec to determine deadlock
r, w, x = select.select([acon1.fileno(), acon2.fileno()], [], [], 10)
        assert (r or w or x), "A deadlock occurred under cross reading of query states"
wait(acon1)
wait(acon2)
# exit from loop if one backend could read state of execution 'pg_query_state'
# from other backend
if acurs1.fetchone() or acurs2.fetchone():
break
n_close((acon1, acon2))
| 5,350,119 |
def new_revision(partno):
"""
Presents the form to add a new revision, and creates it upon POST submit
"""
_load_if_released(partno) # ensures the component exists and is released
form = RevisionForm(request.form)
if request.method == 'POST' and form.validate_on_submit():
now = datetime.now()
result = current_app.mongo.db.components.update_one(
filter={'_id': partno},
update={
'$set': {
'released': False # a new revision is not already released
},
'$push': {
'revisions': {
'date': now,
'comment': form.comment.data
},
'history': {
'date': now,
'user': current_user.id,
'message': 'new revision created'
}
}
}
)
if result.modified_count == 1:
flash('new revision created', 'success')
else:
# should not happen.
flash('no data modified, please contact the administrator', 'error')
return redirect(url_for('components.details', partno=partno))
extract_errors(form)
return render_template('components/revision_form.html', form=form, partno=partno)
| 5,350,120 |
def fresnel_parameter(rays, diffraction_points):
""" returns the fresnel diffraction parameter (always as a positive)
Parameters
----------
rays : [n] list of shapely LineString (3d)
diffraction_points: [n] list of Points (3d)
diffraction point which the ray is rounding
Returns
-------
fresnel diffraction parameters: [n,] float array
"""
    wavelength = 0.1903  # wavelength (m) of the GPS L1 signal at 1575.42 MHz
distances = np.array([r.project(d)
for r, d in zip(rays, diffraction_points)])
nearest_points = (r.interpolate(d) for r, d in zip(rays, distances))
diffraction_distances = np.array(
[d.z-p.z for p, d in zip(nearest_points, diffraction_points)])
v = np.where(distances == 0, -np.inf, diffraction_distances *
(2 / (wavelength * distances))**0.5)
return v
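# Usage sketch (coordinates are made up): one 3D ray from a receiver towards a satellite and
# one candidate diffraction point near the ray. Assumes shapely is available, as implied by
# the docstring.
from shapely.geometry import LineString, Point

example_rays = [LineString([(0.0, 0.0, 1.5), (100.0, 0.0, 80.0)])]
example_points = [Point(20.0, 0.0, 25.0)]
print(fresnel_parameter(example_rays, example_points))  # array with one positive parameter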
| 5,350,121 |
def update_compilers_object(
new_compiler: Dict[str, Any], contract_type: str, previous_compilers: List[Dict[str, Any]]
) -> Iterable[Dict[str, Any]]:
"""
Updates a manifest's top-level `compilers` with a new compiler information object.
- If compiler version already exists, we just update the compiler's `contractTypes`
"""
recorded_new_contract_type = False
for compiler in previous_compilers:
contract_types = compiler.pop("contractTypes")
if contract_type in contract_types:
raise ManifestBuildingError(
f"Contract type: {contract_type} already referenced in `compilers`."
)
if compiler == new_compiler:
contract_types.append(contract_type)
recorded_new_contract_type = True
compiler["contractTypes"] = contract_types
yield compiler
if not recorded_new_contract_type:
new_compiler["contractTypes"] = [contract_type]
yield new_compiler
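# Usage sketch (hypothetical compiler entries): adding a second contract type compiled by the
# same compiler version extends the existing entry instead of appending a duplicate.
existing = [{"name": "solc", "version": "0.8.17", "contractTypes": ["Token"]}]
new_entry = {"name": "solc", "version": "0.8.17"}
print(list(update_compilers_object(new_entry, "Vault", existing)))
# -> [{'name': 'solc', 'version': '0.8.17', 'contractTypes': ['Token', 'Vault']}]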
| 5,350,122 |
def test_webapp_no_locales():
"""Test that locales are not required."""
err = ErrorBundle(listed=False)
data = _get_json()
del data["default_locale"]
del data["locales"]
_detect(err, data)
assert not err.failed()
| 5,350,123 |
def validate(net, val_data, ctx, eval_metric):
"""Test on validation dataset."""
eval_metric.reset()
# set nms threshold and topk constraint
net.set_nms(nms_thresh=0.45, nms_topk=400)
net.hybridize()
for batch in val_data:
data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
det_bboxes = []
det_ids = []
det_scores = []
gt_bboxes = []
gt_ids = []
gt_difficults = []
for x, y in zip(data, label):
# get prediction results
ids, scores, bboxes = net(x)
det_ids.append(ids)
det_scores.append(scores)
# clip to image size
det_bboxes.append(bboxes.clip(0, batch[0].shape[2]))
# split ground truths
gt_ids.append(y.slice_axis(axis=-1, begin=4, end=5))
gt_bboxes.append(y.slice_axis(axis=-1, begin=0, end=4))
gt_difficults.append(y.slice_axis(axis=-1, begin=5, end=6) if y.shape[-1] > 5 else None)
# update metric
eval_metric.update(det_bboxes, det_ids, det_scores, gt_bboxes, gt_ids, gt_difficults)
return eval_metric.get()
| 5,350,124 |
def add_unique_geom_id(point_gdf: gpd.GeoDataFrame, log: Logger=None) -> gpd.GeoDataFrame:
"""Adds an unique identifier (string) to GeoDataFrame of points based on point locations (x/y).
"""
point_gdf[S.xy_id] = [f'{str(round(geom.x, 1))}_{str(round(geom.y, 1))}' for geom in point_gdf[S.geometry]]
unique_count = point_gdf[S.xy_id].nunique()
unique_share = round(100 * unique_count/len(point_gdf.index), 2)
    if log is not None:
        log.info(f'found {unique_count} unique sampling points ({unique_share} %)')
return point_gdf
| 5,350,125 |
def get_data_parallel_rank():
"""Return my rank for the data parallel group."""
return _TOPOLOGY.get_data_parallel_rank()
| 5,350,126 |
def entropy(data):
"""
Compute the Shannon entropy, a measure of uncertainty.
"""
    if len(data) == 0:
        return None
    n = sum(data)
    if n == 0:
        return 0.0
    _op = lambda f: f * math.log(f)
    # zero counts are skipped: f*log(f) -> 0 as f -> 0, and log(0) would raise an error
    return - sum(_op(float(i) / n) for i in data if i > 0)
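# Usage sketch: `data` is a sequence of counts; the result is in nats (natural log).
print(entropy([5, 5]))   # two equally likely outcomes -> ln(2) ~ 0.693
print(entropy([9, 1]))   # skewed counts -> ~0.325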
| 5,350,127 |
def transform_mtl_to_stac(metadata: dict) -> Item:
"""
Handle USGS MTL as a dict and return a STAC item.
NOT IMPLEMENTED
Issues include:
- There's no reference to UTM Zone or any other CRS info in the MTL
- There's no absolute file path or reference to a URI to find data.
"""
LANDSAT_METADATA = metadata["LANDSAT_METADATA_FILE"]
product = LANDSAT_METADATA["PRODUCT_CONTENTS"]
projection = LANDSAT_METADATA["PROJECTION_ATTRIBUTES"]
image = LANDSAT_METADATA["IMAGE_ATTRIBUTES"]
    processing_record = LANDSAT_METADATA["LEVEL2_PROCESSING_RECORD"]
scene_id = product["LANDSAT_PRODUCT_ID"]
xmin, xmax = float(projection["CORNER_LL_LON_PRODUCT"]), float(
projection["CORNER_UR_LON_PRODUCT"])
ymin, ymax = float(projection["CORNER_LL_LAT_PRODUCT"]), float(
projection["CORNER_UR_LAT_PRODUCT"])
geom = mapping(box(xmin, ymin, xmax, ymax))
bounds = shape(geom).bounds
# Like: "2020-01-01" for date and "23:08:52.6773140Z" for time
acquired_date = _parse_date(
f"{image['DATE_ACQUIRED']}T{image['SCENE_CENTER_TIME']}")
    created = _parse_date(processing_record["DATE_PRODUCT_GENERATED"])
item = Item(id=scene_id,
geometry=geom,
bbox=bounds,
datetime=acquired_date,
properties={})
# Common metadata
item.common_metadata.created = created
item.common_metadata.platform = image["SPACECRAFT_ID"]
item.common_metadata.instruments = [
i.lower() for i in image["SENSOR_ID"].split("_")
]
# TODO: implement these three extensions
EOExtension.add_to(item)
ViewExtension.add_to(item)
ProjectionExtension.add_to(item)
return item
| 5,350,128 |
def alexnet(pretrained=False):
"""AlexNet model architecture from the
`"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = AlexNet()
if pretrained:
model_path = './model/alexnet.pth.tar'
pretrained_model = torch.load(model_path)
model.load_state_dict(pretrained_model['state_dict'])
return model
| 5,350,129 |
def main() :
"""
main():
parameters:
-----------
None
description:
------------
Creates a connection to the GPU underneath and holds it in "bc".
Next, we access the "colab" environment S3 bucket (which holds a parquet
file) and creates a table named "taxi" with it.
    Finally, the example asks BSQL to explain how it plans to extract the data
    rather than actually extracting it.
result:
-------
'LogicalSort(fetch=[2])
BindableTableScan(table=[[main, taxi]], projects=[[3, 4]], aliases=[[passenger_count, trip_distance]])
'
"""
global bc
bc.s3('blazingsql-colab', bucket_name='blazingsql-colab')
bc.create_table('taxi', 's3://blazingsql-colab/yellow_taxi/taxi_data.parquet')
query = 'SELECT passenger_count, trip_distance FROM taxi LIMIT 2'
print('#> We are going to run {} against the loaded data on the GPU'.format(query))
print(bc.explain(query))
| 5,350,130 |
def generate_random_targets(labels: Tensor, num_classes: int) -> Tensor:
"""
    Generates, for each label, one random target chosen from the (num_classes - 1) classes
    that differ from the original label.
Parameters
----------
labels: Tensor
Original labels. Generated targets will be different from labels.
num_classes: int
Number of classes to generate the random targets from.
Returns
-------
targets: Tensor
Random target for each label. Has the same shape as labels.
"""
random = torch.rand(len(labels), num_classes, device=labels.device, dtype=torch.float)
random.scatter_(1, labels.unsqueeze(-1), 0)
return random.argmax(1)
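# Usage sketch (hypothetical labels): every generated target differs from its original label.
import torch

labels = torch.tensor([0, 3, 1, 1])
targets = generate_random_targets(labels, num_classes=5)
assert (targets != labels).all()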
| 5,350,131 |
def gpsdio_bigquery_schema(ctx, schemafile, cols):
"""
Produce a BigQuery schema for gpsdio data.
Produces a BigQuery schema for use together with gpsdio data in a
CSV container format (as supported by gpsdio-csv).
You must supply the same column list as used in the CSV!
"""
cols = cols.split(",")
schema = [{"type": type_for_col(col), "name": col}
for col in cols]
with open(schemafile, "w") as f:
json.dump(schema, f)
| 5,350,132 |
def recall_k(sent_im_dist, im_labels, ks=(1, 5, 10)):
"""
Compute recall at given ks.
"""
im_labels = tf.cast(im_labels, tf.bool)
def retrieval_recall(dist, labels, k):
# Use negative distance to find the index of
# the smallest k elements in each row.
pred = tf.nn.top_k(-dist, k=k)[1]
# Create a boolean mask for each column (k value) in pred,
# s.t. mask[i][j] is 1 iff pred[i][k] = j.
pred_k_mask = lambda topk_idx: tf.one_hot(topk_idx, tf.shape(labels)[1],
on_value=True, off_value=False,
dtype=tf.bool)
# Create a boolean mask for the predicted indices
# by taking logical or of boolean masks for each column,
# s.t. mask[i][j] is 1 iff j is in pred[i].
pred_mask = tf.reduce_any(tf.map_fn(
pred_k_mask, tf.transpose(pred), dtype=tf.bool), axis=0)
# pred_mask = tf.map_fn(create_pred_mask, pred)
# Entry (i, j) is matched iff pred_mask[i][j] and labels[i][j] are 1.
matched = tf.cast(tf.logical_and(pred_mask, labels), dtype=tf.float32)
return tf.reduce_mean(tf.reduce_max(matched, axis=1))
img_sent_recall = [retrieval_recall(tf.transpose(sent_im_dist),
tf.transpose(im_labels), k) for k in ks]
sent_img_recall = [retrieval_recall(sent_im_dist, im_labels, k) for k in ks]
return img_sent_recall + sent_img_recall
| 5,350,133 |
def get_suggestion(project_slug, lang_slug, version_slug, pagename, user):
"""
| # | project | version | language | What to show |
| 1 | 0 | 0 | 0 | Error message |
| 2 | 0 | 0 | 1 | Error message (Can't happen) |
| 3 | 0 | 1 | 0 | Error message (Can't happen) |
| 4 | 0 | 1 | 1 | Error message (Can't happen) |
| 5 | 1 | 0 | 0 | A link to top-level page of default version |
| 6 | 1 | 0 | 1 | Available versions on the translation project |
| 7 | 1 | 1 | 0 | Available translations of requested version |
| 8 | 1 | 1 | 1 | A link to top-level page of requested version |
"""
suggestion = {}
if project_slug:
try:
proj = Project.objects.get(slug=project_slug)
if not lang_slug:
lang_slug = proj.language
try:
ver = Version.objects.get(
project__slug=project_slug, slug=version_slug)
except Version.DoesNotExist:
ver = None
if ver: # if requested version is available on main project
if lang_slug != proj.language:
try:
translations = proj.translations.filter(
language=lang_slug)
if translations:
ver = Version.objects.get(
project__slug=translations[0].slug, slug=version_slug)
else:
ver = None
except Version.DoesNotExist:
ver = None
# if requested version is available on translation project too
if ver:
# Case #8: Show a link to top-level page of the version
suggestion['type'] = 'top'
suggestion['message'] = "What are you looking for?"
suggestion['href'] = proj.get_docs_url(ver.slug, lang_slug)
# requested version is available but not in requested language
else:
# Case #7: Show available translations of the version
suggestion['type'] = 'list'
suggestion['message'] = (
"Requested page seems not to be translated in "
"requested language. But it's available in these "
"languages.")
suggestion['list'] = []
suggestion['list'].append({
'label': proj.language,
'project': proj,
'version_slug': version_slug,
'pagename': pagename
})
for t in proj.translations.all():
try:
Version.objects.get(
project__slug=t.slug, slug=version_slug)
suggestion['list'].append({
'label': t.language,
'project': t,
'version_slug': version_slug,
'pagename': pagename
})
except Version.DoesNotExist:
pass
else: # requested version does not exist on main project
if lang_slug == proj.language:
trans = proj
else:
translations = proj.translations.filter(language=lang_slug)
trans = translations[0] if translations else None
if trans: # requested language is available
# Case #6: Show available versions of the translation
suggestion['type'] = 'list'
suggestion['message'] = (
"Requested version seems not to have been built yet. "
"But these versions are available.")
suggestion['list'] = []
for v in Version.objects.public(user, trans, True):
suggestion['list'].append({
'label': v.slug,
'project': trans,
'version_slug': v.slug,
'pagename': pagename
})
# requested project exists but requested version and language
# are not available.
else:
# Case #5: Show a link to top-level page of default version
# of main project
suggestion['type'] = 'top'
suggestion['message'] = 'What are you looking for??'
suggestion['href'] = proj.get_docs_url()
except Project.DoesNotExist:
            # Case #1-4: Show error message
suggestion['type'] = 'none'
suggestion[
'message'] = "We're sorry, we don't know what you're looking for"
else:
suggestion['type'] = 'none'
suggestion[
'message'] = "We're sorry, we don't know what you're looking for"
return suggestion
| 5,350,134 |
def a_test_predict_is_length():
"""
Tests that the prediction IS dataframe length is equal to the number of steps h
"""
model = ARIMA(data=data, ar=2, ma=2, family=Exponential())
x = model.fit()
assert(model.predict_is(h=5).shape[0] == 5)
| 5,350,135 |
def resample_data_or_seg(data, new_shape, is_seg, axis=None, order=3, do_separate_z=False, cval=0, order_z=0):
"""
separate_z=True will resample with order 0 along z
:param data:
:param new_shape:
:param is_seg:
:param axis:
:param order:
:param do_separate_z:
:param cval:
:param order_z: only applies if do_separate_z is True
:return:
"""
assert len(data.shape) == 4, "data must be (c, x, y, z)"
assert not is_seg, "do not use this patch for resampling segmentations"
print("running patched resample_data_or_seg function")
dtype_data = data.dtype
shape = np.array(data[0].shape)
new_shape = np.array(new_shape)
if np.all(shape == new_shape):
print("no resampling necessary")
return data
data = data.astype(float)
resize_fn = resize
kwargs = {'mode': 'edge', 'anti_aliasing': False}
if do_separate_z:
print("separate z, order in z is", order_z, "order inplane is", order)
assert len(axis) == 1, "only one anisotropic axis supported"
axis = axis[0]
if axis == 0:
new_shape_2d = new_shape[1:]
elif axis == 1:
new_shape_2d = new_shape[[0, 2]]
else:
new_shape_2d = new_shape[:-1]
reshaped_final_data = np.empty(shape=(data.shape[0], new_shape[0], new_shape[1], new_shape[2]), dtype=dtype_data)
do_z = shape[axis] != new_shape[axis]
if do_z:
if axis == 0:
buffer = np.empty(shape=(shape[axis], new_shape_2d[0], new_shape_2d[1]), dtype=float)
elif axis == 1:
buffer = np.empty(shape=(new_shape_2d[0], shape[axis], new_shape_2d[1]), dtype=float)
else:
buffer = np.empty(shape=(new_shape_2d[0], new_shape_2d[1], shape[axis]), dtype=float)
else:
buffer = None
for c in range(data.shape[0]):
if do_z:
reshaped_data = buffer
else:
reshaped_data = reshaped_final_data[c]
for slice_id in range(shape[axis]):
if axis == 0:
reshaped_data[slice_id, :, :] = resize_fn(data[c, slice_id], new_shape_2d, order, cval=cval, **kwargs)
elif axis == 1:
reshaped_data[:, slice_id, :] = resize_fn(data[c, :, slice_id], new_shape_2d, order, cval=cval, **kwargs)
else:
reshaped_data[:, :, slice_id] = resize_fn(data[c, :, :, slice_id], new_shape_2d, order, cval=cval, **kwargs)
if do_z:
# The following few lines are blatantly copied and modified from sklearn's resize()
rows, cols, dim = new_shape[0], new_shape[1], new_shape[2]
orig_rows, orig_cols, orig_dim = reshaped_data.shape
row_scale = float(orig_rows) / rows
col_scale = float(orig_cols) / cols
dim_scale = float(orig_dim) / dim
reshaped_final_data[c] = zoom(reshaped_data, (1 / row_scale, 1 / col_scale, 1 / dim_scale), order=order_z, cval=cval, mode='nearest')
else:
print("no separate z, order", order)
reshaped_final_data = np.empty(shape=(data.shape[0], new_shape[0], new_shape[1], new_shape[2]), dtype=dtype_data)
for c in range(data.shape[0]):
reshaped_final_data[c] = resize_fn(data[c], new_shape, order, cval=cval, **kwargs)
return reshaped_final_data
| 5,350,136 |
def get_eval(appdir, config):
"""Get an Evaluation object given the configured `GlobalConfig`.
"""
return core.Evaluation(appdir, config.client, config.reps,
config.test_reps, config.simulate)
| 5,350,137 |
def load_queue_from_disk(filename):
"""
Load the old queue from disk when started. Old messages that weren't
posted yet are read from the queue and processed.
"""
if os.path.exists(filename):
log.msg("Loading queue from %s" % filename)
try:
with closing(open(filename, 'r')) as fp:
data = pickle.load(fp)
return data
        except IOError:
log.err()
backup_filename = "%s.%s" % (
filename,
datetime.utcnow().strftime("%Y%m%d_%H%M%S")
)
shutil.copyfile(filename, backup_filename)
log.err("Couldn't load queue from %s, backed it up to %s" % (
filename, backup_filename
))
# return an empty queue, start from scratch.
return []
| 5,350,138 |
def write_user_edges_to_file(edges):
"""
Given a list of edges `edges` (each edge of the form (f,g), where `f,g` are homogeneous
in 4 variables), save this list to the `user_edges` file to be read by the
main programs. The names of the variables are changed to `x,y,z,w`.
"""
R = PolynomialRing(QQ, 4, "xyzw")
(x,y,z,w) = R.gens()
with open(SRC_ABS_PATH + "user_input/" + "user_edges", 'w+') as F:
for e in edges:
F.write("[{},{}]\n".format(R(e[0]), R(e[1])))
return
| 5,350,139 |
def ask_ok(title="Confirm", message=""):
"""Ask the user to confirm something via an ok-cancel question.
Parameters:
title (str): the text to show as the window title.
message (str): the message to show in the body of the dialog.
Returns:
bool: Whether the user selected "OK".
"""
if not isinstance(title, string_types):
raise TypeError("ask_ok() title must be a string.")
if not isinstance(message, string_types):
raise TypeError("ask_ok() message must be a string.")
return _get_app().ask_ok(title, message)
| 5,350,140 |
def indexate(points):
"""
Create an array of unique points and indexes into this array.
Arguments:
points: A sequence of 3-tuples
Returns:
An array of indices and a sequence of unique 3-tuples.
"""
pd = {}
indices = tuple(pd.setdefault(tuple(p), len(pd)) for p in points)
pt = sorted([(v, k) for k, v in pd.items()], key=lambda x: x[0])
unique = tuple(i[1] for i in pt)
return indices, unique
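# Usage sketch: duplicate points share an index and appear only once in the unique tuple.
pts = [(0, 0, 0), (1, 0, 0), (0, 0, 0)]
idx, uniq = indexate(pts)
print(idx)   # (0, 1, 0)
print(uniq)  # ((0, 0, 0), (1, 0, 0))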
| 5,350,141 |
def get_handle(endpoint_name,
relative_slo_ms=None,
absolute_slo_ms=None,
missing_ok=False):
"""Retrieve RayServeHandle for service endpoint to invoke it from Python.
Args:
endpoint_name (str): A registered service endpoint.
relative_slo_ms(float): Specify relative deadline in milliseconds for
queries fired using this handle. (Default: None)
absolute_slo_ms(float): Specify absolute deadline in milliseconds for
queries fired using this handle. (Default: None)
missing_ok (bool): If true, skip the check for the endpoint existence.
It can be useful when the endpoint has not been registered.
Returns:
RayServeHandle
"""
if not missing_ok:
assert endpoint_name in ray.get(
master_actor.get_all_endpoints.remote())
return RayServeHandle(
ray.get(master_actor.get_router.remote())[0],
endpoint_name,
relative_slo_ms,
absolute_slo_ms,
)
| 5,350,142 |
def help_command(update: Update, context: CallbackContext) -> None:
"""Send a message when the command /help is issued."""
message = r"К вашим услугам\! Вот что я умею:" + "\n\n"
message += r"/fact \- расскажу интересный факт" + "\n"
message += r"/cat \- отправлю картинку котика" + "\n"
message += r"/cute \- отправлю милого котика^^" + "\n"
message += r"/story \- расскажу историю" + "\n"
message += r"/funny \- попробую рассмешить" + "\n"
message += r"/top\_cat \- покажу топового кота" + "\n"
message += r"/http \<status\_code\> \- HTTP\-кот" + "\n"
message += "\n"
message += r"/gallery \- покажу кота из коллекции" + "\n"
message += r"/upload \- добавляю вашего котика" + "\n"
message += "\n"
message += r"/about \- расскажу немного о себе" + "\n"
update.message.reply_markdown_v2(message)
| 5,350,143 |
def test_get_parser(ap):
"""Test getting argument parser."""
import sattools.processing.showsat
sattools.processing.showsat.parse_cmdline()
assert ap.return_value.add_argument.call_count == 9
| 5,350,144 |
def senderPlusReceiver(dataBitArray):
"""[senderPlusReceiver function]
This function is used to send the data Bits
Sender -> sends the bits in the array
Receiver(in this case the user) -> ack the bit received using
"ack" or
"Ack" or
"ACK"
as the use input
This function is built in such a way that it combines
both programs into one program for better use of resources.
Therefore this is capable enough to solve both
the sender as well as the receiver logic.
"""
element = 0
while element <= len(dataBitArray)-1: # Sending all data bits one after the other using while loop
send(dataBitArray[element])
i, o, e = select.select( [sys.stdin], [], [], Timeout ) # This would wait for the previously defined timeout seconds for an input from the user
if (i): # if input is received....
            ack = sys.stdin.readline().strip()  # read stdin once; repeated readline() calls would consume extra input lines
            if ack in ("ack", "Ack", "ACK"): # it checks with ACK, Ack, ack. If it matches, then it goes on to the next bit
                print(" + Ack received! Sending next Data Bit...")
                element = element + 1
else:
# else, in the case of the input not matching to the previously mentioned 3 accepted ack messages
# it would count it as corrupted ack, and it would resend the bit, again.
print(" > Ack Corrupted! Sending again...")
else:
# this is the case where there is no ack from the receiver (AKA user in this case)
# then it waits till the time out is over and then resend the data bit.
print(" > Ack Lost! Sending again...")
| 5,350,145 |
def create_symbolic_controller(states, inputs):
""""Returns a dictionary with keys that are the joint torque inputs and
the values are the controller expressions. This can be used to convert
the symbolic equations of motion from 0 = f(x', x, u, t) to a closed
loop form 0 = f(x', x, t).
Parameters
----------
states : sequence of len 2 * (n + 1)
The SymPy time dependent functions for the system states where n are
the number of links.
inputs : sequence of len n
        The SymPy time dependent functions for the system joint torque
inputs (should not include the lateral force).
Returns
-------
controller_dict : dictionary
Maps joint torques to control expressions.
gain_symbols : list of SymPy Symbols
The symbols used in the gain matrix.
xeq : list of SymPy Symbols
The symbols for the equilibrium point.
"""
num_states = len(states)
num_inputs = len(inputs)
xeq = sym.Matrix([x.__class__.__name__ + '_eq' for x in states])
K = sym.Matrix(num_inputs, num_states, lambda i, j:
sym.Symbol('k_{}{}'.format(i, j)))
x = sym.Matrix(states)
T = sym.Matrix(inputs)
gain_symbols = [k for k in K]
# T = K * (xeq - x) -> 0 = T - K * (xeq - x)
controller_dict = sym.solve(T - K * (xeq - x), inputs)
return controller_dict, gain_symbols, xeq
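# Usage sketch (a single-link system, so two states and one torque input; the names are
# illustrative, not from the original source):
import sympy as sym
import sympy.physics.mechanics as me

q0, u0 = me.dynamicsymbols('q0 u0')   # angle and angular rate
T0 = me.dynamicsymbols('T0')          # joint torque input
controller_dict, gains, xeq = create_symbolic_controller([q0, u0], [T0])
print(gains)  # [k_00, k_01]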
| 5,350,146 |
def subprocess_main(framework=None):
"""
Please keep this file compatible with python2 in order to check user python version.
This function checks that Inference Engine Python API available and working as expected
and then in sub-process it executes main_<fw>.py files. Due to some OSs specifics we can't
just add paths to Python modules and libraries into current env. So to make Inference Engine
Python API to be available inside MO we need to use subprocess with new env.
"""
setup_env()
path_to_main = os.path.join(os.path.realpath(os.path.dirname(__file__)),
'main_{}.py'.format(framework) if framework else 'main.py')
# python2 compatible code. Do not remove.
args = [sys.executable, path_to_main]
for arg in sys.argv[1:]:
args.append(arg)
status = subprocess.run(args, env=os.environ)
sys.exit(status.returncode)
| 5,350,147 |
def is_gene_name(instance):
"""This SHOULD check a webservice at HGNC/MGI for validation, but for now this just returns True always.."""
ignored(instance)
return True
| 5,350,148 |
def install_package_family(pkg):
"""
:param: pkg ie asr900rsp2-universal.03.13.03.S.154-3.S3-ext.bin
:return: device_type of the installed image ie asr900
"""
img_dev = None
m = re.search(r'(asr\d+)\w*', pkg)
if m:
img_dev = m.group(1)
return img_dev
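# Usage sketch with the example image name from the docstring:
print(install_package_family("asr900rsp2-universal.03.13.03.S.154-3.S3-ext.bin"))  # asr900
print(install_package_family("unknown-image.bin"))                                 # None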
| 5,350,149 |
def generate_visible(tower_height, heightmap):
"""Trace a ray and determine if a region is viewable.
Args:
tower_height: the elevation in meters above sea level of your antenna
heightmap: an enumerable of heights in a given direction
Returns:
an enumerable of True/False for visibility
"""
min_angle = -10000
for i, height in enumerate(heightmap):
if tower_height - height == 0:
angle_to_point = 0
elif tower_height > height:
angle_to_point = math.atan(i / (tower_height - height))
else:
angle_to_point = math.atan((height - tower_height) / i) + math.pi / 2
if angle_to_point >= min_angle:
min_angle = angle_to_point
yield True
else:
yield False
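# Usage sketch (heights in metres at unit spacing): the last point is hidden behind the peak.
print(list(generate_visible(100, [50, 80, 120, 90])))  # [True, True, True, False]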
| 5,350,150 |
def get_dendritic_mask_path_from_sessid(maindir, sessid, runtype="prod",
check=True):
"""
get_dendritic_mask_path_from_sessid(maindir, sessid)
Returns path to dendritic mask file for the specified session.
Required args:
- maindir (str): main directory
- sessid (int) : session ID
Optional args:
- runtype (str) : "prod" (production) or "pilot" data
default: "prod"
- check (bool) : if True, checks whether the files in the output
dictionary exist
default: True
Returns:
- maskfile (str): full path name of the extract masks hdf5 file
"""
sessdir, mouse_dir = get_sess_dir_path(maindir, sessid, runtype)
mouseid = get_mouseid(sessdir, mouse_dir)
expid = get_expid(sessdir)
maskfile = get_dendritic_mask_path(
maindir, sessid, expid, mouseid, runtype, mouse_dir, check)
return maskfile
| 5,350,151 |
def test_attachment_blank(tmpdir):
"""Attachment header without a filename is an error."""
template_path = Path(tmpdir/"template.txt")
template_path.write_text(textwrap.dedent(u"""\
TO: [email protected]
FROM: [email protected]
ATTACHMENT:
Hello world
"""))
template_message = TemplateMessage(template_path)
with pytest.raises(MailmergeError) as err:
with tmpdir.as_cwd():
template_message.render({})
assert "Empty attachment header" in str(err)
| 5,350,152 |
def shift(arr):
"""
    Shifts all rows of boxes back one row
:param
arr: 2D Array of VPython Box elements
:return:
No return, this will adjust the input arr to allow for a new row to be added
"""
for i in range(99, 0, -1):
for j in range(99, -1, -1):
arr[i][j].height = arr[i-1][j].height
arr[i][j].color = arr[i - 1][j].color
arr[i][j].pos.y = arr[i][j].height / 2
| 5,350,153 |
def sam_to_bam(samfile, bamfile, samtools_bin="samtools"):
"""
Convert SAM to sorted and indexed BAM.
Args:
samfile: Input Sam
bamfile: Output Bam (without .bam extension)
samtools_bin: path to samtools binary
"""
with tempfile.NamedTemporaryFile() as tmp_bam:
# samtools takes only basename in 'sort' but full filename in 'index'
call(["{2} view -S -b {0} > {1}".format(samfile, tmp_bam.name, samtools_bin)], shell=True)
call([samtools_bin, "sort", tmp_bam.name, bamfile])
call([samtools_bin, "index", bamfile + ".bam"])
| 5,350,154 |
def abbn_min_vol():
"""
Real Name: b'"Ab-bn min vol"'
Original Eqn: b'25.6'
Units: b''
Limits: (None, None)
Type: constant
b''
"""
return 25.6
| 5,350,155 |
def clear(self: Client, player: str = None, item_name: str = None,
data: int = None, max_count: int = None) -> str:
"""Clears items from player inventory, including
items being dragged by the player.
Bedrock Edition implementation.
"""
return self.run('clear', player, item_name, data, max_count)
| 5,350,156 |
def load_glove_embeddings(dim, vocab):
"""
Load GloVe embedding vectors for all words in our vocabulary.
https://machinelearningmastery.com/use-word-embedding-layers-deep-learning-keras/
Parameters
----------
dim : int
Dimension of GloVe embeddings. Can be 50, 100, 200 and 300.
vocab : dict
Dictionary mapping words to index.
Returns
-------
embeddings_index : dict
A dictionary that maps word to embedding vector.
"""
embeddings_index = dict()
lower_dict = [word.lower() for word in vocab.keys()]
with open('glove.6B/glove.6B.'+str(dim)+'d.txt', 'r', encoding="utf-8") as f:
for line in f:
values = line.split()
word = values[0]
coefs = asarray(values[1:], dtype='float32')
# use only low case? GloVe seems to use only low case, but what about NER?
if word in vocab:
embeddings_index[vocab[word]] = coefs
# maybe Word get same embedding as word?
elif word in lower_dict:
try:
embeddings_index[vocab[word.title()]] = coefs
except KeyError:
continue
return embeddings_index
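# Hypothetical usage sketch -- assumes the glove.6B/ directory with the
# pre-trained 50-d vectors has been downloaded next to this script.
# vocab = {"the": 0, "Cat": 1, "sat": 2}
# embeddings = load_glove_embeddings(50, vocab)
# embeddings[0] is then the 50-d GloVe vector for "the"; "Cat" is matched
# through its lower-cased form and stored under index 1.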
| 5,350,157 |
def check_vat_number(vat_number, country_code=None):
"""Check if a VAT number is valid.
If possible, the VAT number will be checked against available registries.
:param vat_number: VAT number to validate.
:param country_code:
Optional country code. Should be supplied if known, as there is no
guarantee that naively entered VAT numbers contain the correct alpha-2
country code prefix for EU countries just as not all non-EU countries
have a reliable country code prefix. Default ``None`` prompting
detection.
:returns:
a :class:`VatNumberCheckResult` instance containing the result for
the full VAT number check.
"""
# Decompose the VAT number.
vat_number, country_code = decompose_vat_number(vat_number, country_code)
if not vat_number or not country_code:
return VatNumberCheckResult(False, [
'> Unable to decompose VAT number, resulted in %r and %r' %
(vat_number, country_code)
])
# Test the VAT number format.
format_result = is_vat_number_format_valid(vat_number, country_code)
if format_result is not True:
return VatNumberCheckResult(format_result, [
'> VAT number validation failed: %r' % (format_result)
])
# Attempt to check the VAT number against a registry.
if country_code not in VAT_REGISTRIES:
return VatNumberCheckResult()
return VAT_REGISTRIES[country_code].check_vat_number(vat_number,
country_code)
| 5,350,158 |
def argextrema(y, separate=True):
"""
    Deprecated in favor of argrel{min|max} in scipy.signal to get separate
    extrema in about the same CPU time.
    If you need a list of all relative extrema in order, using this with
    separate=False takes about half the time of combining the scipy
    functions with searchsorted.
Returns the indices of the local extrema of a series. When consecutive
points at an extreme have the same value, the index of the first is
returned.
"""
delta = y[1:] - y[:-1]
pos_neg = np.zeros(len(delta), np.int8)
pos_neg[delta > 0] = 1
pos_neg[delta < 0] = -1
curve_sign = pos_neg[1:] - pos_neg[:-1]
if separate:
argmax = np.nonzero(curve_sign < 0)[0] + 1
argmin = np.nonzero(curve_sign > 0)[0] + 1
return argmin,argmax
else:
argext = np.nonzero(curve_sign != 0)[0] + 1
return argext
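# Worked example (illustrative values): interior maxima at indices 1 and 3,
# a single interior minimum at index 2.
# y = np.array([1, 3, 2, 4, 1])
# argextrema(y)                  # -> (array([2]), array([1, 3]))
# argextrema(y, separate=False)  # -> array([1, 2, 3])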
| 5,350,159 |
def examine_normal_mode(r_mol: RDKitMol,
p_mol: RDKitMol,
ts_xyz: np.array,
disp: np.array,
amplitude: Union[float, list] = 0.25,
weights: Union[bool, np.array] = True,
verbose: bool = True,
as_factors: bool = True):
"""
Examine a TS's imaginary frequency given a known reactant complex and a
product complex. The function checks if the bond changes are corresponding
to the most significant change in the normal mode. The reactant and product
complex need to be atom mapped.
Args:
r_mol ('RDKitMol'): the reactant complex.
p_mol ('RDKitMol'): the product complex.
ts_xyz (np.array): The xyz coordinates of the transition state. It should have a
size of N x 3.
disp (np.array): The displacement of the normal mode. It should have a size of
N x 3.
amplitude (float): The amplitude of the motion. Defaults to 0.25.
        weights (bool or np.array): If ``True``, use the sqrt(atom mass) as a scaling factor to the displacement.
                                    If ``False``, use the identity weights. If an N x 1 ``np.array`` is provided,
                                    those values are used as the weights directly. The concern is that light atoms
                                    (e.g., H) tend to have larger motions than heavier atoms.
        verbose (bool): Whether to print detailed information. Defaults to ``True``.
        as_factors (bool): Whether to return the factor values instead of a boolean judgment.
                           Defaults to ``True``.
Returns:
- bool: ``True`` for pass the examination, ``False`` otherwise.
- list: If `as_factors == True`, two factors will be returned.
"""
# Analyze connectivity
broken, formed, changed = get_all_changing_bonds(r_mol, p_mol)
reacting_bonds = broken + formed + changed
# Generate weights
if isinstance(weights, bool) and weights:
atom_masses = np.array(r_mol.GetAtomMasses()).reshape(-1, 1)
weights = np.sqrt(atom_masses)
elif isinstance(weights, bool) and not weights:
weights = np.ones((ts_xyz.shape[0], 1))
# Generate conformer instance according to the displacement
xyzs = ts_xyz - amplitude * disp * weights, ts_xyz + amplitude * disp * weights
r_copy = r_mol.Copy(); r_copy.SetPositions(xyzs[0])
p_copy = p_mol.Copy(); p_copy.SetPositions(xyzs[1])
r_conf, p_conf = r_copy.GetConformer(), p_copy.GetConformer()
# Calculate bond distance change
formed_and_broken_diff = [abs(r_conf.GetBondLength(bond) - p_conf.GetBondLength(bond))
for bond in broken + formed]
changed_diff = [abs(r_conf.GetBondLength(bond) - p_conf.GetBondLength(bond))
for bond in changed]
other_bonds_diff = [abs(r_conf.GetBondLength(bond) - p_conf.GetBondLength(bond))
for bond in r_copy.GetBondsAsTuples() if bond not in reacting_bonds]
# We expect bonds that are formed or broken in the reaction
# have relatively large changes; For bonds that change their bond order
# in the reaction may have a smaller factor.
# In this function, we only use the larger factor as a check.
# The smaller factor is less deterministic, considering the change in
# other bonds due to the change of atom hybridization or bond conjugation.
baseline = np.max(other_bonds_diff)
std = np.std(other_bonds_diff)
larger_factor = (np.min(formed_and_broken_diff) - baseline) / std
if changed_diff:
# There might be no bond that only changes its order
smaller_factor = (np.min(changed_diff) - baseline) / std
else:
smaller_factor = 0
if verbose:
print(f'The min. bond distance change for bonds that are broken or formed'
f' is {np.min(formed_and_broken_diff)} A and is {larger_factor:.1f} STD off the baseline.')
if changed_diff:
print(f'The min. bond distance change for bonds that are changed'
f' is {np.min(changed_diff)} A and is {smaller_factor:.1f} STD off the baseline.')
if as_factors:
return larger_factor, smaller_factor
if larger_factor > 3:
return True
return False
| 5,350,160 |
def generate_sprites(factor_dist, num_sprites=1):
"""Create callable that samples sprites from a factor distribution.
Args:
factor_dist: The factor distribution from which to sample. Should be an
instance of factor_distributions.AbstractDistribution.
num_sprites: Int or callable returning int. Number of sprites to generate
per call.
Returns:
_generate: Callable that returns a list of Sprites.
"""
def _generate():
n = num_sprites() if callable(num_sprites) else num_sprites
sprites = [sprite.Sprite(**factor_dist.sample()) for _ in range(n)]
return sprites
return _generate
| 5,350,161 |
def recode_from_index_mapper(meta, series, index_mapper, append):
"""
Convert a {value: logic} map to a {value: index} map.
This function takes a mapper of {key: logic} entries and resolves
the logic statements using the given meta/data to return a mapper
of {key: index}. The indexes returned can be used on data to isolate
the cases described by arbitrarily complex logical statements.
Parameters
----------
meta : dict
Quantipy meta document.
series : pandas.Series
The series in which the recoded data will be stored and
returned.
index_mapper : dict
A mapper of {key: index}
append : bool
        Should the new recoded data be appended to items already found
in series? If False, data from series (where found) will
overwrite whatever was found for that item in ds1 instead.
Returns
-------
series : pandas.Series
The series in which the recoded data will be stored and
returned.
"""
qtype = meta['columns'][series.name]['type']
if qtype in ['delimited set']:
if series.dtype in ['int64', 'float64']:
not_null = series.notnull()
if len(not_null) > 0:
series.loc[not_null] = series.loc[not_null].map(str) + ';'
if index_mapper:
cols = [str(c) for c in sorted(index_mapper.keys())]
else:
vals = meta['columns'][series.name]['values']
codes = [c['value'] for c in vals]
cols = [str(c) for c in codes]
ds = pd.DataFrame(0, index=series.index, columns=cols)
for key, idx in index_mapper.iteritems():
ds[str(key)].loc[idx] = 1
ds2 = condense_dichotomous_set(ds)
org_name = series.name
series = join_delimited_set_series(series, ds2, append)
## Remove potential duplicate values
if series.dropna().empty:
warn_msg = 'Could not recode {}, found empty data column dependency!'.format(org_name)
warnings.warn(warn_msg)
return series
ds = series.str.get_dummies(';')
# Make sure columns are in numeric order
ds.columns = [int(float(c)) for c in ds.columns]
cols = sorted(ds.columns.tolist())
ds = ds[cols]
ds.columns = [str(i) for i in ds.columns]
# Reconstruct the dichotomous set
series = condense_dichotomous_set(ds)
elif qtype in ['single', 'int', 'float']:
for key, idx in index_mapper.iteritems():
series.loc[idx] = key
else:
raise TypeError(
"Can't recode '{col}'. Recoding for '{typ}' columns is not"
" yet supported.".format(col=series.name, typ=qtype)
)
return series
| 5,350,162 |
def update_dynamoDB(global_table_name, query_id, slack_data, current_date):
"""
    update jarvis data to dynamoDB, separated by queryId
    :param query_id: query ID with parts joined together by + signs
:param slack_data: a list
:return:
"""
# convert list to string for dynamodb storage
slack_data = json.dumps(slack_data)
session = boto3.session.Session(region_name='us-east-1')
iam_client = session.client('dynamodb')
list_group_detail = iam_client.update_item(TableName=global_table_name,
Key={'queryId': {'S': query_id}},
ExpressionAttributeNames={'#S': 'slackData','#D': 'dateTimeData'},
ExpressionAttributeValues={':s': {'S': slack_data},':d': {'S': current_date}},
ReturnValues='ALL_NEW',
UpdateExpression='SET #S = :s, #D = :d')
# print(list_group_detail['ResponseMetadata'])
if list_group_detail['ResponseMetadata']['HTTPStatusCode'] == 200:
print("update slackData to Database {0} successfully".format(global_table_name))
| 5,350,163 |
def get_variables():
"""Loads ODAHU config as Robot variable
"""
return {'CONFIG': {var: getattr(config, var) for var in config.ALL_VARIABLES}}
| 5,350,164 |
def is_number(char: Text) -> bool:
"""Checks if char is number. Returns Boolean."""
return char in string.digits
| 5,350,165 |
def sliceData(data, slicebox=[None,None,None,None]):
"""
Sum 2d data along both axes and return 1d datasets
**Inputs**
data (sans2d) : data in
slicebox (range?:xy): region over which to integrate (in data coordinates)
**Returns**
xout (sans1d) : xslice
yout (sans1d) : yslice
2018-04-20 Brian Maranville
"""
if slicebox is None:
slicebox = [None, None, None, None]
xmin, xmax, ymin, ymax = slicebox
res = data.copy()
if data.qx is None or data.qy is None:
# then use pixels
xslice = slice(int(np.ceil(xmin)) if xmin is not None else None, int(np.floor(xmax)) if xmax is not None else None)
yslice = slice(int(np.ceil(ymin)) if ymin is not None else None, int(np.floor(ymax)) if ymax is not None else None)
x_in = np.arange(data.data.x.shape[0])
y_in = np.arange(data.data.x.shape[1])
x_out = x_in[xslice]
y_out = y_in[yslice]
dx = np.zeros_like(x_out)
dy = np.zeros_like(y_out)
else:
# then use q-values
qxmin = data.qx_min if data.qx_min is not None else data.qx.min()
qxmax = data.qx_max if data.qx_max is not None else data.qx.max()
qx_in = np.linspace(qxmin, qxmax, data.data.x.shape[0])
qymin = data.qy_min if data.qy_min is not None else data.qy.min()
qymax = data.qy_max if data.qy_max is not None else data.qy.max()
qy_in = np.linspace(qymin, qymax, data.data.x.shape[1])
xslice = slice(get_index(qx_in, xmin), get_index(qx_in, xmax))
yslice = slice(get_index(qy_in, ymin), get_index(qy_in, ymax))
x_out = qx_in[xslice]
y_out = qy_in[yslice]
dx = np.zeros_like(x_out)
dy = np.zeros_like(y_out)
dataslice = (xslice, yslice)
x_sum = uncertainty.sum(data.data[dataslice], axis=1)
y_sum = uncertainty.sum(data.data[dataslice], axis=0)
x_output = Sans1dData(x_out, x_sum.x, dx=dx, dv=x_sum.variance, xlabel=data.xlabel, vlabel="I",
xunits="", vunits="neutrons", metadata=data.metadata)
y_output = Sans1dData(y_out, y_sum.x, dx=dy, dv=y_sum.variance, xlabel=data.ylabel, vlabel="I",
xunits="", vunits="neutrons", metadata=data.metadata)
return x_output, y_output
| 5,350,166 |
def set_plugin_temporarily_enabled(folder=None):
""" Disables the plugin globally or for folder.
Folder can be a view """
if folder is None:
if is_plugin_globally_disabled():
plugin_disabled_for_folders.remove("*global")
else:
if isinstance(folder, sublime.View):
folder = os.path.dirname(folder.file_name())
folder = os.path.normcase(folder)
Debug('project', 'Enable ArcticTypescript for %s' % folder)
if folder in plugin_disabled_for_folders:
plugin_disabled_for_folders.remove(folder)
| 5,350,167 |
def GetChangeUrl(host, change):
"""Given a Gerrit host name and change ID, returns a URL for the change."""
return '%s://%s/a/changes/%s' % (GERRIT_PROTOCOL, host, change)
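# Illustrative example, assuming GERRIT_PROTOCOL is defined as 'https'
# elsewhere in this module:
# GetChangeUrl('chromium-review.googlesource.com', '456789')
#   -> 'https://chromium-review.googlesource.com/a/changes/456789'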
| 5,350,168 |
def _ignored_jenkins_node_names() -> List[str]:
"""
Ignore nodes with these names
:return: Config list
"""
return json.loads(os.environ['IGNORED_JENKINS_NODE_NAMES'])
| 5,350,169 |
def recipe_clone_message(recipe):
"""
Renders the recipe clone message.
"""
return dict(recipe=recipe)
| 5,350,170 |
def read_expression_file(file):
"""Reads a file with the expression profiles."""
D = []
genes = []
with open(file) as fp:
firstline = fp.readline()
classes = [c.strip() for c in firstline.split("\t")[1:]]
for line in fp.readlines():
items = [w.strip() for w in line.split("\t")]
genes.append(items[0])
D.append([int(x) for x in items[1:]])
class_a = classes[0]
C = [int(c == class_a) for c in classes]
D = np.array(D)
return genes, D, C
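# Hedged example of the expected tab-separated layout (made-up values): a
# header row of class labels followed by one row of integer counts per gene.
#
#   gene<TAB>tumor<TAB>tumor<TAB>normal
#   BRCA1<TAB>3<TAB>5<TAB>1
#   TP53<TAB>2<TAB>0<TAB>4
#
# read_expression_file(path) would then return
#   genes == ['BRCA1', 'TP53'], D == array([[3, 5, 1], [2, 0, 4]]), C == [1, 1, 0]
# because 'tumor' is the first class listed and therefore maps to 1.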
| 5,350,171 |
def test_svd_soln():
"""
test SVD decomposition of solution by generating SVD, saving to file and reloading
"""
from proteus.deim_utils import read_snapshots,generate_svd_decomposition
ns = get_burgers_ns("test_svd_soln",T=0.1,nDTout=10,archive_pod_res=True)
failed = ns.calculateSolution("run_svd_soln")
assert not failed
from proteus import Archiver
archive = Archiver.XdmfArchive(".","test_svd_soln",readOnly=True)
U,s,V=generate_svd_decomposition(archive,len(ns.tnList),'u','soln')
S_svd = np.dot(U,np.dot(np.diag(s),V))
#now load back in and test
S = read_snapshots(archive,len(ns.tnList),'u')
npt.assert_almost_equal(S,S_svd)
| 5,350,172 |
def train(X, Y, n_h, num_iterations=10000, print_cost=False):
"""
定义神经网络模型,把之前的操作合并到一起
Args:
X: 输入值
Y: 真实值
n_h: 隐藏层大小/节点数
num_iterations: 训练次数
print_cost: 设置为True,则每1000次训练打印一次成本函数值
Return:
parameters: 模型训练所得参数,用于预测
"""
np.random.seed(3)
n_x = layer_sizes(X, Y)[0]
n_y = layer_sizes(X, Y)[2]
# 根据n_x, n_h, n_y初始化参数,并取出W1,b1,W2,b2
parameters = initialize_parameters(n_x, n_h, n_y)
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
for i in range(0, num_iterations):
# 前向传播, 输入: "X, parameters". 输出: "A2, cache".
A2, cache = forward_propagation(X, parameters)
# 成本计算. 输入: "A2, Y, parameters". 输出: "cost".
cost = calculate_cost(A2, Y, parameters)
# 后向传播, 输入: "parameters, cache, X, Y". 输出: "grads".
grads = backward_propagation(parameters, cache, X, Y)
# 参数更新. 输入: "parameters, grads". 输出: "parameters".
parameters = update_parameters(parameters, grads)
# 每1000次训练打印一次成本函数值
if print_cost and i % 1000 == 0:
print "Cost after iteration %i: %f" % (i, cost)
return parameters
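# Hypothetical usage sketch -- assumes the helpers referenced above
# (layer_sizes, initialize_parameters, forward_propagation, calculate_cost,
# backward_propagation, update_parameters) are defined in the same module.
# np.random.seed(1)
# X = np.random.randn(2, 400)                             # 2 features, 400 samples
# Y = (np.sum(X, axis=0, keepdims=True) > 0).astype(int)  # toy binary labels
# parameters = train(X, Y, n_h=4, num_iterations=10000, print_cost=True)
# A2, _ = forward_propagation(X, parameters)              # reuse for predictions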
| 5,350,173 |
def get_drawdowns(cum_returns):
"""
Computes the drawdowns of the cumulative returns.
Parameters
----------
cum_returns : Series or DataFrame, required
a Series or DataFrame of cumulative returns
Returns
-------
Series or DataFrame
"""
cum_returns = cum_returns[cum_returns.notnull()]
highwater_marks = cum_returns.expanding().max()
drawdowns = cum_returns/highwater_marks - 1
return drawdowns
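# Worked example (illustrative numbers): the high-water mark before the dip to
# 1.05 is 1.10, so that drawdown is 1.05/1.10 - 1, roughly -4.5%.
# cum_returns = pd.Series([1.00, 1.10, 1.05, 1.20, 0.90])
# get_drawdowns(cum_returns)
#   -> 0    0.000000
#      1    0.000000
#      2   -0.045455
#      3    0.000000
#      4   -0.250000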
| 5,350,174 |
def cmd_instance_create(context, classname, options):
"""
Create an instance and submit to wbemserver.
If successful, this operation returns the new instance name. Otherwise
it raises an exception
"""
ns = options['namespace'] or context.conn.default_namespace
try:
class_ = context.conn.GetClass(
classname, namespace=ns, LocalOnly=False)
except CIMError as ce:
if ce.status_code == CIM_ERR_NOT_FOUND:
raise click.ClickException('CIMClass: "{}" does not exist in '
'namespace "{}" in WEB '
'server: {}.'.format(classname, ns,
context.conn))
raise_pywbem_error_exception(ce)
except Error as er:
raise_pywbem_error_exception(er)
properties = options['property']
# properties is a tuple of name,value pairs
new_inst = create_ciminstance(class_, properties)
if options['verify']:
context.spinner_stop()
click.echo(new_inst.tomof())
if not verify_operation("Execute CreateInstance", msg=True):
return
try:
name = context.conn.CreateInstance(new_inst,
namespace=ns)
context.spinner_stop()
click.echo('{}'.format(name))
except Error as er:
raise click.ClickException('Server Error creating instance in '
'namespace {}. Exception: '
'{}: {}'.format(ns, er.__class__.__name__,
er))
| 5,350,175 |
def test_flow_udp(serializer, options, tx_port, rx_port, api):
"""UDP Flow test traffic configuration
"""
udp_endpoint = PortTxRx(tx_port_name=tx_port.name,
rx_port_name=rx_port.name)
test_dscp = Priority(
Dscp(phb=Pattern(Dscp.PHB_CS7, ingress_result_name='phb')))
udp_header = Udp(src_port=Pattern(Counter(start="12001",
step="2",
count=100),
ingress_result_name='UDP SRC PORT'),
dst_port=Pattern("20"))
udp_flow = Flow(name='UDP Flow',
tx_rx=TxRx(udp_endpoint),
packet=[
Header(Ethernet()),
Header(Vlan()),
Header(Ipv4(priority=test_dscp)),
Header(udp_header)
],
size=Size(128),
rate=Rate('line', 50),
duration=Duration(FixedPackets(packets=100000)))
config = Config(ports=[tx_port, rx_port],
flows=[udp_flow],
options=options)
state = State(ConfigState(config=config, state='set'))
print(serializer.json(state))
api.set_state(state)
| 5,350,176 |
def calc_recipe_quantity_ratio(
first_month: str,
first_recipe: str,
second_recipe: str,
file_name: str,
second_month: str = None) -> float:
"""
    A function which calculates the ratio of quantities between two recipes, optionally across two months.
:param first_month: str
:param first_recipe: str
:param second_recipe: str
:param file_name: str
:param second_month: str
:return: ratio: float
"""
if first_month not in VALID_MONTH:
raise ValueError("Date must be one of %s." % VALID_MONTH)
elif first_recipe not in VALID_RECIPE or second_recipe not in VALID_RECIPE:
raise ValueError("Recipe must be on of %s." % VALID_RECIPE)
else:
if second_month is None:
second_month: str = first_month
first_quantity: int = calc_month_quantity_by_recipe(first_month, first_recipe, file_name)
second_quantity: int = calc_month_quantity_by_recipe(second_month, second_recipe, file_name)
ratio = round(first_quantity / second_quantity, 2)
return ratio
| 5,350,177 |
def teacher_add_to_db():
"""Adds a teacher to database
Returns:
Redirect: Redirects to teachers list route
"""
if request.method == "POST":
fet_name = request.form["fet_name"]
fullname = request.form["fullname"]
teacher_email = request.form["t_email"]
try:
teacher_obj = Teacher(teacher_email=teacher_email,
fet_name=fet_name,
fullname=fullname)
db.session.add(teacher_obj)
db.session.commit()
flash(("Teacher {} added successfully.".format(fet_name)),
category="success")
return redirect(url_for('teacher_list')), 302
except Exception as e:
flash("Exception: {}".format(str(e)), category="danger")
return redirect(url_for("teacher_list")), 302
| 5,350,178 |
def authorize(app_id, channel_id, team_id):
"""Just double check if this app is invoked from the expected app/channel/team"""
if app_id != SLACK_APP_ID:
return f"app ID {app_id}"
if team_id not in SLACK_TEAM_IDS:
return f"team ID {team_id}"
if channel_id not in SLACK_CHANNEL_IDS:
return f"channel ID {channel_id}"
| 5,350,179 |
def deal_one_card():
""" returns a random card from the deck """
cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
return random.choice(cards)
| 5,350,180 |
def _run(bot, inputs, logger, server_override=None, botengine_override=None):
"""
Run the given bot with the given parameters
:param bot: bot to run
:param inputs: the input JSON from the bot server
:param logger: logger object
:param server_override: Override the server URL with the known server when executing on someone's computer
:param botengine_override: For playback simulators, override the botengine object
"""
global _bot_logger
_bot_logger = logger
next_timer_at_server = None
if botengine_override is None:
services = None
if 'services' in inputs:
services = inputs['services']
count = None
if 'count' in inputs:
count = int(inputs['count'])
if 'timer' in inputs:
next_timer_at_server = int(inputs['timer'])
lang = None
if 'lang' in inputs:
lang = inputs['lang']
cloud = None
if 'cloud' in inputs:
cloud = inputs['cloud']
botengine = BotEngine(inputs, server_override=server_override, services=services, lang=lang, count=count, cloud=cloud)
else:
botengine = botengine_override
botengine.start_time_sec = time.time()
botengine._download_core_variables()
botengine.load_variables_time_sec = time.time()
for server in botengine._servers:
if 'sbox' in server:
botengine._validate_count()
break
all_triggers = []
for i in inputs['inputs']:
all_triggers.append(i['trigger'])
botengine.all_trigger_types = all_triggers
timers_existed = False
botengine.triggers_total = len(all_triggers)
for execution_json in inputs['inputs']:
botengine.triggers_index += 1
trigger = execution_json['trigger']
if trigger > 0:
botengine._set_inputs(execution_json)
if trigger != 2048:
saved_timers = botengine.load_variable(TIMERS_VARIABLE_NAME)
if saved_timers is not None:
timers_existed |= len(saved_timers) > 1
for t in [ x[0] for x in saved_timers ]:
if t != MAXINT and t <= execution_json['time']:
focused_timer = saved_timers.pop(0)
botengine.all_trigger_types.append(64)
if callable(focused_timer[1]):
focused_timer[1](botengine, focused_timer[2])
else:
botengine.get_logger().error('BotEngine: Timer fired and popped, but cannot call the focused timer: ' + str(focused_timer))
else:
break
botengine.save_variable(TIMERS_VARIABLE_NAME, botengine.load_variable(TIMERS_VARIABLE_NAME))
if trigger != 64:
bot.run(botengine)
elif saved_timers is not None and not timers_existed:
botengine.get_logger().error('BotEngine: Timer fired but no recollection as to why.')
botengine.get_logger().error('Current timer variable is: ' + str(saved_timers))
botengine.flush_commands()
botengine.flush_questions()
botengine.flush_analytics()
botengine.flush_binary_variables()
if trigger != 2048:
saved_timers = botengine.load_variable(TIMERS_VARIABLE_NAME)
if saved_timers is not None and len(saved_timers) > 0:
while True:
try:
if saved_timers[0][0] != MAXINT:
if saved_timers[0][0] != next_timer_at_server:
botengine._execute_again_at_timestamp(saved_timers[0][0])
botengine.get_logger().info(('< Set alarm: {}').format(saved_timers[0]))
else:
botengine.get_logger().info(('| Alarm already set: {}').format(saved_timers[0]))
break
except Exception as e:
botengine.get_logger().error(('Could not _execute_again_at_timestamp to set timer: {}').format(str(e)))
continue
botengine.flush_rules()
botengine.flush_tags()
botengine.flush_asynchronous_requests()
return
| 5,350,181 |
def grey_pal(start=0.2, end=0.8):
"""
Utility for creating continuous grey scale palette
Parameters
----------
start : float
grey value at low end of palette
end : float
grey value at high end of palette
Returns
-------
out : function
Continuous color palette that takes a single
:class:`int` parameter ``n`` and returns ``n``
equally spaced colors.
Examples
--------
>>> palette = grey_pal()
>>> palette(5)
['#333333', '#737373', '#989898', '#b5b5b5', '#cccccc']
"""
gamma = 2.2
ends = ((0.0, start, start), (1.0, end, end))
cdict = {'red': ends, 'green': ends, 'blue': ends}
grey_cmap = mcolors.LinearSegmentedColormap('grey', cdict)
def continuous_grey_palette(n):
# The grey scale points are linearly separated in
# gamma encoded space
x = np.linspace(start**gamma, end**gamma, n)
# Map points onto the [0, 1] palette domain
vals = (x ** (1./gamma) - start) / (end - start)
return ratios_to_colors(vals, grey_cmap)
return continuous_grey_palette
| 5,350,182 |
def prepare(path, data_id):
"""Process each dataset based on individual characteristics
Args:
path to pull data from
"""
# assert type(train) == bool, 'Wrong train/test selection input'
if train:
suffix = "_train"
else:
suffix = "_test"
if dataset == "synapse":
feature_id = re.findall(r"[\w']+", feature_dir)[-3][-4:]
label_id = re.findall(r"[\w']+", label_dir)[-3][-4:]
assert feature_id == label_id, "Feature and label mis-match: {0}".format(
feature_id
)
feature_array = load_nifty_data(feature_dir)
feature_array = rotate_image(feature_array, 1)
feature_final = np.moveaxis(feature_array, -1, 0)
label_array = load_nifty_data(label_dir)
label_array = rotate_image(label_array, 1)
label_final = np.moveaxis(label_array, -1, 0)
else:
feature_id = re.findall(r"[\w']+", feature_dir)[-1][-4:]
label_id = re.findall(r"[\w']+", label_dir)[-3][-4:]
assert feature_id == label_id, "Feature and label mis-match: {0}".format(
feature_id
)
feature_array = load_dicom_data(feature_dir)
feature_array = rotate_image(feature_array, 2, (1, 2))
feature_final = rotate_image(feature_array, 2, (0, 2))
# feature_final = np.flip(feature_array, axis=0)
# feature_final = feature_array
label_array = load_nifty_data(label_dir)
label_array = np.transpose(label_array, (2, 0, 1))
# label_array = rotate_image(label_array, 2)
# label_final = np.moveaxis(label_array, -1, 0)
label_tcia = rotate_image(label_array, 1, (1, 2))
label_final = np.flip(label_tcia, axis=0)
# label_final = rotate_image(label_array, 2, (0, 2))
# save_dir = os.path.join(parent_save_folder, dataset + suffix, feature_id)
print("parent save folder:", parent_save_folder)
save_dir = os.path.join(parent_save_folder, feature_id)
# print('saving dir: {0}'.format(save_dir))
return save_dir, feature_final, label_final
| 5,350,183 |
def process_watchdog(function_process, process_info_path, process_time, action):
"""
Monitoring function for the file transfer processes spawned off using Multiprocessing.
    It will monitor whether the process has either finished or has gone over its processing time.
Parameters
----------
function_process : multiprocessing.Process
Multiprocessing class that we are monitoring
process_info_path : str
Path to the process_info.json file for the process running
process_time : int
Amount of seconds we want the watchdog to the let the monitored process run
"""
slept_time = 0
while slept_time <= process_time:
sleep(1)
# Get the contents of process_info.json.
# While loop is required in case the json file is being written to while being read.
process_info_data = None
while process_info_data is None:
try:
process_info_data = read_file(process_info_path, True)
except json.decoder.JSONDecodeError:
# Pass while the process_info file is being written to
pass
# Exception is mostly in place for testing
except FileNotFoundError:
return
# If the monitored process has finished
if process_info_data[action]['status'] != 'in_progress':
return
slept_time += 1
# If we've reached here then the process reached our time limit and we need to terminate
# the monitored process and update the process_info.json file.
function_process.terminate()
process_info_data[action]['status'] = 'failed'
process_info_data[action]['message'] = 'The process took too long on the server.'
process_info_data[action]['status_code'] = 504
write_file(process_info_path, process_info_data, True)
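# Hypothetical usage sketch: spawn a transfer worker and watch it for up to
# 60 seconds. `run_transfer`, the ticket path and the 'resource_download'
# action key are illustrative assumptions, not part of this module.
# from multiprocessing import Process
# worker = Process(target=run_transfer, args=('mediafile.zip',))
# worker.start()
# process_watchdog(worker, '/tmp/ticket_1234/process_info.json', 60,
#                  'resource_download')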
| 5,350,184 |
def tags_to_matrix(events_df, tags_df, top_tags):
"""Converts tags to feature matrix
Args:
events_df: Events dataset
tags_df: Tags dataset
top_tags: Tags to include
Returns:
Feature matrix for tags
"""
# Combine tags into lists
tags = tags_df.groupby('id')['tag'].agg(lambda x: list(x)).reset_index()
# Handle events with no top tags
# TODO: Kludge, write nicer
missing_tags = pd.DataFrame({
'id': events_df[~events_df['id'].isin(tags['id'])]['id'].unique()
})
missing_tags['tag'] = [[] for _ in range(len(missing_tags))]
tags = pd.concat([tags, missing_tags])
# Align tags with events
aligned_tags = events_df.merge(tags, on='id')
# Convert aligned tags to matrix
mlb = MultiLabelBinarizer(classes=top_tags)
return mlb.fit_transform(aligned_tags['tag'])
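# Hedged example with made-up events: event 3 has no tags and therefore maps
# to an all-zero row; columns follow the order of top_tags.
# events_df = pd.DataFrame({'id': [1, 2, 3]})
# tags_df = pd.DataFrame({'id': [1, 1, 2], 'tag': ['music', 'outdoor', 'music']})
# tags_to_matrix(events_df, tags_df, top_tags=['music', 'outdoor'])
#   -> array([[1, 1],
#             [1, 0],
#             [0, 0]])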
| 5,350,185 |
def get_parameter_value_and_validate_return_type(
domain: Optional[Domain] = None,
parameter_reference: Optional[Union[Any, str]] = None,
expected_return_type: Optional[Union[type, tuple]] = None,
variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
) -> Optional[Any]:
"""
This method allows for the parameter_reference to be specified as an object (literal, dict, any typed object, etc.)
or as a fully-qualified parameter name. In either case, it can optionally validate the type of the return value.
"""
if isinstance(parameter_reference, dict):
parameter_reference = safe_deep_copy(data=parameter_reference)
parameter_reference = get_parameter_value(
domain=domain,
parameter_reference=parameter_reference,
variables=variables,
parameters=parameters,
)
if expected_return_type is not None:
if not isinstance(parameter_reference, expected_return_type):
raise ge_exceptions.ProfilerExecutionError(
message=f"""Argument "{parameter_reference}" must be of type "{str(expected_return_type)}" \
(value of type "{str(type(parameter_reference))}" was encountered).
"""
)
return parameter_reference
| 5,350,186 |
async def test_oppio_discovery_webhook(opp, aioclient_mock, oppio_client):
"""Test discovery webhook."""
aioclient_mock.get(
"http://127.0.0.1/discovery/testuuid",
json={
"result": "ok",
"data": {
"service": "mqtt",
"uuid": "test",
"addon": "mosquitto",
"config": {
"broker": "mock-broker",
"port": 1883,
"username": "mock-user",
"password": "mock-pass",
"protocol": "3.1.1",
},
},
},
)
aioclient_mock.get(
"http://127.0.0.1/addons/mosquitto/info",
json={"result": "ok", "data": {"name": "Mosquitto Test"}},
)
with patch(
"openpeerpower.components.mqtt.config_flow.FlowHandler.async_step_oppio",
return_value={"type": "abort"},
) as mock_mqtt:
resp = await oppio_client.post(
"/api/oppio_push/discovery/testuuid",
json={"addon": "mosquitto", "service": "mqtt", "uuid": "testuuid"},
)
await opp.async_block_till_done()
assert resp.status == 200
assert aioclient_mock.call_count == 2
assert mock_mqtt.called
mock_mqtt.assert_called_with(
{
"broker": "mock-broker",
"port": 1883,
"username": "mock-user",
"password": "mock-pass",
"protocol": "3.1.1",
"addon": "Mosquitto Test",
}
)
| 5,350,187 |
def add_leaf_to_edge(t):
"""
Returns a `Shape` instance with a new root; both a new leaf and the input `Shape` pend from it.
:param t: `Shape` instance.
:return: `Shape` instance.
"""
return Shape([Shape.LEAF, t])
| 5,350,188 |
def setup_cwl_logger(ti, level=None):
"""
Sets logging level of cwltool logger to correspond LOGGING_LEVEL
from airflow.cfg. Configures handler based on the task instance
to redirect output to the proper file. Suppresses those loggers
from cwltool or related packages that spam.
Note: maybe we will need to remove StreamHandler <stderr> handler
from cwltool logger in case we see undesired outputs in the airflow
logs but not in the separate files.
"""
level = (
conf_get("logging", "LOGGING_LEVEL", "INFO").upper() if level is None else level
)
cwl_logger = logging.getLogger("cwltool")
for handler in cwl_logger.handlers:
try:
handler.set_context(ti)
except AttributeError:
pass
cwl_logger.setLevel(level)
loggers_to_suppress = ["rdflib.term", "salad", "requests", "urllib3"]
for logger_name in loggers_to_suppress:
logger = logging.getLogger(logger_name)
logger.setLevel("ERROR")
| 5,350,189 |
def create_simulation(parameter_values=None, experiment=None, make_inputs=False):
"""
    Create a PyBaMM simulation set up for interaction with liionpack
Parameters
----------
parameter_values : :class:`pybamm.ParameterValues`
The default is None.
experiment : :class:`pybamm.Experiment`
The default is None.
make_inputs : bool, optional
Changes "Current function [A]" and "Total heat transfer coefficient
[W.m-2.K-1]" to be inputs that are controlled by liionpack.
The default is False.
Returns
-------
sim : :class:`pybamm.Simulation`
A simulation that can be solved individually or passed into the
liionpack solve method
"""
# Create the pybamm model
model = pybamm.lithium_ion.SPMe(
options={
"thermal": "lumped",
}
)
# Set up parameter values
if parameter_values is None:
chemistry = pybamm.parameter_sets.Chen2020
parameter_values = pybamm.ParameterValues(chemistry=chemistry)
# Change the current function and heat transfer coefficient to be
# inputs controlled by the external circuit
if make_inputs:
parameter_values.update(
{
"Current function [A]": "[input]",
"Total heat transfer coefficient [W.m-2.K-1]": "[input]",
},
)
# Set up solver and simulation
solver = pybamm.CasadiSolver(mode="safe")
sim = pybamm.Simulation(
model=model,
experiment=experiment,
parameter_values=parameter_values,
solver=solver,
)
return sim
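# Rough usage sketch (exact solver arguments may vary between PyBaMM releases):
# build a controllable simulation and solve one hour with externally supplied
# inputs matching the two parameters made into inputs above.
# sim = create_simulation(make_inputs=True)
# sol = sim.solve(
#     [0, 3600],
#     inputs={
#         "Current function [A]": 1.0,
#         "Total heat transfer coefficient [W.m-2.K-1]": 10.0,
#     },
# )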
| 5,350,190 |
def tarball(options=""):
"""Build the article tarball.
Args:
options (str, optional): Additional options to pass to Snakemake.
"""
    snakefile = Path("${SYW_PATH}") / "workflow" / "build.smk"
snakemake = f"SNAKEMAKE_OUTPUT_CACHE={paths.user().cache} SNAKEMAKE_RUN_TYPE='tarball' snakemake -c1 --use-conda --reason --cache"
command = f"{snakemake} {options} -s {snakefile} syw__arxiv_entrypoint"
result = run_in_env(command, check=False)
if result.returncode > 0:
os._exit(1)
| 5,350,191 |
def test_parses_valid_outofrange_dates() -> None:
"""It parses valid dates outside the range supported by pandas.Timestamp."""
df = pd.DataFrame(
[
("1676-01-01", datetime.date(1676, 1, 1)),
("2263-12-31", datetime.date(2263, 12, 31)),
]
)
parsed = parse_date(df[0])
assert (parsed == df[1]).all()
| 5,350,192 |
def add(lexer: str) -> None:
"""Add a paste to pinnwand's database from stdin."""
if lexer not in utility.list_languages():
log.error("add: unknown lexer")
return
paste = database.Paste(
sys.stdin.read(), lexer=lexer, expiry=timedelta(days=1)
)
with database.session() as session:
session.add(paste)
session.commit()
log.info("add: paste created: %s", paste.paste_id)
| 5,350,193 |
def _logistic_loss_and_grad(w, X, y, alpha, mask, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
mask : array-like, shape (n_features), (n_classes, n_features) optional
Masking array for coef.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
n_samples, n_features = X.shape
if mask is not None:
w[:n_features] *= mask
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(n_samples)
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) / n_samples
out += .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = (safe_sparse_dot(X.T, z0) / n_samples) + alpha * w
if mask is not None:
grad[:n_features] *= mask
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum() / n_samples
return out, grad
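# Hypothetical smoke test, assuming the sklearn helpers this module imports
# (_intercept_dot, log_logistic, safe_sparse_dot, expit) are available.
# rng = np.random.RandomState(0)
# X = rng.randn(20, 3)
# y = np.where(rng.randn(20) > 0, 1.0, -1.0)  # labels in {-1, +1}
# w = np.zeros(3)                             # no intercept term
# mask = np.array([1.0, 0.0, 1.0])            # zero out the second coefficient
# loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.0, mask=mask)
# grad[1] is forced to zero by the mask, mirroring the masking logic above.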
| 5,350,194 |
def verify_status_code(request_response: requests.Response) -> tuple:
"""Verify the status code of the post request to the search url and raise exceptions if the code is unexpected
:type request_response: requests.Response
:return:
"""
if request_response.status_code == 200:
return STATUS_CODE_OK, ''
elif request_response.status_code == 429:
if 'user\'s rate limit' in request_response.text:
msg = "Search rate limit reached"
return STATUS_CODE_REPEAT, msg
if 'limit of 150 searches' in request_response.text:
raise DailyLimitReachedException('Daily search limit for unregistered users reached')
elif 'limit of 300 searches' in request_response.text:
raise DailyLimitReachedException('Daily search limit for basic users reached')
else:
raise DailyLimitReachedException('Daily search limit reached')
elif request_response.status_code == 403:
raise InvalidOrWrongApiKeyException("Invalid or wrong API key")
elif request_response.status_code == 413:
msg = "Payload too large, skipping file"
return STATUS_CODE_SKIP, msg
else:
msg = "Unknown status code: {0:d}".format(request_response.status_code)
return STATUS_CODE_REPEAT, msg
| 5,350,195 |
def copynew(strpath, destdir):  # need an arg for the base destination directory
    """replicate the fs tree of strpath in destdir;
    if a source file is newer than, or missing from, the destination then copy it
    """
d_src = glob(strpath+"\\*")
depth=1#not sure how to track recursion depth
for curpth in d_src:
if os.path.basename(curpth)[0] == '_': continue #skip files/dirs that start with underscore e.g. __pycache__
#if the name is a directory then recurse
dstpth = destdir+"\\"+os.path.split(curpth)[1]
#print("{} vs {}".format(curpth, dstpth))
if not(os.path.isfile(dstpth) or os.path.isdir(dstpth)):
if(os.path.isdir(curpth) ):
print("creating directory {}".format(dstpth))
os.mkdir(dstpth)#cant copy2 a directory
copynew(curpth, dstpth)
else:
print("{} does not exist...copying".format(dstpth))#check dst pth for the file
#if the file doesn't exist in the dest then copy it
shutil.copy2(curpth,dstpth)
#if the file doesn't exist then copy it
            # no early return here; returning after the first new item is what
            # forced multiple runs to get everything synced
elif os.path.isdir(curpth):#recursive call to look at subpaths - need to update destdir as well
#print("{} is a directory".format(curpth))
#print("*"*20+ "subdir {}".format(curpth)+"*"*20)
copynew(curpth,dstpth)
else:#file is a file that needs to be checked
#if the file is newer or the file is a different size - copy it to the dest path
#print("{} is a file: mtime={} size={}bytes".format(curpth,os.stat(curpth).st_mtime,os.stat(curpth).st_size))
#if the file is newer than the one in dest then copy it
if(os.path.getmtime(curpth) > os.path.getmtime(dstpth)):#if file is newer - may need to check size as well
print("*** {} is newer than {}... syncing".format(curpth,dstpth))
shutil.copy2(curpth,dstpth)
#else do nothing
print("done with {} syncing new files to {} ".format(strpath, destdir))
print("*"*25)
return
| 5,350,196 |
def instability_product_graphs(gra):
""" Determine if the species has look for functional group attachments that
could cause molecule instabilities
"""
# Build graphs for the detection scheme
rad_grp_dct = radical_group_dct(gra)
# Check for instability causing functional groups
prd_gras = ()
for atm, grps in rad_grp_dct.items():
if atm in instab_fgrps.DCT:
fgrps, prds = instab_fgrps.DCT[atm]
for grp in grps:
grp_ich = automol.graph.inchi(grp)
if grp_ich in fgrps:
# If instability found, determine prod of the instability
prd_ich = prds[fgrps.index(grp_ich)]
prd_geo = automol.inchi.geometry(prd_ich)
prd_gra = automol.geom.graph(prd_geo)
prd_gras = radical_dissociation_prods(
gra, prd_gra)
break
return prd_gras
| 5,350,197 |
def build_pdb_rmsd_matrix(pdb_paths, pdb_diff_path=None):
"""
Returns rmsd difference matrix for multiple pdb files.
Returns rmsd_list (3-item list), pdb_comp_amount (int).
Optional with pdb_diff_path return pdb_diff_comp(int).
"""
# make 3 column list or ndarray for x, y = (pdb1-n * pdb1-n) and z = rmsd diff
rmsd_list = [[], [], []]
# get rmsd difference between each pdb file in nested loop and append
for pdb0 in pdb_paths:
# compare 2 different sets of pdb files
if pdb_diff_path != None:
for pdb1 in pdb_diff_path:
# append to x (col 0) pdb in outer loop
rmsd_list[0].append(pdb_paths.index(pdb0) + 1)
# append to y (col 1) pdb in inner loop
rmsd_list[1].append(pdb_diff_path.index(pdb1) + 1)
# find and append to z (col 2) rmsd value between pdb0 and pdb1
rmsd = rmsd_diff_calc(pdb0, pdb1)
#print(f"\n For PDB-A = {pdb0} and PDB-B = {pdb1} : RMSD = {rmsd}")
rmsd_list[2].append(rmsd)
elif pdb_diff_path == None:
for pdb1 in pdb_paths:
# append to x (col 0) pdb in outer loop
rmsd_list[0].append(pdb_paths.index(pdb0) + 1)
# append to y (col 1) pdb in inner loop
rmsd_list[1].append(pdb_paths.index(pdb1) + 1)
# find and append to z (col 2) rmsd value between pdb0 and pdb1
rmsd = rmsd_diff_calc(pdb0, pdb1)
rmsd_list[2].append(rmsd)
# amount of pdb files to compare to each other
pdb_comp_amount = len(pdb_paths)
if pdb_diff_path == None:
return rmsd_list, pdb_comp_amount
elif pdb_diff_path !=None:
pdb_diff_comp = len(pdb_diff_path)
return rmsd_list, pdb_comp_amount, pdb_diff_comp
| 5,350,198 |
def handle_options():
"""
Define default options for a complete and automatic process
then check the command line arguments for parts of the process to skip
Returns:
auto: whether or not we accept user inputs on job and location
scrap: whether or not we do the scraping
working_data: whether or not we get working data from csv and mongoDB
        cleaner: whether or not we do the cleaning of data
        pre_process: whether or not we do the pre-processing of data
        model: whether or not we do the model part
update: whether or not we update the DB with our findings
report: whether or not we do the reporting to the CEO
"""
auto = False
scrap = True
working_data = True
cleaner = True
pre_process = True
model = True
update = True
report = True
log = logging.getLogger('main')
log.debug(sys.argv)
if sys.argv:
# there are command line arguments, we must handle them
for arg in sys.argv:
if arg == '-auto':
auto = True
elif arg == '-noScrap':
scrap = False
elif arg == '-noReport':
report = False
return auto, scrap, working_data, cleaner, pre_process, model, update, report
| 5,350,199 |