content (string, 22–815k chars) | id (int64, 0–4.91M)
---|---
def NDVI(R, NIR):
""" Compute the NDVI
INPUT : R (np.array) -> the Red band images as a numpy array of float
NIR (np.array) -> the Near Infrared images as a numpy array of float
OUTPUT : NDVI (np.array) -> the NDVI
"""
NDVI = (NIR - R) / (NIR + R + 1e-12)
return NDVI
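
# Minimal usage sketch for NDVI above (the band values are made-up reflectances):
import numpy as np

red = np.array([[0.2, 0.3], [0.25, 0.4]])
nir = np.array([[0.6, 0.7], [0.55, 0.8]])
ndvi = NDVI(red, nir)  # values lie in [-1, 1]; the 1e-12 term guards against division by zero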
| 5,350,000 |
def get_nearby_stations_by_number(
latitude: float,
longitude: float,
num_stations_nearby: int,
parameter: Union[Parameter, str],
time_resolution: Union[TimeResolution, str],
period_type: Union[PeriodType, str],
minimal_available_date: Optional[Union[datetime, str]] = None,
maximal_available_date: Optional[Union[datetime, str]] = None,
) -> pd.DataFrame:
"""
Provides a list of weather station ids for the requested data
:param latitude: Latitude of location to search for nearest
weather station
:param longitude: Longitude of location to search for nearest
weather station
:param minimal_available_date: Start date of timespan where measurements
should be available
:param maximal_available_date: End date of timespan where measurements
should be available
:param parameter: Observation measure
:param time_resolution: Frequency/granularity of measurement interval
:param period_type: Recent or historical files
:param num_stations_nearby: Number of stations that should be nearby
    :return: DataFrame with the ``num_stations_nearby`` closest stations that
             provide data for the requested location
"""
if num_stations_nearby <= 0:
raise ValueError("'num_stations_nearby' has to be at least 1.")
parameter = parse_enumeration_from_template(parameter, Parameter)
time_resolution = parse_enumeration_from_template(time_resolution, TimeResolution)
period_type = parse_enumeration_from_template(period_type, PeriodType)
if not check_parameters(parameter, time_resolution, period_type):
raise InvalidParameterCombination(
f"The combination of {parameter.value}, {time_resolution.value}, "
f"{period_type.value} is invalid."
)
minimal_available_date = (
minimal_available_date
if not minimal_available_date or isinstance(minimal_available_date, datetime)
else parse_datetime(minimal_available_date)
)
    maximal_available_date = (
        maximal_available_date
        if not maximal_available_date or isinstance(maximal_available_date, datetime)
        else parse_datetime(maximal_available_date)
    )
if minimal_available_date and maximal_available_date:
if minimal_available_date > maximal_available_date:
raise ValueError(
"'minimal_available_date' has to be before " "'maximal_available_date'"
)
coords = Coordinates(np.array(latitude), np.array(longitude))
metadata = metadata_for_climate_observations(
parameter, time_resolution, period_type
)
# Filter only for stations that have a file
metadata = metadata[metadata[DWDMetaColumns.HAS_FILE.value].values]
if minimal_available_date:
metadata = metadata[
metadata[DWDMetaColumns.FROM_DATE.value] <= minimal_available_date
]
if maximal_available_date:
metadata = metadata[
metadata[DWDMetaColumns.TO_DATE.value] >= maximal_available_date
]
metadata = metadata.reset_index(drop=True)
distances, indices_nearest_neighbours = _derive_nearest_neighbours(
metadata.LAT.values, metadata.LON.values, coords, num_stations_nearby
)
distances = pd.Series(distances)
indices_nearest_neighbours = pd.Series(indices_nearest_neighbours)
    # If num_stations_nearby is higher than the actual number of stations,
    # extra indices and distances are returned and have to be filtered out
distances = distances[: min(metadata.shape[0], num_stations_nearby)]
indices_nearest_neighbours = indices_nearest_neighbours[
: min(metadata.shape[0], num_stations_nearby)
]
distances_km = np.array(distances * KM_EARTH_RADIUS)
metadata_location = metadata.iloc[indices_nearest_neighbours, :].reset_index(
drop=True
)
metadata_location[DWDMetaColumns.DISTANCE_TO_LOCATION.value] = distances_km
if metadata_location.empty:
logger.warning(
f"No weather stations were found for coordinate "
f"{latitude}°N and {longitude}°E "
)
return metadata_location
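
# Hedged usage sketch: the string values below ("precipitation", "hourly", "recent") are
# illustrative assumptions and must match the package's Parameter/TimeResolution/PeriodType
# enums; the call also needs access to the DWD station metadata.
# stations = get_nearby_stations_by_number(
#     latitude=50.0,
#     longitude=8.9,
#     num_stations_nearby=3,
#     parameter="precipitation",
#     time_resolution="hourly",
#     period_type="recent",
# )
# print(stations[DWDMetaColumns.DISTANCE_TO_LOCATION.value])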
| 5,350,001 |
def test_new_hero(new_hero):
"""
GIVEN a Hero model
WHEN a new Hero is created
THEN check name, hash_password, group_id, health, permissions, is_participant
"""
assert new_hero.name == 'test_user'
assert new_hero.password_hash != 'password'
assert new_hero.check_password('password')
assert new_hero.check_public_id()
assert new_hero.health == 100
assert new_hero.group_id == GroupType.HUMAN
assert new_hero.is_participant
| 5,350,002 |
def get_state(tau, i=None, h=None, delta=None, state_0=None, a_matrix=None):
"""
Compute the magnetization state.
r(τ) = e^(Aτ)r(0) eq (11) at[1]
"""
if a_matrix is not None:
# get state from a known A matrix
# A matrix can be shared and it takes time to build
return np.matmul(scipy.linalg.expm(tau*a_matrix), state_0)
return np.matmul(scipy.linalg.expm(
tau*generate_A(i, h, delta, state_0.size-1)), state_0)
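
# Minimal sketch of the a_matrix branch above (A is a toy 2x2 relaxation matrix, not a
# physically meaningful one):
import numpy as np

A = np.array([[-1.0, 0.0], [0.0, -0.5]])
r0 = np.array([1.0, 0.5])
r_tau = get_state(tau=0.1, state_0=r0, a_matrix=A)  # r(tau) = expm(tau * A) @ r(0)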
| 5,350,003 |
def get_version():
"""Returns version number, without module import (which can lead to ImportError
if some dependencies are unavailable before install."""
contents = read_file(os.path.join('webscaff', '__init__.py'))
version = re.search('VERSION = \(([^)]+)\)', contents)
version = version.group(1).replace(', ', '.').strip()
return version
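
# Self-contained illustration of the parsing step above, using a literal string in place of
# the real webscaff/__init__.py contents:
import re

contents_example = "VERSION = (1, 2, 3)"
match = re.search(r'VERSION = \(([^)]+)\)', contents_example)
print(match.group(1).replace(', ', '.').strip())  # -> 1.2.3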
| 5,350,004 |
def convert_latlon_arr(in_lat, in_lon, height, dtime, code="G2A"):
"""Converts between geomagnetic coordinates and AACGM coordinates.
Parameters
------------
in_lat : (np.ndarray or list or float)
Input latitude in degrees N (code specifies type of latitude)
in_lon : (np.ndarray or list or float)
Input longitude in degrees E (code specifies type of longitude)
height : (np.ndarray or list or float)
Altitude above the surface of the earth in km
dtime : (datetime)
Single datetime object for magnetic field
code : (int or str)
Bit code or string denoting which type(s) of conversion to perform
G2A - geographic (geodetic) to AACGM-v2
A2G - AACGM-v2 to geographic (geodetic)
TRACE - use field-line tracing, not coefficients
ALLOWTRACE - use trace only above 2000 km
BADIDEA - use coefficients above 2000 km
GEOCENTRIC - assume inputs are geocentric w/ RE=6371.2
(default = "G2A")
Returns
-------
out_lat : (np.ndarray)
Output latitudes in degrees N
out_lon : (np.ndarray)
Output longitudes in degrees E
out_r : (np.ndarray)
Geocentric radial distance (R_Earth) or altitude above the surface of
the Earth (km)
Notes
-------
At least one of in_lat, in_lon, and height must be a list or array.
"""
import aacgmv2._aacgmv2 as c_aacgmv2
# If a list was entered instead of a numpy array, recast it here
if isinstance(in_lat, list):
in_lat = np.array(in_lat)
if isinstance(in_lon, list):
in_lon = np.array(in_lon)
if isinstance(height, list):
height = np.array(height)
# If one or two of these elements is a float or int, create an array
test_array = np.array([hasattr(in_lat, "shape"), hasattr(in_lon, "shape"),
hasattr(height, "shape")])
if not test_array.all():
if test_array.any():
arr_shape = in_lat.shape if test_array.argmax() == 0 else \
(in_lon.shape if test_array.argmax() == 1 else
height.shape)
if not test_array[0]:
in_lat = np.ones(shape=arr_shape, dtype=float) * in_lat
if not test_array[1]:
in_lon = np.ones(shape=arr_shape, dtype=float) * in_lon
if not test_array[2]:
height = np.ones(shape=arr_shape, dtype=float) * height
else:
logging.info("for a single location, consider using convert_latlon")
in_lat = np.array([in_lat])
in_lon = np.array([in_lon])
height = np.array([height])
# Ensure that lat, lon, and height are the same length or if the lengths
# differ that the different ones contain only a single value
if not (in_lat.shape == in_lon.shape and in_lat.shape == height.shape):
ulen = np.unique([in_lat.shape, in_lon.shape, height.shape])
if ulen.min() != (1,):
logging.error("mismatched input arrays")
return None, None, None
# Test time
if isinstance(dtime, dt.date):
dtime = dt.datetime.combine(dtime, dt.time(0))
assert isinstance(dtime, dt.datetime), \
logging.error('time must be specified as datetime object')
# Test height
if np.min(height) < 0:
        logging.warning('conversion not intended for altitudes < 0 km')
# Initialise output
lat_out = np.empty(shape=in_lat.shape, dtype=float) * np.nan
lon_out = np.empty(shape=in_lon.shape, dtype=float) * np.nan
r_out = np.empty(shape=height.shape, dtype=float) * np.nan
# Test code
try:
code = code.upper()
if(np.nanmax(height) > 2000 and code.find("TRACE") < 0 and
code.find("ALLOWTRACE") < 0 and code.find("BADIDEA") < 0):
estr = 'coefficients are not valid for altitudes above 2000 km. You'
estr += ' must either use field-line tracing (trace=True '
estr += 'or allowtrace=True) or indicate you know this '
estr += 'is a bad idea'
logging.error(estr)
return lat_out, lon_out, r_out
# make flag
bit_code = convert_str_to_bit(code)
except AttributeError:
bit_code = code
assert isinstance(bit_code, int), \
logging.error("unknown code {:}".format(bit_code))
# Test latitude range
if np.abs(in_lat).max() > 90.0:
assert np.abs(in_lat).max() <= 90.1, \
logging.error('unrealistic latitude')
in_lat = np.clip(in_lat, -90.0, 90.0)
# Constrain longitudes between -180 and 180
in_lon = ((in_lon + 180.0) % 360.0) - 180.0
# Set current date and time
try:
c_aacgmv2.set_datetime(dtime.year, dtime.month, dtime.day, dtime.hour,
dtime.minute, dtime.second)
except:
raise RuntimeError("unable to set time for {:}".format(dtime))
# Vectorise the AACGM code
convert_vectorised = np.vectorize(c_aacgmv2.convert)
# convert
try:
lat_out, lon_out, r_out = convert_vectorised(in_lat, in_lon, height,
bit_code)
except:
pass
return lat_out, lon_out, r_out
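
# Hedged usage sketch (requires the aacgmv2 C extension and its coefficient files; the output
# values depend on the installed coefficient set):
# import datetime as dt
# out_lat, out_lon, out_r = convert_latlon_arr([45.5, 60.0], [8.1, 10.0], 300.0,
#                                              dt.datetime(2015, 1, 1), code="G2A")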
| 5,350,005 |
def get_entsoe_renewable_data(file=None, version=None):
"""
    Load the default file for renewable energy (RE) time series or a specific file.
    Returns
    -------
    pandas.DataFrame
Examples
--------
>>> my_re=get_entsoe_renewable_data()
>>> int(my_re['DE_solar_generation_actual'].sum())
188160676
"""
if version is None:
version = cfg.get("entsoe", "timeseries_version")
path_pattern = os.path.join(cfg.get("paths", "entsoe"), "{0}")
if file is None:
fn = path_pattern.format(
cfg.get("entsoe", "renewables_file_csv").format(version=version)
)
else:
fn = file.format(version=version)
if not os.path.isfile(fn):
if file is None:
renewables = split_timeseries_file(version=version).renewables
renewables.to_csv(fn)
re = pd.read_csv(
fn,
index_col=[0],
parse_dates=True,
date_parser=lambda x: datetime.datetime.strptime(
x.split("+")[0], "%Y-%m-%d %H:%M:%S"
),
)
return re
| 5,350,006 |
def getPredictedAnchor(title: str) -> str:
"""Return predicted anchor for given title, usually first letter."""
title = title.lower()
if title.startswith('npj '):
return 'npj series'
title = re.sub(r'^(the|a|an|der|die|das|den|dem|le|la|les|el|il)\s+', '',
title)
return title[0].upper()
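
# Quick illustration of the leading-article stripping above:
# getPredictedAnchor("The Astrophysical Journal")  -> "A"
# getPredictedAnchor("npj Quantum Materials")      -> "npj series"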
| 5,350,007 |
def get_contact_flow(contact_flow_id: Optional[str] = None,
instance_id: Optional[str] = None,
name: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
type: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetContactFlowResult:
"""
Provides details about a specific Amazon Connect Contact Flow.
## Example Usage
By name
```python
import pulumi
import pulumi_aws as aws
test = aws.connect.get_contact_flow(instance_id="aaaaaaaa-bbbb-cccc-dddd-111111111111",
name="Test")
```
By contact_flow_id
```python
import pulumi
import pulumi_aws as aws
test = aws.connect.get_contact_flow(contact_flow_id="cccccccc-bbbb-cccc-dddd-111111111111",
instance_id="aaaaaaaa-bbbb-cccc-dddd-111111111111")
```
:param str contact_flow_id: Returns information on a specific Contact Flow by contact flow id
:param str instance_id: Reference to the hosting Amazon Connect Instance
:param str name: Returns information on a specific Contact Flow by name
    :param Mapping[str, str] tags: Map of tags assigned to the Contact Flow.
:param str type: Specifies the type of Contact Flow.
"""
__args__ = dict()
__args__['contactFlowId'] = contact_flow_id
__args__['instanceId'] = instance_id
__args__['name'] = name
__args__['tags'] = tags
__args__['type'] = type
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:connect/getContactFlow:getContactFlow', __args__, opts=opts, typ=GetContactFlowResult).value
return AwaitableGetContactFlowResult(
arn=__ret__.arn,
contact_flow_id=__ret__.contact_flow_id,
content=__ret__.content,
description=__ret__.description,
id=__ret__.id,
instance_id=__ret__.instance_id,
name=__ret__.name,
tags=__ret__.tags,
type=__ret__.type)
| 5,350,008 |
def jensen_alpha_beta(risk_returns ,benchmark_returns,Rebalancement_frequency):
"""
Compute the Beta and alpha of the investment under the CAPM
Parameters
----------
risk_returns : np.ndarray
benchmark_returns : np.ndarray
Rebalancement_frequency : np.float64
    Returns
    ----------
    (np.float64, np.float64) : Beta and Alpha of the investment, in that order
"""
benchmark_returns = sm.add_constant(benchmark_returns)
model = sm.OLS(risk_returns,benchmark_returns).fit()
alpha,beta = model.params[0] * Rebalancement_frequency , model.params[1]
return beta,alpha
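
# Minimal sketch (assumes statsmodels is available as ``sm``, as the function body requires;
# the returns are synthetic daily figures, so 252 is used as the rebalancement frequency):
import numpy as np

rng = np.random.default_rng(0)
risk_returns = rng.normal(0.0005, 0.01, 250)
benchmark_returns = rng.normal(0.0004, 0.009, 250)
beta, alpha = jensen_alpha_beta(risk_returns, benchmark_returns, 252.0)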
| 5,350,009 |
def cycles_run() -> int:
"""Number of cycles run so far"""
return lib.m68k_cycles_run()
| 5,350,010 |
def plot_result(result, prefix='', comparison_data={}, direction='xy', plot_flags='mcdtvs'):
"""
plot cobea results.
Parameters
----------
result : object
A :py:class:`cobea.model.Result` object.
    prefix : str
        path prefix (e.g. an output folder) that is prepended to the file names of all saved figures.
comparison_data : dict
a dictionary containing optional data from alternative decoupled storage ring models, which may contain the following keys:
'name': name of the algorithm or model used
'beta': an array of shape (result.M,result.J) that contains Courant-Snyder beta values for each direction and monitor
'phi': an array of the same shape as 'beta', containing Courant-Snyder betatron phases
'dispersion': an array of the same shape, containing dispersion values
direction : str
direction characters for the result object. can be 'x','y', or 'xy'.
    plot_flags : str
        which plots are to be created. Each character represents a different result plot:
        'm': monitor_results -> monitor_m*.pdf
        'c': corrector_results -> corrector_m*.pdf
        'd': plot_Dev_err -> Dev_err_w*.pdf, hist_w*.pdf
        't': plot_topology -> topology.pdf
        'v': convergence information -> convergence.pdf. Only works if convergence information is available.
        's': known-element optics -> known_element_m*.pdf. Only works if result.known_element is not None.
"""
print('Printing result figures to path %s...' % prefix)
if 'm' in plot_flags:
for m in range(result.M):
for w in range(result.M):
fig = monitor_results(result, m, w, comparison_data, direction)
_savefig_close(fig, prefix + 'monitor_m%i_%s.pdf' % (m, direction[w]))
print(' m | monitor figures saved to monitor...pdf.')
if 'c' in plot_flags:
for m in range(result.M):
for filter in result.topology.corr_masks:
fig = corrector_results(result, m, comparison_data, direction, filter)
_savefig_close(fig, prefix + 'corrector_m%i_%s.pdf' % (m, filter.replace('*', '')))
print(' c | corrector figures saved to corrector...pdf.')
if 'd' in plot_flags:
for w in range(result.M):
for filter in result.topology.corr_masks:
mat_fig, hist_fig = plot_Dev_err(result, w, corr_filter=filter)
filter_str = filter.replace('*', '')
_savefig_close(mat_fig, prefix + 'Dev_err_%s_%s.pdf' % (direction[w], filter_str))
_savefig_close(hist_fig, prefix + 'hist_%s_%s.pdf' % (direction[w], filter_str))
print(' d | input matrix and deviations saved to Dev_err...pdf, hist...pdf.')
if 't' in plot_flags:
fig = plot_topology(result.topology)
_savefig_close(fig, prefix + 'topology.pdf')
print(' t | topology saved to topology.pdf')
if 'v' in plot_flags and 'conv' in result.additional: # convergence information is available
fig, ax = _subplots(1, 1, 'A5')
ax.semilogy(result.additional['conv']['it'], result.additional['conv']['f'])
ax.set_xlabel('L-BFGS iterations')
ax.set_ylabel('residual squared error $\chi^2$')
ax.set_xlim((0, result.additional['conv']['it'][-1]))
_savefig_close(fig, prefix + 'convergence.pdf')
print(' v | convergence information saved to convergence.pdf')
if 's' in plot_flags and result.known_element is not None:
delta_s = linspace(0, result.known_element.length, 256)
drift_js = result.topology.monitor_index(result.known_element.mon_names)
R_ends = result.R_jmw[drift_js]
R_ends_err = result.error.R_jmw[drift_js]
d_ends, d_ends_err = result.d_jw[drift_js], result.error.d_jw[drift_js]
for m in range(result.M):
for w in range(result.M):
fig, ax = _subplots(4, 1, 'A5', sharex=True)
R_s, R_s_err = result.known_element.inside_tracking(R_ends[:, m, w], delta_s,
rj_drift_err=R_ends_err[:, m, w])
beta_s = R_s.real**2 + R_s.imag**2
# beta_s_err = 2*(R_s.real*R_s_err.real + R_s.imag*R_s_err.imag)
phi_deg_s = angle( R_s*R_ends[0, m, w].conj() ) * 180 / pi
# _plot_boxes_complex(ax[0], R_s, err=R_s_err, xval=delta_s, markers=(None, None))
info_table(result, m, direction[w],
desc='optics in known element %s--%s' % tuple(result.known_element.mon_names), ax=ax[0])
ax[0].plot(delta_s, R_s.real, label='Re')
ax[0].plot(delta_s, R_s.imag, label='Im')
ax[0].set_ylabel(r'$\hat R_{%i%c}(s)$ / $\sqrt{\mathrm{m}}$' % (m, direction[w]))
ax[0].legend()
# _plot_boxes(ax[1], beta_s, err=beta_s_err, xval=delta_s, marker=None)
ax[1].plot(delta_s, beta_s)
ax[1].set_ylabel(r'$\beta_{%i%c}(s)$ / m' % (m, direction[w]))
# _plot_boxes(ax[2], phi_deg_s, err=None, xval=delta_s, marker=None)
ax[2].plot(delta_s, phi_deg_s)
ax[2].set_ylabel(r'$\phi_{%i%c}(s)$ / deg' % (m, direction[w]))
d_s, d_s_err = result.known_element.inside_tracking(d_ends[:,w], delta_s, rj_drift_err=d_ends_err[:,w])
ax[3].plot(delta_s, d_s)
ax[3].set_ylabel('const * dispersion / m')
ax[-1].set_xlim((0, result.known_element.length))
ax[-1].set_xlabel('distance from %s / m' % result.known_element.mon_names[0])
_savefig_close(fig, prefix + 'known_element_m%i_%s.pdf' % (m, direction[w]))
print(' s | known element plots saved to known_element...pdf')
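
# Hedged usage sketch (a cobea ``Result`` object is assumed to exist already; here only the
# monitor, corrector, deviation and topology plots are requested):
# plot_result(result, prefix='output/', direction='xy', plot_flags='mcdt')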
| 5,350,011 |
def loop_and_return_fabric(lines):
"""
loops lines like:
#1196 @ 349,741: 17x17
"""
fabric = {}
for line in lines:
[x, y, x_length, y_length] = parse_line(line)
i_x, i_y = 0, 0
while i_y < y_length:
i_x = 0
while i_x < x_length:
this_coords = (x + i_x, y - i_y)
                if this_coords in fabric:
                    fabric[this_coords] += 1
                else:
                    fabric[this_coords] = 1
i_x += 1
i_y += 1
return fabric
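
# Usage sketch for the Advent-of-Code style claims above (``parse_line`` is assumed to return
# [x, y, x_length, y_length] for a claim line, as the loop requires):
claims = ["#1 @ 1,3: 4x4", "#2 @ 3,1: 4x4", "#3 @ 5,5: 2x2"]
fabric = loop_and_return_fabric(claims)
overlapping = sum(1 for count in fabric.values() if count > 1)  # squares claimed more than once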
| 5,350,012 |
def get_sample(id):
"""Returns sample possessing id."""
for sample in samples_global:
if sample.id == id:
return sample
raise Exception(f'sample "{id}" could not be found')
| 5,350,013 |
def test_parse_null_as_none():
"""
Tests whether None may be passed via yaml kwarg null.
"""
initialize()
yamlfile = """{
"model": !obj:pylearn2.models.autoencoder.Autoencoder {
"nvis" : 1024,
"nhid" : 64,
"act_enc" : Null,
"act_dec" : null
}
}"""
load(yamlfile)
| 5,350,014 |
def plot_reion_properties(rank, size, comm, reion_ini_files, gal_ini_files,
model_tags, reion_plots, output_dir, output_format):
"""
Wrapper function to handle reading in of data + calculating reionization
properties, then calling the specified plotting routines.
Parameters
----------
rank : Integer
This processor rank.
size : Integer
The total number of processors executing the pipeline.
comm : Class ``mpi4py.MPI.Intracomm``
The ``mpi4py`` communicator.
reion_ini_files, gal_ini_files : List of strings
``.ini`` file corresponding to each model that we're plotting. We need
both the galaxy (``SAGE``) and reionization (``cifog``) ``.ini`` files.
model_tags : List of strings
String that will appear on the legend of the plot for each model.
reion_plots : Dictionary
Controls which of the plots we will make. Keys are the name of each
plot (e.g., ``history``) and the value specifies if we are plotting it.
output_dir : String
Directory where the plots are saved. If this directory does not exist,
it is created beforehand.
output_format : String
The format of the saved figures.
Returns
---------
None. All figures are saved to the ``output_dir`` in format
``output_format``.
"""
# Check to see if the output directory exists.
if rank == 0:
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print("Made output directory {0}".format(output_dir))
MC_dir = "{0}/MC".format(output_dir)
os.makedirs(MC_dir)
print("Made directory {0}".format(MC_dir))
# First calculate all the properties and statistics we need.
reion_data = generate_data(rank, size, comm, reion_ini_files,
gal_ini_files, reion_plots, output_dir,
model_tags, output_format)
# Gather all the fractions onto the master process.
# This will be used for many different plots.
master_mass_frac = collective.collect_hist_across_tasks(rank, comm,
reion_data["mass_frac_allmodels"])
master_mass_frac = comm.bcast(master_mass_frac, root = 0)
# Then find out what we need and plot em!
if reion_plots["history"] and rank == 0:
duration_z, duration_t, reion_completed = \
calc_duration(reion_data["z_array_reion_allmodels"],
reion_data["lookback_array_reion_allmodels"],
master_mass_frac, reion_plots["duration_definition"])
for model_number in range(len(master_mass_frac)):
print("Model {0}: Start {1:.2f} \tMid {2:.2f}\tEnd {3:.2f}\t"
"dz {4:.2f}\tdt {5:.1f}Myr\tReion Completed {6}" \
.format(model_number, duration_z[model_number][0],
duration_z[model_number][1], duration_z[model_number][-1],
duration_z[model_number][0]-duration_z[model_number][-1],
duration_t[model_number][-1]-duration_t[model_number][0],
reion_completed[model_number]))
print("Plotting the reionization history.")
reionplot.plot_history(reion_data["z_array_reion_allmodels"],
reion_data["lookback_array_reion_allmodels"],
reion_data["cosmology_allmodels"],
reion_data["t_bigbang_allmodels"],
master_mass_frac,
model_tags, output_dir, "history",
output_format)
if reion_plots["nion"]:
master_nion = collective.collect_hist_across_tasks(rank, comm,
reion_data["nion_allmodels"])
if rank == 0:
print("Plotting the ionizing emissivity.")
reionplot.plot_nion(reion_data["z_array_reion_allmodels"],
reion_data["lookback_array_reion_allmodels"],
reion_data["cosmology_allmodels"],
reion_data["t_bigbang_allmodels"],
master_nion,
reion_data["nion_factor_allmodels"],
model_tags, output_dir, "nion", output_format)
if reion_plots["ps_fixed_XHI"]:
k, P21, PHII = determine_ps_fixed_XHI(rank, size, comm,
reion_data["z_array_reion_allmodels"],
reion_data["cosmology_allmodels"],
master_mass_frac,
reion_data["XHII_fbase_allmodels"],
reion_data["XHII_precision_allmodels"],
reion_data["density_fbase_allmodels"],
reion_data["density_precision_allmodels"],
reion_data["GridSize_allmodels"],
reion_data["boxsize_allmodels"],
reion_data["first_snap_allmodels"],
reion_plots["fixed_XHI_values"])
if rank == 0:
print("Plotting PS at fixed neutral fraction.")
reionplot.plot_ps_fixed_XHI(k, P21, PHII,
reion_plots["fixed_XHI_values"],
model_tags, output_dir, "ps_fixed_XHI",
output_format)
if reion_plots["contours"] and rank == 0:
# tau is used for multiple plots. So check if we need to calculate it.
try:
tau_allmodels
except NameError:
tau_allmodels = calc_tau(reion_data["z_array_reion_allmodels"],
reion_data["cosmology_allmodels"],
reion_data["helium_allmodels"],
master_mass_frac)
# For the contours, only plot the optical depth at the highest z.
tau_highz = []
for model_number in range(len(tau_allmodels)):
tau_highz.append(tau_allmodels[model_number][0])
duration_z, duration_t, reion_completed = \
calc_duration(reion_data["z_array_reion_allmodels"],
reion_data["lookback_array_reion_allmodels"],
master_mass_frac, reion_plots["duration_definition"])
print("Plotting contours of constant tau.")
reionplot.plot_tau_contours(tau_highz, reion_completed,
reion_plots["alpha_beta_limits"],
output_dir, "tau_contours", output_format)
print("Plotting contours of constant reionization duration.")
reionplot.plot_duration_contours(duration_t, reion_completed,
reion_plots["alpha_beta_limits"],
output_dir, "duration_contours",
output_format)
if reion_plots["optical_depth"] and rank == 0:
# tau is used for multiple plots. So check if we need to calculate it.
try:
tau_allmodels
except NameError:
tau_allmodels = calc_tau(reion_data["z_array_reion_allmodels"],
reion_data["cosmology_allmodels"],
reion_data["helium_allmodels"],
master_mass_frac)
print("Plotting the optical depth.")
reionplot.plot_tau(reion_data["z_array_reion_allmodels"],
reion_data["lookback_array_reion_allmodels"],
reion_data["cosmology_allmodels"],
reion_data["t_bigbang_allmodels"],
tau_allmodels,
model_tags, output_dir, "optical_depth",
output_format)
if reion_plots["optical_depth"] and reion_plots["history"] and rank == 0:
print("Plotting the combined optical depth/ionization history.")
reionplot.plot_combined_history_tau(reion_data["z_array_reion_allmodels"],
reion_data["lookback_array_reion_allmodels"],
reion_data["cosmology_allmodels"],
reion_data["t_bigbang_allmodels"],
master_mass_frac, tau_allmodels,
model_tags, output_dir,
"history_tau", output_format)
if reion_plots["optical_depth"] and reion_plots["nion"] and rank == 0:
print("Plotting the combined optical depth/ionizing emissivity.")
reionplot.plot_combined_nion_tau(reion_data["z_array_reion_allmodels"],
reion_data["lookback_array_reion_allmodels"],
reion_data["cosmology_allmodels"],
reion_data["t_bigbang_allmodels"],
master_nion,
reion_data["nion_factor_allmodels"],
tau_allmodels, model_tags, output_dir,
"nion_tau", output_format)
if reion_plots["ps_scales"] or reion_plots["ps_scales_beta"]:
print("Gathering the 21cm Power Spectra across processors")
k, P21, PHII = gather_ps(rank, size, comm,
reion_data["k_allmodels"],
reion_data["P21_allmodels"],
reion_data["PHII_allmodels"],
reion_data["first_snap_allmodels"],
reion_data["last_snap_allmodels"])
if rank == 0:
print("Plotting the large scale power as a function of small "
"scale.")
if reion_plots["ps_scales_beta"]:
calc_beta = True
else:
calc_beta = False
# Now that we have all the PS on the master rank, calculate the
# amplitude at the specified scales.
scale_power_dict = calc_scale_power(k, P21, PHII,
reion_data["z_array_reion_allmodels"],
reion_plots["small_scale_def"],
reion_plots["large_scale_def"],
reion_plots["small_scale_err"],
reion_plots["large_scale_err"],
calc_beta=calc_beta)
k_small_scale = scale_power_dict["k_small_scale"]
k_large_scale = scale_power_dict["k_large_scale"]
P21_small_scale = scale_power_dict["P21_small_scale"]
P21_large_scale = scale_power_dict["P21_large_scale"]
PHII_small_scale = scale_power_dict["PHII_small_scale"]
PHII_large_scale = scale_power_dict["PHII_large_scale"]
if reion_plots["ps_scales"]:
reionplot.plot_ps_scales(P21_small_scale,
P21_large_scale, master_mass_frac,
reion_data["z_array_reion_allmodels"],
reion_plots["fixed_XHI_values"],
reion_plots["ps_scales_z"],
reion_plots["small_scale_def"],
reion_plots["large_scale_def"],
reion_plots["small_scale_err"],
reion_plots["large_scale_err"],
model_tags, output_dir, "ps_scales",
output_format)
if reion_plots["ps_scales_beta"]:
P21_beta = scale_power_dict["P21_beta"]
P21_beta_error = scale_power_dict["P21_beta_error"]
PHII_beta = scale_power_dict["PHII_beta"]
reionplot.plot_ps_beta(P21_beta, P21_beta_error,
reion_data["z_array_reion_allmodels"],
reion_data["lookback_array_reion_allmodels"],
reion_data["cosmology_allmodels"],
reion_data["t_bigbang_allmodels"],
reion_plots["small_scale_def"],
reion_plots["large_scale_def"],
model_tags, output_dir,
"ps_scales_beta", output_format)
if reion_plots["slices_fixed_XHI"] and rank == 0:
print("Plotting slices at fixed XHI fractions.")
reionplot.plot_slices_XHI(reion_data["z_array_reion_allmodels"],
reion_data["cosmology_allmodels"],
master_mass_frac,
reion_data["XHII_fbase_allmodels"],
reion_data["XHII_precision_allmodels"],
reion_data["GridSize_allmodels"],
reion_data["boxsize_allmodels"],
reion_data["first_snap_allmodels"],
reion_plots["fixed_XHI_values"],
reion_plots["cut_slice"],
reion_plots["cut_thickness"],
model_tags, output_dir, "slices_XHI",
output_format)
if reion_plots["bubble_size"] and rank == 0:
print("Determining bubble sizes at fixed XHI.")
reionplot.determine_bubble_size(reion_data["z_array_reion_allmodels"],
master_mass_frac,
reion_data["first_snap_allmodels"],
reion_data["GridSize_allmodels"],
reion_data["boxsize_allmodels"],
reion_plots["fixed_XHI_values"],
model_tags, output_dir)
if reion_plots["zreion_dens_cross"] and rank == 0:
print("Calculating the zreion-density cross correlation.")
k, crosspspec, crosscorr, bias = \
zreion_dens_cross(reion_data["density_fbase_allmodels"],
reion_data["density_precision_allmodels"],
reion_data["zreion_path_allmodels"],
reion_data["GridSize_allmodels"],
reion_data["boxsize_allmodels"],
reion_data["last_snap_allmodels"])
reionplot.plot_zreion_dens_cross(k, crosscorr, bias, model_tags,
output_dir, "zreion_dens_crosscorr",
output_format)
if reion_plots["dens_ion_contours"] and rank == 0:
print("Plotting contours of density-ionization.")
reionplot.plot_dens_reion_contours(master_mass_frac,
reion_data["XHII_fbase_allmodels"],
reion_data["XHII_precision_allmodels"],
reion_data["density_fbase_allmodels"],
reion_data["density_precision_allmodels"],
reion_data["GridSize_allmodels"],
reion_data["first_snap_allmodels"],
reion_plots["fixed_XHI_values"],
model_tags, output_dir,
"dens_ion_contours", output_format)
if reion_plots["dens_zreion_contours"] and rank == 0:
print("Plotting contours of density-zreion.")
reionplot.plot_dens_zreion_contours(reion_data["density_fbase_allmodels"],
reion_data["density_precision_allmodels"],
reion_data["zreion_path_allmodels"],
reion_data["GridSize_allmodels"],
reion_data["last_snap_allmodels"],
model_tags, output_dir,
"dens_zreion_contours", output_format)
| 5,350,015 |
def transcriptIterator(transcriptsBedStream, transcriptDetailsBedStream):
""" Iterates over the transcripts detailed in the two streams, producing
Transcript objects. Streams are any iterator that returns bedlines or empty
strings.
"""
transcriptsAnnotations = {}
for tokens in tokenizeBedStream(transcriptDetailsBedStream):
assert (len(tokens) == 4 or len(tokens) == 9) # 9 if it has color data.
tA = TranscriptAnnotation(
ChromosomeInterval(tokens[0], tokens[1], tokens[2], None),
tokens[3].split('/')[-1], tokens[3].split('/')[:-1])
# normalizeAnnotation(tA) # removed this to improve xml
key = (tA.name, tA.chromosomeInterval.chromosome)
if key not in transcriptsAnnotations:
transcriptsAnnotations[key] = []
transcriptsAnnotations[key].append(tA)
for tokens in tokenizeBedStream(transcriptsBedStream):
assert len(tokens) == 12
# Transcript
name = tokens[3]
# Get the chromosome interval
assert tokens[5] in ['+', '-']
cI = ChromosomeInterval(tokens[0], tokens[1], tokens[2], tokens[5] == '+')
# Get the exons
def getExons(exonNumber, blockSizes, blockStarts):
assert exonNumber == len(blockSizes)
assert exonNumber == len(blockStarts)
return [ChromosomeInterval(
cI.chromosome, cI.start + int(blockStarts[i]),
cI.start + int(blockStarts[i]) + int(blockSizes[i]), cI.strand)
for i in range(exonNumber)]
exons = getExons(int(tokens[9]),
tokens[10].split(','), tokens[11].split(','))
# Get the name annotations
annotations = []
key = (name, cI.chromosome)
if key in transcriptsAnnotations:
annotations = transcriptsAnnotations[key]
filteredAnnotations = []
for tA in annotations:
if cI.contains(tA.chromosomeInterval):
tA.chromosomeInterval.strand = cI.strand
filteredAnnotations.append(tA)
yield Transcript(
cI, name, exons, filteredAnnotations,
int(tokens[4]), int(tokens[6]),
int(tokens[7]), tokens[8])
| 5,350,016 |
def convert_check_filter(tok):
"""Convert an input string into a filter function.
The filter function accepts a qualified python identifier string
and returns a bool.
The input can be a regexp or a simple string. A simple string must
match a component of the qualified name exactly. A regexp is
matched against the entire qualified name.
Matches are case-insensitive.
Examples::
convert_check_filter('foo')('a.foo.b') == True
convert_check_filter('foo')('a.foobar') == False
convert_check_filter('foo.*')('a.foobar') == False
convert_check_filter('foo.*')('foobar') == True
"""
tok = tok.lower()
if '+' in tok or '*' in tok:
return re.compile(tok, re.I).match
else:
toklist = tok.split('.')
def func(name):
chunks = name.lower().split('.')
if len(toklist) > len(chunks):
return False
for i in range(len(chunks)):
if chunks[i:i + len(toklist)] == toklist:
return True
return False
return func
| 5,350,017 |
def region_stats(x, r_start, r_end):
"""
Generate basic stats on each region. Return a dict for easy insertion into a DataFrame.
"""
stats = Munch()
stats["start"] = r_start
stats["end"] = r_end
stats["l"] = r_end - r_start
stats["min"] = np.min(x[r_start:r_end])
stats["max"] = np.max(x[r_start:r_end])
stats["rng"] = stats["max"] - stats["min"]
stats["mean"] = np.mean(x[r_start:r_end])
stats["std"] = np.std(x[r_start:r_end])
stats["var"] = np.var(x[r_start:r_end])
stats["med"] = np.median(x[r_start:r_end])
stats["mad"] = scistat.median_abs_deviation(x[r_start:r_end])
return stats
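
# Minimal usage sketch (``Munch`` and ``scistat`` come from the module imports this function
# already relies on):
import numpy as np

signal = np.sin(np.linspace(0, 4 * np.pi, 200))
stats = region_stats(signal, 50, 150)
print(stats["mean"], stats["rng"], stats["mad"])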
| 5,350,018 |
def test_value_with_brackets():
"""
Value is a dict
Expected Status : True
"""
    log.info("Executing test_value_with_brackets")
command_line = {"cmdline" : 'mesos-journald-logger --journald_labels={"labels":[{"key":"DCOS_PACKAGE_IS_FRAMEWORK","value":"false"}]} --logrotate_max_size={"size":"50MB"}'}
key_aliases = ["journald_labels"]
params = {
'key_aliases': key_aliases,
'delimiter': '='
}
val = command_line_parser.parse_cmdline(params=params, chained=command_line)
log.debug("return value is %s", val)
expected_value = (True, ['{"labels":[{"key":"DCOS_PACKAGE_IS_FRAMEWORK","value":"false"}]}'])
assert val == expected_value
| 5,350,019 |
def resnet_v1_generator(block_fn, layers, num_classes,
data_format='channels_first', dropblock_keep_probs=None,
dropblock_size=None):
"""Generator for ResNet v1 models.
Args:
block_fn: `function` for the block to use within the model. Either
`residual_block` or `bottleneck_block`.
layers: list of 4 `int`s denoting the number of blocks to include in each
of the 4 block groups. Each group consists of blocks that take inputs of
the same resolution.
num_classes: `int` number of possible classes for image classification.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
dropblock_keep_probs: `list` of 4 elements denoting keep_prob of DropBlock
for each block group. None indicates no DropBlock for the corresponding
block group.
dropblock_size: `int`: size parameter of DropBlock.
Returns:
Model `function` that takes in `inputs` and `is_training` and returns the
output `Tensor` of the ResNet model.
  Raises:
    ValueError: if dropblock_keep_probs is neither None nor a list of length 4.
"""
if dropblock_keep_probs is None:
dropblock_keep_probs = [None] * 4
if not isinstance(dropblock_keep_probs,
list) or len(dropblock_keep_probs) != 4:
raise ValueError('dropblock_keep_probs is not valid:', dropblock_keep_probs)
def model(inputs, is_training):
"""Creation of the model graph."""
inputs = conv2d_fixed_padding(
inputs=inputs, filters=64, kernel_size=7, strides=2,
data_format=data_format)
inputs = tf.identity(inputs, 'initial_conv')
inputs = batch_norm_relu(inputs, is_training, data_format=data_format)
inputs = tf.layers.max_pooling2d(
inputs=inputs, pool_size=3, strides=2, padding='SAME',
data_format=data_format)
inputs = tf.identity(inputs, 'initial_max_pool')
inputs = block_group(
inputs=inputs, filters=64, block_fn=block_fn, blocks=layers[0],
strides=1, is_training=is_training, name='block_group1',
data_format=data_format, dropblock_keep_prob=dropblock_keep_probs[0],
dropblock_size=dropblock_size)
inputs = block_group(
inputs=inputs, filters=128, block_fn=block_fn, blocks=layers[1],
strides=2, is_training=is_training, name='block_group2',
data_format=data_format, dropblock_keep_prob=dropblock_keep_probs[1],
dropblock_size=dropblock_size)
inputs = block_group(
inputs=inputs, filters=256, block_fn=block_fn, blocks=layers[2],
strides=2, is_training=is_training, name='block_group3',
data_format=data_format, dropblock_keep_prob=dropblock_keep_probs[2],
dropblock_size=dropblock_size)
inputs = block_group(
inputs=inputs, filters=512, block_fn=block_fn, blocks=layers[3],
strides=2, is_training=is_training, name='block_group4',
data_format=data_format, dropblock_keep_prob=dropblock_keep_probs[3],
dropblock_size=dropblock_size)
# The activation is 7x7 so this is a global average pool.
# TODO(huangyp): reduce_mean will be faster.
pool_size = (inputs.shape[1], inputs.shape[2])
inputs = tf.layers.average_pooling2d(
inputs=inputs, pool_size=pool_size, strides=1, padding='VALID',
data_format=data_format)
inputs = tf.identity(inputs, 'final_avg_pool')
inputs = tf.reshape(
inputs, [-1, 2048 if block_fn is bottleneck_block else 512])
inputs = tf.layers.dense(
inputs=inputs,
units=num_classes,
kernel_initializer=tf.random_normal_initializer(stddev=.01))
inputs = tf.identity(inputs, 'final_dense')
return inputs
model.default_image_size = 224
return model
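
# Hedged TF1-style usage sketch (``bottleneck_block`` and the helper layers are assumed to be
# defined in this module; layers=[3, 4, 6, 3] corresponds to a ResNet-50):
# resnet50 = resnet_v1_generator(bottleneck_block, [3, 4, 6, 3], num_classes=1000,
#                                data_format='channels_last')
# logits = resnet50(images, is_training=True)  # ``images`` is a [batch, H, W, 3] float tensor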
| 5,350,020 |
def read_hotw(filename):
"""
Read cross-section file fetched from HITRAN-on-the-Web.
The format of the file line must be as follows:
nu, coef
Other lines are omitted.
"""
import sys
f = open(filename,'r')
nu = []
coef = []
for line in f:
pars = line.split()
try:
nu.append(float(pars[0]))
coef.append(float(pars[1]))
except:
if False:
print(sys.exc_info())
else:
pass
return array(nu),array(coef)
| 5,350,021 |
def _sql_type(ptype):
"""Convert python type to SQL type"""
if "Union" in ptype.__class__.__name__:
assert len(ptype.__args__) == 2, "Cannot create sql column with more than one type."
        assert type(None) in ptype.__args__, "Optional sql columns must allow None as one of the two types."
return f"{ptype.__args__[0].__name__} NULL"
elif ptype in SQLTypes.__dict__.values() and hasattr(ptype, "__name__"):
return f"{ptype.__name__} NOT NULL"
else:
raise ValueError(f"Cannot parse type {ptype}.")
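
# Illustration of the Optional branch above (on Python versions where ``Optional[int]`` is
# represented as a Union alias, which is what the ``"Union"`` class-name check relies on):
from typing import Optional

print(_sql_type(Optional[int]))  # -> "int NULL"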
| 5,350,022 |
def rescale_data(data: np.ndarray,
option: str,
args: t.Optional[t.Dict[str, t.Any]] = None) -> np.ndarray:
"""Rescale numeric fitted data accordingly to user select option.
Args:
data (:obj:`np.ndarray`): data to rescale.
option (:obj:`str`): rescaling strategy. Must be one in ``VALID_RESCA-
LE`` attribute.
args (:obj:`dict`, optional): additional arguments for the scaler. All
scaler used are from ``sklearn`` package, so you should consult
their documentation for a complete list of available arguments to
user customization. The used scalers for each available ``option``
are:
- ``min-max``: ``sklearn.preprocessing.MinMaxScaler``
- ``standard``: ``sklearn.preprocessing.StandardScale``
- ``robust``: ``sklearn.preprocessing.RobustScaler``
Returns:
np.ndarray: scaled ``data`` based in ``option`` correspondent strategy.
Raises:
ValueError: if ``option`` is not in ``VALID_RESCALE``.
Any exception caused by arguments from ``args`` into the
scaler model is also raised by this function.
"""
if option not in VALID_RESCALE:
raise ValueError("Unknown data rescaling option '{0}'. Please choose "
"one value among {1}".format(option, VALID_RESCALE))
if not args:
args = {}
scaler_model = _RESCALE_SCALERS.get(option, "min-max")(**args)
return scaler_model.fit_transform(data.astype(float))
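
# Minimal usage sketch (relies on the module-level VALID_RESCALE/_RESCALE_SCALERS mappings this
# function already uses, which wrap the sklearn scalers listed in the docstring):
import numpy as np

data = np.array([[1.0], [2.0], [4.0]])
scaled = rescale_data(data, "min-max")  # each column is rescaled to [0, 1]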
| 5,350,023 |
def extract_labels(text, spacy_model):
"""Extract entities using libratom.
Returns: core.Label list
"""
try:
document = spacy_model(text)
except ValueError:
logger.exception(f"spaCy error")
raise
labels = set()
for entity in document.ents:
label, _ = Label.objects.get_or_create(type=Label.IMPORTER, name=entity.label_)
labels.add(label)
return list(labels)
| 5,350,024 |
def variant_option(command: Callable[..., None]) -> Callable[..., None]:
"""
An option decorator for a DC/OS variant.
"""
function = click.option(
'--variant',
type=click.Choice(['auto', 'oss', 'enterprise']),
default='auto',
help=(
'Choose the DC/OS variant. '
'If the variant does not match the variant of the given '
'installer, an error will occur. '
'Using "auto" finds the variant from the installer. '
'Finding the variant from the installer takes some time and so '
'using another option is a performance optimization.'
),
)(command) # type: Callable[..., None]
return function
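
# Usage sketch: applying the decorator to a (hypothetical) click command adds a ``--variant``
# option that is passed to the command as the ``variant`` keyword argument.
@click.command()
@variant_option
def create(variant: str) -> None:
    """Example command that receives the chosen DC/OS variant."""
    click.echo(variant)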
| 5,350,025 |
def GetSegByName(name):
"""
@return Address of the first byte in the Segment
with the provided name, or BADADDR
"""
for Segment in ida.Segments():
if ida.SegName(Segment) == name:
return Segment
return ida.BADADDR
| 5,350,026 |
def sample_points_from_plateaus(all_plateaus, mode, stack_size=10, n_samples=1):
"""
Samples points from each plateau in each video
:param all_plateaus: dictionary containing all plateaus, keys are plateaus's ids, values are the plateau objects
:param mode: either `flow` or `rgb`
:param stack_size: optical flow stack size
:param n_samples: number of samples you want to draw from each plateau
:return: sampled_points, dictionary whose keys are video ids and whose values are dictionary containing the sampled
points as values as the plateaus ids as keys
"""
sampled_points = {}
h_stack_c = np.ceil(stack_size / 2)
for g_id, g in all_plateaus.items():
if mode == 'flow':
x_range = np.arange(h_stack_c+1, g.n - h_stack_c, dtype=np.int32)
else:
x_range = None # will take the whole x later for sampling
if g.video not in sampled_points:
sampled_points[g.video] = {}
sampled_points[g.video][g_id] = g.sample_points(n_samples, x_range=x_range)
return sampled_points
| 5,350,027 |
def debug(debug):
""" Prints debug to stdout, flushing the output. """
print("DEBUG: {}".format(debug), flush = True)
| 5,350,028 |
def mobile_user_meeting_list(request):
"""
    Return the user's meeting list.
:param request:
:return:
"""
dbs = request.dbsession
user_id = request.POST.get('user_id', '')
start_date = request.POST.get('start_date', '')
end_date = request.POST.get('end_date', '')
error_msg = ''
    if not user_id:
        error_msg = 'User ID cannot be empty!'
    elif not start_date:
        error_msg = 'Start date cannot be empty!'
    elif not end_date:
        error_msg = 'End date cannot be empty!'
else:
meetings = mob_find_user_meetings(dbs, user_id, start_date, end_date)
if error_msg:
json_str = {
'status': False,
'meeting': '',
'error_msg': error_msg
}
else:
json_str = {
'status': True,
'meeting': meetings,
'error_msg':error_msg
}
resp = Response()
resp.headers['Access-Control-Allow-Origin'] = '*'
resp.json = json_str
return resp
| 5,350,029 |
def get_current_thread_cpu_time():
"""
<Purpose>
Gets the total CPU time for the currently executing thread.
<Exceptions>
An AssertionError will be raised if the underlying system call fails.
<Returns>
A floating amount of time in seconds.
"""
# Get the current thread handle
current_thread = _mach_thread_self()
# Allocate a structure
thread_info = thread_basic_info()
# Structure size
struct_size = ctypes.c_uint(THREAD_BASIC_INFO_SIZE)
# Make the system call
result = _thread_info(current_thread, THREAD_BASIC_INFO,ctypes.byref(thread_info), ctypes.byref(struct_size))
# Sum up the CPU usage
cpu_time = thread_info.user_time.seconds + thread_info.user_time.microseconds / 1000000.0
cpu_time += thread_info.system_time.seconds + thread_info.system_time.microseconds / 1000000.0
    # Safety check: the thread_info call should have returned 0 (KERN_SUCCESS)
assert(result == 0)
# Return the structure
return cpu_time
| 5,350,030 |
def get_data_upload_id(jwt: str) -> str:
"""Function to get a temporary upload ID from
DAFNI data upload API
Args:
jwt (str): Users JWT
Returns:
str: Temporary Upload ID
"""
url = f"{DATA_UPLOAD_API_URL}/nid/upload/"
data = {"cancelToken": {"promise": {}}}
return dafni_post_request(url, jwt, data, allow_redirect=True)
| 5,350,031 |
def StationMagnitudeContribution_TypeInfo():
"""StationMagnitudeContribution_TypeInfo() -> RTTI"""
return _DataModel.StationMagnitudeContribution_TypeInfo()
| 5,350,032 |
def _get_operations(rescale=0.003921, normalize_weight=0.48):
"""Get operations."""
operation_0 = {
'tensor_op_module': 'minddata.transforms.c_transforms',
'tensor_op_name': 'RandomCrop',
'weight': [32, 32, 4, 4, 4, 4],
'padding_mode': "constant",
'pad_if_needed': False,
'fill_value': 0
}
operation_1 = {
'tensor_op_module': 'minddata.transforms.c_transforms',
'tensor_op_name': 'Rescale',
'rescale': rescale,
'shift': 0,
'num_classes': 10
}
operation_2 = {
'tensor_op_module': 'minddata.transforms.c_transforms',
'tensor_op_name': 'Normalize',
'weights': [normalize_weight]
}
return [operation_0, operation_1, operation_2]
| 5,350,033 |
def itm_command(
ticker: str = None,
):
"""Options ITM"""
# Check for argument
if ticker is None:
raise Exception("Stock ticker is required")
dates = yfinance_model.option_expirations(ticker)
if not dates:
raise Exception("Stock ticker is invalid")
current_price = yfinance_model.get_price(ticker)
df_date, df_cotm, df_citm, df_potm, df_pitm = [], [], [], [], []
for date in dates:
df_date.append(date)
options = yfinance_model.get_option_chain(ticker, date)
call_oi = options.calls.set_index("strike")["openInterest"].fillna(0)
put_oi = options.puts.set_index("strike")["openInterest"].fillna(0)
df_cotm.append(int(call_oi[call_oi.index >= current_price].sum()))
df_citm.append(int(call_oi[call_oi.index <= current_price].sum()))
df_pitm.append(int(put_oi[put_oi.index >= current_price].sum()))
df_potm.append(int(put_oi[put_oi.index <= current_price].sum()))
# Calculate the total per column
df_date.append("<b>Total</b>")
total = [df_citm, df_cotm, df_pitm, df_potm]
for x in total:
x.append(sum(x))
# Create the DataFrame
df = pd.DataFrame(
{
"Expiry": df_date,
"Calls ITM": df_citm,
"Calls OTM": df_cotm,
"Puts ITM": df_pitm,
"Puts OTM": df_potm,
}
)
formats = {
"Calls ITM": "{:,}",
"Calls OTM": "{:,}",
"Puts ITM": "{:,}",
"Puts OTM": "{:,}",
}
for col, value in formats.items():
df[col] = df[col].map(lambda x: value.format(x)) # pylint: disable=W0640
df.set_index("Expiry", inplace=True)
fig = imps.plot_df(
df,
fig_size=(600, (35 * len(df.index))),
col_width=[3, 2.5],
tbl_header=imps.PLT_TBL_HEADER,
tbl_cells=imps.PLT_TBL_CELLS,
font=imps.PLT_TBL_FONT,
row_fill_color=imps.PLT_TBL_ROW_COLORS,
paper_bgcolor="rgba(0, 0, 0, 0)",
)
fig.update_traces(
cells=dict(
align=["center", "right"],
font=dict(
color=["white"]
+ [imps.PLT_TBL_INCREASING] * 2
+ [imps.PLT_TBL_DECREASING] * 2
),
),
)
imagefile = imps.save_image("opt-itm.png", fig)
return {
"title": f"{ticker.upper()} Options: In The Money",
"imagefile": imagefile,
}
| 5,350,034 |
def click(context):
"""
Locate the desired hyperlink
"""
context.browser.find_element_by_partial_link_text('2').click()
# context.browser.get('http://localhost:5000/genres_details')
| 5,350,035 |
def create_nx_suite(seed=0, rng=None):
"""
returns a dict of graphs generated by networkx for testing,
designed to be used in a pytest fixture
"""
if rng is None:
rng = np.random.RandomState(seed)
out_graphs = {}
for N in [1, 2, 4, 8, 16, 32, 64, 128]:
for dtype in [np.bool, np.int32, np.float32, np.complex64]:
basename = f"{N}_{str(dtype)[8:-2]}"
name = f"ladder_{basename}"
out_graphs[name] = [gen_ladder(N, dtype)]
SAMPLE_N = 5
# smp = [(4,.1),(4,.5),(4,.7),(7,.1),(7,.5),(16,.1),(16,.5),(32,.1),(100,.1)]
# for N, prob_edge in smp:
for N in [4,7,16,32,100]:
for prob_edge in [.1,.5,.7]:
dtype = np.bool
name = f"random_lobster_{prob_edge:.1f}_{str(dtype)[8:-2]}_{N}"
out_graphs[name] = []
for i in range(SAMPLE_N):
ng = nx.generators.random_graphs.random_lobster(N,prob_edge,\
prob_edge,rng)
if ng.number_of_nodes() == 0:
continue
t = from_nx(ng,adj_type=dtype)
out_graphs[name].append(t)
dtype = np.int32
name = f"random_lobster_{prob_edge:.1f}_{str(dtype)[8:-2]}_{N}"
out_graphs[name] = []
for i in range(SAMPLE_N):
edge_weights = rng.randint(1, rng.randint(2, max(N//2, 3)),
size=rng.randint(1, N//2))
ng = nx.generators.random_graphs.random_lobster(N,prob_edge,\
prob_edge,rng)
if ng.number_of_nodes() == 0:
continue
t = from_nx(ng,adj_type=dtype)
for e1, e2 in t.edges():
t[e1, e2] = rng.choice(edge_weights)
out_graphs[name].append(t)
dtype = np.float64
name = f"random_lobster_{prob_edge:.1f}_{str(dtype)[8:-2]}_{N}"
out_graphs[name] = []
for i in range(SAMPLE_N):
edge_weights = rng.rand(rng.randint(1, N//2)) + 0.5
ng = nx.generators.random_graphs.random_lobster(N,prob_edge,\
prob_edge,rng)
if ng.number_of_nodes() == 0:
continue
t = from_nx(ng,adj_type=dtype)
for e1, e2 in t.edges():
t[e1, e2] = rng.choice(edge_weights)
out_graphs[name].append(t)
return out_graphs
| 5,350,036 |
def test_get_countries_by_country_codes(nigeria, egypt, kenya):
"""
Test that multiple countries can be retrieved by multiple country codes.
"""
countries = rapi.get_countries_by_country_codes(["ng", "eg", "ken"])
assert sorted(countries) == sorted([nigeria, egypt, kenya])
| 5,350,037 |
def main(): # pragma: no cover
"""test example for AnimationWindow"""
# kills the program when you hit Cntl+C from the command line
# doesn't save the current state as presumably there's been an error
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
import sys
# Someone is launching this directly
# Create the QApplication
app = QApplication(sys.argv)
#The Main window
#from pyNastran.gui.menus.legend.animation import AnimationWindow
data2 = {
'font_size' : 8,
'icase_fringe' : 1,
'icase_disp' : 2,
'icase_vector' : 3,
'title' : 'cat',
'time' : 2,
'frames/sec' : 30,
'resolution' : 1,
'iframe' : 0,
'is_scale' : False,
'dirname' : os.getcwd(),
'scale' : 2.0,
'default_scale' : 10,
'arrow_scale' : 3.0,
'default_arrow_scale' : 30,
#'phase' : 0.,
'phase' : None,
'default_phase' : 120.,
#'default_phase' : None,
#'start_time' : 0.,
#'end_time' : 0.,
'default_time' : 0.,
'icase_start' : 10,
'icase_delta' : 3,
'stress_min' : 0.,
'stress_max' : 1000.,
}
data2['phase'] = 0. # uncomment for phase
form = [
[u'Geometry', None, [
(u'NodeID', 0, []),
(u'ElementID', 1, []),
(u'PropertyID', 2, []),
(u'MaterialID', 3, []),
(u'E', 4, []),
(u'Element Checks', None, [
(u'ElementDim', 5, []),
(u'Min Edge Length', 6, []),
(u'Min Interior Angle', 7, []),
(u'Max Interior Angle', 8, [])],
),],
],
]
#[0, 1, 2, 3, 4, 5, 6, 7, 8]
main_window = AnimationWindow(data2, fringe_cases=form)
main_window.show()
# Enter the main loop
app.exec_()
| 5,350,038 |
def test_egg(virtualenv, cache_dir, use_static_requirements, src_dir):
"""
test building and installing a bdist_egg package
"""
# TODO: We should actually disallow generating an egg file
# Let's create the testing virtualenv
with virtualenv as venv:
ret = venv.run(
venv.venv_python, "-c", "import setuptools; print(setuptools.__version__)",
)
setuptools_version = ret.stdout.strip()
ret = venv.run(venv.venv_python, "-m", "easy_install", "--version", check=False)
if ret.exitcode != 0:
pytest.skip(
"Setuptools version, {}, does not include the easy_install module".format(
setuptools_version
)
)
venv.run(venv.venv_python, "setup.py", "clean", cwd=src_dir)
# Setuptools installs pre-release packages if we don't pin to an exact version
# Let's download and install requirements before, running salt's install test
venv.run(
venv.venv_python,
"-m",
"pip",
"download",
"--dest",
str(cache_dir),
src_dir,
)
packages = []
for fname in cache_dir.iterdir():
if (
fname.name.startswith("pycurl")
and salt.utils.platform.is_windows()
and not use_static_requirements
):
# On windows, the latest pycurl release, 7.43.0.6 at the time of writing,
# does not have wheel files uploaded, so, delete the downloaded source
# tarball and will later force pycurl==7.43.0.5 to be pre-installed before
# installing salt
fname.unlink()
continue
packages.append(fname)
venv.install(*[str(pkg) for pkg in packages])
for package in packages:
package.unlink()
        # Looks like, at least on windows, setuptools also gets downloaded as a salt dependency.
# Let's check and see if this newly installed version also has easy_install
ret = venv.run(
venv.venv_python, "-c", "import setuptools; print(setuptools.__version__)",
)
setuptools_version = ret.stdout.strip()
ret = venv.run(venv.venv_python, "-m", "easy_install", "--version", check=False)
if ret.exitcode != 0:
pytest.skip(
"Setuptools version, {}, does not include the easy_install module".format(
setuptools_version
)
)
if salt.utils.platform.is_windows() and not use_static_requirements:
# Like mentioned above, install pycurl==7.43.0.5
# However, on windows, the latest pycurl release, 7.43.0.6 at the time of writing,
# does not have wheel files uploaded, so, we force pycurl==7.43.0.5 to be
# pre-installed before installing salt
venv.install("pycurl==7.43.0.5")
venv.run(
venv.venv_python,
"setup.py",
"bdist_egg",
"--dist-dir",
str(cache_dir),
cwd=src_dir,
)
venv.run(venv.venv_python, "setup.py", "clean", cwd=src_dir)
salt_generated_package = list(cache_dir.glob("*.egg"))
if not salt_generated_package:
pytest.fail("Could not find the generated egg file")
salt_generated_package = salt_generated_package[0]
# Assert generate wheel version matches what salt reports as its version
egg_ver = [
x
for x in salt_generated_package.name.split("-")
if re.search(r"^\d.\d*", x)
][0]
egg_ver_cmp = egg_ver.replace("_", "-")
assert egg_ver_cmp == salt.version.__version__, "{} != {}".format(
egg_ver_cmp, salt.version.__version__
)
# We cannot pip install an egg file, let's go old school
venv.run(venv.venv_python, "-m", "easy_install", str(salt_generated_package))
# Let's ensure the version is correct
cmd = venv.run(venv.venv_python, "-m", "pip", "list", "--format", "json")
for details in json.loads(cmd.stdout):
if details["name"] != "salt":
continue
installed_version = details["version"]
break
else:
pytest.fail("Salt was not found installed")
# Let's compare the installed version with the version salt reports
assert installed_version == salt.version.__version__, "{} != {}".format(
installed_version, salt.version.__version__
)
# Let's also ensure we have a salt/_version.py from the installed salt egg
subdir = [
"lib",
"python{}.{}".format(*sys.version_info),
"site-packages",
]
if salt.utils.platform.is_windows():
subdir.pop(1)
site_packages_dir = pathlib.Path(venv.venv_dir)
site_packages_dir = site_packages_dir.joinpath(*subdir)
assert site_packages_dir.is_dir()
installed_salt_path = list(site_packages_dir.glob("salt*.egg"))
if not installed_salt_path:
pytest.fail("Failed to find the installed salt path")
log.debug("Installed salt path glob matches: %s", installed_salt_path)
installed_salt_path = installed_salt_path[0] / "salt"
assert installed_salt_path.is_dir()
salt_generated_version_file_path = installed_salt_path / "_version.py"
assert salt_generated_version_file_path.is_file(), "{} is not a file".format(
salt_generated_version_file_path
)
| 5,350,039 |
def _nearest_neighbor_features_per_object_in_chunks(
reference_embeddings_flat, query_embeddings_flat, reference_labels_flat,
ref_obj_ids, k_nearest_neighbors, n_chunks):
"""Calculates the nearest neighbor features per object in chunks to save mem.
Uses chunking to bound the memory use.
Args:
reference_embeddings_flat: Tensor of shape [n, embedding_dim],
the embedding vectors for the reference frame.
query_embeddings_flat: Tensor of shape [m, embedding_dim], the embedding
vectors for the query frames.
reference_labels_flat: Tensor of shape [n], the class labels of the
reference frame.
ref_obj_ids: int tensor of unique object ids in the reference labels.
k_nearest_neighbors: Integer, the number of nearest neighbors to use.
n_chunks: Integer, the number of chunks to use to save memory
(set to 1 for no chunking).
Returns:
nn_features: A float32 tensor of nearest neighbor features of shape
[m, n_objects, feature_dim].
"""
chunk_size = tf.cast(tf.ceil(tf.cast(tf.shape(query_embeddings_flat)[0],
tf.float32) / n_chunks), tf.int32)
wrong_label_mask = tf.not_equal(reference_labels_flat,
ref_obj_ids[:, tf.newaxis])
all_features = []
for n in range(n_chunks):
if n_chunks == 1:
query_embeddings_flat_chunk = query_embeddings_flat
else:
chunk_start = n * chunk_size
chunk_end = (n + 1) * chunk_size
query_embeddings_flat_chunk = query_embeddings_flat[chunk_start:chunk_end]
# Use control dependencies to make sure that the chunks are not processed
# in parallel which would prevent any peak memory savings.
with tf.control_dependencies(all_features):
features = _nn_features_per_object_for_chunk(
reference_embeddings_flat, query_embeddings_flat_chunk,
wrong_label_mask, k_nearest_neighbors
)
all_features.append(features)
if n_chunks == 1:
nn_features = all_features[0]
else:
nn_features = tf.concat(all_features, axis=0)
return nn_features
| 5,350,040 |
def heap_pop(heap):
"""
Wrapper around heapq's heappop method to support updating priorities of
items in the queue.
Main difference here is that we toss out any queue entries that have been
updated since insertion.
"""
while len(heap) > 0:
pri_board_tup = heapq.heappop(heap)
board = pri_board_tup[1]
        if board is not None:
del ENTRY_FINDER[board]
return pri_board_tup
raise KeyError('Pop from empty queue :(')
| 5,350,041 |
def _variant_po_to_dict(tokens) -> CentralDogma:
"""Convert a PyParsing data dictionary to a central dogma abundance (i.e., Protein, RNA, miRNA, Gene).
:type tokens: ParseResult
"""
dsl = FUNC_TO_DSL.get(tokens[FUNCTION])
if dsl is None:
raise ValueError('invalid tokens: {}'.format(tokens))
concept = tokens[CONCEPT]
return dsl(
namespace=concept[NAMESPACE],
name=concept[NAME],
identifier=concept.get(IDENTIFIER),
xrefs=tokens.get(XREFS),
variants=[
_variant_to_dsl_helper(variant_tokens)
for variant_tokens in tokens[VARIANTS]
],
)
| 5,350,042 |
def project(name, param):
"""a tilemill project description, including a basic countries-of-the-world layer."""
return {
"bounds": [-180, -85.05112877980659, 180, 85.05112877980659],
"center": [0, 0, 2],
"format": "png",
"interactivity": False,
"minzoom": 0,
"maxzoom": 22,
"srs": "+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 "
"+y_0=0.0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs +over",
"Stylesheet": ["style.mss"],
"Layer": [
{
"id": "countries",
"name": "countries",
"srs": "+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 "
"+y_0=0.0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs +over",
"geometry": "polygon",
"Datasource": {
"file": "http://mapbox-geodata.s3.amazonaws.com/natural-earth-1.4.0/"
"cultural/10m-admin-0-countries.zip",
"type": "shape"
}
},
],
"scale": 1,
"metatile": 2,
"name": name,
"description": param['properties']['name'],
}
| 5,350,043 |
def writescript( fname, content ):
"""
same as writefile except the first line is removed if empty and the
resulting file is made executable to the owner
"""
m = pat_empty_line.match( content )
if m:
content = content[ m.end(): ]
writefile( fname, content )
perm = stat.S_IMODE( os.stat(fname)[stat.ST_MODE] )
perm = perm | stat.S_IXUSR
try:
os.chmod( fname, perm )
except Exception:
pass
| 5,350,044 |
def get_deepest():
"""Return tokens with largest liquidities.
Returns:
str: HTML-formatted message.
"""
url = config.URLS['deepest']
api_params = {'limit': 5,
'orderBy': 'usdLiquidity',
'direction': 'desc',
'key': POOLS_KEY
}
response = api_call(url, params=api_params)
formatted_response = ft.format_deepest(response['results'])
return formatted_response
| 5,350,045 |
def _grompp_str(op_name, gro_name, checkpoint_file=None):
"""Helper function, returns grompp command string for operation."""
mdp_file = signac.get_project().fn('mdp_files/{op}.mdp'.format(op=op_name))
cmd = '{gmx} grompp -f {mdp_file} -c {gro_file} {checkpoint} -o {op}.tpr -p'.format(
gmx=gmx_exec, mdp_file=mdp_file, op=op_name, gro_file=gro_name,
checkpoint='' if checkpoint_file is None else ('-t ' + checkpoint_file))
return workspace_command(cmd)
| 5,350,046 |
def run(
master_cls: typing.Type[master.Master],
make_parser: typing.Callable[[options.Options], argparse.ArgumentParser],
arguments: typing.Sequence[str],
extra: typing.Callable[[typing.Any], dict] = None
) -> master.Master: # pragma: no cover
"""
extra: Extra argument processing callable which returns a dict of
options.
"""
debug.register_info_dumpers()
opts = options.Options()
master = master_cls(opts)
parser = make_parser(opts)
# To make migration from 2.x to 3.0 bearable.
if "-R" in sys.argv and sys.argv[sys.argv.index("-R") + 1].startswith("http"):
print("-R is used for specifying replacements.\n"
"To use mitmproxy in reverse mode please use --mode reverse:SPEC instead")
try:
args = parser.parse_args(arguments)
except SystemExit:
arg_check.check()
sys.exit(1)
try:
opts.set(*args.setoptions, defer=True)
optmanager.load_paths(
opts,
os.path.join(opts.confdir, OPTIONS_FILE_NAME),
)
pconf = process_options(parser, opts, args)
server: typing.Any = None
if pconf.options.server:
try:
server = proxy.server.ProxyServer(pconf)
except exceptions.ServerException as v:
print(str(v), file=sys.stderr)
sys.exit(1)
else:
server = proxy.server.DummyServer(pconf)
master.server = server
if args.options:
print(optmanager.dump_defaults(opts))
sys.exit(0)
if args.commands:
master.commands.dump()
sys.exit(0)
if extra:
opts.update(**extra(args))
loop = asyncio.get_event_loop()
for signame in ('SIGINT', 'SIGTERM'):
try:
loop.add_signal_handler(getattr(signal, signame), master.shutdown)
except NotImplementedError:
# Not supported on Windows
pass
# Make sure that we catch KeyboardInterrupts on Windows.
# https://stackoverflow.com/a/36925722/934719
if os.name == "nt":
async def wakeup():
while True:
await asyncio.sleep(0.2)
asyncio.ensure_future(wakeup())
master.run()
except exceptions.OptionsError as e:
print("%s: %s" % (sys.argv[0], e), file=sys.stderr)
sys.exit(1)
except (KeyboardInterrupt, RuntimeError):
pass
return master
| 5,350,047 |
def get_role_with_name(role_name: str) -> Role:
"""Get role with given name."""
role = Role.query.filter(Role.name == role_name).one()
return role
| 5,350,048 |
def namespace_store_factory(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session, pvc_factory_session
):
"""
Create a NamespaceStore factory.
Calling this fixture lets the user create namespace stores.
Args:
request (object): Pytest built-in fixture
cld_mgr (CloudManager): Cloud Manager object containing all
connections to clouds
        mcg_obj_session (MCG): MCG object containing data and utils
            related to MCG
        cloud_uls_factory_session: Factory for creation of underlying storage
        pvc_factory_session: Factory for creation of PVCs (used for NSFS stores)
Returns:
func: Factory method - allows the user to create namespace stores
"""
created_nss = []
cmdMap = {
"cli": cli_create_namespacestore,
"oc": oc_create_namespacestore,
}
def _create_nss(method, nss_dict):
"""
Tracks creation and cleanup of all the namespace stores that were created in the current scope
Args:
method (str): String for selecting method of namespace store creation (CLI/OC)
nss_dict (dict): Dictionary containing storage provider as key and a list of tuples
as value.
Namespace store dictionary examples - 'CloudName': [(amount, region), (amount, region)]
i.e. - 'aws': [(3, us-west-1),(2, eu-west-2)]
Returns:
list: A list of the NamespaceStore objects created by the factory in the current scope
"""
current_call_created_nss = []
for platform, nss_lst in nss_dict.items():
for nss_tup in nss_lst:
if platform.lower() == "nsfs":
uls_name = nss_tup[0] or create_unique_resource_name(
constants.PVC.lower(), platform
)
pvc_factory_session(
custom_data=template_pvc(uls_name, size=nss_tup[1])
)
else:
# Create the actual target bucket on the request service
uls_dict = cloud_uls_factory_session({platform: [(1, nss_tup[1])]})
uls_name = list(uls_dict[platform])[0]
nss_name = create_unique_resource_name(constants.MCG_NSS, platform)
# Create the actual namespace resource
cmdMap[method.lower()](
nss_name, platform, mcg_obj_session, uls_name, cld_mgr, nss_tup
)
nss_obj = NamespaceStore(
name=nss_name,
method=method.lower(),
mcg_obj=mcg_obj_session,
uls_name=uls_name,
)
created_nss.append(nss_obj)
current_call_created_nss.append(nss_obj)
nss_obj.verify_health()
return current_call_created_nss
def nss_cleanup():
for nss in created_nss:
nss.delete()
request.addfinalizer(nss_cleanup)
return _create_nss
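# A hedged usage sketch (not part of the original suite): a test that asks the
# factory above for a single AWS-backed namespace store via the CLI path. The
# region string is only an example.
def test_namespace_store_factory_sketch(namespace_store_factory):
    nss_objs = namespace_store_factory("cli", {"aws": [(1, "eu-central-1")]})
    assert len(nss_objs) == 1
    assert nss_objs[0].name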
| 5,350,049 |
def get_user(request, username):
"""
Gets a user's information.
return:
{
status: HTTP status,
name: string,
gender: string,
marital_status: string,
first_name: string
}
"""
data = get_user_info(username)
if data:
return Response({'data': data}, status=200)
else:
return Response(status=404)
| 5,350,050 |
def find_changes(d_before, d_after):
"""
Returns a dictionary of changes in the format:
{
<system id>: {
<changed key>: <Change type>,
...
},
...
}
The changes should describe the differences between d_before and d_after.
"""
changes = dict()
for k in d_after:
if k not in d_before:
changes[k] = Change.Addition
elif type(d_before[k]) is dict and type(d_after[k]) is dict:
nested = find_changes(d_before[k], d_after[k])
if len(nested) > 0:
changes[k] = nested
elif d_before[k] != d_after[k]:
changes[k] = Change.Edit
# Apply removals
for k in d_before:
if k not in d_after:
changes[k] = Change.Removal
return changes
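# A small, self-contained usage sketch (not from the original source). `Change`
# here is a stand-in enum carrying the members find_changes refers to; the real
# project defines its own.
from enum import Enum

class Change(Enum):
    Addition = 1
    Edit = 2
    Removal = 3

before = {"sys1": {"cpu": 2, "ram": 4}, "sys2": {"cpu": 1}}
after = {"sys1": {"cpu": 4, "ram": 4, "disk": 100}}
# Expected: {'sys1': {'cpu': Change.Edit, 'disk': Change.Addition},
#            'sys2': Change.Removal}
print(find_changes(before, after))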
| 5,350,051 |
def check_vector_inbetween(v1, v2, point):
""" Checks if point lies inbetween two vectors v1, v2. Returns boolean. """
if (np.dot(np.cross(v1, point), np.cross(v1, v2))) >= 0 and (np.dot(np.cross(v2, point), np.cross(v2, v1))) >= 0:
return True
else:
return False
| 5,350,052 |
def accuracy(y_preds, y_test):
"""
Function to calculate the accuracy of algorithm
:param y_preds: predictions for test data
:param y_test: actual labels for test data
:return: accuracy in percentage
"""
return np.sum(np.where(y_preds == y_test, 1, 0)) * 100 / len(y_test)
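# A tiny usage sketch (illustrative): two of the three predictions match the
# labels, so the accuracy is 66.66...%.
import numpy as np
print(accuracy(np.array([1, 0, 1]), np.array([1, 1, 1])))  # 66.666...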
| 5,350,053 |
def make_forecasts_at_incidents_for_mala(get_new=False):
"""Lager csv med alle varsomhendelser sammen med faregrad og de aktuelle skredproblemene
(svakt lag, skredtype og skredproblemnavnert). Der det er gjort en regObs observasjon
med «hendelse/ulykke» skjema fylt ut har jeg også lagt på skadeomfangsvurderingen.
"""
pickle_file_name = '{0}dl_inci_mala.pickle'.format(env.local_storage)
output_incident_and_dl = '{0}incidents_mala.csv'.format(env.output_folder)
if get_new:
varsom_incidents = gm.get_varsom_incidents(add_forecast_regions=True, add_forecasts=True, add_observations=False)
mp.pickle_anything(varsom_incidents, pickle_file_name)
else:
varsom_incidents = mp.unpickle_anything(pickle_file_name)
incident_and_dl = []
for i in varsom_incidents:
incident_date = i.date
danger_level = None
problem_1 = None
problem_2 = None
problem_3 = None
avalanche_type_1 = None
avalanche_type_2 = None
avalanche_type_3 = None
weak_layer_1 = None
weak_layer_2 = None
weak_layer_3 = None
dato_regobs = None
damage_extent = None
if i.forecast:
danger_level = i.forecast.danger_level
for p in i.forecast.avalanche_problems:
if p.order == 1:
problem_1 = p.problem
weak_layer_1 = p.cause_name
avalanche_type_1 = p.aval_type
if p.order == 2:
problem_2 = p.problem
weak_layer_2 = p.cause_name
avalanche_type_2 = p.aval_type
if p.order == 3:
problem_3 = p.problem
weak_layer_3 = p.cause_name
avalanche_type_3 = p.aval_type
if i.observations:
dato_regobs = i.observations[0].DtObsTime.date()
for obs in i.observations:
for o in obs.Observations:
if isinstance(o, go.Incident):
damage_extent = o.DamageExtentName
incident_and_dl.append({'Date': incident_date,
# 'Dato (regObs)': dato_regobs,
'Region_id': i.region_id,
'Region': i.region_name,
'Fatalities': i.fatalities,
'Damage_extent': damage_extent,
'People_involved': i.people_involved,
'Activity': i.activity,
'Danger_level': danger_level,
'Avalanche_problem_1': problem_1,
'Avalanche_type_1': avalanche_type_1,
'Weak_layer_1': weak_layer_1,
'Avalanche_problem_2': problem_2,
'Avalanche_type_2': avalanche_type_2,
'Weak_layer_2': weak_layer_2,
'Avalanche_problem_3': problem_3,
'Avalanche_type_3': avalanche_type_3,
'Weak_layer_3': weak_layer_3,
'Comment': i.comment,
'regObs_id': '{}'.format(i.regid)})
# Write observed problems to file
with open(output_incident_and_dl, 'w', encoding='utf-8') as f:
make_header = True
for i in incident_and_dl:
if make_header:
f.write(';'.join([fe.make_str(d) for d in i.keys()]) + '\n')
make_header = False
f.write(';'.join([fe.make_str(d) for d in i.values()]).replace('[', '').replace(']', '') + '\n')
| 5,350,054 |
def get_all(factory='official', **kwargs):
"""Construct and return an list of Class `Event`.
hookを呼び出す.
Args:
factory: `Event` の取得用マネージャ 今のところ,京大公式HP用のみ.
EventFactoryMixin classを継承したクラスか 'official' に対応
date (:obj:`datetime`, optional): 欲しいイベントのdatetime.
`month` , `year` とどちらかを選択.両方指定した場合,こちらが優先される.
year (int, optional): イベントを取得する年.
両方指定した場合, `date` が優先される.
month (int, optional): イベントを取得する月.
両方指定した場合, `date` が優先される.
Returns:
generator of Events
"""
return kueventparser(factory=factory, method='get_all', **kwargs)
| 5,350,055 |
def gpu_queue(options):
"""
Queued up containers waiting for GPU resources
"""
import docker
import json
import os
import time
from vent.helpers.meta import GpuUsage
status = (False, None)
if (os.path.isfile('/root/.vent/vent.cfg') and os.path.isfile('/root/.vent/plugin_manifest.cfg')):
path_dir = '/root/.vent'
else:
path_dir = '/vent'
print('gpu queue', str(options))
print('gpu queue', str(GpuUsage(base_dir=path_dir+'/',
meta_dir=path_dir)))
options = json.loads(options)
configs = options['configs']
gpu_options = configs['gpu_options']
devices = []
options['auto_remove'] = True
# device specified, remove all other devices
if 'device' in gpu_options:
dev = '/dev/nvidia' + gpu_options['device'] + ':/dev/nvidia'
dev += gpu_options['device'] + ':rwm'
if 'devices' in configs:
d = list(configs['devices'])
for device in d:
if any(str.isdigit(str(char)) for char in device):
if dev == device:
devices.append(device)
else:
configs['devices'].remove(device)
else:
d = configs['devices']
for device in d:
if any(str.isdigit(str(char)) for char in device):
devices.append(device)
# check if devices is still an empty list
if not devices:
status = (False, 'no valid devices match the requested device')
print(str(status))
return status
mem_needed = 0
dedicated = False
# need a gpu to itself
if ('dedicated' in configs['gpu_options'] and
configs['gpu_options']['dedicated'] == 'yes'):
dedicated = True
if 'mem_mb' in configs['gpu_options']:
# TODO input error checking
mem_needed = int(configs['gpu_options']['mem_mb'])
print('mem_needed: ', mem_needed)
print('dedicated: ', dedicated)
device = None
while not device:
usage = GpuUsage(base_dir=path_dir+'/', meta_dir=path_dir)
if usage[0]:
usage = usage[1]
else:
return usage
print(usage)
# {"device": "0",
# "mem_mb": "1024",
# "dedicated": "yes",
# "enabled": "yes"}
for d in devices:
dev = str(d.split(':')[0].split('nvidia')[1])
print(dev)
# if the device is already dedicated, can't be used
dedicated_gpus = usage['vent_usage']['dedicated']
is_dedicated = False
for gpu in dedicated_gpus:
if dev in gpu:
is_dedicated = True
print('is_dedicated: ', is_dedicated)
if not is_dedicated:
ram_used = 0
if dev in usage['vent_usage']['mem_mb']:
ram_used = usage['vent_usage']['mem_mb'][dev]
# check for vent usage/processes running
if (dedicated and
dev not in usage['vent_usage']['mem_mb'] and
mem_needed <= usage[int(dev)]['global_memory'] and
not usage[int(dev)]['processes']):
device = dev
# check for ram constraints
elif mem_needed <= (usage[int(dev)]['global_memory'] - ram_used):
device = dev
# TODO make this sleep incremental up to a point, potentially kill
# after a set time configured from vent.cfg, outputting as it goes
time.sleep(1)
# lock jobs to a specific gpu (no shared GPUs for a single process) this is
# needed to calculate if memory requested (but not necessarily in use)
# would become oversubscribed
# store which device was mapped
options['labels']['vent.gpu.device'] = device
gpu_device = '/dev/nvidia' + device + ':/dev/nvidia' + device + ':rwm'
if 'devices' in configs:
d = configs['devices']
for dev in d:
if any(str.isdigit(str(char)) for char in dev):
if gpu_device != dev:
configs['devices'].remove(dev)
try:
d_client = docker.from_env()
del options['configs']
del configs['gpu_options']
params = options.copy()
params.update(configs)
container = d_client.containers.run(**params)
status = (True, None)
except Exception as e: # pragma: no cover
status = (False, str(e))
print(str(status))
return status
| 5,350,056 |
def nudupl(f):
"""Square(f) following Cohen, Alg. 5.4.8.
"""
L = int(((abs(f.discriminant))/4)**(1/4))
a, b, c = f[0], f[1], f[2]
# Step 1 Euclidean step
d1, u, v = extended_euclid_xgcd(b, a)
A = a//d1
B = b//d1
C = (-c*u) % A
C1 = A-C
if C1 < C:
C = -C1
# Step 2 Partial reduction
d, v, v2, v3, z = parteucl(A, C, L)
# Step 3 Special case
if z==0:
g = (B*v3+c)//d
a2 = d**2
c2 = v3**2
b2 = b + (d+v3)**2 - a2 - c2
c2 = c2 + g*d1
else:
# Step 4 Final computations
e = (c*v + B*d)//A
g = (e*v2 - B)//v
b2 = e*v2 + v*g
if d1>1:
b2 = d1*b2
v = d1*v
v2 = d1*v2
a2 = d**2
c2 = v3**2
b2 = b2 + (d+v3)**2 - a2 - c2
a2 = a2 + e*v
c2 = c2 + g*v2
f2 = type(f)((a2, b2, c2))
return f2
| 5,350,057 |
def remove_schema(name):
"""Removes a configuration schema from the database"""
schema = controller.ConfigurationSchema()
schema.remove(name)
return 0
| 5,350,058 |
def test_get_row_mask_with_min_zeros():
"""Tests the `get_row_mask_with_min_zeros` function."""
zeros_mask = tf.constant(
[[True, False, False], [True, True, False], [True, True, True]]
)
actual = get_row_mask_with_min_zeros(zeros_mask)
expected = tf.constant([[True], [False], [False]], tf.bool)
assert tf.reduce_all(tf.equal(actual, expected))
# Tests a zeros_mask with one row containing only zeros.
zeros_mask = tf.constant(
[[False, False, False], [True, True, False], [True, True, True]]
)
actual = get_row_mask_with_min_zeros(zeros_mask)
expected = tf.constant([[False], [True], [False]], tf.bool)
assert tf.reduce_all(tf.equal(actual, expected))
| 5,350,059 |
def get_unassigned_independent_hyperparameters(outputs):
"""Going backward from the outputs provided, gets all the independent
hyperparameters that are not set yet.
    Setting a hyperparameter may lead to the creation of additional hyperparameters,
    which will most likely not be set. Such behavior happens when dealing with,
    for example, hyperparameters associated with substitution
modules such as :func:`deep_architect.modules.siso_optional`,
:func:`deep_architect.modules.siso_or`, and :func:`deep_architect.modules.siso_repeat`.
Args:
outputs (dict[str, deep_architect.core.Output]): Dictionary of named
outputs to start the traversal at.
Returns:
OrderedSet[deep_architect.core.Hyperparameter]:
Ordered set of hyperparameters that are currently present in the
graph and not have been assigned a value yet.
"""
assert not is_specified(outputs)
unassigned_indep_hs = OrderedSet()
for h in get_all_hyperparameters(outputs):
if not isinstance(
h, DependentHyperparameter) and not h.has_value_assigned():
unassigned_indep_hs.add(h)
return unassigned_indep_hs
| 5,350,060 |
def get_theta_benchmark_matrix(theta_type, theta_value, benchmarks, morpher=None):
"""Calculates vector A such that dsigma(theta) = A * dsigma_benchmarks"""
if theta_type == "benchmark":
n_benchmarks = len(benchmarks)
index = list(benchmarks).index(theta_value)
theta_matrix = np.zeros(n_benchmarks)
theta_matrix[index] = 1.0
elif theta_type == "morphing":
theta_matrix = morpher.calculate_morphing_weights(theta_value)
else:
raise ValueError("Unknown theta {}".format(theta_type))
return theta_matrix
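# A tiny usage sketch (illustrative): with three benchmarks, requesting the
# second one as a "benchmark" theta yields the unit vector that selects it.
# The benchmark names are made up for the example.
benchmarks = {"sm": None, "w": None, "ww": None}
print(get_theta_benchmark_matrix("benchmark", "w", benchmarks))  # [0. 1. 0.]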
| 5,350,061 |
def get_plugins(plugin_dir=None):
"""Load plugins from PLUGIN_DIR and return a dict with the plugin name
hashed to the imported plugin.
PLUGIN_DIR is the name of the dir from which to load plugins. If
it is None, use the plugin dir in the dir that holds this func.
We load plugins and run them in asciibetical order. We ignore
plugins that begin with a character other than a digit or a
letter.
PLUGIN API:
run_p(text, meta, opt): predicate returns True if this
plugin thinks it should run in the pipeline.
run(text, meta, opt): runs the plugin, returns text, meta
after_p(pdf_fname, meta, opt): predicate returns True if this plugin
thinks it should run after the pdf is produced.
after(pdf_fname, meta, opt): runs the plugin, returns meta. May change the pdf
"""
if not plugin_dir:
plugin_dir = os.path.join(
os.path.dirname(
os.path.abspath(__file__)), "plugins")
plugins = {}
pat = {
"enabled": r"^[0-9a-zA-Z]",
"prefix": r"^[0-9]+_",
}
pat = {k: re.compile(v) for k, v in pat.items()}
for fname in sorted(os.listdir(plugin_dir)):
if (not fname.endswith(".py")
or fname.startswith('.')
or '#' in fname
or not pat['enabled'].match(fname)):
continue
spec = importlib.util.spec_from_file_location(
fname, os.path.join(plugin_dir, fname))
fname = pat["prefix"].sub("", fname)
plugins[fname] = importlib.util.module_from_spec(spec)
spec.loader.exec_module(plugins[fname])
return plugins
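# A minimal sketch (not from the original project) of a plugin module, e.g.
# "10_example.py" in the plugin dir, following the PLUGIN API documented in
# get_plugins above. The hook names and signatures come from that docstring;
# the behavior shown here is purely illustrative.
def run_p(text, meta, opt):
    # Run in the pipeline only when the document metadata carries a title.
    return "title" in meta

def run(text, meta, opt):
    # Prepend the title to the text, then hand both back to the pipeline.
    return meta["title"] + "\n\n" + text, meta

def after_p(pdf_fname, meta, opt):
    # Nothing to do after the PDF is produced.
    return False

def after(pdf_fname, meta, opt):
    return meta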
| 5,350,062 |
def reorder_points(point_list):
"""
Reorder points of quadrangle.
    (top-left, top-right, bottom-right, bottom-left).
:param point_list: List of point. Point is (x, y).
:return: Reorder points.
"""
    # Find the first point whose x is minimum.
ordered_point_list = sorted(point_list, key=lambda x: (x[0], x[1]))
first_point = ordered_point_list[0]
    # Find the third point: the one with the middle slope.
slope_list = [[cal_slope(first_point, p), p] for p in ordered_point_list[1:]]
ordered_slope_point_list = sorted(slope_list, key=lambda x: x[0])
first_third_slope, third_point = ordered_slope_point_list[1]
# Find the second point which is above the line between the first point and the third point.
# All that's left is the fourth point.
if above_line(ordered_slope_point_list[0][1], third_point, first_third_slope):
second_point = ordered_slope_point_list[0][1]
fourth_point = ordered_slope_point_list[2][1]
reverse_flag = False
else:
second_point = ordered_slope_point_list[2][1]
fourth_point = ordered_slope_point_list[0][1]
reverse_flag = True
# Find the top left point.
second_fourth_slope = cal_slope(second_point, fourth_point)
if first_third_slope < second_fourth_slope:
if reverse_flag:
reorder_point_list = [fourth_point, first_point, second_point, third_point]
else:
reorder_point_list = [second_point, third_point, fourth_point, first_point]
else:
reorder_point_list = [first_point, second_point, third_point, fourth_point]
return reorder_point_list
| 5,350,063 |
def ppg_acoustics_collate(batch):
"""Zero-pad the PPG and acoustic sequences in a mini-batch.
Also creates the stop token mini-batch.
Args:
batch: An array with B elements, each is a tuple (PPG, acoustic).
Consider this is the return value of [val for val in dataset], where
dataset is an instance of PPGSpeechLoader.
Returns:
ppg_padded: A (batch_size, feature_dim_1, num_frames_1) tensor.
input_lengths: A batch_size array, each containing the actual length
of the input sequence.
acoustic_padded: A (batch_size, feature_dim_2, num_frames_2) tensor.
        gate_padded: A (batch_size, num_frames_2) tensor. A value of "1" marks
            the stop token. Currently "1" is assigned to the last frame and the padding.
output_lengths: A batch_size array, each containing the actual length
of the output sequence.
"""
# Right zero-pad all PPG sequences to max input length.
# x is (PPG, acoustic), x[0] is PPG, which is an (L(varied), D) tensor.
input_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([x[0].shape[0] for x in batch]), dim=0,
descending=True)
max_input_len = input_lengths[0]
ppg_dim = batch[0][0].shape[1]
ppg_padded = torch.FloatTensor(len(batch), max_input_len, ppg_dim)
ppg_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
curr_ppg = batch[ids_sorted_decreasing[i]][0]
ppg_padded[i, :curr_ppg.shape[0], :] = curr_ppg
# Right zero-pad acoustic features.
feat_dim = batch[0][1].shape[1]
max_target_len = max([x[1].shape[0] for x in batch])
# Create acoustic padded and gate padded
acoustic_padded = torch.FloatTensor(len(batch), max_target_len, feat_dim)
acoustic_padded.zero_()
gate_padded = torch.FloatTensor(len(batch), max_target_len)
gate_padded.zero_()
output_lengths = torch.LongTensor(len(batch))
for i in range(len(ids_sorted_decreasing)):
curr_acoustic = batch[ids_sorted_decreasing[i]][1]
acoustic_padded[i, :curr_acoustic.shape[0], :] = curr_acoustic
gate_padded[i, curr_acoustic.shape[0] - 1:] = 1
output_lengths[i] = curr_acoustic.shape[0]
ppg_padded = ppg_padded.transpose(1, 2)
acoustic_padded = acoustic_padded.transpose(1, 2)
return ppg_padded, input_lengths, acoustic_padded, gate_padded,\
output_lengths
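# A small usage sketch (illustrative): collate two (PPG, acoustic) pairs of
# different lengths; the feature dimensions below are made up for the example.
import torch
batch = [
    (torch.randn(5, 40), torch.randn(7, 80)),  # 5 PPG frames, 7 acoustic frames
    (torch.randn(3, 40), torch.randn(4, 80)),  # shorter utterance gets zero-padded
]
ppg, in_lens, acoustic, gate, out_lens = ppg_acoustics_collate(batch)
print(ppg.shape, acoustic.shape)  # torch.Size([2, 40, 5]) torch.Size([2, 80, 7])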
| 5,350,064 |
def test_atomic_unsigned_int_enumeration_3_nistxml_sv_iv_atomic_unsigned_int_enumeration_4_4(mode, save_output, output_format):
"""
Type atomic/unsignedInt is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/unsignedInt/Schema+Instance/NISTSchema-SV-IV-atomic-unsignedInt-enumeration-4.xsd",
instance="nistData/atomic/unsignedInt/Schema+Instance/NISTXML-SV-IV-atomic-unsignedInt-enumeration-4-4.xml",
class_name="NistschemaSvIvAtomicUnsignedIntEnumeration4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 5,350,065 |
def problem004():
"""
Find the largest palindrome made from the product of two 3-digit numbers.
"""
return largest_palindrome_from_product_of_two_n_digit_numbers(3)
| 5,350,066 |
def salt_minion(salt_minion_factory):
"""
A running salt-minion fixture
"""
with salt_minion_factory.started():
# Sync All
salt_call_cli = salt_minion_factory.get_salt_call_cli()
ret = salt_call_cli.run("saltutil.sync_all", _timeout=120)
assert ret.exitcode == 0, ret
yield salt_minion_factory
| 5,350,067 |
def validate_params(service_code: str, params: dict) -> None:
"""
Check that query inputs are of the correct format.
Will fail on missing arguments,
"""
if service_code not in REQUIRED_D.keys():
raise ElexonAPIException(f"Unknown service_code: {service_code}.")
passed_set = set(params.keys())
required_set = set(REQUIRED_D[service_code])
if passed_set != required_set:
for extra_arg in passed_set - required_set:
logger.info(f"Extra argument for {service_code}: {extra_arg}")
if not required_set.issubset(passed_set):
for missing_arg in required_set - passed_set:
logger.warning(f"Missing argument for {service_code}: {missing_arg}.")
        raise ElexonAPIException(f"Missing arguments for {service_code}.")
return
| 5,350,068 |
def residual_l1_max(reconstruction: Tensor, original: Tensor) -> Tensor:
"""Construct l1 difference between original and reconstruction.
Note: Only positive values in the residual are considered, i.e. values below zero are clamped.
    That means only pixels that are brighter in the input than in the reconstruction (likely lesions) are kept."""
residual = original - reconstruction
return torch.where(residual > 0.0, residual, torch.zeros_like(residual))
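# A tiny usage sketch (illustrative): only positive residuals survive.
import torch
original = torch.tensor([1.0, 0.2, 0.7])
reconstruction = torch.tensor([0.5, 0.4, 0.7])
print(residual_l1_max(reconstruction, original))  # tensor([0.5000, 0.0000, 0.0000])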
| 5,350,069 |
def build_url(path):
"""
Construct an absolute url by appending a path to a domain.
"""
return 'http://%s%s' % (DOMAIN, path)
| 5,350,070 |
def get_registration_url(request, event_id):
"""
Compute the absolute URL to create a booking on a given event
@param request: An HttpRequest used to discover the FQDN and path
@param event_id: the ID of the event to register to
"""
registration_url_rel = reverse(booking_create, kwargs={"event_id": event_id})
return request.build_absolute_uri(registration_url_rel)
| 5,350,071 |
def create_no_args_decorator(decorator_function,
function_for_metadata=None,
):
"""
Utility method to create a decorator that has no arguments at all and is implemented by `decorator_function`, in
implementation-first mode or usage-first mode.
The created decorator is a function with var-args. When called it checks the length
(0=called with parenthesis, 1=called without, 2=error).
Note: we prefer to use this var-arg signature rather than a "(_=None)" signature, because it is more readable for
the decorator's help.
:param decorator_function:
:param function_for_metadata: an alternate function to use for the documentation and module metadata of the
generated function
:return:
"""
if function_for_metadata is None:
function_for_metadata = decorator_function
@with_signature(None,
func_name=function_for_metadata.__name__,
doc=function_for_metadata.__doc__,
module_name=function_for_metadata.__module__)
def new_decorator(*_):
"""
Code for your decorator, generated by decopatch to handle the case when it is called without parenthesis
"""
if len(_) == 0:
# called with no args BUT parenthesis: @foo_decorator().
return with_parenthesis_usage(decorator_function, *_)
elif len(_) == 1:
first_arg_value = _[0]
if can_arg_be_a_decorator_target(first_arg_value):
# called with no arg NOR parenthesis: @foo_decorator
return no_parenthesis_usage(decorator_function, first_arg_value)
# more than 1 argument or non-decorable argument: not possible
raise TypeError("Decorator function '%s' does not accept any argument."
"" % decorator_function.__name__)
return new_decorator
| 5,350,072 |
def load_electric_devices_segmentation():
"""Load the Electric Devices segmentation problem and returns X.
We group TS of the UCR Electric Devices dataset by class label and concatenate
all TS to create segments with repeating temporal patterns and
characteristics. The location at which different classes were
concatenated are marked as change points.
We resample the resulting TS to control the TS resolution.
The window sizes for these datasets are hand-selected to capture
temporal patterns but are approximate and limited to the values
[10,20,50,100] to avoid over-fitting.
    Returns
    -------
    X : pd.Series
        Single time series for segmentation
    period_length : int
        The annotated period length by a human expert
    change_points : numpy array
        The change points annotated within the dataset
"""
dir = "segmentation"
name = "ElectricDevices"
fname = name + ".csv"
period_length = int(10)
change_points = np.int32([1090, 4436, 5712, 7923])
path = os.path.join(MODULE, DIRNAME, dir, fname)
ts = pd.read_csv(path, index_col=0, header=None, squeeze=True)
return ts, period_length, change_points
| 5,350,073 |
def reshape( w, h):
"""Reshapes the scene when the window is resized."""
lightPos = (-50.0, 50.0, 100.0, 1.0)
nRange = 2.0
if h==0:
h = 1
glViewport(0, 0, w, h)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
if w <= h:
glOrtho(-nRange, nRange, -nRange*h/w, nRange*h/w, -nRange, nRange)
else:
glOrtho(-nRange*w/h, nRange*w/h, -nRange, nRange, -nRange, nRange)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glLightfv(GL_LIGHT0, GL_POSITION, lightPos)
| 5,350,074 |
def compute_batch_jacobian(input, output, retain_graph=False):
"""
Compute the Jacobian matrix of a batch of outputs with respect to
some input (normally, the activations of a hidden layer).
Returned Jacobian has dimensions Batch x SizeOutput x SizeInput
Args:
input (list or torch.Tensor): Tensor or sequence of tensors
with the parameters to which the Jacobian should be
computed. Important: the requires_grad attribute of input needs to
be True while computing output in the forward pass.
output (torch.Tensor): Tensor with the values of which the Jacobian is
computed
Returns (torch.Tensor): 3D tensor containing the Jacobian of output with
respect to input: batch_size x output_size x input_size.
"""
batch_jacobian = torch.Tensor(output.shape[0], output.shape[1], input.shape[1])
assert output.shape[0] == input.shape[0], \
"Batch size needs to be the same for both input and output"
for batch_idx in range(output.shape[0]):
for i, output_elem in enumerate(output[batch_idx]):
            # Retain the graph for every backward call except the very last one,
            # where the caller-provided retain_graph flag decides.
            is_last = (batch_idx == output.shape[0] - 1) and (i == output.shape[1] - 1)
            rg = retain_graph if is_last else True
gradients = torch.autograd.grad(output_elem, input, retain_graph=rg)[0][batch_idx].detach()
batch_jacobian[batch_idx, i, :] = gradients
return batch_jacobian
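# A small usage sketch (illustrative): for a linear map y = x @ W.T, every
# per-sample Jacobian equals W.
import torch
x = torch.randn(4, 3, requires_grad=True)
W = torch.randn(2, 3)
y = x @ W.t()
J = compute_batch_jacobian(x, y)
print(J.shape)  # torch.Size([4, 2, 3])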
| 5,350,075 |
def glIndexdv(v):
"""
v - seq( GLdouble, 1)"""
if 1 != len(v):
raise TypeError(len(v), "1-array expected")
_gllib.glIndexdv(v)
| 5,350,076 |
def is_associative(value):
"""Checks if `value` is an associative object meaning that it can be
accessed via an index or key
Args:
value (mixed): Value to check.
Returns:
bool: Whether `value` is associative.
Example:
>>> is_associative([])
True
>>> is_associative({})
True
>>> is_associative(1)
False
>>> is_associative(True)
False
.. versionadded:: 2.0.0
"""
return hasattr(value, '__getitem__')
| 5,350,077 |
def list_notebook():
"""List notebooks"""
COLS_TO_SHOW = ["Name", "ID", "Environment", "Resources", "Status"]
console = Console()
# using user_id hard coded in SysUserRestApi.java
# https://github.com/apache/submarine/blob/5040068d7214a46c52ba87e10e9fa64411293cf7/submarine-server/server-core/src/main/java/org/apache/submarine/server/workbench/rest/SysUserRestApi.java#L228
try:
thread = notebookClient.list_notebooks_async(user_id="4291d7da9005377ec9aec4a71ea837f")
timeout = time.time() + TIMEOUT
with console.status("[bold green] Fetching Notebook..."):
while not thread.ready():
time.sleep(POLLING_INTERVAL)
if time.time() > timeout:
console.print("[bold red] Timeout!")
return
result = thread.get()
results = result.result
results = list(
map(
lambda r: [
r["name"],
r["notebookId"],
r["spec"]["environment"]["name"],
r["spec"]["spec"]["resources"],
r["status"],
],
results,
)
)
table = Table(title="List of Notebooks")
for col in COLS_TO_SHOW:
table.add_column(col, overflow="fold")
for res in results:
table.add_row(*res)
console.print(table)
except ApiException as err:
if err.body is not None:
errbody = json.loads(err.body)
click.echo("[Api Error] {}".format(errbody["message"]))
else:
click.echo("[Api Error] {}".format(err))
| 5,350,078 |
def test_dispersion_curve_calculation(hlm):
""" Test the method for calculating dispersion curves. """
# TODO: come up with a way to test this
pass
| 5,350,079 |
def join_audio(audio1, audio2):
"""
>>> join_audio(([1], [4]), ([2, 3], [5, 6]))
([1, 2, 3], [4, 5, 6])
"""
(left1, right1) = audio1
(left2, right2) = audio2
left = left1 + left2
right = right1 + right2
audio = (left, right)
return audio
| 5,350,080 |
def csi_from_sr_and_pod(success_ratio_array, pod_array):
"""Computes CSI (critical success index) from success ratio and POD.
POD = probability of detection
:param success_ratio_array: np array (any shape) of success ratios.
:param pod_array: np array (same shape) of POD values.
:return: csi_array: np array (same shape) of CSI values.
"""
return (success_ratio_array ** -1 + pod_array ** -1 - 1.) ** -1
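# A quick numeric check (illustrative): with a success ratio of 0.8 and a POD
# of 0.5, CSI = 1 / (1/0.8 + 1/0.5 - 1) = 1 / 2.25 ≈ 0.444.
import numpy as np
print(csi_from_sr_and_pod(np.array([0.8]), np.array([0.5])))  # ~[0.4444]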
| 5,350,081 |
def portrait_plot(
data,
xaxis_labels,
yaxis_labels,
fig=None,
ax=None,
annotate=False,
annotate_data=None,
annotate_fontsize=15,
annotate_format="{x:.2f}",
figsize=(12, 10),
vrange=None,
xaxis_fontsize=15,
yaxis_fontsize=15,
cmap="RdBu_r",
cmap_bounds=None,
cbar_label=None,
cbar_label_fontsize=15,
cbar_tick_fontsize=12,
cbar_kw={},
colorbar_off=False,
missing_color="grey",
invert_yaxis=True,
box_as_square=False,
legend_on=False,
legend_labels=None,
legend_box_xy=None,
legend_box_size=None,
legend_lw=1,
legend_fontsize=14,
logo_rect=None,
logo_off=False,
debug=False,
):
"""
Parameters
----------
- `data`: 2d numpy array, a list of 2d numpy arrays, or a 3d numpy array (i.e. stacked 2d numpy arrays)
    - `xaxis_labels`: list of strings, labels for the x-axis. The number of list elements must be consistent with the x-axis,
        or 0 (empty list) to turn off x-axis tick labels
    - `yaxis_labels`: list of strings, labels for the y-axis. The number of list elements must be consistent with the y-axis,
        or 0 (empty list) to turn off y-axis tick labels
- `fig`: `matplotlib.figure` instance to which the portrait plot is plotted.
If not provided, use current axes or create a new one. Optional.
- `ax`: `matplotlib.axes.Axes` instance to which the portrait plot is plotted.
If not provided, use current axes or create a new one. Optional.
    - `annotate`: bool, default=False, add annotating text if True,
        but works only for heatmap-style maps (i.e., no triangles)
- `annotate_data`: 2d numpy array, default=None. If None, the image's data is used. Optional.
- `annotate_fontsize`: number (int/float), default=15. Font size for annotation
- `annotate_format`: format for annotate value, default="{x:.2f}"
- `figsize`: tuple of two numbers (width, height), default=(12, 10), figure size in inches
- `vrange`: tuple of two numbers, range of value for colorbar. Optional.
- `xaxis_fontsize`: number, default=15, font size for xaxis tick labels
- `yaxis_fontsize`: number, default=15, font size for yaxis tick labels
- `cmap`: string, default="RdBu_r", name of matplotlib colormap
- `cmap_bounds`: list of numbers. If given, discrete colors are applied. Optional.
- `cbar_label`: string, default=None, label for colorbar
- `cbar_label_fontsize`: number, default=15, font size for colorbar labels
- `cbar_tick_fontsize`: number, default=12, font size for colorbar tick labels
- `cbar_kw`: A dictionary with arguments to `matplotlib.Figure.colorbar`. Optional.
    - `colorbar_off`: Turn off the colorbar if True. Optional.
- `missing_color`: color, default="grey", `matplotlib.axes.Axes.set_facecolor` parameter
- `invert_yaxis`: bool, default=True, place y=0 at top on the plot
- `box_as_square`: bool, default=False, make each box as square
- `legend_on`: bool, default=False, show legend (only for 2 or 4 triangles portrait plot)
    - `legend_labels`: list of strings, legend labels for triangles
- `legend_box_xy`: tuple of numbers, position of legend box's upper-left corner
(lower-left if `invert_yaxis=False`), in `axes` coordinate
- `legend_box_size`: number, size of legend box
- `legend_lw`: number, line width of legend, default=1
- `legend_fontsize`: number, font size for legend, default=14
    - `logo_rect`: sequence of float. The dimensions [left, bottom, width, height] of the PMP logo.
All quantities are in fractions of figure width and height. Optional
- `logo_off`: bool, default=False, turn off PMP logo
    - `debug`: bool, default=False, if True print more messages while running to help debugging
Return
------
- `fig`: matplotlib component for figure
- `ax`: matplotlib component for axis
- `cbar`: matplotlib component for colorbar (not returned if colorbar_off=True)
Author: Jiwoo Lee @ LLNL (2021. 7)
"""
# ----------------
# Prepare plotting
# ----------------
data, num_divide = prepare_data(data, xaxis_labels, yaxis_labels, debug)
if num_divide not in [1, 2, 4]:
sys.exit("Error: Number of (stacked) array is not 1, 2, or 4.")
if annotate:
if annotate_data is None:
annotate_data = data
num_divide_annotate = num_divide
else:
annotate_data, num_divide_annotate = prepare_data(
annotate_data, xaxis_labels, yaxis_labels, debug
)
if num_divide_annotate != num_divide:
sys.exit("Error: annotate_data does not have same size as data")
# ----------------
# Ready to plot!!
# ----------------
if fig is None and ax is None:
fig, ax = plt.subplots(figsize=figsize)
ax.set_facecolor(missing_color)
if vrange is None:
vmin = np.nanmin(data)
vmax = np.nanmax(data)
else:
vmin = min(vrange)
vmax = max(vrange)
# Normalize colorbar
if cmap_bounds is None:
norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
else:
cmap = plt.get_cmap(cmap)
norm = matplotlib.colors.BoundaryNorm(cmap_bounds, cmap.N, **cbar_kw)
# [1] Heatmap-style portrait plot (no triangles)
if num_divide == 1:
ax, im = heatmap(
data,
yaxis_labels,
xaxis_labels,
ax=ax,
invert_yaxis=invert_yaxis,
cmap=cmap,
edgecolors="k",
linewidth=0.5,
norm=norm,
)
if annotate:
if annotate_data is not None:
if annotate_data.shape != data.shape:
sys.exit("Error: annotate_data has different size than data")
else:
annotate_data = data
annotate_heatmap(
im,
ax=ax,
data=data,
annotate_data=annotate_data,
valfmt=annotate_format,
threshold=(2, -2),
fontsize=annotate_fontsize,
)
# [2] Two triangle portrait plot
elif num_divide == 2:
# data order is upper, lower
upper = data[0]
lower = data[1]
ax, im = triamatrix_wrap_up(
upper,
lower,
ax,
xaxis_labels=xaxis_labels,
yaxis_labels=yaxis_labels,
cmap=cmap,
invert_yaxis=invert_yaxis,
norm=norm,
)
# [4] Four triangle portrait plot
elif num_divide == 4:
# data order is clockwise from top: top, right, bottom, left
top = data[0]
right = data[1]
bottom = data[2]
left = data[3]
ax, im = quatromatrix(
top,
right,
bottom,
left,
ax=ax,
tripcolorkw={
"cmap": cmap,
"norm": norm,
"edgecolors": "k",
"linewidth": 0.5,
},
xaxis_labels=xaxis_labels,
yaxis_labels=yaxis_labels,
invert_yaxis=invert_yaxis,
)
# Let the horizontal axes labeling appear on top.
ax.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)
# Rotate the tick labels and set their alignment.
plt.setp(
ax.get_xticklabels(),
fontsize=xaxis_fontsize,
rotation=-30,
ha="right",
rotation_mode="anchor",
)
# Set font size for yaxis tick labels
plt.setp(ax.get_yticklabels(), fontsize=yaxis_fontsize)
# Legend
if legend_on:
if legend_labels is None:
sys.exit("Error: legend_labels was not provided.")
else:
add_legend(
num_divide,
ax,
legend_box_xy,
legend_box_size,
labels=legend_labels,
lw=legend_lw,
fontsize=legend_fontsize,
)
if box_as_square:
ax.set_aspect("equal")
if not logo_off:
if logo_rect is None:
logo_rect = [0.9, 0.15, 0.15, 0.15]
fig, ax = add_logo(fig, ax, logo_rect)
if not colorbar_off:
# Create colorbar
cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
# Label for colorbar
if cbar_label is not None:
cbar.ax.set_ylabel(
cbar_label, rotation=-90, va="bottom", fontsize=cbar_label_fontsize
)
cbar.ax.tick_params(labelsize=cbar_tick_fontsize)
return fig, ax, cbar
else:
return fig, ax
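# A minimal usage sketch (illustrative): a plain heatmap-style portrait plot of
# random scores for 4 models against 6 metrics; the label names are made up and
# the logo is disabled so the sketch does not depend on the logo image file.
import numpy as np
scores = np.random.randn(4, 6)
metrics = ["metric{}".format(i) for i in range(6)]
models = ["model{}".format(i) for i in range(4)]
fig, ax, cbar = portrait_plot(scores, xaxis_labels=metrics, yaxis_labels=models,
                              cbar_label="score", box_as_square=True, logo_off=True)
fig.savefig("portrait_sketch.png")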
| 5,350,082 |
def compute_secondary_observables(data):
"""Computes secondary observables and extends matrix of observables.
Argument
--------
data -- structured array
must contains following fields: length, width, fluo, area, time
Returns
-------
out -- structured array
new fields are added (check `out.dtype.names`)
"""
ell, w, fluo, area, time = map(np.array,
zip(*data[['length',
'width',
'fluo',
'area',
'time']])
)
if len(time) > 1:
delta_t = time[1]-time[0]
age = (time - time[0] + delta_t/2.)/(time[-1] - time[0] + delta_t)
else:
age = np.nan
volume = spherocylinder_volume(ell, w)
concentration = fluo/volume
density = fluo/area
ALratio = area/ell
out = append_fields(data,
['volume',
'concentration',
'density',
'ALratio',
'age'],
[volume,
concentration,
density,
ALratio,
age],
usemask=False, fill_value=np.nan)
return out
| 5,350,083 |
def get_account_number(arn):
"""
Extract the account number from an arn.
:param arn: IAM SSL arn
:return: account number associated with ARN
"""
return arn.split(":")[4]
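# A tiny usage sketch (illustrative, example ARN only): the account number is
# the fifth colon-separated field of the ARN.
print(get_account_number("arn:aws:iam::123456789012:server-certificate/example"))
# -> 123456789012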
| 5,350,084 |
def get_hashtags(tweet):
"""return hashtags from a given tweet
Args:
tweet (object): an object representing a tweet
Returns:
        list: list of hashtags in a tweet
"""
entities = tweet.get('entities', {})
hashtags = entities.get('hashtags', [])
return [get_text(tag) for tag in hashtags if get_text(
tag) not in ['rdc', 'drc', 'rdcongo', 'drcongo']]
| 5,350,085 |
async def get_organization_catalogs(filter: FilterEnum) -> OrganizationCatalogList:
"""Return all organization catalogs."""
logging.debug("Fetching all catalogs")
async with ClientSession() as session:
(
organizations,
datasets,
dataservices,
concepts,
informationmodels,
) = await asyncio.gather(
asyncio.ensure_future(fetch_all_organizations(session)),
asyncio.ensure_future(
query_all_datasets_ordered_by_publisher(filter, session)
),
asyncio.ensure_future(
query_all_dataservices_ordered_by_publisher(filter, session)
),
asyncio.ensure_future(
query_all_concepts_ordered_by_publisher(filter, session)
),
asyncio.ensure_future(
query_all_informationmodels_ordered_by_publisher(filter, session)
),
return_exceptions=True,
)
if isinstance(organizations, BaseException):
logging.warning("Unable to fetch all organizations")
organizations = {}
if isinstance(datasets, BaseException):
logging.warning("Unable to fetch datasets")
datasets = []
if isinstance(dataservices, BaseException):
logging.warning("Unable to fetch dataservices")
dataservices = []
if isinstance(concepts, BaseException):
logging.warning("Unable to fetch concepts")
concepts = []
if isinstance(informationmodels, BaseException):
logging.warning("Unable to fetch informationmodels")
informationmodels = []
return OrganizationCatalogList(
organizations=map_org_summaries(
organizations=cast(Dict, organizations),
datasets=cast(List, datasets),
dataservices=cast(List, dataservices),
concepts=cast(List, concepts),
informationmodels=cast(List, informationmodels),
)
)
| 5,350,086 |
def get_appliance_ospf_neighbors_state(
self,
ne_id: str,
) -> dict:
"""Get appliance OSPF neighbors state
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - ospf
- GET
- /ospf/state/neighbors/{neId}
:param ne_id: Appliance id in the format of integer.NE e.g. ``3.NE``
:type ne_id: str
:return: Returns dictionary of OSPF neighbors state
:rtype: dict
"""
    return self._get("/ospf/state/neighbors/{}".format(ne_id))
| 5,350,087 |
def filter_toolchain_files(dirname, files):
"""Callback for shutil.copytree. Return lists of files to skip."""
split = dirname.split(os.path.sep)
for ign in IGNORE_LIST:
if ign in split:
print('Ignoring dir %s' % dirname)
return files
return []
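# A hedged usage sketch (illustrative paths): pass the callback as the `ignore`
# argument of shutil.copytree so that the contents of any directory named in
# IGNORE_LIST are skipped during the copy.
import shutil
shutil.copytree("src_toolchain", "dst_toolchain", ignore=filter_toolchain_files)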
| 5,350,088 |
def info(obj):
"""Return info on shape and dtype of a numpy array or TensorFlow tensor."""
if obj is None:
return 'None.'
elif isinstance(obj, list):
if obj:
return 'List of %d... %s' % (len(obj), info(obj[0]))
else:
return 'Empty list.'
elif isinstance(obj, tuple):
if obj:
return 'Tuple of %d... %s' % (len(obj), info(obj[0]))
else:
return 'Empty tuple.'
else:
if is_a_numpy_array(obj):
return 'Array with shape: %s, dtype: %s' % (obj.shape, obj.dtype)
else:
return str(obj)
| 5,350,089 |
def flowing(where, to, parallel_converges):
"""
mark target's stream from target
:param where:
:param to:
:param parallel_converges:
:return:
"""
is_parallel = where[PE.type] in PARALLEL_GATEWAYS
stream = None
if is_parallel:
# add parallel's stream to its converge
parallel_converge = to[where[PE.converge_gateway_id]]
blend(source=where,
target=parallel_converge,
custom_stream=stream)
if len(parallel_converge[STREAM]) > 1:
raise exceptions.StreamValidateError(node_id=parallel_converge)
# flow to target
for i, target_id in enumerate(where[PE.target]):
target = to[target_id]
fake = False
# generate different stream
if is_parallel:
stream = '%s_%s' % (where[PE.id], i)
if target_id in parallel_converges:
is_valid_branch = where[STREAM].issubset(parallel_converges[target_id][P_STREAM])
is_direct_connect = where.get(PE.converge_gateway_id) == target_id
if is_valid_branch or is_direct_connect:
# do not flow when branch of parallel converge to its converge gateway
fake = True
if not fake:
blend(source=where,
target=target,
custom_stream=stream)
# sanity check
if len(target[STREAM]) != 1:
raise exceptions.StreamValidateError(node_id=target_id)
| 5,350,090 |
def first_index_k_zeros_left(qstr, k, P):
"""
For a binary string qstr, return the first index of q with k (mod P) zeros to the left.
Return: index in [0, qstr.length]
"""
num_zeros_left = 0
for j in range(qstr.length+1):
if (num_zeros_left - k) % P == 0:
return j
if j == qstr.length:
raise Exception("No valid position found")
if qstr[j] == 0:
num_zeros_left += 1
| 5,350,091 |
def transform_points(points, transf_matrix):
"""
Transform (3,N) or (4,N) points using transformation matrix.
"""
if points.shape[0] not in [3, 4]:
raise Exception("Points input should be (3,N) or (4,N) shape, received {}".format(points.shape))
return transf_matrix.dot(np.vstack((points[:3, :], np.ones(points.shape[1]))))[:3, :]
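# A small usage sketch (illustrative): translate three points at the origin by
# (1, 2, 3) with a 4x4 homogeneous transformation matrix.
import numpy as np
transf = np.eye(4)
transf[:3, 3] = [1.0, 2.0, 3.0]
points = np.zeros((3, 3))  # (3, N) layout: three points at the origin
print(transform_points(points, transf))  # every column becomes (1, 2, 3)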
| 5,350,092 |
def obter_resposta(escolha_nivel):
"""
    Input: level parameter (easy, medium, hard)
    Task: evaluate the user's answer and report the current state
    Output: parameters of the QUIZ menu dictionary
"""
os.system('clear')
escolha = escolha_nivel.lower()
if escolha == '':
navegar_menu['nivel_quiz']()
else:
        try:  # try ... except has an optional else clause, after all the except clauses.
navegar_menu[escolha]()
        except KeyError:  # key not found in the dictionary mapping
print "Seleção ERRADA, por favor, tente novamente.\n"
navegar_menu['nivel_quiz']()
return
| 5,350,093 |
def get_lat_long(zip):
"""
This function takes a zip code and looks up the latitude and longitude using
the uszipcode package. Documentation: https://pypi.python.org/pypi/uszipcode
"""
search = ZipcodeSearchEngine()
zip_data = search.by_zipcode(zip)
lat = zip_data['Latitude']
long = zip_data['Longitude']
return lat, long
| 5,350,094 |
def propose_perturbation_requests(current_input, task_idx, perturbations):
"""Wraps requests for perturbations of one task in a EvaluationRequest PB.
Generates one request for each perturbation, given by adding the perturbation
to current_input.
Args:
current_input: the current policy weights
task_idx: The index of the task to evaluate.
perturbations: A list of perturbations.
Returns:
A list of requests, one for each perturbation.
"""
requests = []
for p_idx, p in enumerate(perturbations):
perturbed_input = current_input + p
requests.append(
first_order_pb2.TaskEvaluationRequest(
request_task_idx=task_idx,
input_idx=p_idx,
eval_order=TASK_VALUE_EVAL_ORDER,
current_input=perturbed_input.tolist()))
return requests
| 5,350,095 |
def validateJson(js, schema):
"""
Confirms that a given json follows the given schema.
Throws a "jsonschema.exceptions.ValidationError" otherwise.
"""
assert type(js) == dict or type(js) == str, "JSON must be a dictionary or string JSON or path!"
assert type(schema) == dict or type(schema) == str, "Schema must be a dictionary or string JSON or path!"
validate(instance=loadJson(js), schema=loadJson(schema))
| 5,350,096 |
def process_enable(ctx, process):
"""Enable process maintenance"""
ctx.obj["output"](enable_process(ctx.obj["cfg"], process))
| 5,350,097 |
def id_token_call_credentials(credentials):
"""Constructs `grpc.CallCredentials` using
`google.auth.Credentials.id_token`.
Args:
credentials (google.auth.credentials.Credentials): The credentials to use.
Returns:
grpc.CallCredentials: The call credentials.
"""
request = google.auth.transport.requests.Request()
return grpc.metadata_call_credentials(
IdTokenAuthMetadataPlugin(credentials, request)
)
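# A hedged usage sketch (illustrative endpoint): combine SSL channel
# credentials with the ID-token call credentials to open a secure gRPC channel.
import google.auth
import grpc

credentials, _ = google.auth.default()
channel_credentials = grpc.composite_channel_credentials(
    grpc.ssl_channel_credentials(),
    id_token_call_credentials(credentials),
)
channel = grpc.secure_channel("service.example.com:443", channel_credentials)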
| 5,350,098 |
def delete_note(note_id):
"""Delete note by id"""
print(req.delete(f'http://{host}/notes/{note_id}').status_code)
| 5,350,099 |