content (stringlengths 22 to 815k) | id (int64, 0 to 4.91M)
---|---|
def org_repos(info):
"""
处理组织的仓库
:param info: 字典
:return: 两个列表,第一个包含字典(id,全名,url),第二个包含所用到的语言
"""
repo_info = []
languages = []
if info:
for repo in info:
temp = {"id": repo["id"], "full_name": repo["full_name"], "url": repo["url"], "language": repo["language"]}
repo_info.append(temp)
languages.append(repo["language"])
return repo_info, languages
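# Illustrative usage (the values below are made up, not taken from the original data):
sample_repos = [
    {"id": 1, "full_name": "octo/demo", "url": "https://example.com/octo/demo", "language": "Python"},
    {"id": 2, "full_name": "octo/site", "url": "https://example.com/octo/site", "language": "HTML"},
]
repo_info, languages = org_repos(sample_repos)
# languages == ['Python', 'HTML']; repo_info keeps id, full_name, url and language for each repo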
| 5,349,500 |
def year(yyyy_mm_dd: Union[str, datetime.date]) -> int:
"""
Extracts the year of a given date, similar to yyyy function but returns an int
>>> year('2020-05-14')
2020
"""
date, _ = _parse(yyyy_mm_dd, at_least="%Y")
return date.year
| 5,349,501 |
def getTypeLevel(Type):
"""Checks whether a spectral data type is available in the endmember library.
Args:
Type: the type of spectra to select.
Returns:
level: the metadata "level" of the group for subsetting. returns 0 if not found.
"""
for i in range(4):
level = i + 1
available_types = listTypes(level=level)
if Type in available_types:
return level
return 0
| 5,349,502 |
def main():
"""The main entry."""
app = wx.App(False)
frame = SuhpysisFrame(None, title='Jarry Test')
frame.Show()
app.MainLoop()
| 5,349,503 |
def construct_creator(creator: Union[dict, str], ignore_email):
"""Parse input and return an instance of Person."""
if not creator:
return None, None
if isinstance(creator, str):
person = Person.from_string(creator)
elif isinstance(creator, dict):
person = Person.from_dict(creator)
else:
raise errors.ParameterError("Invalid creator type")
message = 'A valid format is "Name <email> [affiliation]"'
if not person.name: # pragma: no cover
raise errors.ParameterError(f'Name is invalid: "{creator}".\n{message}')
if not person.email:
if not ignore_email: # pragma: no cover
raise errors.ParameterError(f'Email is invalid: "{creator}".\n{message}')
else:
no_email_warning = creator
else:
no_email_warning = None
return person, no_email_warning
| 5,349,504 |
def _encode_query(items: dict, *, mask=False) -> str:
"""Encode a dict to query string per CLI specifications."""
pairs = []
for key in sorted(items.keys()):
value = _MASK if mask and key in _MASKED_PARAMS else items[key]
item = "{}={}".format(key, _quote(value))
# Ensure 'url' goes last per CLI spec
if key == "url":
pairs.append(item)
else:
pairs.insert(0, item)
return "&".join(pairs)
| 5,349,505 |
def spatial_shape_after_conv(input_spatial_shape, kernel_size, strides, dilation, padding):
""" This function calculates the spatial shape after conv layer.
The formula is obtained from: https://www.tensorflow.org/api_docs/python/tf/nn/convolution
It should be note that current function assumes PS is done before conv
:param input_spatial_shape:
:param kernel_size:
:param strides:
:param dilation:
:param padding:
:return:
"""
if isinstance(input_spatial_shape, (list, tuple)):
return [spatial_shape_after_conv(
one_shape, kernel_size, strides, dilation, padding) for one_shape in input_spatial_shape]
else:
if padding in ['same', 'SAME']:
return int(np.ceil(input_spatial_shape / strides))
else:
return int(np.ceil((input_spatial_shape - (kernel_size - 1) * dilation) / strides))
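# Worked example (arbitrary sizes, assuming numpy is imported as np above):
# with kernel_size=3, strides=2, dilation=1 the formula gives
#   'same' padding : ceil(32 / 2)           = 16
#   otherwise      : ceil((32 - 2 * 1) / 2) = 15
assert spatial_shape_after_conv(32, 3, 2, 1, 'same') == 16
assert spatial_shape_after_conv(32, 3, 2, 1, 'valid') == 15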
| 5,349,506 |
def sample_partition(dependency_tensor, null_distribution,
updates=100,
initial_partition=None
):
"""
Sample partition for a multilayer network with specified interlayer dependencies
:param dependency_tensor: dependency tensor
:param null_distribution: null distribution (function that takes a state-node as input and returns a random mesoset
assignment
:param updates: expected number of (pseudo-)Gibbs-sampling updates per state-node (has no effect for fully ordered
dependency tensor. (optional, default=100)
:param initial_partition: mapping of state-nodes to initial meso-set assignment.
(optional, default=sampled from null distribution)
:return: sampled partition as a mapping (dict) from state-nodes to meso-set assignments.
"""
if initial_partition is None:
partition = {node: null_distribution(node) for node in dependency_tensor.state_nodes()}
else:
partition = {node: initial_partition[node] for node in dependency_tensor.state_nodes()}
random_layers = list(dependency_tensor.random_aspect_layers())
if len(random_layers) <= 1:
n_updates = 1
else:
n_updates = updates * len(random_layers)
for ordered_layer in dependency_tensor.ordered_aspect_layers():
for it in range(n_updates):
random_layer = _rand.choice(random_layers)
layer = tuple(o+r for o, r in zip(ordered_layer, random_layer))
for node in dependency_tensor.state_nodes(layer):
update_node = dependency_tensor.getrandneighbour(node)
if update_node == node:
partition[node] = null_distribution(node)
else:
partition[node] = partition[update_node]
return partition
| 5,349,507 |
def normalize_spaces(s: str) -> str:
"""
連続する空白を1つのスペースに置き換え、前後の空白を削除した新しい文字列を取得する。
"""
return re.sub(r'\s+', ' ', s).strip()
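# Example: normalize_spaces("  a\t b\n  c ") returns 'a b c'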
| 5,349,508 |
def power_iter(mat_g, error_tolerance=1e-6, num_iters=100):
"""Power iteration.
Args:
mat_g: the symmetric PSD matrix.
error_tolerance: Iterative exit condition.
num_iters: Number of iterations.
Returns:
eigen vector, eigen value, num_iters
"""
mat_g_size = mat_g.shape[-1]
def _iter_condition(state):
i, unused_v, unused_s, unused_s_v, run_step = state
return jnp.logical_and(i < num_iters, run_step)
def _iter_body(state):
"""One step of power iteration."""
i, new_v, s, s_v, unused_run_step = state
new_v = new_v / jnp.linalg.norm(new_v)
s_v = jnp.einsum(
'ij,j->i', mat_g, new_v, precision=_INVERSE_PTH_ROOT_PRECISION)
s_new = jnp.einsum(
'i,i->', new_v, s_v, precision=_INVERSE_PTH_ROOT_PRECISION)
return (i + 1, s_v, s_new, s_v,
jnp.greater(jnp.abs(s_new - s), error_tolerance))
# Figure out how to use step as seed for random.
v_0 = onp.random.uniform(-1.0, 1.0, mat_g_size).astype(mat_g.dtype)
init_state = tuple([0, v_0, jnp.zeros([], dtype=mat_g.dtype), v_0, True])
num_iters, v_out, s_out, _, _ = lax.while_loop(
_iter_condition, _iter_body, init_state)
v_out = v_out / jnp.linalg.norm(v_out)
return v_out, s_out, num_iters
| 5,349,509 |
def _get_values_target_representation(
val: Union[str, Any],
target_representation: str,
conversion_type: str,
conversion_rate: float,
n_round: int,
split: bool,
input_symbol: str,
target_symbol: str,
) -> Any:
"""
Returns the value of the converted currency in the specified format.
The two formats specified are "abbr", "decimal".
"""
val_new = 0.0
val = float(val)
# 1. for fiat-to-fiat and crypto-to-fiat we multiply
# 2. for fiat-to-crypto we divide
if conversion_type in ("fiat_to_fiat", "crypto_to_fiat"):
val_new = val * conversion_rate
else:
val_new = val / conversion_rate
if target_representation == "abbr":
val = "{:,.{a}f}".format(val, a=n_round)
target_val = "{:,.{a}f}".format(val_new, a=n_round)
if split:
return val, target_val
else:
return input_symbol.upper() + str(val), target_symbol.upper() + str(target_val)
else:
return np.round(val, n_round), np.round(val_new, n_round)
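# Illustrative call (hypothetical symbols and rate): 100 USD at a rate of 1.2
# with two decimals and "abbr" formatting returns ('USD100.00', 'EUR120.00').
example = _get_values_target_representation(
    val="100", target_representation="abbr", conversion_type="fiat_to_fiat",
    conversion_rate=1.2, n_round=2, split=False,
    input_symbol="usd", target_symbol="eur",
)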
| 5,349,510 |
def GetResidues(mol, atom_list=None):
"""Create dictrionary that maps residues to atom IDs:
(res number, res name, chain id) --> [atom1 idx, atom2 idx, ...]
"""
residues = OrderedDict()
if atom_list is None:
atom_list = range(mol.GetNumAtoms())
for aid in atom_list:
res_id = GetAtomResidueId(mol.GetAtomWithIdx(aid))
if res_id not in residues:
residues[res_id] = []
residues[res_id].append(aid)
return residues
| 5,349,511 |
def _parse_tokenize(filter_rule):
"""Tokenizer for the attribute filtering language.
Most of the single-character tokens are specified in the
_tokenize_re; however, parentheses need to be handled specially,
because they can appear inside a check string. Thankfully, those
parentheses that appear inside a check string can never occur at
the very beginning or end ("%(variable)s" is the correct syntax).
"""
main_tokens = _tokenize_re.split(filter_rule)
index = 0
for tok in main_tokens:
# Skip empty tokens
if not tok or tok.isspace():
continue
# Handle leading parens on the token
clean = tok.lstrip('(')
for i in range(len(tok) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
tok = clean
# Handle trailing parens on the token
clean = tok.rstrip(')')
trail = len(tok) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in (';', 'and'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(tok) >= 2 and ((tok[0], tok[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', tok[1:-1]
else:
yield 'filter', _parse_filter(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'
if (index < len(main_tokens) - 1) and len(main_tokens) > 1:
yield 'and', 'and'
index += 1
| 5,349,512 |
def main(n):
"""Stream the video into a Kafka producer in an infinite loop"""
topic = choose_channel(n)
video_reader = imageio.get_reader(DATA + topic + '.mp4', 'ffmpeg')
metadata = video_reader.get_meta_data()
fps = metadata['fps']
producer = KafkaProducer(bootstrap_servers='localhost:9092',
batch_size=15728640,
linger_ms=1000,
max_request_size=15728640,
value_serializer=lambda v: json.dumps(v.tolist()))
while True:
video_loop(video_reader, producer, topic, fps)
| 5,349,513 |
def create_huoguoml_folders(huoguoml_path: str):
"""
Create HuoguoML folders if not exist
Args:
huoguoml_path: path to the huoguoml dir
NOTE: Required for later stages, when huoguoml folder gets larger
"""
os.makedirs(huoguoml_path, exist_ok=True)
| 5,349,514 |
def cli(obj, username, password, hostname, session):
""" CLI for the GAMS-Hydra application. """
obj['hostname'] = hostname
obj['username'] = username
obj['password'] = password
obj['session'] = session
| 5,349,515 |
def BIC(y_pred, y, k, llf = None):
"""Bayesian Information Criterion
Args:
y_pred (array-like)
y (array-like)
k (int): number of features
llf (float): result of log-likelihood function
"""
n = len(y)
if llf is None:
llf = np.log(SSE(y_pred, y))
return np.log(n) * k - 2 * llf
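# Minimal usage sketch: when llf is supplied, the SSE helper (not shown in this
# snippet) is never called, so the call below is self-contained.
# With n=100 observations, k=3 features and llf=-120.0:
#   BIC = ln(100) * 3 - 2 * (-120.0) ≈ 253.82
# BIC(y_pred=None, y=range(100), k=3, llf=-120.0)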
| 5,349,516 |
def create_abstract_insert(table_name, row_json, return_field=None):
"""Create an abstracted raw insert psql statement for inserting a single
row of data
:param table_name: String of a table_name
:param row_json: dictionary of ingestion data
:param return_field: String of the column name to RETURNING in statement
:return: String of an insert statement
"""
columns = []
for key, value in row_json.items():
if key in columns:
continue
else:
columns.append(key)
values = [':' + item for item in columns]
values = ', '.join(map(str, values))
list_columns = ', '.join(map(str, columns))
if return_field is not None:
statement = 'INSERT INTO ' + str(table_name) + '(' + list_columns + ')' \
+ ' VALUES (' + values + ') RETURNING ' + str(return_field)
else:
statement = 'INSERT INTO ' + str(table_name) + '(' + list_columns + ')' \
+ ' VALUES (' + values + ')'
return statement
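# Usage sketch (hypothetical table and columns); dict insertion order is preserved
# on Python 3.7+, so the generated column order matches row_json.
stmt = create_abstract_insert('users', {'name': 'Ada', 'age': 36}, return_field='id')
# stmt == 'INSERT INTO users(name, age) VALUES (:name, :age) RETURNING id'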
| 5,349,517 |
def get_childs(root_dir, is_dir=False, extension='.jpg', max_depth=0):
"""
get files or directories related root dir
:param root_dir:
:param is_dir:
:param extension:
:param max_depth:
:return:
"""
if os.path.exists(root_dir) is False:
raise FileNotFoundError("not exist dir : {}".format(root_dir))
target_items = []
childs, next_dirs = _get_sub_childs(root_dir, is_dir, extension)
target_items.extend(childs)
while max_depth > 0:
next_sub_dirs = []
for sub in next_dirs:
if not os.path.isdir(sub):
continue
sub_child_items, sub_dirs = _get_sub_childs(sub, is_dir, extension)
next_sub_dirs.extend(sub_dirs)
target_items.extend(sub_child_items)
max_depth -= 1
next_dirs = next_sub_dirs
return target_items
| 5,349,518 |
def descsum_create(s):
"""Add a checksum to a descriptor without"""
symbols = descsum_expand(s) + [0, 0, 0, 0, 0, 0, 0, 0]
checksum = descsum_polymod(symbols) ^ 1
return s + '#' + ''.join(CHECKSUM_CHARSET[(checksum >> (5 * (7 - i))) & 31] for i in range(8))
| 5,349,519 |
def test(device):
"""
Test if read_measured_values_raw() returns the expected values.
"""
device.start_measurement()
time.sleep(1.1)
# check the read values
raw_humidity, raw_temperature, raw_voc_ticks, raw_nox_ticks = \
device.read_measured_values_raw()
# raw types
assert type(raw_humidity) is Humidity
assert type(raw_humidity.ticks) is int
assert type(raw_humidity.percent_rh) is float
assert type(raw_temperature) is Temperature
assert type(raw_temperature.ticks) is int
assert type(raw_temperature.degrees_celsius) is float
assert type(raw_temperature.degrees_fahrenheit) is float
assert type(raw_voc_ticks) is int
assert type(raw_nox_ticks) is int
| 5,349,520 |
def _get_header(key):
"""Return message header"""
try:
return request.headers[key]
except KeyError:
abort(400, "Missing header: " + key)
| 5,349,521 |
def compute_solution(primes_list, triangle_sequence):
""" Auxiliary function to compute the solution to the problem.
"""
factorise_w_primes = partial(factorise, primes=primes_list)
all_factors = vmap(factorise_w_primes)(triangle_sequence)
# number of divisors = number of possible combinations of prime factors
# = inner product(number of states for each prime in a number)
# e.g. 1024 has 11 states for prime=2, and 1 state for the others
# 3072 has 11 states for prime=2 and 2 states for prime=3 -> 22 divisors
all_factors = all_factors + 1
n_combinations = jnp.prod(all_factors, axis=1).astype(jnp.int32)
return n_combinations
| 5,349,522 |
def key_description(character):
"""
Return the readable description for a key.
:param character: An ASCII character.
:return: Readable description for key.
"""
if "Windows" in platform.system():
for key, value in hex_keycodes.items():
if value == character:
return key
else:
return ""
else:
ascii_code = ord(chr(character))
if ascii_code < 32:
return 'Ctrl+{:c}'.format(ord('@') + ascii_code)
else:
return repr(character)
| 5,349,523 |
def find_paste_config():
"""Find freezer's paste.deploy configuration file.
freezer's paste.deploy configuration file is specified in the
``[paste_deploy]`` section of the main freezer-api configuration file,
``freezer-api.conf``.
For example::
[paste_deploy]
config_file = freezer-paste.ini
:returns: The selected configuration filename
:raises: exception.ConfigFileNotFound
"""
if CONF.paste_deploy.config_file:
paste_config = CONF.paste_deploy.config_file
if not os.path.isabs(paste_config):
paste_config = CONF.find_file(paste_config)
elif CONF.config_file:
paste_config = CONF.config_file[0]
else:
# this provides backwards compatibility for keystone.conf files that
# still have the entire paste configuration included, rather than just
# a [paste_deploy] configuration section referring to an external file
paste_config = CONF.find_file('freezer-api.conf')
if not paste_config or not os.path.exists(paste_config):
raise Exception('paste configuration file {0} not found !'.
format(paste_config))
return paste_config
| 5,349,524 |
def combine_index(df, n1, n2):
"""將dataframe df中的股票代號與股票名稱合併
Keyword arguments:
Args:
df (pandas.DataFrame): 此dataframe含有column n1, n2
n1 (str): 股票代號
n2 (str): 股票名稱
Returns:
df (pandas.DataFrame): 此dataframe的index為「股票代號+股票名稱」
"""
return df.set_index(df[n1].str.replace(' ', '') + \
' ' + df[n2].str.replace(' ', '')).drop([n1, n2], axis=1)
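# Small illustration with made-up data:
import pandas as pd
demo = pd.DataFrame({'code': ['2330 ', ' 0050'], 'name': ['TSMC', 'ETF50'], 'price': [600, 140]})
combined = combine_index(demo, 'code', 'name')
# combined.index -> ['2330 TSMC', '0050 ETF50']; the 'code' and 'name' columns are dropped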
| 5,349,525 |
def safe_get_stopwords(stopwords_language):
"""
:type stopwords_language: basestring
:rtype: list
"""
try:
return get_stopwords(stopwords_language)
except LanguageNotAvailable:
return []
| 5,349,526 |
def fetch_dataloader(types, data_dir, params):
"""
Fetches the DataLoader object for each type in types from data_dir.
Args:
types: (list) has one or more of 'train', 'val', 'test' depending on which data is required
data_dir: (string) directory containing the dataset
params: (Params) hyperparameters
Returns:
data: (dict) contains the DataLoader object for each type in types
"""
img_dimension = params.img_dimension
dataloaders = {}
train_transformer, eval_transformer = get_transformer(img_dimension)
for split in ['train', 'val', 'test']:
if split in types:
path = os.path.join(data_dir, split)
# use the train_transformer if training data, else use eval_transformer without random flip
if split == 'train':
dl = DataLoader(FaceMaskDataset(path, train_transformer),
num_workers=params.num_workers, batch_size=params.batch_size, shuffle=True,
pin_memory=params.cuda)
else:
dl = DataLoader(FaceMaskDataset(path, eval_transformer), batch_size=params.batch_size, shuffle=False,
num_workers=params.num_workers,
pin_memory=params.cuda)
dataloaders[split] = dl
return dataloaders
| 5,349,527 |
def setup_mock_device(mock_device):
"""Prepare mock ONVIFDevice."""
mock_device.async_setup = AsyncMock(return_value=True)
mock_device.available = True
mock_device.name = NAME
mock_device.info = DeviceInfo(
MANUFACTURER,
MODEL,
FIRMWARE_VERSION,
SERIAL_NUMBER,
MAC,
)
mock_device.capabilities = Capabilities()
mock_device.profiles = []
def mock_constructor(hass, config):
"""Fake the controller constructor."""
return mock_device
mock_device.side_effect = mock_constructor
| 5,349,528 |
def execution_environment(tmp_path: Path, config_yaml: Path, request: FixtureRequest) -> Generator[Path, None, None]:
"""Move to temporary directory with ./config.yml and ./output/ directory."""
copyfile(config_yaml, tmp_path / "config.yml")
(tmp_path / "output").mkdir()
os.chdir(tmp_path)
yield tmp_path
os.chdir(request.config.invocation_dir)
| 5,349,529 |
def hap_cli():
"""
Work with Haplotigs
"""
pass
| 5,349,530 |
def join(*args):
"""Join multiple path - join('c:', 'pp', 'c.txt') -> 'c:\pp\c.txt'"""
assert len(args) >= 2
ret_arg = args[0]
for arg in args[1:]:
ret_arg = os.path.join(ret_arg, arg)
return ret_arg
| 5,349,531 |
def for_loop(*args):
"""
Creates a for-loop container to be executed on the server
Parameters
----------
args : :class:`sasoptpy.abstract.Set` objects
Any number of :class:`sasoptpy.abstract.Set` objects can be given
Returns
-------
set_iterator : :class:`sasoptpy.abstract.SetIterator`, :class:`sasoptpy.abstract.SetIteratorGroup`
Set iterators to be used inside for-loop
Examples
--------
Regular for loop:
>>> with so.Workspace('w') as w:
>>> r = so.exp_range(1, 11)
>>> x = so.VariableGroup(r, name='x')
>>> for i in for_loop(r):
>>> x[i] = 1
>>> print(so.to_optmodel(w))
proc optmodel;
var x {{1,2,3,4,5,6,7,8,9,10}};
for {o13 in 1..10} do;
x[o13] = 1;
end;
quit;
Nested for loops:
>>> from sasoptpy.actions import put_item
>>> with so.Workspace('w') as w:
>>> for i in for_loop(range(1, 3)):
>>> for j in for_loop(['a', 'b']):
>>> put_item(i, j)
>>> print(so.to_optmodel(w))
proc optmodel;
for {o2 in 1..2} do;
for {o5 in {'a','b'}} do;
put o2 o5;
end;
end;
quit;
Multiple set for-loops:
>>> with so.Workspace('w') as w:
>>> r = so.Set(name='R', value=range(1, 11))
>>> c = so.Set(name='C', value=range(1, 6))
>>> a = so.ParameterGroup(r, c, name='A', ptype=so.number)
>>> for (i, j) in for_loop(r, c):
>>> a[i, j] = 1
>>> print(so.to_optmodel(w))
proc optmodel;
set R = 1..10;
set C = 1..5;
num A {R, C};
for {o5 in R, o7 in C} do;
A[o5, o7] = 1;
end;
quit;
See also
--------
:func:`sasoptpy.actions.cofor_loop`
Notes
-----
For tasks that can be run concurrently, consider using
:func:`sasoptpy.actions.cofor_loop`
"""
pass
| 5,349,532 |
def test_get_model_components_and_override_from_model_template_single():
"""Tests `get_model_components_and_override_from_model_template` for single model template"""
sst = SimpleSilverkiteTemplate()
model_components = sst._SimpleSilverkiteTemplate__get_model_components_and_override_from_model_template(
template="DAILY_CP_LT_FEASET_AUTO_AR_OFF",
model_components=ModelComponentsParam(
seasonality={"daily_seasonality": 12},
regressors={"regressor_cols": ["x"]}
)
)
# Checks it pulls the correct model template and overrides the parameters.
assert model_components[0] == ModelComponentsParam(
seasonality={
"yearly_seasonality": 8,
"quarterly_seasonality": 0,
"monthly_seasonality": 0,
"weekly_seasonality": 3,
"daily_seasonality": 12
},
growth={
"growth_term": "linear"
},
changepoints={
"changepoints_dict": {
"method": "auto",
"resample_freq": "7D",
"regularization_strength": 0.6,
"potential_changepoint_distance": "15D",
"no_changepoint_distance_from_end": "90D",
"yearly_seasonality_order": 15,
"yearly_seasonality_change_freq": None
},
"seasonality_changepoints_dict": None
},
events={
"holidays_to_model_separately": [],
"holiday_lookup_countries": [],
"holiday_pre_num_days": 0,
"holiday_post_num_days": 0,
"holiday_pre_post_num_dict": None,
"daily_event_df_dict": None,
},
custom={
"feature_sets_enabled": "auto",
"fit_algorithm_dict": {
"fit_algorithm": "linear",
"fit_algorithm_params": None
},
"max_daily_seas_interaction_order": 0,
"max_weekly_seas_interaction_order": 2,
"extra_pred_cols": [],
"min_admissible_value": None,
"max_admissible_value": None
},
autoregression={
"autoreg_dict": None
},
regressors={
"regressor_cols": ["x"]
},
lagged_regressors={
"lagged_regressor_dict": None
},
uncertainty={
"uncertainty_dict": None
},
hyperparameter_override={})
| 5,349,533 |
def branches(directory=None, verbose=False):
"""Show current branch points"""
config = migrate_manager.migrate_config.migrate.get_config(directory)
command.branches(config, verbose=verbose)
| 5,349,534 |
def test_lugaro_label_parser(val):
"""Parse a label."""
label = val[0]
expected = val[1]
assert lugaro.label_parser(label) == expected
| 5,349,535 |
def renyientropy(px,alpha=1,logbase=2,measure='R'):
"""
Renyi's generalized entropy
Parameters
----------
px : array-like
Discrete probability distribution of random variable X. Note that
px is assumed to be a proper probability distribution.
logbase : int or np.e, optional
Default is 2 (bits)
alpha : float or inf
The order of the entropy. The default is 1, which in the limit
is just Shannon's entropy. 2 is Renyi (Collision) entropy. If
the string "inf" or numpy.inf is specified the min-entropy is returned.
measure : str, optional
The type of entropy measure desired. 'R' returns Renyi entropy
measure. 'T' returns the Tsallis entropy measure.
Returns
-------
1/(1-alpha)*np.log(sum(px**alpha))
In the limit as alpha -> 1, Shannon's entropy is returned.
In the limit as alpha -> inf, min-entropy is returned.
"""
#TODO:finish returns
#TODO:add checks for measure
if not _isproperdist(px):
print("px is not a proper probability distribution")
alpha = float(alpha)
# note: the limiting cases alpha == 1 and alpha == inf are not handled in this snippet
px = px**alpha
genent = np.log2(px.sum())
if logbase == 2:
return 1/(1-alpha) * genent
else:
return 1/(1-alpha) * logbasechange(2, logbase) * genent
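# Worked check (uniform distribution over four outcomes, alpha=2): sum(px**2) = 0.25,
# so H_2 = 1/(1-2) * log2(0.25) = 2 bits, which equals the Shannon entropy of this
# distribution. Assuming numpy is imported as np and _isproperdist accepts the input:
# renyientropy(np.array([0.25, 0.25, 0.25, 0.25]), alpha=2)  # -> 2.0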
| 5,349,536 |
def convert(digits, base1, base2):
"""Convert given digits in base1 to digits in base2.
digits: str -- string representation of number (in base1)
base1: int -- base of given number
base2: int -- base to convert to
return: str -- string representation of number (in base2)"""
# Handle up to base 36 [0-9a-z]
assert 2 <= base1 <= 36, 'base1 is out of range: {}'.format(base1)
assert 2 <= base2 <= 36, 'base2 is out of range: {}'.format(base2)
return encode(decode(digits, base1), base2)
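# The encode/decode helpers are not included in this snippet; a minimal sketch of
# what they might look like (digits drawn from 0-9a-z, lowercase), plus a usage line:
import string

_DIGITS = string.digits + string.ascii_lowercase

def decode(digits, base):
    """Interpret `digits` (a string in the given base) as an integer."""
    number = 0
    for char in digits.lower():
        number = number * base + _DIGITS.index(char)
    return number

def encode(number, base):
    """Render a non-negative integer as a string of digits in the given base."""
    if number == 0:
        return '0'
    out = []
    while number > 0:
        number, remainder = divmod(number, base)
        out.append(_DIGITS[remainder])
    return ''.join(reversed(out))

# convert('ff', 16, 2) -> '11111111'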
| 5,349,537 |
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Fast.com sensor."""
data = SpeedtestData(hass, config)
sensor = SpeedtestSensor(data)
add_devices([sensor])
def update(call=None):
"""Update service for manual updates."""
data.update(dt_util.now())
sensor.update()
hass.services.register(DOMAIN, 'update_fastdotcom', update)
| 5,349,538 |
def reset_sensors():
"""
Resets sensors
"""
if DEBUG:
print("\tResets all gopigo sensors")
dexgp = gopigo3.GoPiGo3()
dexgp.reset_all()
| 5,349,539 |
def visualizeVoxels(voxelGrids, frameRate=10):
"""
Args:
voxelGrid (tensor): 4D tensor indicating (batchSize x zSize x ySize x xSize)
frameRate (float) : rate at which the plot should update (in Hz)
"""
batchSize = voxelGrids.shape[0]
print("Batch Size = ",batchSize)
# batchSize = voxelGrid.shape[0]
print("X0 = ",voxelGrids[0][2])
for i in range(0, batchSize):
# creating a dummy dataset
voxelGrid = voxelGrids[i]
xSize = int(voxelGrids.shape[3])
ySize = int(voxelGrids.shape[2])
zSize = int(voxelGrids.shape[1])
# print("Size = ",int(xSize),ySize,zSize)
x = []
y = []
z = []
colo = []
for j in range (0, xSize):
for k in range (0, ySize):
for l in range (0, zSize):
if voxelGrid[l][k][j] > 0.5:
x.append(j)
y.append(k)
z.append(l)
colo.append(voxelGrid[l][k][j])
else:
continue
# creating figures
print("Creating Figures")
fig = plt.figure(figsize=(100, 100))
ax = fig.add_subplot(111, projection='3d')
# setting color bar
color_map = cm.ScalarMappable(cmap=cm.Greens_r)
color_map.set_array(colo)
# creating the heatmap
img = ax.scatter(x, y, z, marker='s',
s=200, color='green')
plt.colorbar(color_map)
# adding title and labels
ax.set_title("3D Heatmap")
ax.set_xlabel('X-axis')
ax.set_ylabel('Y-axis')
ax.set_zlabel('Z-axis')
plt.show(block=False)
plt.pause(1/frameRate)
import time
time.sleep(2)
plt.close()
| 5,349,540 |
def h(b, W ,X):
"""
This function implements the softmax regression hypothesis function
Argument:
b -- bias
W -- predictive weight matrix
X -- data matrix of size (numbers_examples, number_predictors)
Returns:
softmax(XW + b)
"""
return softmax( (X @ W) + b)
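# `softmax` is not defined in this snippet; a small, numerically stable sketch plus
# a shape check (the array shapes below are illustrative):
import numpy as np

def softmax(z):
    """Row-wise softmax of a 2D array."""
    z = z - z.max(axis=1, keepdims=True)  # shift for numerical stability
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)

X = np.random.randn(5, 3)   # (n_examples, n_predictors)
W = np.zeros((3, 4))        # (n_predictors, n_classes)
b = np.zeros(4)             # (n_classes,)
probs = h(b, W, X)          # shape (5, 4); each row sums to 1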
| 5,349,541 |
def get_random_points(N):
"""
- Takes the number of points N
- Returns tuple (x1, x2), where x1 and x2 are vectors of N uniform samples in [-1, 1]
"""
x1 = np.random.uniform(-1,1,N)
x2 = np.random.uniform(-1,1,N)
return (x1,x2)
| 5,349,542 |
def n_bit_color(clk, din, vga_signals, vga_ports):
"""
Maps n bit input, din, to n bit vga color ports
Ex: din=10010101, r=100, g=101, b=01
"""
blu = len(vga_ports.blu)
grn = len(vga_ports.grn) + blu
red = len(vga_ports.red) + grn
assert len(din) == red
@always(clk.posedge)
def colors():
vga_ports.h_sync.next = vga_signals.h_sync
vga_ports.v_sync.next = vga_signals.v_sync
vga_ports.red.next = 0
vga_ports.grn.next = 0
vga_ports.blu.next = 0
if vga_signals.video_on == 1:
vga_ports.red.next = din[red:grn]
vga_ports.grn.next = din[grn:blu]
vga_ports.blu.next = din[blu:0]
return colors
| 5,349,543 |
def point_cloud_transform_net(point_cloud: nn.Variable, train: bool) -> Tuple[nn.Variable, Dict[str, nn.Variable]]:
"""T net, create transformation matrix for point cloud
Args:
point_cloud (nn.Variable): point cloud, shape(batch, number of points, 3)
train (bool): training flag
Returns:
Tuple[nn.Variable, Dict[str, nn.Variable]]: transformation matrix and internal variables
"""
batch_size, num_points, _ = point_cloud.shape
# expand dim to B*C(=K)*H(=num_points)*W(=dim)
point_cloud = F.reshape(point_cloud, shape=(batch_size, 1, num_points, 3))
with nn.parameter_scope("conv1"):
conv_h1 = PF.convolution(
point_cloud, 64, (1, 3), stride=(1, 1), with_bias=False)
conv_h1 = PF.batch_normalization(conv_h1, batch_stat=train)
conv_h1 = F.relu(conv_h1)
with nn.parameter_scope("conv2"):
conv_h2 = PF.convolution(conv_h1, 128, (1, 1),
stride=(1, 1), with_bias=False)
conv_h2 = PF.batch_normalization(conv_h2, batch_stat=train)
conv_h2 = F.relu(conv_h2)
with nn.parameter_scope("conv3"):
conv_h3 = PF.convolution(
conv_h2, 1024, (1, 1), stride=(1, 1), with_bias=False)
conv_h3 = PF.batch_normalization(conv_h3, batch_stat=train)
conv_h3 = F.relu(conv_h3)
pool_h = F.max_pooling(conv_h3, (num_points, 1))
pool_h = F.reshape(pool_h, (batch_size, -1))
with nn.parameter_scope("affine1"):
affine_h1 = PF.affine(pool_h, 512, with_bias=False)
affine_h1 = PF.batch_normalization(affine_h1, batch_stat=train)
affine_h1 = F.relu(affine_h1)
with nn.parameter_scope("affine2"):
affine_h2 = PF.affine(affine_h1, 256, with_bias=False)
affine_h2 = PF.batch_normalization(affine_h2, batch_stat=train)
affine_h2 = F.relu(affine_h2)
with nn.parameter_scope("affine3"):
# transform points (3 dim) so the matrix size is (3*3)
transform_h = PF.affine(affine_h2, 3 * 3)
eye_mat = nn.Variable.from_numpy_array(
np.array([1, 0, 0, 0, 1, 0, 0, 0, 1], dtype=np.float32))
eye_mat = F.reshape(eye_mat, (1, 9))
transform_h = transform_h + eye_mat
transform_h = F.reshape(transform_h, (batch_size, 3, 3))
return transform_h, {
"conv_h1": conv_h1,
"conv_h2": conv_h2,
"conv_h3": conv_h3,
"pool_h": pool_h,
"affine_h1": affine_h1,
"affine_h2": affine_h2,
"transform_h": transform_h,
}
| 5,349,544 |
def assert_array_less(x: numpy.float64, y: numpy.float64, err_msg: str):
"""
usage.scipy: 2
"""
...
| 5,349,545 |
def diffie_hellman_server(p, g, public_key_pem):
"""
Function used to apply the Diffie Hellman algorithm in the server.
It calculates the private and public components of server.
:param p: Shared parameter
:param g: Shared parameter
:param public_key_pem: Public component of client
:return: The private component and the public component
"""
pn = dh.DHParameterNumbers(p, g)
parameters = pn.parameters(default_backend())
private_key = parameters.generate_private_key()
public_key = private_key.public_key()
p = parameters.parameter_numbers().p
g = parameters.parameter_numbers().g
public_key_pem = public_key.public_bytes(
serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo
)
logger.debug(f"My Public Key: {public_key}")
logger.debug(f"My Public Key in Bytes: {public_key_pem}")
return private_key, public_key_pem
| 5,349,546 |
def posix_getpgid(space, pid):
""" posix_getpgid - Get process group id for job control """
try:
return space.newint(os.getpgid(pid))
except OSError as e:
space.set_errno(e.errno)
return space.newbool(False)
except OverflowError:
return space.newbool(False)
| 5,349,547 |
def test_list_double_length_4_nistxml_sv_iv_list_double_length_5_4(mode, save_output, output_format):
"""
Type list/double is restricted by facet length with value 10.
"""
assert_bindings(
schema="nistData/list/double/Schema+Instance/NISTSchema-SV-IV-list-double-length-5.xsd",
instance="nistData/list/double/Schema+Instance/NISTXML-SV-IV-list-double-length-5-4.xml",
class_name="NistschemaSvIvListDoubleLength5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 5,349,548 |
def on_display_disconnected():
"""Shortcut for registering handlers for ACTION_DISPLAY_DISCONNECTED events.
Functions decorated with this decorator will be called when push2-python loses connection with the Push2
display. It will have the following positional arguments:
* Push2 object instance
Examples:
@push2_python.on_display_disconnected()
def function(push):
print('Connection with Push2 display was just lost!')
"""
return action_handler(ACTION_DISPLAY_DISCONNECTED)
| 5,349,549 |
def parse_received_data(blockpage_matcher: BlockpageMatcher,
received: Union[str, Dict[str,
Any]], anomaly: bool) -> Row:
"""Parse a received field into a section of a row to write to bigquery.
Args:
blockpage_matcher: Matcher object
received: a dict parsed from json data, or a str
anomaly: whether data may indicate blocking
Returns:
a dict containing the 'received_' keys/values in SCAN_BIGQUERY_SCHEMA
"""
if isinstance(received, str):
row: Row = {'received_status': received}
_add_blockpage_match(blockpage_matcher, received, anomaly, row)
return row
row = {
'received_status': received['status_line'],
'received_body': received['body'],
'received_headers': parse_received_headers(received.get('headers', {})),
}
full_http_response = _reconstruct_http_response(row)
_add_blockpage_match(blockpage_matcher, full_http_response, anomaly, row)
# hyperquack v1 TLS format
tls = received.get('tls', None)
if tls:
tls_row = {
'received_tls_version': tls['version'],
'received_tls_cipher_suite': tls['cipher_suite'],
'received_tls_cert': tls['cert']
}
row.update(tls_row)
# hyperquack v2 TLS format
if 'TlsVersion' in received:
tls_row = {
'received_tls_version': received['TlsVersion'],
'received_tls_cipher_suite': received['CipherSuite'],
'received_tls_cert': received['Certificate']
}
row.update(tls_row)
return row
| 5,349,550 |
def change_cwd(change_to):
"""A context manager to temporarily change current working directory
:param change_to: Path to change current working directory to
:type change_to: ``str``
"""
curdir = os.getcwd()
os.chdir(change_to)
try:
yield
finally:
os.chdir(curdir)
| 5,349,551 |
def get_filters():
""" Returns sidebar filters """
filters = {
'organisations': Organisation.objects.all(),
'topics': Topic.objects.all(),
'licenses': License.objects.all(),
'formats': Format.objects.all()
}
return filters
| 5,349,552 |
def irdl_op_builder(cls: typing.Type[OpT], operands: List,
operand_defs: List[Tuple[str, OperandDef]],
res_types: List, res_defs: List[Tuple[str, ResultDef]],
attributes: typing.Dict[str, typing.Any],
attr_defs: typing.Dict[str, AttributeDef], successors,
regions, options) -> OpT:
"""Builder for an irdl operation."""
# irdl is needed to define DenseIntOrFPElementsAttr, but DenseIntOrFPElementsAttr is needed here,
# so we have a circular dependency that we solve by importing inside this function.
from xdsl.dialects.builtin import DenseIntOrFPElementsAttr, IntegerAttr, VectorType, IntegerType, i32
# Build operands by forwarding the values to SSAValue.get
if len(operand_defs) != len(operands):
raise ValueError(
f"Expected {len(operand_defs)} operands, got {len(operands)}")
built_operands = []
for ((_, operand_def), operand) in zip(operand_defs, operands):
if isinstance(operand_def, VarOperandDef):
if not isinstance(operand, list):
raise ValueError(
f"Expected list for variadic operand builder, got {operand}"
)
built_operands.extend([SSAValue.get(arg) for arg in operand])
else:
built_operands.append(SSAValue.get(operand))
# Build results by forwarding the values to the attribute builders
if len(res_defs) != len(res_types):
raise ValueError(
f"Expected {len(res_defs)} results, got {len(res_types)}")
built_res_types = []
for ((_, res_def), res_type) in zip(res_defs, res_types):
if isinstance(res_def, VarResultDef):
if not isinstance(res_type, list):
raise ValueError(
f"Expected list for variadic result builder, got {res_type}"
)
built_res_types.extend([
irdl_build_attribute(res_def.constr, res) for res in res_type
])
else:
built_res_types.append(
irdl_build_attribute(res_def.constr, res_type))
# Build attributes by forwarding the values to the attribute builders
attr_defs = {name: def_ for (name, def_) in attr_defs}
built_attributes = dict()
for attr_name, attr in attributes.items():
if attr_name not in attr_defs:
if isinstance(attr, Attribute):
built_attributes[attr_name] = attr
continue
raise ValueError(
f"Unexpected attribute name {attr_name} for operation {cls.name}"
)
built_attributes[attr_name] = irdl_build_attribute(
attr_defs[attr_name].constr, attr)
# Take care of variadic operand and result segment sizes.
if AttrSizedOperandSegments() in options:
sizes = [
(len(operand) if isinstance(operand_def, VarOperandDef) else 1)
for operand, (_, operand_def) in zip(operands, operand_defs)
]
built_attributes[AttrSizedOperandSegments.attribute_name] =\
DenseIntOrFPElementsAttr.vector_from_list(sizes, i32)
if AttrSizedResultSegments() in options:
sizes = [(len(result) if isinstance(result_def, VarResultDef) else 1)
for result, (_, result_def) in zip(res_types, res_defs)]
built_attributes[AttrSizedResultSegments.attribute_name] =\
DenseIntOrFPElementsAttr.vector_from_list(sizes, i32)
# Build regions using `Region.get`.
regions = [Region.get(region) for region in regions]
return cls.create(operands=built_operands,
result_types=built_res_types,
attributes=built_attributes,
successors=successors,
regions=regions)
| 5,349,553 |
def _listify(single: st.SearchStrategy) -> st.SearchStrategy:
"""
Put the result of `single` strategy into a list
(all strategies should return lists)
"""
@st.composite
def listify_(draw):
return [draw(single)]
strategy = listify_()
strategy.function.__name__ = f"listified<{repr(single)}>"
return strategy
| 5,349,554 |
def get_text(selector):
"""
Type the keys specified into the element, or the currently active element.
"""
if not get_instance():
raise Exception("You need to start a browser first with open_browser()")
return get_text_g(get_instance(), selector)
| 5,349,555 |
def _eps(code, version, file_or_path, scale=1, module_color=(0, 0, 0),
background=None, quiet_zone=4):
"""This function writes the QR code out as an EPS document. The
code is drawn by drawing only the modules corresponding to a 1. They
are drawn using a line, such that contiguous modules in a row
are drawn with a single line. The file parameter is used to
specify where to write the document to. It can either be a writable (text)
stream or a file path. The scale parameter is sets how large to draw
a single module. By default one point (1/72 inch) is used to draw a single
module. This may make the code to small to be read efficiently.
Increasing the scale will make the code larger. This function will accept
fractional scales (e.g. 2.5).
:param module_color: Color of the QR code (default: ``(0, 0, 0)`` (black))
The color can be specified as triple of floats (range: 0 .. 1) or
triple of integers (range: 0 .. 255) or as hexadecimal value (i.e.
``#36c`` or ``#33B200``).
:param background: Optional background color.
(default: ``None`` (no background)). See `module_color` for the
supported values.
:param quiet_zone: Border around the QR code (also known as quiet zone)
(default: ``4``). Set to zero (``0``) if the code shouldn't
have a border.
"""
from functools import partial
import time
import textwrap
def write_line(writemeth, content):
"""\
Writes `content` and ``LF``.
"""
# Postscript: Max. 255 characters per line
for line in textwrap.wrap(content, 255):
writemeth(line)
writemeth('\n')
def line(offset, length):
"""\
Returns coordinates to draw a line with the provided length.
"""
res = ''
if offset > 0:
res = ' {0} 0 m'.format(offset)
res += ' {0} 0 l'.format(length)
return res
def rgb_to_floats(color):
"""\
Converts the provided color into an acceptable format for Postscript's
``setrgbcolor``
"""
def to_float(clr):
if isinstance(clr, float):
if not 0.0 <= clr <= 1.0:
raise ValueError('Invalid color "{0}". Not in range 0 .. 1'
.format(clr))
return clr
if not 0 <= clr <= 255:
raise ValueError('Invalid color "{0}". Not in range 0 .. 255'
.format(clr))
return 1/255.0 * clr if clr != 1 else clr
if not isinstance(color, (tuple, list)):
color = _hex_to_rgb(color)
return tuple([to_float(i) for i in color])
f, autoclose = _get_writable(file_or_path, 'w')
writeline = partial(write_line, f.write)
size = tables.version_size[version] * scale + (2 * quiet_zone * scale)
# Write common header
writeline('%!PS-Adobe-3.0 EPSF-3.0')
writeline('%%Creator: PyQRCode <https://pypi.python.org/pypi/PyQRCode/>')
writeline('%%CreationDate: {0}'.format(time.strftime("%Y-%m-%d %H:%M:%S")))
writeline('%%DocumentData: Clean7Bit')
writeline('%%BoundingBox: 0 0 {0} {0}'.format(size))
# Write the shortcuts
writeline('/M { moveto } bind def')
writeline('/m { rmoveto } bind def')
writeline('/l { rlineto } bind def')
mod_color = module_color if module_color == (0, 0, 0) else rgb_to_floats(module_color)
if background is not None:
writeline('{0:f} {1:f} {2:f} setrgbcolor clippath fill'
.format(*rgb_to_floats(background)))
if mod_color == (0, 0, 0):
# Reset RGB color back to black iff module color is black
# In case module color != black set the module RGB color later
writeline('0 0 0 setrgbcolor')
if mod_color != (0, 0, 0):
writeline('{0:f} {1:f} {2:f} setrgbcolor'.format(*mod_color))
if scale != 1:
writeline('{0} {0} scale'.format(scale))
writeline('newpath')
# Current pen position y-axis
# Note: 0, 0 = lower left corner in PS coordinate system
y = tables.version_size[version] + quiet_zone + .5 # .5 = linewidth / 2
last_bit = 1
# Loop through each row of the code
for row in code:
offset = 0 # Set x-offset of the pen
length = 0
y -= 1 # Move pen along y-axis
coord = '{0} {1} M'.format(quiet_zone, y) # Move pen to initial pos
for bit in row:
if bit != last_bit:
if length:
coord += line(offset, length)
offset = 0
length = 0
last_bit = bit
if bit == 1:
length += 1
else:
offset += 1
if length:
coord += line(offset, length)
writeline(coord)
writeline('stroke')
writeline('%%EOF')
if autoclose:
f.close()
| 5,349,556 |
def test_generate(
stemming_file, lexicon_file, test_file,
global_tags=None, debug=False
):
"""
generates all the forms in the test_file using the lexicon_file and
stemming_file and outputs any discrepancies (or everything if debug on)
"""
ginflexion = GreekInflexion(stemming_file, lexicon_file)
with open(test_file) as f:
for test in yaml.safe_load(f):
source = test.pop("source", None)
test.pop("test_length", False)
lemma = test.pop("lemma")
tags = set(test.pop("tags", []))
if source:
tags.update({source})
if global_tags:
tags.update(global_tags)
segmented_lemma = ginflexion.segmented_lemmas.get(lemma)
for key, form in sorted(test.items()):
stem = ginflexion.find_stems(lemma, key, tags)
generated = ginflexion.generate(lemma, key, tags)
if stem:
stem_guess = None
else:
stem_guess = [
stem for key, stem in
ginflexion.possible_stems(form, "^" + key + "$")]
if [strip_length(w) for w in sorted(generated)] == \
[strip_length(w) for w in sorted(form.split("/"))]:
correct = "✓"
else:
correct = "✕"
if debug or correct == "✕":
output_item(
lemma, segmented_lemma, key, None, form, None, stem,
stem_guess, None, None, generated, correct)
| 5,349,557 |
def _group_energy_terms(ener_xvg):
"""Parse energy.xvg file to extract and group the energy terms in a dict. """
with open(ener_xvg) as f:
all_lines = f.readlines()
energy_types = [line.split('"')[1] for line in all_lines if line[:3] == '@ s']
energy_values = [float(x) * units.kilojoule_per_mole for x in all_lines[-1].split()[1:]]
e_out = OrderedDict(zip(energy_types, energy_values))
# Discard non-energy terms.
for group in unwanted:
if group in e_out:
del e_out[group]
# Dispersive energies.
# TODO: Do buckingham energies also get dumped here?
dispersive = ['LJ (SR)', 'LJ-14', 'Disper. corr.']
e_out['Dispersive'] = 0 * units.kilojoules_per_mole
for group in dispersive:
if group in e_out:
e_out['Dispersive'] += e_out[group]
# Electrostatic energies.
electrostatic = ['Coulomb (SR)', 'Coulomb-14', 'Coul. recip.']
e_out['Electrostatic'] = 0 * units.kilojoules_per_mole
for group in electrostatic:
if group in e_out:
e_out['Electrostatic'] += e_out[group]
e_out['Non-bonded'] = e_out['Electrostatic'] + e_out['Dispersive']
all_angles = ['Angle', 'U-B', 'G96Angle', 'Restricted Angles', 'Bond-Cross',
'BA-Cross', 'Quartic Angles']
e_out['All angles'] = 0 * units.kilojoules_per_mole
for group in all_angles:
if group in e_out:
e_out['All angles'] += e_out[group]
all_dihedrals = ['Ryckaert-Bell.', 'Proper Dih.', 'Improper Dih.']
e_out['All dihedrals'] = 0 * units.kilojoules_per_mole
for group in all_dihedrals:
if group in e_out:
e_out['All dihedrals'] += e_out[group]
return e_out, ener_xvg
| 5,349,558 |
def test_list_boolean_pattern_3_nistxml_sv_iv_list_boolean_pattern_4_5(mode, save_output, output_format):
"""
Type list/boolean is restricted by facet pattern with value [1]{1}
false [0]{1} true true [0]{1}.
"""
assert_bindings(
schema="nistData/list/boolean/Schema+Instance/NISTSchema-SV-IV-list-boolean-pattern-4.xsd",
instance="nistData/list/boolean/Schema+Instance/NISTXML-SV-IV-list-boolean-pattern-4-5.xml",
class_name="NistschemaSvIvListBooleanPattern4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 5,349,559 |
def get_packager_targets(
targets: List[Target], connections: Dict[str, Connection], remote_api: ConnectionClient
) -> List[PackagerTarget]:
"""
Build targets for calling packager. Fetch and base64 decode connections by names using local manifest and
ODAHU connections API
:param targets: Targets from packaging manifest
:param connections: Connections found in local manifest files
:param remote_api: ConnectionClient to fetch missing Connections
"""
packager_targets: List[PackagerTarget] = []
for t in targets:
conn = connections.get(t.connection_name)
if not conn:
click.echo(
f'The "{t.connection_name}" connection of "{t.name}" target is not found in the manifest files. '
f'Trying to retrieve it from API server'
)
conn = remote_api.get_decrypted(t.connection_name)
_decode_connection(conn)
packager_targets.append(
PackagerTarget(conn, t.name)
)
return packager_targets
| 5,349,560 |
def test_name2idfobject():
"""py.test for name2idfobject"""
idf = IDF(StringIO(""))
plantloopname = "plantloopname"
branchname = "branchname"
pumpname = "pumpname"
zonename = "zonename"
plantloop = idf.newidfobject('PlantLoop',
Name=plantloopname,
Plant_Side_Inlet_Node_Name='CW Supply Inlet Node')
branch = idf.newidfobject('Branch',
Name=branchname,
Component_1_Inlet_Node_Name='CW Supply Inlet Node')
pump = idf.newidfobject('Pump:VariableSpeed',
Name=pumpname,
Inlet_Node_Name='CW Supply Inlet Node')
zone = idf.newidfobject('zone', Name=zonename)
simulation = idf.newidfobject('SimulationControl')
# - test
names = [plantloopname, branchname, pumpname, zonename]
idfobjs = [plantloop, branch, pump, zone]
for name, idfobj in zip(names, idfobjs):
result = idf_helpers.name2idfobject(idf, Name=name)
assert result == idfobj
# test when objkeys!=None
objkey = 'ZoneHVAC:EquipmentConnections'
equipconnections = idf.newidfobject(objkey,
Zone_Name=zonename)
result = idf_helpers.name2idfobject(idf, Zone_Name=zonename,
objkeys=[objkey, ])
assert result == equipconnections
| 5,349,561 |
def export_documents(documents, csvfilepath):
"""
Writes the documents data in `documents` to the CSV file at `csvfilepath`.
"""
with open(csvfilepath, "w") as csv_file:
csvwriter = csv.DictWriter(csv_file, CURRICULUM_DOCUMENT_HEADER_V0)
csvwriter.writeheader()
for document in documents:
rowdict = document_to_rowdict(document)
csvwriter.writerow(rowdict)
| 5,349,562 |
def create_installer_repo():
"""Execute setup.sh corresponding to contrail-installer-packages in
all nodes
"""
execute("create_installer_repo_node", env.host_string)
| 5,349,563 |
def elina_texpr0_permute_dimensions(texpr2, dimperm):
"""
Permute dimensions of an ElinaTexpr0 following the semantics of an ElinaDimperm.
Parameters
----------
texpr2 : ElinaTexpr0Ptr
Pointer to the ElinaTexpr0 which dimensions we want to permute.
dimperm : ElinaDimpermPtr
Pointer to the ElinaDimperm which semantics we want to follow.
Returns
-------
texpr1 : ElinaTexpr0Ptr
Pointer to the newly created ElinaTexpr0 with permuted dimensions.
"""
texpr1 = None
try:
elina_texpr0_permute_dimensions_c = elina_auxiliary_api.elina_texpr0_permute_dimensions
elina_texpr0_permute_dimensions_c.restype = ElinaTexpr0Ptr
elina_texpr0_permute_dimensions_c.argtypes = [ElinaTexpr0Ptr, ElinaDimpermPtr]
texpr1 = elina_texpr0_permute_dimensions_c(texpr2, dimperm)
except:
print('Problem with loading/calling "elina_texpr0_permute_dimensions" from "libelinaux.so"')
print('Make sure you are passing ElinaTexpr0Ptr, ElinaDimpermPtr to the function')
return texpr1
| 5,349,564 |
def rgb(red: int, green: int, blue: int, background: bool = False) -> Chalk:
"""Generate a new truecolor chalk from an RGB tuple.
Args:
red (int):
The intensity of red (0-255).
green (int):
The intensity of green (0-255).
blue (int):
The intensity of blue (0-255).
background (bool, optional):
If ``True`` will generate the new chalk to be applied as a background color.
Defaults to False.
Returns:
:class:`~.chalk.Chalk`:
The new chalk instance.
"""
color = TrueColor(red, green, blue)
return Chalk(background=color) if background else Chalk(foreground=color)
| 5,349,565 |
def add_log_arguments(parser: argparse.ArgumentParser):
"""Adds arguments for setting up the logger to the given argument parser.
Arguments added:
- log_name
- log_filename
- log_dir
- log_level
Params:
- parser (argparse.ArgumentParser): The argument parser to which to add the logger arguments
"""
group = parser.add_argument_group('Logging Args')
group.add_argument('-ln', '--log_name', default=constants.LOG_NAME,
help="The name of the logger to be used. Defaults to %s" % constants.LOG_NAME)
group.add_argument('-lf', '--log_filename', default=constants.LOG_FILENAME,
help='The name of the file to which the logging will be done.')
group.add_argument('-ld', '--log_dir', default=constants.LOG_DIR,
help='The path to the directory where the log file will be stored.')
group.add_argument('-ll', '--log_level', default=constants.LOG_LEVEL,
help='The level at which the logger logs data.')
| 5,349,566 |
def first(x: pd.Series) -> pd.Series:
"""
First value of series
:param x: time series
:return: time series of first value
**Usage**
Return series with first value of `X` for all dates:
:math:`R_t = X_0`
where :math:`X_0` is the first value in the series
**Examples**
First value of series:
>>> series = generate_series(100)
>>> returns = first(series)
**See also**
:func:`last`
"""
return pd.Series(x[0], x.index)
| 5,349,567 |
def simulate(iterations, graph_generator, graph_params, n_nodes, beta, rho, steps, n_infected_init, vacc=None):
"""Perform `iterations` simulations and compute averages. If vacc is not
None, run the simulation using the SIRV model, otherwise use SIR."""
# Initialize arrays for computing averages over simulations
s = np.zeros((iterations, steps + 1), dtype=int)
i = np.zeros((iterations, steps + 1), dtype=int)
r = np.zeros((iterations, steps + 1), dtype=int)
ni = np.zeros((iterations, steps + 1), dtype=int)
if vacc is not None:
v = np.zeros((iterations, steps + 1), dtype=int)
nv = np.zeros((iterations, steps + 1), dtype=int)
for sim_id in range(iterations):
graph = graph_generator(**{'n': n_nodes, **graph_params})
if vacc is not None:
epidemic = Epidemic('sirv', graph, steps,
beta=beta, rho=rho, n_infected_init=n_infected_init, vacc=vacc)
else:
epidemic = Epidemic('sir', graph, steps,
beta=beta, rho=rho, n_infected_init=n_infected_init)
sim = epidemic.simulate()
# Compute four (steps, ) array containing the total number, at each
# step, of susceptible (S), infected (I), recovered (R) and vaccinated
# (V) respectively.
s[sim_id] = np.ma.masked_not_equal(sim, 0).count(axis=1)
i[sim_id] = np.ma.masked_not_equal(sim, 1).count(axis=1)
r[sim_id] = np.ma.masked_not_equal(sim, 2).count(axis=1)
if vacc is not None:
v[sim_id] = np.ma.masked_not_equal(sim, 3).count(axis=1)
# Compute a (steps, ) array containing the number of newly infected
# individuals at each step. The number of newly infected at time t is
# defined as the sum of nodes that went from state 0 (S) at time t-1
# to state 1 (I) at time t.
ni[sim_id] = np.array(
[n_infected_init] + [((sim[t - 1] == 0) & (sim[t] == 1)).sum() for t in range(1, steps + 1)],
dtype=int)
# Compute the same kind of array for newly vaccinated individuals.
if vacc is not None:
nv[sim_id] = np.array(
[v[sim_id, 0]] + [((sim[t - 1] != 3) & (sim[t] == 3)).sum() for t in range(1, steps + 1)],
dtype=int)
# Compute the average total number of susceptible, infected, recovered and
# vaccinated nodes at each week.
s = s.mean(axis=0)
i = i.mean(axis=0)
r = r.mean(axis=0)
if vacc is not None:
v = v.mean(axis=0)
# Compute the average number of newly infected and vaccinated individuals
# each week.
ni = ni.mean(axis=0)
if vacc is not None:
nv = nv.mean(axis=0)
if vacc is not None:
return s, i, r, v, ni, nv
else:
return s, i, r, ni
| 5,349,568 |
def check_collections_are_supported(saved_model_handler, supported):
"""Checks that SavedModelHandler only uses supported collections."""
for meta_graph in saved_model_handler.meta_graphs:
used_collection_keys = set(meta_graph.collection_def.keys())
unsupported = used_collection_keys - supported
if unsupported:
raise ValueError("Unsupported collections in graph: %s\n"
"Use hub.create_module_spec(..., drop_collections=[...])"
" as appropriate." % list(unsupported))
| 5,349,569 |
def lookup_location():
"""
Geolocation lookup of current position.
Determines latitude and longitude coordinates of the system's position
using the ipinfo.io service.
Returns:
Tuple (lat, lon) containing the latitude and longitude coordinates
associated with the IP from which the request is performed.
"""
response = urllib.request.urlopen("https://ipinfo.io/json")
data = json.loads(response.read())
coordinates = data["loc"]
lat, lon = coordinates.split(",")
return float(lat), float(lon)
| 5,349,570 |
def _str_or_none(value):
"""Helper: serialize value to JSON string."""
if value is not None:
return str(value)
| 5,349,571 |
def archive_results(filename,results,algo,script):
"""
:type algo: basestring
:type script: basestring
:type results: DataFrame
"""
#assert results == pd.DataFrame
now = time.localtime()[0:5]
dirname='../archive'
subdirfmt='%4d-%02d-%02d-%02d-%02d'
subdir=subdirfmt %now
if not os.path.exists(os.path.join(dirname,str(algo))):
os.mkdir(os.path.join(dirname,str(algo)))
dir_to_create=os.path.join(dirname,str(algo),subdir)
if not os.path.exists(dir_to_create):
os.mkdir(dir_to_create)
os.chdir(dir_to_create)
results.to_csv(filename,index=False,float_format='%.6f')
shutil.copy2(script,'.')
return
| 5,349,572 |
def cleanup():
"""
Clean up Environmental Variable for enable before and after tests
"""
if XRAY_ENABLED_KEY in os.environ:
del os.environ[XRAY_ENABLED_KEY]
yield
if XRAY_ENABLED_KEY in os.environ:
del os.environ[XRAY_ENABLED_KEY]
global_sdk_config.set_sdk_enabled(True)
| 5,349,573 |
def test_matches_regex():
"""Test matches_regex validator."""
schema = vol.Schema(cv.matches_regex(".*uiae.*"))
with pytest.raises(vol.Invalid):
schema(1.0)
with pytest.raises(vol.Invalid):
schema(" nrtd ")
test_str = "This is a test including uiae."
assert schema(test_str) == test_str
| 5,349,574 |
def create_song_graph_from_songs(songs: list[Song],
parent_graph: song_graph.SongGraph = None,
year_separation: int = 10) -> song_graph.SongGraph:
"""Create and return a song graph from a list of songs.
(Optional) Add a parent graph from a larger dataset to the new song graph.
(Optional) year_separation defines the way year attribute vertices are to be
created. I.e. the intervals in year attribute vertices. For example,
a year_separation of 10 will create year attribute vertices
for each decade spanned by the playlist.
Preconditions:
- parent_graph is None or parent_graph.are_attributes_created()
# parent_graph is not None implies parent_graph.are_attributes_created()
"""
graph = song_graph.SongGraph(parent_graph)
for song in songs:
graph.add_song(song)
if parent_graph is None:
graph.generate_attribute_vertices(year_separation)
else:
graph.generate_attribute_vertices(use_parent=True, year_separation=year_separation)
return graph
| 5,349,575 |
def convert(secs):
"""Takes a time in seconds and converts to min:sec:msec"""
mins = int(secs // 60)
secs %= 60
msecs = int(round(((secs - int(secs)) * 1000)))
secs = int(secs)
return f'{mins} mins, {secs} secs, {msecs} msecs'
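# Example: convert(125.5) returns '2 mins, 5 secs, 500 msecs'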
| 5,349,576 |
def from_data(symbols, key_matrix, name_matrix, one_indexed=False):
""" z-matrix constructor
:param symbols: atomic symbols
:type symbols: tuple[str]
:param key_matrix: key/index columns of the z-matrix, zero-indexed
:type key_matrix: tuple[tuple[float, float or None, float or None]]
:param name_matrix: coordinate name columns of the z-matrix
:type name_matrix: tuple[tuple[str, str or None, str or None]]
"""
syms = list(map(pt.to_E, symbols))
natms = len(syms)
key_mat = _key_matrix(key_matrix, natms, one_indexed)
name_mat = _name_matrix(name_matrix, natms)
vma = tuple(zip(syms, key_mat, name_mat))
return vma
| 5,349,577 |
def should_drop_from_right_deck(n_left: int, n_right:int, seed: int=None) -> bool:
"""
Determine whether we drop a card from the right or left sub-deck.
Either `n_left` or `n_right` (or both) must be greater than zero.
:param n_left: the number of cards in the left sub-deck.
:param n_right: the number of cards in the right sub-deck.
:param seed: optional seed for the random number generator to
enable deterministic behavior.
:return: True if we should drop a card from the right sub-deck,
False otherwise.
Examples:
>>> should_drop_from_right_deck(n_left=32, n_right=5, seed=0, )
True
>>> should_drop_from_right_deck(n_left=0, n_right=5, )
True
>>> should_drop_from_right_deck(n_left=7, n_right=0, )
False
>>> should_drop_from_right_deck(n_left=0, n_right=0, )
Traceback (most recent call last):
...
ValueError: Either `n_left` or `n_right` (or both) must be greater than zero.
"""
if n_left > 0 and n_right > 0:
# There are cards left in both sub-decks, so pick a
# sub-deck at random.
random = sklearn.utils.check_random_state(seed=seed)
num = random.randint(low=0, high=2)
boolean = (num == 0)
return boolean
elif n_left == 0 and n_right > 0:
# There are no more cards in the left sub-deck, only
# the right sub-deck, so we drop from the right sub-deck.
return True
elif n_left > 0 and n_right == 0:
# There are no more cards in the right sub-deck, only
# the left sub-deck, so we drop from the left sub-deck.
return False
else:
# There are no more cards in either sub-deck.
raise ValueError ('Either `n_left` or `n_right` ' '(or both) must be greater than zero.')
| 5,349,578 |
def which(program):
"""
Locate an executable binary's full path by its name.
:param str program: The executable's name.
:return: The full path to the executable.
:rtype: str
"""
is_exe = lambda fpath: (os.path.isfile(fpath) and os.access(fpath, os.X_OK))
for path in os.environ['PATH'].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
if is_exe(program):
return os.path.abspath(program)
return None
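
# Hedged usage sketch (added for illustration, not part of the original source):
# resolve an executable on PATH; which() returns None when nothing matches.
if __name__ == '__main__':
    python_path = which('python3')
    print(python_path if python_path else 'python3 not found on PATH')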
| 5,349,579 |
def EXPOSED(mu=1.0):
"""
matrix of exposed sites
Parameters
----------
mu: rate
"""
pis = np.array([0.088367,0.078147,0.047163,0.087976,0.004517,0.058526,0.128039,0.056993,0.024856,0.025277,0.045202,0.094639,0.012338,0.016158,0.060124,0.055346,0.051290,0.006771,0.021554,0.036718])
W = np.array([
[0.0,0.526738,0.48315,0.658902,2.051872,1.280002,1.306565,1.370782,0.540809,0.171986,0.430511,0.697731,1.043937,0.265209,1.270693,4.826665,2.131819,0.143081,0.208643,2.544463],
[0.526738,0.0,0.505837,0.051052,2.214326,2.039552,0.137928,0.363365,2.288922,0.237023,0.670514,3.881079,0.656943,0.097443,0.166534,0.751947,0.584329,0.47559,0.196271,0.313443],
[0.48315,0.505837,0.0,3.902456,0.961103,1.301786,0.285806,1.8201,4.949307,0.337226,0.158937,1.677194,0.539827,0.182522,0.068692,4.412265,2.133604,0.061094,0.599369,0.172264],
[0.658902,0.051052,3.902456,0.0,0.129989,0.399061,3.100403,0.885317,0.70089,0.018315,0.021949,0.10545,0.066925,0.026918,0.228829,0.975564,0.368887,0.042618,0.121313,0.073705],
[2.051872,2.214326,0.961103,0.129989,0.0,0.456521,0.033946,0.886564,2.172284,1.037046,1.702066,0.146263,1.846562,3.002586,0.156216,5.294149,2.067387,1.603125,3.842632,4.207648],
[1.280002,2.039552,1.301786,0.399061,0.456521,0.0,2.514377,0.320746,3.755421,0.212032,1.261113,2.570254,1.973592,0.080193,0.362501,1.033459,1.013613,0.210329,0.15847,0.497398],
[1.306565,0.137928,0.285806,3.100403,0.033946,2.514377,0.0,0.303966,0.270957,0.084442,0.110508,0.730337,0.18816,0.023999,0.214847,0.382235,0.51139,0.048276,0.064648,0.48462],
[1.370782,0.363365,1.8201,0.885317,0.886564,0.320746,0.303966,0.0,0.401311,0.012279,0.052946,0.279865,0.158136,0.084663,0.1489,1.970857,0.174527,0.186382,0.03928,0.132496],
[0.540809,2.288922,4.949307,0.70089,2.172284,3.755421,0.270957,0.401311,0.0,0.317239,0.869247,0.598289,0.519993,2.047163,0.323141,0.99331,0.58096,0.961546,8.230282,0.329895],
[0.171986,0.237023,0.337226,0.018315,1.037046,0.212032,0.084442,0.012279,0.317239,0.0,8.675343,0.338782,9.483497,2.193062,0.071992,0.190509,2.56363,0.208313,0.517123,23.711178],
[0.430511,0.670514,0.158937,0.021949,1.702066,1.261113,0.110508,0.052946,0.869247,8.675343,0.0,0.313102,14.176858,4.802817,0.343919,0.389101,0.522334,1.130724,0.713426,3.466991],
[0.697731,3.881079,1.677194,0.10545,0.146263,2.570254,0.730337,0.279865,0.598289,0.338782,0.313102,0.0,1.013268,0.044792,0.19547,0.592156,1.147459,0.052858,0.084962,0.348362],
[1.043937,0.656943,0.539827,0.066925,1.846562,1.973592,0.18816,0.158136,0.519993,9.483497,14.176858,1.013268,0.0,3.261401,0.099252,0.557254,2.960091,1.328785,0.812142,4.136445],
[0.265209,0.097443,0.182522,0.026918,3.002586,0.080193,0.023999,0.084663,2.047163,2.193062,4.802817,0.044792,3.261401,0.0,0.08702,0.668834,0.24442,5.210001,23.228875,1.199764],
[1.270693,0.166534,0.068692,0.228829,0.156216,0.362501,0.214847,0.1489,0.323141,0.071992,0.343919,0.19547,0.099252,0.08702,0.0,1.223981,0.413148,0.045945,0.043249,0.368231],
[4.826665,0.751947,4.412265,0.975564,5.294149,1.033459,0.382235,1.970857,0.99331,0.190509,0.389101,0.592156,0.557254,0.668834,1.223981,0.0,7.384701,0.316078,0.40531,0.266531],
[2.131819,0.584329,2.133604,0.368887,2.067387,1.013613,0.51139,0.174527,0.58096,2.56363,0.522334,1.147459,2.960091,0.24442,0.413148,7.384701,0.0,0.144393,0.234217,3.184874],
[0.143081,0.47559,0.061094,0.042618,1.603125,0.210329,0.048276,0.186382,0.961546,0.208313,1.130724,0.052858,1.328785,5.210001,0.045945,0.316078,0.144393,0.0,4.903887,0.252132],
[0.208643,0.196271,0.599369,0.121313,3.842632,0.15847,0.064648,0.03928,8.230282,0.517123,0.713426,0.084962,0.812142,23.228875,0.043249,0.40531,0.234217,4.903887,0.0,0.459187],
[2.544463,0.313443,0.172264,0.073705,4.207648,0.497398,0.48462,0.132496,0.329895,23.711178,3.466991,0.348362,4.136445,1.199764,0.368231,0.266531,3.184874,0.252132,0.459187,0.0]
])
gtr = GTR(alphabet=alphabets['aa_nogap'])
gtr.assign_rates(mu=mu, pi=pis, W=W)
return gtr
| 5,349,580 |
async def rename_conflicting_targets(
ptgts: PutativeTargets, all_existing_tgts: AllUnexpandedTargets
) -> UniquelyNamedPutativeTargets:
"""Ensure that no target addresses collide."""
existing_addrs: set[str] = {tgt.address.spec for tgt in all_existing_tgts}
uniquely_named_putative_targets: list[PutativeTarget] = []
for ptgt in ptgts:
if not ptgt.addressable:
# Non-addressable PutativeTargets never have collision issues.
uniquely_named_putative_targets.append(ptgt)
continue
idx = 0
possibly_renamed_ptgt = ptgt
# Targets in root-level BUILD files must be named explicitly.
if possibly_renamed_ptgt.path == "" and possibly_renamed_ptgt.kwargs.get("name") is None:
possibly_renamed_ptgt = possibly_renamed_ptgt.rename("root")
# Eliminate any address collisions.
while possibly_renamed_ptgt.address.spec in existing_addrs:
possibly_renamed_ptgt = ptgt.rename(f"{ptgt.name}{idx}")
idx += 1
uniquely_named_putative_targets.append(possibly_renamed_ptgt)
existing_addrs.add(possibly_renamed_ptgt.address.spec)
return UniquelyNamedPutativeTargets(PutativeTargets(uniquely_named_putative_targets))
| 5,349,581 |
def get_time_and_time():
"""
Get the current date and time and speak it
"""
current_time = datetime.datetime.now().strftime("%d %B %Y %H:%M:%S")
speak("Current date is ")
speak(current_time)
| 5,349,582 |
def select(filename):
"""
Выбрать человека по фамилии.
"""
people = load_people(filename)
    who = input('Whom are we looking for?: ')
count = 0
line = '+-{}-+-{}-+-{}-+-{}-+'.format(
'-' * 4,
'-' * 30,
'-' * 20,
'-' * 20
)
print(line)
print(
'| {:^4} | {:^30} | {:^20} | {:^20} |'.format(
"№",
"Ф.И.О.",
"Знак Зодиака",
"Дата рождения"
)
)
print(line)
for i, num in enumerate(people, 1):
if who == num['name']:
count += 1
print(
'| {:^4} | {:^30} | {:^20} | {:^20} |'.format(
count,
num['name'],
num['zodiac'],
' '.join((str(i) for i in num['year']))))
print(line)
if count == 0:
        print('No one was found')
| 5,349,583 |
def outer2D(v1, v2):
"""Calculates the magnitude of the outer product of two 2D vectors, v1 and v2"""
return v1[0]*v2[1] - v1[1]*v2[0]
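
# Hedged usage sketch (added for illustration, not part of the original source):
# the 2D outer (cross) product of the unit basis vectors has magnitude 1, and
# swapping the operands flips its sign.
assert outer2D((1.0, 0.0), (0.0, 1.0)) == 1.0
assert outer2D((0.0, 1.0), (1.0, 0.0)) == -1.0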
| 5,349,584 |
def get_job_details(jobId=None):
"""
Returns information about a job. Used for custom actions only.
See also: AWS API Documentation
Exceptions
:example: response = client.get_job_details(
jobId='string'
)
:type jobId: string
    :param jobId: [REQUIRED] The unique system-generated ID for the job.
:rtype: dict
ReturnsResponse Syntax{
'jobDetails': {
'id': 'string',
'data': {
'actionTypeId': {
'category': 'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval',
'owner': 'AWS'|'ThirdParty'|'Custom',
'provider': 'string',
'version': 'string'
},
'actionConfiguration': {
'configuration': {
'string': 'string'
}
},
'pipelineContext': {
'pipelineName': 'string',
'stage': {
'name': 'string'
},
'action': {
'name': 'string',
'actionExecutionId': 'string'
},
'pipelineArn': 'string',
'pipelineExecutionId': 'string'
},
'inputArtifacts': [
{
'name': 'string',
'revision': 'string',
'location': {
'type': 'S3',
's3Location': {
'bucketName': 'string',
'objectKey': 'string'
}
}
},
],
'outputArtifacts': [
{
'name': 'string',
'revision': 'string',
'location': {
'type': 'S3',
's3Location': {
'bucketName': 'string',
'objectKey': 'string'
}
}
},
],
'artifactCredentials': {
'accessKeyId': 'string',
'secretAccessKey': 'string',
'sessionToken': 'string'
},
'continuationToken': 'string',
'encryptionKey': {
'id': 'string',
'type': 'KMS'
}
},
'accountId': 'string'
}
}
Response Structure
(dict) --Represents the output of a GetJobDetails action.
jobDetails (dict) --The details of the job.
Note
If AWSSessionCredentials is used, a long-running job can call GetJobDetails again to obtain new credentials.
id (string) --The unique system-generated ID of the job.
data (dict) --Represents other information about a job required for a job worker to complete the job.
actionTypeId (dict) --Represents information about an action type.
category (string) --A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the following values.
owner (string) --The creator of the action being called.
provider (string) --The provider of the service being called by the action. Valid providers are determined by the action category. For example, an action in the Deploy category type might have a provider of AWS CodeDeploy, which would be specified as CodeDeploy. For more information, see Valid Action Types and Providers in CodePipeline .
version (string) --A string that describes the action version.
actionConfiguration (dict) --Represents information about an action configuration.
configuration (dict) --The configuration data for the action.
(string) --
(string) --
pipelineContext (dict) --Represents information about a pipeline to a job worker.
Note
Includes pipelineArn and pipelineExecutionId for custom jobs.
pipelineName (string) --The name of the pipeline. This is a user-specified value. Pipeline names must be unique across all pipeline names under an Amazon Web Services account.
stage (dict) --The stage of the pipeline.
name (string) --The name of the stage.
action (dict) --The context of an action to a job worker in the stage of a pipeline.
name (string) --The name of the action in the context of a job.
actionExecutionId (string) --The system-generated unique ID that corresponds to an action\'s execution.
pipelineArn (string) --The Amazon Resource Name (ARN) of the pipeline.
pipelineExecutionId (string) --The execution ID of the pipeline.
inputArtifacts (list) --The artifact supplied to the job.
(dict) --Represents information about an artifact that is worked on by actions in the pipeline.
name (string) --The artifact\'s name.
revision (string) --The artifact\'s revision ID. Depending on the type of object, this could be a commit ID (GitHub) or a revision ID (Amazon S3).
location (dict) --The location of an artifact.
type (string) --The type of artifact in the location.
s3Location (dict) --The S3 bucket that contains the artifact.
bucketName (string) --The name of the S3 bucket.
objectKey (string) --The key of the object in the S3 bucket, which uniquely identifies the object in the bucket.
outputArtifacts (list) --The output of the job.
(dict) --Represents information about an artifact that is worked on by actions in the pipeline.
name (string) --The artifact\'s name.
revision (string) --The artifact\'s revision ID. Depending on the type of object, this could be a commit ID (GitHub) or a revision ID (Amazon S3).
location (dict) --The location of an artifact.
type (string) --The type of artifact in the location.
s3Location (dict) --The S3 bucket that contains the artifact.
bucketName (string) --The name of the S3 bucket.
objectKey (string) --The key of the object in the S3 bucket, which uniquely identifies the object in the bucket.
artifactCredentials (dict) --Represents an AWS session credentials object. These credentials are temporary credentials that are issued by AWS Secure Token Service (STS). They can be used to access input and output artifacts in the S3 bucket used to store artifacts for the pipeline in AWS CodePipeline.
accessKeyId (string) --The access key for the session.
secretAccessKey (string) --The secret access key for the session.
sessionToken (string) --The token for the session.
continuationToken (string) --A system-generated token, such as a AWS CodeDeploy deployment ID, required by a job to continue the job asynchronously.
encryptionKey (dict) --Represents information about the key used to encrypt data in the artifact store, such as an AWS Key Management Service (AWS KMS) key.
id (string) --The ID used to identify the key. For an AWS KMS key, you can use the key ID, the key ARN, or the alias ARN.
Note
Aliases are recognized only in the account that created the customer master key (CMK). For cross-account actions, you can only use the key ID or key ARN to identify the key.
type (string) --The type of encryption key, such as an AWS Key Management Service (AWS KMS) key. When creating or updating a pipeline, the value must be set to \'KMS\'.
accountId (string) --The AWS account ID associated with the job.
Exceptions
CodePipeline.Client.exceptions.ValidationException
CodePipeline.Client.exceptions.JobNotFoundException
:return: {
'jobDetails': {
'id': 'string',
'data': {
'actionTypeId': {
'category': 'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval',
'owner': 'AWS'|'ThirdParty'|'Custom',
'provider': 'string',
'version': 'string'
},
'actionConfiguration': {
'configuration': {
'string': 'string'
}
},
'pipelineContext': {
'pipelineName': 'string',
'stage': {
'name': 'string'
},
'action': {
'name': 'string',
'actionExecutionId': 'string'
},
'pipelineArn': 'string',
'pipelineExecutionId': 'string'
},
'inputArtifacts': [
{
'name': 'string',
'revision': 'string',
'location': {
'type': 'S3',
's3Location': {
'bucketName': 'string',
'objectKey': 'string'
}
}
},
],
'outputArtifacts': [
{
'name': 'string',
'revision': 'string',
'location': {
'type': 'S3',
's3Location': {
'bucketName': 'string',
'objectKey': 'string'
}
}
},
],
'artifactCredentials': {
'accessKeyId': 'string',
'secretAccessKey': 'string',
'sessionToken': 'string'
},
'continuationToken': 'string',
'encryptionKey': {
'id': 'string',
'type': 'KMS'
}
},
'accountId': 'string'
}
}
:returns:
CodePipeline.Client.exceptions.ValidationException
CodePipeline.Client.exceptions.JobNotFoundException
"""
pass
| 5,349,585 |
def get_item_indent(item: Union[int, str]) -> Union[int, None]:
"""Gets the item's indent.
Returns:
        indent as an int or None
"""
return internal_dpg.get_item_configuration(item)["indent"]
| 5,349,586 |
def test_7_custom_docker_lines_predict():
"""Check that setup.py dockerlines works as expected for prediction"""
# Build the distribution without building the image
code = baklava.predict(example('7-custom-docker-lines'), ['--nobuild'])
assert code == 0
test_7_build_and_check()
| 5,349,587 |
def V_RSJ_asym(i, ic_pos, ic_neg, rn, io, vo):
"""Return voltage with asymmetric Ic's in RSJ model"""
if ic_pos < 0 or ic_neg > 0 or rn < 0:
#or abs(ic_neg/ic_pos) > 1.2 or abs(ic_pos/ic_neg) > 1.2 :
# set boundaries for fitting
#pass
v = 1e10
else:
v = np.zeros(len(i))
        n = i > io + ic_pos
        v[n] = rn * np.sqrt((i[n] - io) ** 2 - ic_pos ** 2) + vo
        n = i < io + ic_neg
        v[n] = -rn * np.sqrt((i[n] - io) ** 2 - ic_neg ** 2) + vo
        n = np.logical_and(i >= io + ic_neg, i <= io + ic_pos)
        v[n] = vo
return v
| 5,349,588 |
def h2_gas_costs(pipe_dist=-102.75, truck_dist=-106.0, pipeline=True, max_pipeline_dist=2000):
"""Calculates the transport cost of H2 gas. Requires as input the distance that H2 will be piped and
trucked."""
if max_pipeline_dist > pipe_dist > 400:
pipe = 0.0004 * pipe_dist + 0.0424
elif pipe_dist < 400:
pipe = 0.0004 * 400 + 0.0424
else:
pipe = np.nan
    if not pipeline:
pipe = np.nan
truck = 0.003 * truck_dist + 0.3319
return pipe + truck
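
# Hedged usage sketch (added for illustration, not part of the original source):
# the distances below are made-up example values, not the defaults from the source.
# With pipe_dist=800 and truck_dist=150 the pipeline leg costs
# 0.0004 * 800 + 0.0424 = 0.3624 and the truck leg 0.003 * 150 + 0.3319 = 0.7819,
# so the total is roughly 1.14.
cost = h2_gas_costs(pipe_dist=800, truck_dist=150)
print(f'H2 transport cost: {cost:.4f}')  # ~1.1443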
| 5,349,589 |
def number_to_words(input_: Union[int, str], capitalize: bool = False) -> str:
"""Converts integer version of a number into words.
Args:
input_: Takes the integer version of a number as an argument.
capitalize: Boolean flag to capitalize the first letter.
Returns:
str:
String version of the number.
"""
result = inflect.engine().number_to_words(num=input_)
return result[0].upper() + result[1:] if capitalize else result
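
# Hedged usage sketch (added for illustration, not part of the original source);
# assumes the `inflect` package is installed.
assert number_to_words(42) == 'forty-two'
assert number_to_words(42, capitalize=True) == 'Forty-two'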
| 5,349,590 |
def get_size_reduction_by_cropping(analyzer: DatasetAnalyzer) -> Dict[str, Dict]:
"""
Compute all size reductions of each case
Args:
        analyzer: analyzer which calls this property
Returns:
Dict: computed size reductions
`size_reductions`: dictionary with each case id and reduction
"""
size_reduction = OrderedDict()
for case_id in analyzer.case_ids:
props = load_properties_of_cropped(analyzer.cropped_data_dir / case_id)
shape_before_crop = props["original_size_of_raw_data"]
shape_after_crop = props['size_after_cropping']
size_red = np.prod(shape_after_crop) / np.prod(shape_before_crop)
size_reduction[case_id] = size_red
return {"size_reductions": size_reduction}
| 5,349,591 |
def getProxyFile(path):
"""
Opens a text file and returns the contents with any setting of a certificate file
replaced with the mitmproxy certificate.
"""
with open(path, "r") as fd:
contents = fd.read()
    certReferences = re.findall(r"setcertificatesfile\(.*\)", contents, re.IGNORECASE)
for certReference in certReferences:
msg = "using mitmproxy certificate: %s (%s)" % (certReference, path)
print(bcolors.OKBLUE + msg + bcolors.ENDC)
contents = contents.replace(certReference, 'setCertificatesFile("pkg:/source/mitmproxy.crt")')
return contents
| 5,349,592 |
def ceph_version(ctx, package):
"""Commands related to fetching of Ceph version."""
ctx.obj["packages"] = sorted(set(package))
| 5,349,593 |
def bytes_filesize_to_readable_str(bytes_filesize: int) -> str:
"""Convert bytes integer to kilobyte/megabyte/gigabyte/terabyte equivalent string"""
    if bytes_filesize < 1024:
        return "{} B".format(bytes_filesize)
num = float(bytes_filesize)
for unit in ["B", "KB", "MB", "GB"]:
if abs(num) < 1024.0:
return "{:.1f} {}".format(num, unit)
num /= 1024.0
return "{:.1f} {}".format(num, "TB")
| 5,349,594 |
def braf_mane_data():
"""Create test fixture for BRAF MANE data."""
return {
"#NCBI_GeneID": "GeneID:673",
"Ensembl_Gene": "ENSG00000157764.14",
"HGNC_ID": "HGNC:1097",
"symbol": "BRAF",
"name": "B-Raf proto-oncogene, serine/threonine kinase",
"RefSeq_nuc": "NM_004333.6",
"RefSeq_prot": "NP_004324.2",
"Ensembl_nuc": "ENST00000646891.2",
"Ensembl_prot": "ENSP00000493543.1",
"MANE_status": "MANE Select",
"GRCh38_chr": "7",
"chr_start": 140730665,
"chr_end": 140924929,
"chr_strand": "-"
}
| 5,349,595 |
def get_decopath_genesets(decopath_ontology, gmt_dir: str):
"""Generate DecoPath gene sets with super pathways."""
concatenated_genesets_dict = {}
dc_mapping = defaultdict(list)
if not os.path.isdir(gmt_dir):
make_geneset_dir()
super_pathway_mappings, ontology_df = get_equivalent_pathway_dc_ids(decopath_ontology)
# Get gene sets from individual databases
gmt_files = [os.path.join(GMT_FILES_DIR, filename) for filename in os.listdir(gmt_dir) if filename.endswith('.gmt')]
genesets = [_get_gmt_dict(file) for file in gmt_files]
# Concatenate gene sets from individual databases
for geneset in genesets:
concatenated_genesets_dict.update(geneset)
# Get super pathway gene sets with DecoPath IDs
for pathway_id, dc_id, dc_name in super_pathway_mappings.values:
if pathway_id in concatenated_genesets_dict:
dc_mapping[dc_id].append(concatenated_genesets_dict[pathway_id])
# Return DecoPath gene sets
return {
pathway_id: {gene for sublist in geneset for gene in sublist}
for pathway_id, geneset in dc_mapping.items()
}
| 5,349,596 |
def rename_state_dict_keys(source, key_transformation, target=None):
"""
source -> Path to the saved state dict.
key_transformation -> Function that accepts the old key names of the state
dict as the only argument and returns the new key name.
target (optional) -> Path at which the new state dict should be saved
(defaults to `source`)
Example:
    Rename the key `layer.0.weight` to `layer.1.weight` and keep the names
    of all other keys.
```py
def key_transformation(old_key):
if old_key == "layer.0.weight":
return "layer.1.weight"
return old_key
rename_state_dict_keys(state_dict_path, key_transformation)
```
"""
if target is None:
target = source
state_dict = torch.load(source)
# state_dict = state_dict.state_dict()
new_state_dict = OrderedDict()
for key, value in state_dict.items():
new_key = key_transformation(key)
new_state_dict[new_key] = value
torch.save(new_state_dict, target)
| 5,349,597 |
def test_iot_get_raci_default_email(monkeypatch):
"""
    Scenario: checking that the responsible email is the default one specified in IOT_CONFIG
Given
- A device with an IoT Vulnerability
When
- Calculating the RACI model result
Then
- Ensure the r_email is the default email in IOT_CONFIG
"""
monkeypatch.setattr(iot_get_raci, 'get_iot_config', lambda x: _CONFIG_WITH_DEFAULT)
outputs = get_raci({
'alert_name': '',
'raw_type': 'IoT Vulnerability',
'category': 'Audio Streaming',
'profile': 'Profusion Media Player'
}).outputs
assert outputs == {
'owner': 'IT_AUDIO_VIDEO',
'r': 'IT_AUDIO_VIDEO',
'r_email': '[email protected]',
'r_snow': None,
'i': 'INFOSEC, SOC',
'i_email': '[email protected], [email protected]'
}
| 5,349,598 |
def import_class(path):
"""
Import a class from a dot-delimited module path. Accepts both dot and
    colon separators for the class portion of the path.
ex::
import_class('package.module.ClassName')
or
import_class('package.module:ClassName')
"""
if ':' in path:
module_path, class_name = path.split(':')
else:
module_path, class_name = path.rsplit('.', 1)
module = __import__(module_path, fromlist=[class_name], level=0)
return getattr(module, class_name)
| 5,349,599 |