content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
---|---|
def get_tasks(
id: Optional[str], name: Optional[str], completed: Optional[bool], comment: Optional[str], limit: Optional[str],
) -> str:
"""
    :param id: Optional task id; when given, the response is filtered to that task
    :param name: Optional task name used to filter the response
    :param completed: Optional completion flag used to filter the response
    :param comment: Optional comment text used to filter the response
    :param limit: Optional maximum number of tasks to return
:return: Returns a string which contains success or failure response based on the API response status
"""
response = requests.get(BASE_URL + BASE_PATH + "/todo/" + str(id))
if response.status_code == 200:
print("Here is your task(s) list:")
return json.dumps(response.json(), indent=4)
else:
return build_error_response(response.text)
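# Sketch of the module-level setup this snippet assumes but does not show; the
# URL values below are hypothetical placeholders, and build_error_response is a
# helper defined elsewhere in the same project.
import json
import requests
from typing import Optional

BASE_URL = "http://localhost:8080"   # hypothetical host
BASE_PATH = "/api/v1"                # hypothetical base path

# Example call: fetch a single task by id.
# print(get_tasks(id="1", name=None, completed=None, comment=None, limit=None))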
| 5,350,700 |
def f_test_probability(N, p1, Chi2_1, p2, Chi2_2):
"""Return F-Test probability that the simpler model is correct.
    e.g. p1 = 5  (number of PPM parameters)
    e.g. p2 = p1 + 7  (number of PPM + orbital parameters)
:param N: int
Number of data points
:param p1: int
Number of parameters of the simpler model
:param Chi2_1: float
chi^2 corresponding to the simpler model
:param p2: int
Number of parameters of the model with more parameters
p2 > p1
:param Chi2_2: float
chi^2 corresponding to the model with more parameters
:return:
prob: float
probability
"""
nu1 = p2 - p1
nu2 = N - p2 # degrees of freedom
    if Chi2_1 < Chi2_2:
        raise RuntimeWarning('Solution is better with fewer parameters')
# F test
F0 = nu2 / nu1 * (Chi2_1 - Chi2_2) / Chi2_2
# probability
prob = betai(0.5 * nu2, 0.5 * nu1, nu2 / (nu2 + F0 * nu1))
return prob
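# Usage sketch: `betai` above is assumed to be the regularized incomplete beta
# function (e.g. scipy.special.betainc); the numbers below are made up.
from scipy.special import betainc as betai

# 100 data points; PPM-only model (5 params, chi^2 = 250) vs PPM + orbit (12 params, chi^2 = 180)
prob = f_test_probability(N=100, p1=5, Chi2_1=250.0, p2=12, Chi2_2=180.0)
print('Probability that the simpler model is sufficient: {:.3g}'.format(prob))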
| 5,350,701 |
def validate_user(headers):
"""Validate the user and return the results."""
user_id = headers.get("User", "")
token = headers.get("Authorization", "")
registered = False
if user_id:
valid_user_id = user_id_or_guest(user_id)
registered = valid_user_id > 1
else:
valid_user_id = 1
is_token_invalid = invalid_token(user_id, token)
return valid_user_id, registered, is_token_invalid
| 5,350,702 |
def load_csv_to_journal(batch_info):
"""Take a dict of batch and csv info and load into journal table."""
# Create batch for testing
filename = batch_info['filename']
journal_batch_name = batch_info['journal_batch_name']
journal_batch_description = batch_info['journal_batch_description']
journal_batch_entity = batch_info['journal_batch_entity']
journal_batch_currency = batch_info['journal_batch_currency']
gl_post_reference = batch_info['gl_post_reference']
gl_batch_status = batch_info['gl_batch_status']
insert_new_batch_name(journal_batch_name,
journal_batch_description,
str(journal_batch_entity),
str(journal_batch_currency),
gl_post_reference,
str(gl_batch_status),
)
# Set up csv file to use
batch_row_id = get_journal_batch_row_id_by_name(journal_batch_name)
batch_row_id = batch_row_id[0][0][0]
# Load csv file to journal_loader
load_file = batch_load_je_file(filename, str(batch_row_id))
status_ = [0, batch_row_id] # [load_file status, batch_row_id]
if load_file == 'LOAD OKAY':
status_[0] = 0
else:
status_[0] = 99
raise Exception('Error posting csv file to Journal table')
# Compare csv totals loaded into pandas dataframe to journal
# table totals.
# Load batch in journal_loader to journal
if status_[0] == 0:
load_status_journal = batch_load_insert(batch_row_id)
print(f'load_status_journal: {load_status_journal}')
return status_
else:
print(f'Error loading to journal_loader: {status_}')
raise Exception('Error posting csv file to journal_loader')
return status_
| 5,350,703 |
def _init():
"""
Loads the TRICAL library and sets up the ctypes interface.
Called automatically the first time a Python instance is created.
"""
global _TRICAL
# TODO: search properly
lib = os.path.join(os.path.dirname(__file__), "libTRICAL.dylib")
_TRICAL = cdll.LoadLibrary(lib)
# Set up the _Instance structure based on the definition in TRICAL.h
_Instance._fields_ = [
("field_norm", c_float),
("measurement_noise", c_float),
("state", c_float * 9),
("state_covariance", c_float * 9 * 9),
("measurement_count", c_uint)
]
_TRICAL.TRICAL_init.argtypes = [POINTER(_Instance)]
_TRICAL.TRICAL_init.restype = None
_TRICAL.TRICAL_norm_set.argtypes = [POINTER(_Instance), c_float]
_TRICAL.TRICAL_norm_set.restype = None
_TRICAL.TRICAL_norm_get.argtypes = [POINTER(_Instance)]
_TRICAL.TRICAL_norm_get.restype = c_float
_TRICAL.TRICAL_noise_set.argtypes = [POINTER(_Instance), c_float]
_TRICAL.TRICAL_noise_set.restype = None
_TRICAL.TRICAL_noise_get.argtypes = [POINTER(_Instance)]
_TRICAL.TRICAL_noise_get.restype = c_float
_TRICAL.TRICAL_measurement_count_get.argtypes = [POINTER(_Instance)]
_TRICAL.TRICAL_measurement_count_get.restype = c_uint
_TRICAL.TRICAL_estimate_update.argtypes = [POINTER(_Instance),
POINTER(c_float * 3)]
_TRICAL.TRICAL_estimate_update.restype = None
_TRICAL.TRICAL_estimate_get.argtypes = [POINTER(_Instance),
POINTER(c_float * 3),
POINTER(c_float * 9)]
_TRICAL.TRICAL_estimate_get.restype = None
_TRICAL.TRICAL_estimate_get_ext.argtypes = [POINTER(_Instance),
POINTER(c_float * 3),
POINTER(c_float * 9),
POINTER(c_float * 3),
POINTER(c_float * 9)]
_TRICAL.TRICAL_estimate_get_ext.restype = None
_TRICAL.TRICAL_measurement_calibrate.argtypes = [POINTER(_Instance),
POINTER(c_float * 3),
POINTER(c_float * 3)]
_TRICAL.TRICAL_measurement_calibrate.restype = None
| 5,350,704 |
def test_paragraph_series_b_cr():
"""
Test case: Paragraph containing a character reference.
was: test_paragraph_extra_14
"""
# Arrange
source_markdown = """fun & joy"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):fun \a&\a\a&\a&\a\a joy:]",
"[end-para:::True]",
]
expected_gfm = """<p>fun & joy</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
| 5,350,705 |
def validate_guid(guid: str) -> str:
    """Validate that a guid is formatted properly and return it unchanged."""
valid_chars = set('0123456789abcdef')
count = 0
for char in guid:
count += 1
if char not in valid_chars or count > 32:
raise ValueError('Invalid GUID format.')
if count != 32:
raise ValueError('Invalid GUID format.')
return guid
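# Minimal usage sketch: a valid guid here is a 32-character lowercase hex string
# with no dashes; anything else raises ValueError.
assert validate_guid("0123456789abcdef0123456789abcdef") == "0123456789abcdef0123456789abcdef"
# validate_guid("0123-4567")   # would raise ValueError('Invalid GUID format.')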
| 5,350,706 |
def load_key_string(string, callback=util.passphrase_callback):
# type: (AnyStr, Callable) -> RSA
"""
Load an RSA key pair from a string.
:param string: String containing RSA key pair in PEM format.
:param callback: A Python callable object that is invoked
to acquire a passphrase with which to unlock the
key. The default is util.passphrase_callback.
:return: M2Crypto.RSA.RSA object.
"""
bio = BIO.MemoryBuffer(string)
return load_key_bio(bio, callback)
| 5,350,707 |
def AddSourceToFunction(function, function_ref, update_mask, source_arg,
stage_bucket, messages, service):
"""Add sources to function."""
_CleanOldSourceInfo(function)
if source_arg is None:
source_arg = '.'
source_arg = source_arg or '.'
if source_arg.startswith('gs://'):
update_mask.append('sourceArchiveUrl')
function.sourceArchiveUrl = source_arg
return
if source_arg.startswith('https://'):
update_mask.append('sourceRepository')
function.sourceRepository = messages.SourceRepository(
url=_AddDefaultBranch(source_arg)
)
return
with file_utils.TemporaryDirectory() as tmp_dir:
zip_file = _CreateSourcesZipFile(tmp_dir, source_arg)
upload_url = UploadFile(
zip_file, stage_bucket, messages, service, function_ref)
if upload_url.startswith('gs://'):
update_mask.append('sourceArchiveUrl')
function.sourceArchiveUrl = upload_url
else:
update_mask.append('sourceUploadUrl')
function.sourceUploadUrl = upload_url
| 5,350,708 |
def start_search(keyword):
"""检索入口
:param keyword: 检索关键词"""
search_url = get_full_url(kw=keyword)
headers = {'User-Agent': USER_AGENT}
resp = requests.get(url=search_url, headers=headers)
print('请求响应状态码:', resp.status_code)
print('请求响应内容:', resp.json())
| 5,350,709 |
def load_lbidd(n=5000, observe_counterfactuals=False, return_ites=False,
return_ate=False, return_params_df=False, link='quadratic',
degree_y=None, degree_t=None, n_shared_parents='median', i=0,
dataroot=None,
print_paths=True):
"""
Load the LBIDD dataset that is specified
:param n: size of dataset (1k, 2.5k, 5k, 10k, 25k, or 50k)
:param observe_counterfactuals: if True, return double-sized dataset with
both y0 (first half) and y1 (second half) observed
:param return_ites: if True, return ITEs
:param return_ate: if True, return ATE
:param return_params_df: if True, return the DataFrame of dataset parameters
that match
:param link: link function (linear, quadratic, cubic, poly, log, or exp)
:param degree_y: degree of function for Y (e.g. 1, 2, 3, etc.)
:param degree_t: degree of function for T (e.g. 1, 2, 3, etc.)
:param n_shared_parents: number covariates that T and Y share as causal parents
:param i: index of parametrization to choose among the ones that match
:return: dictionary of results
"""
folder, scaling_zip, scaling_folder, covariates_path, params_path, counterfactuals_folder, factuals_folder = \
get_paths(dataroot=dataroot)
if print_paths:
print(scaling_folder)
print(covariates_path)
# Check if files exist
if not (os.path.isfile(scaling_zip) and os.path.isfile(covariates_path)):
raise FileNotFoundError(
'You must first download scaling.tar.gz and x.csv from '
'https://www.synapse.org/#!Synapse:syn11738963 and put them in the '
'datasets/lbidd/ folder. This requires creating an account on '
'Synapse and accepting some terms and conditions.'
)
# Process dataset size (n)
if n is not None:
if not isinstance(n, str):
n = str(n)
if n.lower() not in VALID_N:
raise ValueError('Invalid n: {} ... Valid n: {}'.format(n, list(VALID_N)))
n = N_STR_TO_INT[n]
# Unzip 'scaling.tar.gz' if not already unzipped
if not os.path.exists(scaling_folder):
print('Unzipping {} ...'.format(SCALING_TAR_ZIP), end=' ')
tar = tarfile.open(scaling_zip, "r:gz")
tar.extractall(folder)
tar.close()
print('DONE')
# Load and filter the params DataFrame
params_df = pd.read_csv(params_path)
if n is not None:
params_df = params_df[params_df['size'] == n] # Select dataset size
if link is not None:
if link not in VALID_LINKS:
raise ValueError('Invalid link function type: {} ... Valid links: {}'
.format(link, VALID_LINKS))
if link == 'linear':
link = 'poly'
degree_y = 1
degree_t = 1
elif link == 'quadratic':
link = 'poly'
degree_y = 2
degree_t = 2
elif link == 'cubic':
link = 'poly'
degree_y = 3
degree_t = 3
params_df = params_df[params_df['link_type'] == link] # Select link function
if degree_y is not None:
params_df = params_df[params_df['deg(y)'] == degree_y] # Select degree Y
if degree_t is not None:
params_df = params_df[params_df['deg(z)'] == degree_t] # Select degree T
# Filter by number of parents that T and Y share
valid_n_shared_parents = params_df['n_conf(yz)'].unique().tolist()
if n_shared_parents in valid_n_shared_parents:
params_df = params_df[params_df['n_conf(yz)'] == n_shared_parents]
elif isinstance(n_shared_parents, str) and n_shared_parents.lower() == 'max':
max_shared_parents = params_df['n_conf(yz)'].max()
params_df = params_df[params_df['n_conf(yz)'] == max_shared_parents]
elif isinstance(n_shared_parents, str) and n_shared_parents.lower() == 'median':
median_i = len(params_df) // 2
median_shared_parents = params_df['n_conf(yz)'].sort_values().iloc[median_i]
params_df = params_df[params_df['n_conf(yz)'] == median_shared_parents]
elif n_shared_parents is None:
pass
else:
raise ValueError('Invalid n_shared_parents ... must be either None, "max", "median", or in {}'
.format(valid_n_shared_parents))
if params_df.empty:
raise ValueError('No datasets have that combination of parameters.')
output = {}
if return_params_df:
output['params_df'] = params_df
# Get ith dataset that has the right parameters
if i < len(params_df):
ufid = params_df['ufid'].iloc[i]
else:
raise ValueError('Invalid i: {} ... with that parameter combination, i must be an int such that 0 <= i < {}'
.format(i, len(params_df)))
covariates_df = pd.read_csv(covariates_path, index_col=INDEX_COL_NAME)
factuals_path = os.path.join(factuals_folder, ufid + FILE_EXT)
factuals_df = pd.read_csv(factuals_path, index_col=INDEX_COL_NAME)
joint_factuals_df = covariates_df.join(factuals_df, how='inner')
output['t'] = joint_factuals_df['z'].to_numpy()
output['y'] = joint_factuals_df['y'].to_numpy()
output['w'] = joint_factuals_df.drop(['z', 'y'], axis='columns').to_numpy()
if observe_counterfactuals or return_ites or return_ate:
counterfactuals_path = os.path.join(counterfactuals_folder, ufid + COUNTERFACTUAL_FILE_SUFFIX + FILE_EXT)
counterfactuals_df = pd.read_csv(counterfactuals_path, index_col=INDEX_COL_NAME)
joint_counterfactuals_df = covariates_df.join(counterfactuals_df, how='inner')
# Add t column and stack y0 potential outcomes and y1 potential outcomes in same df
if observe_counterfactuals:
joint_y0_df = joint_counterfactuals_df.drop(['y1'], axis='columns').rename(columns={'y0': 'y'})
joint_y0_df['t'] = 0
joint_y1_df = joint_counterfactuals_df.drop(['y0'], axis='columns').rename(columns={'y1': 'y'})
joint_y1_df['t'] = 1
stacked_y_counterfactuals_df = pd.concat([joint_y0_df, joint_y1_df])
output['obs_counterfactual_t'] = stacked_y_counterfactuals_df['t'].to_numpy()
output['obs_counterfactual_y'] = stacked_y_counterfactuals_df['y'].to_numpy()
output['obs_counterfactual_w'] = stacked_y_counterfactuals_df.drop(['t', 'y'], axis='columns').to_numpy()
if return_ites:
ites = joint_counterfactuals_df['y1'] - joint_counterfactuals_df['y0']
output['ites'] = ites.to_numpy()
if return_ate:
ites = joint_counterfactuals_df['y1'] - joint_counterfactuals_df['y0']
output['ate'] = ites.to_numpy().mean()
return output
| 5,350,710 |
def generate_iface_status_html(iface=u'lo', status_txt="UNKNOWN"):
"""Generates the html for interface of given status. Status is UNKNOWN by default."""
status = "UNKNOWN"
valid_status = html_generator.HTML_LABEL_ROLES[0]
    if status_txt is not None:
        if " DOWN" in str(status_txt):
            status = "DOWN"
            valid_status = html_generator.HTML_LABEL_STATUS[u'CRITICAL']
        elif " UP" in str(status_txt):
            status = "UP"
            valid_status = html_generator.HTML_LABEL_STATUS[u'OK']
return generate_iface_status_html_raw(iface, status, valid_status)
| 5,350,711 |
def generalization(self, source, target):
"""Create generalization.
"""
if isprofilemember(source):
return
tok = token('sourcetotargetuuidmapping', False)
containeruuid = tok.uuids[source.__parent__.uuid]
container = target.anchor.node(containeruuid)
general_source = source.refindex[source.attributes['general']]
generaluuid = tok.uuids[general_source.uuid]
general = target.anchor.node(generaluuid)
generalization = Generalization()
generalization.general = general
container[str(generalization.uuid)] = generalization
target.finalize(source, generalization)
| 5,350,712 |
def board_size_by_user(board):
"""Get and set board size by user input.
    Prompt the user to enter the number of rows and columns for the board. The
    defaults are the current board settings (used if the user enters nothing).
    If the input is not valid, a warning is printed and the user is prompted
    again.
:param board: Board object.
:return: None
"""
while True:
print('Please set game board size (need to have even number of cells, that is between 4-52).')
try:
rows = input(f'Number of rows ({board.rows}):')
rows = board.rows if '' == rows else int(rows)
            cols = input(f'Number of columns ({board.columns}):')
cols = board.columns if '' == cols else int(cols)
except (TypeError, ValueError):
print(f'Invalid input. Please use numbers only.')
continue
try:
board.set_board_size(cols, rows)
except UserWarning as e:
print(e)
continue
break
| 5,350,713 |
def test__get_dependency_names():
"""Test that the regex in _get_dependency_names() parses a requirements file correctly."""
mock_dependencies: List[str] = [
"# This is a comment",
"#This is another comment",
"--requirement requirements.txt",
"latest-package",
"duplicate-package",
"duplicate-package", # Should not be filtered in this method
"pinned-package1==0.2.4",
"pinned-package2==0.2.4 # With comment after",
"lower-bound-package1>=5.2.4",
"lower-bound-package2>=5.2.4 # With comment after",
"lower-bound-package3>5.2.4",
"upper-bound-package1<=2.3.8",
"upper-bound-package2<=2.3.8 # With comment after",
"upper-bound-package3<2.3.8",
"two-bounds-package1>=0.8.4,<2.0.0",
"two-bounds-package2>=0.8.4,<2.0.0 # With comment after",
"package_with_underscores",
"1",
"-",
"",
]
expected_dependendencies: List[str] = [
"latest-package",
"duplicate-package",
"duplicate-package",
"pinned-package1",
"pinned-package2",
"lower-bound-package1",
"lower-bound-package2",
"lower-bound-package3",
"upper-bound-package1",
"upper-bound-package2",
"upper-bound-package3",
"two-bounds-package1",
"two-bounds-package2",
"package_with_underscores",
"1",
"-",
]
ge_dependencies: GEDependencies = GEDependencies()
observed_dependencies = ge_dependencies._get_dependency_names(mock_dependencies)
assert observed_dependencies == expected_dependendencies
| 5,350,714 |
def is_a(file_name):
"""
Tests whether a given file_name corresponds to a CRSD file. Returns a reader instance, if so.
Parameters
----------
file_name : str
the file_name to check
Returns
-------
CRSDReader1_0|None
Appropriate `CRSDReader` instance if CRSD file, `None` otherwise
"""
try:
crsd_details = CRSDDetails(file_name)
logger.info('File {} is determined to be a CRSD version {} file.'.format(file_name, crsd_details.crsd_version))
return CRSDReader(crsd_details)
except SarpyIOError:
# we don't want to catch parsing errors, for now?
return None
| 5,350,715 |
def Closure(molecules):
"""
Returns the set of the closure of a given list of molecules
"""
    newmol = set(molecules)
    oldmol = set([])
    while newmol:
        gen = ReactSets(newmol, newmol)
        gen |= ReactSets(newmol, oldmol)
        gen |= ReactSets(oldmol, newmol)
        oldmol |= newmol
        newmol = gen - oldmol
    return oldmol
| 5,350,716 |
def get_logs(repo_folder):
""" Get the list of logs """
def get_status(path, depth, statuses):
if depth == 3:
for f in os.listdir(path):
if f == STATUS_FILE_NAME:
f = os.path.join(path,f)
statuses.append(f)
else:
for d in os.listdir(path):
d = os.path.join(path,d)
if not os.path.isdir(d): continue
get_status(d, depth + 1, statuses)
statuses = []
get_status(repo_folder, 0, statuses)
return statuses
| 5,350,717 |
def get_celery_task():
"""get celery task, which takes user id as its sole argument"""
global _celery_app
global _celery_task
if _celery_task:
return _celery_task
load_all_fetcher()
_celery_app = Celery('ukfetcher', broker=ukconfig.celery_broker)
_celery_app.conf.update(
CELERY_ACCEPT_CONTENT=['pickle', 'json', 'msgpack', 'yaml'])
@_celery_app.task
def on_user_activated(user_id):
try:
user_fetcher = get_db_set(user_id, 'fetcher')
for i in user_fetcher:
fetcher = register_fetcher.fetcher_map.get(i)
if fetcher is None:
uklogger.log_err(
'fetcher {} not exist, requested by user {}'.format(
i, user_id))
else:
uklogger.log_info('run fetcher {} for user {}'.format(
i, user_id))
fetcher.run(user_id)
except Exception as ex:
uklogger.log_exc(ex)
if is_in_unittest():
_celery_task = on_user_activated
else:
_celery_task = on_user_activated.delay
return _celery_task
| 5,350,718 |
def plot_keras_activations(activations):
"""Plot keras activation functions.
Args:
activations (list): List of Keras
activation functions
    Returns:
        fig (matplotlib.figure.Figure): Figure containing one subplot per activation.
        axs (numpy.ndarray): Array of matplotlib axes.
"""
fig, axs = plt.subplots(1,len(activations),figsize=(3*len(activations),5),sharex=True,sharey=True,dpi=150)
x = tf.constant(tf.range(-3,3,0.1), dtype=tf.float32)
for i, activation in enumerate(activations):
axs[i].plot(x.numpy(), activation(x).numpy())
axs[i].set_title(activation.__name__)
axs[i].set_xlabel(r'$x$')
if i == 0:
axs[i].set_ylabel(r'$\phi(x)$')
despine(ax=axs[i])
fig.tight_layout()
return fig, axs
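# Usage sketch (assumes TensorFlow and matplotlib are importable, as the
# function itself does, and that a `despine` helper exists in this module):
import tensorflow as tf

fig, axs = plot_keras_activations([tf.keras.activations.relu,
                                   tf.keras.activations.tanh,
                                   tf.keras.activations.sigmoid])
fig.savefig("activations.png")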
| 5,350,719 |
def check_enum_struct_names():
"""Ensure that none of the items in ENUM_DATA_TYPE_NAMES are in STRUCT_DATA_TYPE_ALIASES"""
from msl.equipment.resources.picotech.picoscope.enums import ENUM_DATA_TYPE_NAMES
from msl.equipment.resources.picotech.picoscope.structs import STRUCT_DATA_TYPE_ALIASES
for item in ENUM_DATA_TYPE_NAMES:
if item in STRUCT_DATA_TYPE_ALIASES:
raise ValueError('{}'.format(item))
print('The enum and struct names are unique')
| 5,350,720 |
def hog_feature(image, pixel_per_cell=8):
"""
Compute hog feature for a given image.
Important: use the hog function provided by skimage to generate both the
feature vector and the visualization image. **For block normalization, use L1.**
Args:
image: an image with object that we want to detect.
pixel_per_cell: number of pixels in each cell, an argument for hog descriptor.
Returns:
score: a vector of hog representation.
hogImage: an image representation of hog provided by skimage.
"""
### YOUR CODE HERE
    (hogFeature, hogImage) = feature.hog(image, pixels_per_cell=(pixel_per_cell, pixel_per_cell), block_norm='L1', visualize=True)
#hogFeature = normalize(hogFeature.reshape(500,-1), 'l1', return_norm=False)
### END YOUR CODE
return (hogFeature, hogImage)
| 5,350,721 |
def create_parser() -> ArgumentParser:
"""
Helper function parsing the command line options.
"""
parser = ArgumentParser(description="torchx CLI")
subparser = parser.add_subparsers(
title="sub-commands",
description=sub_parser_description,
)
subcmds = {
"describe": CmdDescribe(),
"log": CmdLog(),
"run": CmdRun(),
"builtins": CmdBuiltins(),
"runopts": CmdRunopts(),
"status": CmdStatus(),
}
for subcmd_name, cmd in subcmds.items():
cmd_parser = subparser.add_parser(subcmd_name)
cmd.add_arguments(cmd_parser)
cmd_parser.set_defaults(func=cmd.run)
return parser
| 5,350,722 |
def test_contains_always_match():
"""
Contains handler should always match if no rate is specified.
"""
handler = core.ContainsHandler(name='#', func=None)
assert handler.match('Tell me about #foo', channel='bar')
| 5,350,723 |
def process_word(word):
"""Remove all punctuation and stem words"""
word = re.sub(regex_punc, '', word)
return stemmer.stem(word)
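# Sketch of the module-level objects process_word relies on (assumptions, not
# shown in the snippet): a punctuation regex and an NLTK stemmer.
import re
from nltk.stem import PorterStemmer

regex_punc = re.compile(r"[^\w\s]")
stemmer = PorterStemmer()

# process_word("Running!") -> "run"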
| 5,350,724 |
def no_autoflush(fn):
"""Wrap the decorated function in a no-autoflush block."""
@wraps(fn)
def wrapper(*args, **kwargs):
with db.session.no_autoflush:
return fn(*args, **kwargs)
return wrapper
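# Usage sketch (assumes a Flask-SQLAlchemy style `db` object, as referenced
# inside the decorator):
@no_autoflush
def add_items(items):
    for item in items:
        db.session.add(item)   # runs with autoflush disabled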
| 5,350,725 |
def print_exception(exc, msg=None):
"""Print the given exception. If a message is given it will be prepended to the exception message with a \n."""
if msg:
exc = "\n".join((msg, str(exc)))
_, _, exc_tb = sys.exc_info()
traceback.print_exception(exc.__class__, exc, exc_tb)
| 5,350,726 |
def default_model():
"""Get a path for a default value for the model. Start searching in the
current directory."""
project_root = get_project_root()
models_dir = os.path.join(project_root, "models")
curr_dir = os.getcwd()
if (
os.path.commonprefix([models_dir, curr_dir]) == models_dir
and curr_dir != models_dir
):
latest_model = curr_dir
else:
latest_model = get_latest_folder(models_dir)
return latest_model
| 5,350,727 |
def int_to_ip(ip):
"""
Convert a 32-bit integer into IPv4 string format
:param ip: 32-bit integer
:return: IPv4 string equivalent to ip
"""
if type(ip) is str:
return ip
return '.'.join([str((ip >> i) & 0xff) for i in [24, 16, 8, 0]])
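# Worked example: 3232235777 == 0xC0A80101, i.e. 192.168.1.1.
assert int_to_ip(3232235777) == '192.168.1.1'
assert int_to_ip('10.0.0.1') == '10.0.0.1'   # strings are passed through unchanged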
| 5,350,728 |
def audit_work_timer_cancel(id):
"""
Cancel timer set.
:param id:
:return:
"""
work = Work.query.get(id)
celery.control.revoke(work.task_id, terminate=True)
work.task_id = None
work.timer = None
db.session.add(work)
db.session.commit()
return redirect(url_for('.audit_work_timer', id=id))
| 5,350,729 |
def drawDestCxIds2(ax, layer, fontsize, markersize, partitionColors, xShift):
"""Draw the destination compartment ids into an existing figure.
The displayed ids are interleaved and are grouped into partitions.
:param plt.Axes ax: Matplotlib axes.
:param Layer layer: The layer to visualize.
:param int fontsize: Fontsize of numeric values in plot.
:param int markersize: Size of the squares drawn at each id location.
:param list partitionColors: Color scheme to distinguish partitions.
:param int xShift: Horizontal offset.
"""
outputSize = np.sum([partition.sizeInterleaved
for partition in layer.partitions]).item()
ax.clear()
ax.axis('off')
ax.set_xlim(-0.5, xShift)
ax.set_ylim(-0.5, outputSize)
ax.invert_yaxis()
xShiftDestIds = 0
coreOffset = 0
for partition in layer.partitions:
cxIds = partition.compartmentGroup.cxIds
# Draw destCxIds numbers.
for cxId in cxIds:
ax.text(xShiftDestIds, coreOffset + cxId, str(cxId),
va='center', ha='center', fontsize=fontsize)
# Draw colors.
x = xShiftDestIds * np.ones(len(cxIds))
colors = partitionColors[partition.id]
ax.scatter(x, coreOffset + cxIds, c=colors, marker='s', s=markersize)
coreOffset += partition.sizeInterleaved
| 5,350,730 |
def team_pos_evolution(team_id):
"""
returns the evolution of position
for a team for the season
"""
pos_evo = []
for week in team_played_weeks(team_id):
try:
teams_pos = [x[0] for x in league_table_until_with_teamid(week)]
pos = teams_pos.index(int(team_id)) + 1
pos_evo.append(pos)
        except Exception:
            # skip weeks where the team does not appear in the table
            pass
return pos_evo
| 5,350,731 |
def ifft_method(x, y, interpolate=True):
"""
    Performs IFFT on data.
Parameters
----------
x: array-like
the x-axis data
y: array-like
the y-axis data
interpolate: bool
if True perform a linear interpolation on dataset before transforming
Returns
-------
xf: array-like
the transformed x data
yf: array-like
transformed y data
"""
    if interpolate:
        x, y = _fourier_interpolate(x, y)
    N = len(x)  # sample count after optional interpolation
xf = np.fft.fftfreq(N, d=(x[1] - x[0]) / (2 * np.pi))
yf = np.fft.ifft(y)
return xf, yf
| 5,350,732 |
def getExecutable():
"""
Returns the executable this session is running from.
:rtype: str
"""
return sys.executable
| 5,350,733 |
def run(namespace=None, action_prefix='action_', args=None):
"""Run the script. Participating actions are looked up in the caller's
namespace if no namespace is given, otherwise in the dict provided.
Only items that start with action_prefix are processed as actions. If
you want to use all items in the namespace provided as actions set
action_prefix to an empty string.
:param namespace: An optional dict where the functions are looked up in.
By default the local namespace of the caller is used.
:param action_prefix: The prefix for the functions. Everything else
is ignored.
:param args: the arguments for the function. If not specified
:data:`sys.argv` without the first argument is used.
"""
if namespace is None:
namespace = sys._getframe(1).f_locals
actions = find_actions(namespace, action_prefix)
if args is None:
args = sys.argv[1:]
if not args or args[0] in ('-h', '--help'):
return print_usage(actions)
elif args[0] not in actions:
fail('Unknown action \'%s\'' % args[0])
arguments = {}
types = {}
key_to_arg = {}
long_options = []
formatstring = ''
func, doc, arg_def = actions[args.pop(0)]
for idx, (arg, shortcut, default, option_type) in enumerate(arg_def):
real_arg = arg.replace('-', '_')
if shortcut:
formatstring += shortcut
if not isinstance(default, bool):
formatstring += ':'
key_to_arg['-' + shortcut] = real_arg
long_options.append(isinstance(default, bool) and arg or arg + '=')
key_to_arg['--' + arg] = real_arg
key_to_arg[idx] = real_arg
types[real_arg] = option_type
arguments[real_arg] = default
try:
optlist, posargs = getopt.gnu_getopt(args, formatstring, long_options)
    except getopt.GetoptError as e:
fail(str(e))
specified_arguments = set()
for key, value in enumerate(posargs):
try:
arg = key_to_arg[key]
        except KeyError:
fail('Too many parameters')
specified_arguments.add(arg)
try:
arguments[arg] = converters[types[arg]](value)
except ValueError:
fail('Invalid value for argument %s (%s): %s' % (key, arg, value))
for key, value in optlist:
arg = key_to_arg[key]
if arg in specified_arguments:
fail('Argument \'%s\' is specified twice' % arg)
if types[arg] == 'boolean':
if arg.startswith('no_'):
value = 'no'
else:
value = 'yes'
try:
arguments[arg] = converters[types[arg]](value)
except ValueError:
fail('Invalid value for \'%s\': %s' % (key, value))
newargs = {}
    for k, v in arguments.items():
newargs[k.startswith('no_') and k[3:] or k] = v
arguments = newargs
return func(**arguments)
| 5,350,734 |
def get_file_with_suffix(d, suffix):
"""
Generate a list of all files present below a given directory.
"""
items = os.listdir(d)
for file in items:
if file.endswith(suffix):
return file.split(suffix)[0]
return None
| 5,350,735 |
def app():
"""Create and configure a new app instance for each test."""
# create a temporary file to isolate the database for each test
db_fd, db_path = tempfile.mkstemp()
# warnings.warn(UserWarning("db in file: " + db_path))
# create the app with common test config
app = create_app({"TESTING": True, "DATABASE": db_path})
# create the database and load test data
with app.app_context():
empty_tbl_products()
get_db().executescript(data_sql)
yield app
# close and remove the temporary database
os.close(db_fd)
os.unlink(db_path)
| 5,350,736 |
def force_range(m1, m2):
""" Generate a graph on the relationship between Force and distance between two masses
:param m1: INT: The mass of object 1
:param m2: INT: The mass of object 2
"""
# Generate list of values for range from 100 - 1000 (increments of 50)
r = range(100, 1001, 50)
    # The gravitational constant G
G = 6.674 * (10**-11)
# Empty list to store calculated values of F
F = []
# For each increment of r
for dist in r:
# Calculate force
force = G*(m1*m2)/(dist**2)
# Then append that value to F
F.append(force)
# Draw the graph
plt.plot(r, F)
return
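# Worked example for the first point on the curve: m1 = m2 = 1000 kg at r = 100 m.
G = 6.674 * (10**-11)
print(G * (1000 * 1000) / 100**2)   # ~6.674e-09 N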
| 5,350,737 |
def PolyMod(f, g):
"""
return f (mod g)
"""
return f % g
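# Example sketch: PolyMod simply defers to the % operator, so it works for plain
# integers as well as polynomial types that implement __mod__ (e.g. sympy.Poly).
assert PolyMod(17, 5) == 2
# from sympy import Poly, symbols
# x = symbols('x')
# PolyMod(Poly(x**3 + 1, x), Poly(x + 1, x))   # -> Poly(0, x), since x + 1 divides x**3 + 1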
| 5,350,738 |
def policy_improvement(nS, nA, P, full_state_to_index, g=.75,t=0.05):
"""Iteratively evaluates and improves a policy until an optimal policy is found
or reaches threshold of iterations
Parameters:
nS: number of states
nA: number of actions
P: transitional tuples given state and action
full_state_to_index: dictionary of state to index Values
g: gamma which is discount factor
t: theta or stopping condition
Returns: tuple of policy and value of policy
"""
policy = np.ones([nS, nA]) / nA # random policy (equal chance all actions)
i=0
while True:
i+=1
if i%100==0:
print(i)
V = policy_eval(policy, nS, nA, P, full_state_to_index, gamma=g, theta=t) # eval current policy
is_policy_stable = True # true is no changes false if we make changes
for s in range(nS):
chosen_a = np.random.choice(np.argwhere(policy[s] == np.amax(policy[s])).flatten().tolist())
action_values = value(s, V, full_state_to_index, nA, P, gamma=g, theta=t)
best_a = np.random.choice(np.argwhere(action_values == np.amax(action_values)).flatten().tolist())
if chosen_a != best_a: # greedy update
is_policy_stable = False
policy[s] = np.eye(nA)[best_a]
if is_policy_stable or i==10000:
print(i, 'Iterations')
return policy, V
| 5,350,739 |
def get_train_val_test_data(args):
"""Load the data on rank zero and boradcast number of tokens to all GPUS."""
(train_data, val_data, test_data) = (None, None, None)
# Data loader only on rank 0 of each model parallel group.
if mpu.get_model_parallel_rank() == 0:
data_config = configure_data()
data_config.set_defaults(data_set_type='BERT', transpose=False)
(train_data, val_data, test_data), tokenizer = data_config.apply(args)
before = tokenizer.num_tokens
after = before
multiple = args.make_vocab_size_divisible_by * \
mpu.get_model_parallel_world_size()
while (after % multiple) != 0:
after += 1
print_rank_0('> padded vocab (size: {}) with {} dummy '
'tokens (new size: {})'.format(
before, after - before, after))
# Need to broadcast num_tokens and num_type_tokens.
token_counts = torch.cuda.LongTensor([after,
tokenizer.num_type_tokens,
int(args.do_train), int(args.do_valid), int(args.do_test)])
else:
token_counts = torch.cuda.LongTensor([0, 0, 0, 0, 0])
# Broadcast num tokens.
torch.distributed.broadcast(token_counts,
mpu.get_model_parallel_src_rank(),
group=mpu.get_model_parallel_group())
num_tokens = token_counts[0].item()
num_type_tokens = token_counts[1].item()
args.do_train = token_counts[2].item()
args.do_valid = token_counts[3].item()
args.do_test = token_counts[4].item()
return train_data, val_data, test_data, num_tokens, num_type_tokens
| 5,350,740 |
def user_data_check(data_file):
"""
1 - Check user data file, and if necessary coerce to correct format.
2 - Check for fold calculation errors, and if correct, return data frame
for passing to later functions.
3 - If incorrect fold calculations detected, error message returned.
:param data_file: user data table.
:return orig_file_parsed: Dataframe (if error checks pass).
:return error_message: Text string (error message).
"""
# Read user_data and assign to dataframe variable.
orig_file = pd.read_table(data_file)
# Subset source df by the first 7 columns.
# Note: last index should be +1 bigger than number of fields.
# AZ20.tsv file has 86 total columns, 80 of which are empty cells.
# Necessary step to maintain indexing references at a later stage!
orig_file_subset = orig_file.iloc[:, 0:7]
# Coerce column 1 to object.
orig_file_subset.iloc[:, 0] = orig_file_subset.iloc[:, 0].astype(object)
# Coerce column 2-7 to float.
orig_file_subset.iloc[:, 1:7] = orig_file_subset.iloc[:, 1:7].astype(float)
# Subset data frame by checking if mean intensities in both columns,
# are greater than zero.
orig_file_subset = orig_file_subset[(orig_file_subset.iloc[:, 1] > 0) |\
(orig_file_subset.iloc[:, 2] > 0)]
# A data file that has been edited such that columns have been deleted,
# i.e. in excel, may introduce "phantom" columns in python environment.
# Such columns are coerced to "un-named" fields with nan entries.
# If cv columns present with values, original data frame unaffected.
# Code drops columns that contain all nan in columns.
orig_file_subset = orig_file_subset.dropna(axis=1, # Iterate by columns.
how="all") # Drop if all na
# in columns.
# Determine number of columns.
num_col = orig_file_subset.shape[1]
# Check if number of cols = 5 and append new columns with all entries
# = to 1 for cv calculations that are missing.
# If number of columns adhere to correct format, data frame unaffected.
if num_col == 5:
orig_file_subset["control_cv"] = 1
orig_file_subset["condition_cv"] = 1
# Add fold calculation column to df.
orig_file_subset["calc_fold_change"] = \
orig_file_subset.iloc[:, 2].divide(orig_file_subset.iloc[:,1])
# Define user and script calculated fold changes as series variables.
user_fold_calc = orig_file_subset.iloc[:, 3]
script_fold_calc = orig_file_subset.iloc[:, 7]
    # Determine if fold change calculations match to within
    # an absolute tolerance of 3 decimal places.
    # Numpy "isclose()" function used to check closeness of match.
    # Boolean series returned to new column in data frame.
    orig_file_subset["check_fold_match"] = \
        np.isclose(user_fold_calc, script_fold_calc, atol=10**-3)
# Determine number of true matches for fold change calculations.
# Summing of boolean series carried out: True = 1, False = 0.
sum_matches = sum(orig_file_subset.iloc[:, 8] == 1)
    # Define error message if fold calculation matching determines
    # existence of errors.
error_message = \
("Anomaly detected..PhosQuest will self-destruct in T minus 10 seconds"+
"...just kidding! Please check your fold change calculations, "+
"a discrepancy has been detected.")
# If "sum_matches" equal to length of data frame, then return data frame.
# If not, return error message.
# Note: if first logical test passes, this indicates that fold change
# calculations in original user data are correct (within tolerance),
# and filtered dataframe returned for further analysis.
if sum_matches == len(orig_file_subset):
orig_file_parsed = orig_file_subset.iloc[:, 0:7]
return orig_file_parsed
elif sum_matches != len(orig_file_subset):
return error_message
| 5,350,741 |
def build_task_inputs_spec(
task_spec: pipeline_spec_pb2.PipelineTaskSpec,
pipeline_params: List[dsl.PipelineParam],
tasks_in_current_dag: List[str],
) -> None:
"""Builds task inputs spec from pipeline params.
Args:
task_spec: The task spec to fill in its inputs spec.
pipeline_params: The list of pipeline params.
tasks_in_current_dag: The list of tasks names for tasks in the same dag.
"""
for param in pipeline_params or []:
input_name = param.full_name
if type_utils.is_parameter_type(param.param_type):
if param.op_name in tasks_in_current_dag:
task_spec.inputs.parameters[
input_name].task_output_parameter.producer_task = (
dsl_utils.sanitize_task_name(param.op_name))
task_spec.inputs.parameters[
input_name].task_output_parameter.output_parameter_key = (
param.name)
else:
task_spec.inputs.parameters[
input_name].component_input_parameter = input_name
else:
if param.op_name in tasks_in_current_dag:
task_spec.inputs.artifacts[
input_name].task_output_artifact.producer_task = (
dsl_utils.sanitize_task_name(param.op_name))
task_spec.inputs.artifacts[
input_name].task_output_artifact.output_artifact_key = (
param.name)
else:
task_spec.inputs.artifacts[
input_name].component_input_artifact = input_name
| 5,350,742 |
def get_modules(request: HttpRequest) -> JsonResponse:
"""Gets a list of modules for the provided course from the Canvas API based on current user
A module ID has to be provided in order to access the correct course
:param request: The current request as provided by django
:return: A JSONResponse containing either an error or the data provided by Canvas
"""
# Note: For functionality documentation, see get_courses, as much of it is the same
error = expire_checker(request)
url = request.user.canvas_oauth2_token.url
if error[0] is not None:
return error[0]
client = error[1]
header = {"Authorization": f"Bearer {request.user.canvas_oauth2_token.access_token}"}
course_id = request.GET.get("course_id", "")
    if not course_id:
        return error_generator("There was no provided course ID!", 404)  # Return early when no course ID is given
modules = requests.get(
"{}/api/v1/courses/{}/modules?per_page=50".format(url, course_id),
headers=header, verify=False is client.dev)
return content_helper(modules)
| 5,350,743 |
def async_client(async_app):
"""A test async client for the app."""
with async_app.test_client() as testing_client:
yield testing_client
| 5,350,744 |
def create_workflow(session, workflow_spec=dict(), result_schema=None):
"""Create a new workflow handle for a given workflow specification. Returns
the workflow identifier.
Parameters
----------
session: sqlalchemy.orm.session.Session
Database session.
workflow_spec: dict, default=dict()
Optional workflow specification.
result_schema: dict, default=None
Optional result schema.
Returns
-------
string
"""
workflow_id = util.get_unique_identifier()
workflow = WorkflowObject(
workflow_id=workflow_id,
name=workflow_id,
workflow_spec=workflow_spec,
result_schema=result_schema
)
session.add(workflow)
return workflow_id
| 5,350,745 |
def scan(graph, connectionInfo, logger, thread):
"""
Get hardware information for each host in a Xen Environment.
Method collects hardware data from XenServer.
Updates will be stored in existing hosts.
Necessary values in the configuration file of this collector module:
- timeout Timeout this collector module shall use (Integer)
    - xenuri The URI of the Xen server
- xenuser The username we use to connect to the Management API of the Xen server
- xenpw Password used for the connection
:param graph: Data interface object for this collector module
:type graph: insalata.model.Graph.Graph
:param connectionInfo: Information needed to connect to xen server
:type connectionInfo: dict
:param logger: The logger this scanner shall use
:type logger: logging:Logger
:param thread: Thread executing this collector
:type thread: insalata.scanner.Worker.Worker
"""
logger.info("Collecting hardware information from Xen")
timeout = int(connectionInfo['timeout'])
name = connectionInfo['name']
rpcConn = None
xen = None
session = None
try:
rpcConn = RpcConnection.RpcConnection(connectionInfo['xenuri'], connectionInfo['xenuser'], connectionInfo['xenpw'])
xen, session = rpcConn.getConnectionSession()
except:
logger.error("Connection to Xen Server {0} not possible.".format(connectionInfo['xenuri']))
return
#Get required data from xen
answer = xen.VM.get_all_records(session)
if answer['Status'] == 'Failure':
logger.error("Hardware scan on Xen server {0} failed. Server sent failure while reading all VMs.".format(connectionInfo['xenuri']))
return
hostRecords = answer['Value']
answer = xen.VBD.get_all_records(session)
if answer['Status'] == 'Failure':
logger.error("Hardware scan on Xen server {0} failed. Server sent failure while reading all VBDs.".format(connectionInfo['xenuri']))
return
VBDRecords = answer['Value']
answer = xen.VDI.get_all_records(session)
if answer['Status'] == 'Failure':
logger.error("Hardware scan on Xen server {0} failed. Server sent failure while reading all VDIs.".format(connectionInfo['xenuri']))
return
VDIRecords = answer['Value']
#Insert data into graph
for host in graph.getAllNeighbors(Host):
logger.debug("Starting hardware scan on host: {0}".format(host.getID()))
answer = xen.VM.get_by_name_label(session, host.getID())
if answer['Status'] == 'Failure':
logger.error("Hardware scan on Xen server {0} for host {1} failed. Server sent failure while getting record.".format(connectionInfo['xenuri'], host.getID()))
continue
        if len(answer['Value']) == 0:
            logger.error("Hardware scan on Xen server {0} for host {1} failed. No record found for host.".format(connectionInfo['xenuri'], host.getID()))
            continue
hostRecord = answer['Value'][0]
host.setMemory(hostRecords[hostRecord]['memory_dynamic_min'], hostRecords[hostRecord]['memory_dynamic_max']) #RAM info
host.setCPUs(hostRecords[hostRecord]['VCPUs_max']) #CPU info
for host in graph.getAllNeighbors(Host):
#Get Record for host
answer = xen.VM.get_by_name_label(session, host.getID())
if answer['Status'] == 'Failure':
logger.error("Hardware scan on Xen server {0} for host {1} failed. Server sent failure while getting record for host.".format(connectionInfo['xenuri'], host.getID()))
continue
        if len(answer['Value']) == 0:
            logger.error("Hardware scan on Xen server {0} for host {1} failed. No record found for host.".format(connectionInfo['xenuri'], host.getID()))
            continue
hostRecord = answer['Value'][0]
#Get a list of all vdis of the current host on the server
nameToRecordDict = dict() #Disks on server
for vbd in hostRecords[hostRecord]['VBDs']:
if VBDRecords[vbd]['type'] == 'Disk' and VBDRecords[vbd]['VDI'] != "OpaqueRef:NULL":
                diskName = VDIRecords[VBDRecords[vbd]['VDI']]['name_label']  # avoid shadowing the collector name used for verification below
                nameToRecordDict[diskName] = VBDRecords[vbd]['VDI']
deletedDisks = set()
stillExistingDisks = set()
for disk in host.getAllNeighbors(Disk):
if disk.getID() in nameToRecordDict:
stillExistingDisks.add(disk)
else:
disk.removeVerification(name)
nameToRecordDict.pop(disk.getID()) #Determine which disks have to be created
for diskName in list(nameToRecordDict.keys()): #Create new disks
size = VDIRecords[nameToRecordDict[diskName]]['virtual_size']
disk = graph.getOrCreateDisk(diskName, size, timeout, host, size=size)
stillExistingDisks.add(disk)
for disk in stillExistingDisks:
disk.verify(name, timeout)
host.addDisk(disk, name, timeout)
#Get the VDIs on server which are not plugged
nameToRecordDict = dict() #Disks on server
for vdi in VDIRecords:
if VDIRecords[vdi]['VBDs'] == []:
nameToRecordDict[VDIRecords[vdi]['name_label']] = vdi
currentDisks = set([disk for disk in graph.getAllNeighbors(Disk) if len(disk.getAllNeighbors(Host)) == 0]) #Current disks that are not plugged into a host
deletedDisks = set()
stillExistingDisks = set()
for disk in currentDisks:
if disk.getID() in list(nameToRecordDict.keys()):
stillExistingDisks.add(disk)
else:
disk.removeVerification(name)
nameToRecordDict.pop(disk.getID())
for diskName in nameToRecordDict: #Create new disks
size = int(VDIRecords[nameToRecordDict[diskName]]['virtual_size'])
disk = graph.getOrCreateDisk(diskName, name, timeout, host, size=size)
stillExistingDisks.add(disk)
for disk in stillExistingDisks:
disk.verify(name, timeout)
rpcConn.logout()
| 5,350,746 |
def poppler_page_get_text_layout(page):
"""
Wrapper of an underlying c-api function not yet exposed by the
python-poppler API.
Returns a list of text rectangles on the pdf `page`
"""
n = c_uint(0)
rects = CRectangle.ptr()
# From python-poppler internals it is known that hash(page) returns the
# c-pointer to the underlying glib object. See also the repr(page).
page_ptr = hash(page)
_c_text_layout(page_ptr, rects, n)
# Obtain pointer to array of rectangles of the correct length
rectangles = POINTER(CRectangle * n.value).from_address(addressof(rects))
get_text = page.get_selected_text
poppler_rect = poppler.Rectangle()
result = []
for crect in rectangles.contents:
# result.append(Rectangle(
# x1=crect.x1, y1=crect.y1, x2=crect.x2, y2=crect.y2))
_ = (crect.x1, crect.y1, crect.x2, crect.y2)
poppler_rect.x1, poppler_rect.y1, poppler_rect.x2, poppler_rect.y2 = _
text = get_text(GLYPH, poppler_rect).decode("utf8")
if text.endswith(" \n"):
text = text[:-2]
elif text.endswith(" ") and len(text) > 1:
text = text[:-1]
elif text.endswith("\n"):
text = text[:-1]
rect = Box(
rect=Rectangle(x1=crect.x1, y1=crect.y1, x2=crect.x2, y2=crect.y2),
text=text,
)
result.append(rect)
# TODO(pwaller): check that this free is correct
g_free(rectangles)
return result
| 5,350,747 |
def get_volume(name: Optional[str] = None,
namespace: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVolumeResult:
"""
## Example Usage
```python
import pulumi
import pulumi_harvester as harvester
ubuntu20_dev_mount_disk = harvester.get_volume(name="ubuntu20-dev-mount-disk",
namespace="default")
```
:param str name: A unique name
"""
__args__ = dict()
__args__['name'] = name
__args__['namespace'] = namespace
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('harvester:index/getVolume:getVolume', __args__, opts=opts, typ=GetVolumeResult).value
return AwaitableGetVolumeResult(
access_mode=__ret__.access_mode,
attached_vm=__ret__.attached_vm,
description=__ret__.description,
id=__ret__.id,
image=__ret__.image,
name=__ret__.name,
namespace=__ret__.namespace,
phase=__ret__.phase,
size=__ret__.size,
state=__ret__.state,
storage_class_name=__ret__.storage_class_name,
tags=__ret__.tags,
volume_mode=__ret__.volume_mode)
| 5,350,748 |
def both_block_num_missing(record):
"""
    Returns true if both block numbers are missing
:param record: dict - The record being evaluated
:return: bool
"""
rpt_block_num = record.get("rpt_block_num", "") or ""
rpt_sec_block_num = record.get("rpt_sec_block_num", "") or ""
# True, if neither address has a block number.
if rpt_block_num == "" and rpt_sec_block_num == "":
return True
return False
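# Example sketch: missing keys, None values and empty strings all count as missing.
assert both_block_num_missing({"rpt_block_num": None, "rpt_sec_block_num": ""}) is True
assert both_block_num_missing({"rpt_block_num": "12", "rpt_sec_block_num": ""}) is False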
| 5,350,749 |
def map_keys(func,dic):
"""
TODO:
Test against all types
handle python recursion limit
"""
return {func(k):map_keys(func,v)
if isinstance(v,dict) else v
for k,v in dic.items()}
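# Usage sketch: the key transform is applied at every level of a nested dict.
nested = {"First Name": "Ada", "Details": {"Birth Year": 1815}}
assert map_keys(str.lower, nested) == {"first name": "Ada", "details": {"birth year": 1815}}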
| 5,350,750 |
def jsonize(v):
"""
Convert the discount configuration into a state in which it can be
stored inside the JSON field.
Some information is lost here; f.e. we only store the primary key
of model objects, so you have to remember yourself which objects
are meant by the primary key values.
"""
if isinstance(v, dict):
return dict((i1, jsonize(i2)) for i1, i2 in v.items())
if hasattr(v, "__iter__"):
return [jsonize(i) for i in v]
if isinstance(v, Model):
return v.pk
return v
| 5,350,751 |
def cli(env, identifier, network_type, speed):
"""Manage network settings."""
public = (network_type == 'public')
vsi = SoftLayer.VSManager(env.client)
vs_id = helpers.resolve_id(vsi.resolve_ids, identifier, 'VS')
vsi.change_port_speed(vs_id, public, speed)
| 5,350,752 |
def clean_kaggle_movies(movies_df):
"""
Clean the Kaggle movie data with the following steps:
1. Drop duplicate rows
2. Filter out adult videos and drop unnecessary columns
3. Recast columns to appropriate data types
Parameters
----------
movies_df : Pandas dataframe
Kaggle movie data
Returns
-------
Pandas dataframe
Clean Kaggle movie data
"""
# Drop duplicate rows
movies_df = udf_movies.drop_duplicates(movies_df)
# Filter out adult videos and drop unnecessary columns
movies_df = drop_cols(movies_df)
# Recast columns to appropriate data types
movies_df = recast_cols(movies_df)
return movies_df
| 5,350,753 |
async def test_get_read_only_properties(hass, hass_ws_client, iolinc_properties_data):
"""Test getting an Insteon device's properties."""
mock_read_only = ExtendedProperty(
"44.44.44", "mock_read_only", bool, is_read_only=True
)
mock_read_only.load(False)
ws_client, devices = await _setup(
hass, hass_ws_client, "44.44.44", iolinc_properties_data
)
device = devices["44.44.44"]
device.configuration["mock_read_only"] = mock_read_only
with patch.object(insteon.api.properties, "devices", devices):
await ws_client.send_json(
{
ID: 2,
TYPE: "insteon/properties/get",
DEVICE_ADDRESS: "44.44.44",
SHOW_ADVANCED: False,
}
)
msg = await ws_client.receive_json()
assert msg["success"]
assert len(msg["result"]["properties"]) == 6
await ws_client.send_json(
{
ID: 3,
TYPE: "insteon/properties/get",
DEVICE_ADDRESS: "44.44.44",
SHOW_ADVANCED: True,
}
)
msg = await ws_client.receive_json()
assert msg["success"]
assert len(msg["result"]["properties"]) == 15
| 5,350,754 |
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up RainMachine sensors based on the old way."""
pass
| 5,350,755 |
def generate_trainer(
datafiles: List[str],
labelfiles: List[str],
class_label: str,
batch_size: int,
num_workers: int,
optim_params: Dict[str, Any]={
'optimizer': torch.optim.Adam,
'lr': 0.02,
},
weighted_metrics: bool=None,
scheduler_params: Dict[str, float]=None,
wandb_name: str=None,
weights: torch.Tensor=None,
max_epochs=500,
*args,
**kwargs,
):
"""
Generates PyTorch Lightning trainer and datasets for model training.
:param datafiles: List of absolute paths to datafiles
:type datafiles: List[str]
:param labelfiles: List of absolute paths to labelfiles
:type labelfiles: List[str]
:param class_label: Class label to train on
:type class_label: str
:param weighted_metrics: To use weighted metrics in model training
:type weighted_metrics: bool
:param batch_size: Batch size in dataloader
:type batch_size: int
:param num_workers: Number of workers in dataloader
:type num_workers: int
    :param optim_params: Dictionary defining optimizer and any needed/optional arguments for optimizer initialization
:type optim_params: Dict[str, Any]
:param wandb_name: Name of run in Wandb.ai, defaults to ''
:type wandb_name: str, optional
:return: Trainer, model, datamodule
:rtype: Trainer, model, datamodule
"""
device = ('cuda:0' if torch.cuda.is_available() else 'cpu')
print(f'Device is {device}')
here = pathlib.Path(__file__).parent.absolute()
data_path = os.path.join(here, '..', '..', '..', 'data')
wandb_logger = WandbLogger(
project=f"tabnet-classifer-sweep",
name=wandb_name
)
uploadcallback = UploadCallback(
path=os.path.join(here, 'checkpoints'),
desc=wandb_name
)
early_stop_callback = EarlyStopping(
        monitor=("weighted_val_accuracy" if weighted_metrics else "val_accuracy"),
min_delta=0.00,
patience=3,
verbose=False,
mode="max"
)
module = DataModule(
datafiles=datafiles,
labelfiles=labelfiles,
class_label=class_label,
batch_size=batch_size,
num_workers=num_workers,
)
model = TabNetLightning(
input_dim=module.num_features,
output_dim=module.num_labels,
weighted_metrics=weighted_metrics,
optim_params=optim_params,
scheduler_params=scheduler_params,
weights=weights,
)
trainer = pl.Trainer(
gpus=(1 if torch.cuda.is_available() else 0),
auto_lr_find=False,
# gradient_clip_val=0.5,
logger=wandb_logger,
max_epochs=max_epochs,
# callbacks=[
# uploadcallback,
# ],
# val_check_interval=0.25, # Calculate validation every quarter epoch instead of full since dataset is large, and would like to test this
)
return trainer, model, module
| 5,350,756 |
def WriteToFileOrStdout(path, content, overwrite=True, binary=False,
private=False):
"""Writes content to the specified file or stdout if path is '-'.
Args:
path: str, The path of the file to write.
content: str, The content to write to the file.
overwrite: bool, Whether or not to overwrite the file if it exists.
binary: bool, True to open the file in binary mode.
private: bool, Whether to write the file in private mode.
Raises:
Error: If the file cannot be written.
"""
if path == '-':
if binary:
files.WriteStreamBytes(sys.stdout, content)
else:
out.write(content)
elif binary:
files.WriteBinaryFileContents(path, content, overwrite=overwrite,
private=private)
else:
files.WriteFileContents(path, content, overwrite=overwrite, private=private)
| 5,350,757 |
def make_shift_x0(shift, ndim):
"""
Returns a callable that calculates a shifted origin for each derivative
of an operation derivatives scheme (given by ndim) given a shift object
which can be a None, a float or a tuple with shape equal to ndim
"""
if shift is None:
return lambda s, d, i, j: None
elif isinstance(shift, float):
return lambda s, d, i, j: d + s * d.spacing
elif type(shift) is tuple and np.shape(shift) == ndim:
if len(ndim) == 1:
return lambda s, d, i, j: d + s[j] * d.spacing
elif len(ndim) == 2:
return lambda s, d, i, j: d + s[i][j] * d.spacing
else:
raise ValueError("ndim length must be equal to 1 or 2")
raise ValueError("shift parameter must be one of the following options: "
"None, float or tuple with shape equal to %s" % (ndim,))
| 5,350,758 |
def load_pca_tsne(pca, name, tpmmode=True, logmode=True, exclude=[], cache=True, dir='.'):
"""
Run t-sne using pca result
Parameters
----------
pca : array, shape (n_samples, n_pca)
pca matrix.
name: str
name of pca results
Returns
-------
tsne : array, shape (n_samples, 2)
The tsne coordinates for each sample
"""
file = get_tsne_file(dir, name, tpmmode=tpmmode, logmode=logmode)
if not os.path.isfile(file) or not cache:
print('{} was not found, creating it...'.format(file))
# perplexity = 5, n_iter = 5000, learning = 10
tsne = new_tsne()
if isinstance(pca, SparseDataFrame):
tsne_results = SparseDataFrame(tsne.fit_transform(pca.data), pca.index, pca.columns)
else:
tsne_results = tsne.fit_transform(pca)
data = pd.DataFrame({'Barcode':pca.index, 'TSNE-1':tsne_results[:, 0], 'TSNE-2':tsne_results[:, 1]})
data = data[['Barcode', 'TSNE-1', 'TSNE-2']]
data = data.set_index('Barcode')
data.to_csv(file, sep='\t', header=True)
return read_tsne(file)
| 5,350,759 |
def reconstruct(ctx: click.Context, keyfile: BufferedIOBase, secretfile: Path) -> None:
"""
Reconstruct the secret.
Decrypt re-encrypted shares with the receiver's private key KEYFILE and
join the shares to reconstruct the secret. It is written into SECRETFILE.
"""
pvss = pvss_from_datadir(ctx.obj)
secret = pvss.reconstruct_secret(keyfile.read())
write_private(secretfile, secret)
| 5,350,760 |
def test_get_part_count_lambda_b(subcategory_id):
"""get_part_count_lambda_b() should return a float value for the base hazard rate on success."""
ATTRIBUTES["subcategory_id"] = subcategory_id
ATTRIBUTES["environment_active_id"] = 3
ATTRIBUTES["construction_id"] = 1
_lambda_b = switch.get_part_count_lambda_b(ATTRIBUTES)
assert isinstance(_lambda_b, float)
assert _lambda_b == {1: 0.018, 5: 1.7}[subcategory_id]
| 5,350,761 |
def one_zone_numerical(params, ref_coeff, num_molecules=1e-9):
"""Returns one zone reactor exit flow."""
time = np.array(params[0], dtype=float)
gradient = np.array(params[1], dtype=float)
gridpoints = int(params[2])
step_size, area = float(params[3]), float(params[4])
solu = odeint(
_one_zone_fd, np.zeros(int(gradient.size)), time,
args=(ref_coeff, gradient, gridpoints, step_size, area)
)
return solu[:, -2] * ref_coeff * area / (step_size * num_molecules)
| 5,350,762 |
async def find_pets_by_status(
new: int,
status: List[str] = Query(None, description="Status values that need to be considered for filter"),
token_petstore_auth: TokenModel = Security(get_token_petstore_auth, scopes=["read:pets"]),
) -> List[Pet]:
"""Multiple status values can be provided with comma separated strings"""
...
| 5,350,763 |
def simple_mock_home_fixture():
"""Return a simple mocked connection."""
mock_home = Mock(
spec=AsyncHome,
name="Demo",
devices=[],
groups=[],
location=Mock(),
weather=Mock(
temperature=0.0,
weatherCondition=WeatherCondition.UNKNOWN,
weatherDayTime=WeatherDayTime.DAY,
minTemperature=0.0,
maxTemperature=0.0,
humidity=0,
windSpeed=0.0,
windDirection=0,
vaporAmount=0.0,
),
id=42,
dutyCycle=88,
connected=True,
currentAPVersion="2.0.36",
)
with patch(
"homeassistant.components.homematicip_cloud.hap.AsyncHome",
autospec=True,
return_value=mock_home,
):
yield
| 5,350,764 |
def get_all_event_history_links():
"""From ufcstat website finds all completed fights and saves
the http into the current working directory
"""
url = "http://www.ufcstats.com/statistics/events/completed?page=all"
href_collection = get_all_a_tags(url)
#Add all links to list that have event-details in them
links = []
for i in href_collection:
site_regex = re.search('event-details', i)
if site_regex is not None:
links.append(i)
links = list(dict.fromkeys(links))
return links
| 5,350,765 |
def profile_plot(x,z,data,ax,context_label = None,add_labels = False,xlabel = None,xmin = None, xmax=None, max_depth=None):
"""
UnTRIM-like profile plot of salinity
xmin,xmax are the bounds (in km) of the profile
max_depth is the maximum depth, data assumed to be
"""
import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.colors as colors
global x_part
global z_part
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
if (not max_depth):
max_depth = data.shape[0]
if (xmin):
min_station = x[0,:].searchsorted(xmin)
else:
min_station = 0
if (xmax):
max_station = x[0,:].searchsorted(xmax)
else:
max_station = x.shape[1]
set_index_bounds(min_station,max_station,max_depth)
print("context label: %s" % context_label)
print("add label: %s" % add_labels)
print("min x dist %s max x dist %s" %(xmin,xmax))
print("min x station %s max x station %s max_depth %s" %(min_station,max_station,max_depth))
x_part = x[0:max_depth,min_station:max_station]
z_part = z[0:max_depth,min_station:max_station]
data_part = data[0:max_depth,min_station:max_station]
data_part = np.ma.masked_where(np.isnan(data_part),data_part)
norml = ThreePointLinearNorm(2,0,20)
cmap=cm.get_cmap("RdBu_r").copy()
cmap.set_bad("white",0.0)
do_image=False
if do_image:
lev = [0.0,0.1,0.2,0.5,1.0,2.0,4.0,8.0,16.0,24.0,32.0]
norml = colors.BoundaryNorm(lev, 256)
im = ax.imshow(vertical_fill(data_part), interpolation='bilinear', origin='upper',
aspect = 'auto', vmin = 0.0, vmax = 32.0,
norm=norml, cmap=cmap,
extent=(x[0,min_station],x[0,max_station-1],max_depth,0))
bad_data = np.ma.masked_where(~data_part.mask, data_part.mask)
ax.imshow(bad_data, interpolation='nearest', aspect = 0.75, cmap=cm.gray,extent=(x[0,min_station],x[0,max_station-1],max_depth,0))
# Colorbar for the image.
        cbi = plt.colorbar(im, ax=ax, orientation='vertical', shrink=0.6, ticks=lev)
cbi.set_label("Salinity (psu)", size = 14)
else:
im = None
do_line_contour = True
if do_line_contour:
lev = np.array([2.0, 4.0, 8.0, 16.0])
greys = 1.0-lev/32.
cs = ax.contour(x_part,z_part,data_part,levels = lev,colors=['black','black','black','black'],linewidths=2)
greylev = 1.0
for c in cs.collections:
c.set_linestyle('solid')
#Thicken the zero contour.
zc = cs.collections[0]
#ax.setp(zc, linewidth=3)
#ax.setp(zc, linestyle = 'dotted')
ax.clabel(cs, lev, # label every second level
inline=1,
inline_spacing = 3,
fmt='%1.1f',
fontsize=12)
else:
cs = None
do_filled_contour = True
if do_filled_contour:
lev = [0.0,0.1,0.2,0.5,1.0,2.0,4.0,8.0,16.0,32.0]
norml = colors.BoundaryNorm(lev, 256)
filled_data_part = vertical_fill(data_part)
bad_data = np.ma.masked_where(~data_part.mask, data_part.mask, copy=True)
maxz = np.argmax(bad_data,axis=0)
maxz[maxz == 0] = max_depth
maxz = np.concatenate(([max_depth],maxz,[max_depth]))
xstat = np.concatenate(([x_part[0,0]],x_part[0,:],[x_part[0,-1]]))
ax.set_ylim([max_depth,0])
cs = ax.contourf(x_part,z_part,filled_data_part,levels = lev, cmap = cm.RdBu_r,
norm = norml,extent=(x[0,min_station],x[0,max_station-1],max_depth,0))
ax.fill(xstat,maxz,"darkgray")
#cb = ax.colorbar(cs, orientation='vertical', shrink=0.8,ticks = [32,16,8,4,2,1,0.5,0.2,0.1,0])
#cb.set_label("Salinity (psu)", size = 14)
add_cruise_loc = False
if add_cruise_loc:
xloc = x_part[0]
zloc = np.ones_like(xloc)*19
stops, = ax.plot(xloc,zloc,'o',label="USGS cast")
xloc = np.array([84.86])
yloc = np.ones_like(xloc)*19
dayflow, = ax.plot(xloc,yloc,"*",label="Dayflow X2",markersize=14)
add_labels = True
if (add_labels):
inbound_label_dists = [x for x in location_labels.keys() if (x>xmin and x<xmax)]
bbox_props = dict(boxstyle="rarrow,pad=0.3", fc="white", lw=2)
for dist in inbound_label_dists:
t = ax.text(dist, max_depth-2, location_labels[dist], ha="center", va="bottom", rotation=270,
size=12,
bbox=bbox_props)
if (add_cruise_loc and add_labels):
font= FontProperties(color="white");
leg = ax.legend(("USGS cast","Dayflow X2"),"center left",numpoints=1,frameon=False)
leg_texts = leg.get_texts()
if len(leg_texts) > 0:
leg_texts[0].set_color("white")
leg_texts[1].set_color("white")
    ttxt = None
    if context_label:
        ttxt = ax.text(x_part[0,0]+2,5,context_label,size = 18, color = 'white')
#ax.title('Vertical Salinity Profile', size = 14)
if xlabel:
ax.set_xlabel(xlabel, size = 14)
ax.set_ylabel('Depth (m)', size = 14)
return im, cs, ttxt
| 5,350,766 |
def plot_book_wordbags(urn, wordbags, window=5000, pr = 100):
"""Generate a diagram of wordbags in book """
return plot_sammen_vekst(urn, wordbags, window=window, pr=pr)
| 5,350,767 |
def map_line2citem(decompilation_text):
"""
Map decompilation line numbers to citems.
This function allows us to build a relationship between citems in the
ctree and specific lines in the hexrays decompilation text.
Output:
+- line2citem:
| a map keyed with line numbers, holding sets of citem indexes
|
| eg: { int(line_number): sets(citem_indexes), ... }
'
"""
line2citem = {}
#
# it turns out that citem indexes are actually stored inline with the
# decompilation text output, hidden behind COLOR_ADDR tokens.
#
# here we pass each line of raw decompilation text to our crappy lexer,
# extracting any COLOR_ADDR tokens as citem indexes
#
for line_number in range(decompilation_text.size()):
line_text = decompilation_text[line_number].line
line2citem[line_number] = lex_citem_indexes(line_text)
return line2citem
| 5,350,768 |
def from_dict(params, filter_func=None, excludes=[], seeds=[], order=2,
random_seed=None):
"""Generates pair-wise cases from given parameter dictionary."""
if random_seed is None or isinstance(random_seed, int):
return _from_dict(params, filter_func, excludes, seeds, order, random_seed)
# Find the best (smallest) test suite by trying multiple seeds.
best = None
for rs in random_seed:
case = _from_dict(params, filter_func, excludes, seeds, order, rs)
if best is None or len(case) < len(best):
best = case
return best
| 5,350,769 |
def ensure_min_topology(*args, **kwargs):
"""
verifies if the current testbed topology satifies the
minimum topology required by test script
:param spec: needed topology specification
:type spec: basestring
:return: True if current topology is good enough else False
:rtype: bool
"""
return getwa().ensure_min_topology(*args, **kwargs)
| 5,350,770 |
def encode_mode(mode):
"""
JJ2 uses numbers instead of strings, but strings are easier for humans to work with
CANNOT use spaces here, as list server scripts may not expect spaces in modes in port 10057 response
:param mode: Mode number as sent by the client
:return: Mode string
"""
    modes = {
        16: "headhunters",
        15: "domination",
        14: "tlrs",
        13: "flagrun",
        12: "deathctf",
        11: "jailbreak",
        10: "teambattle",
        9: "pestilence",
        8: "xlrs",
        7: "lrs",
        6: "roasttag",
        5: "coop",
        4: "race",
        3: "ctf",
        2: "treasure",
        1: "battle",
    }
    return modes.get(mode, "unknown")
| 5,350,771 |
def detect_properties_uri(uri):
"""Detects image properties in the file located in Google Cloud Storage or
on the Web."""
vision_client = vision.Client()
image = vision_client.image(source_uri=uri)
props = image.detect_properties()
print('Properties:')
for color in props.colors:
print('frac: {}'.format(color.pixel_fraction))
print('\tr: {}'.format(color.color.red))
print('\tg: {}'.format(color.color.green))
print('\tb: {}'.format(color.color.blue))
print('\ta: {}'.format(color.color.alpha))
| 5,350,772 |
def split_array(arr, num_of_splits):
"""split an array into equal pieces"""
# TODO Replace this function with gluon.utils.split_data() once targeting MXNet 1.7
size = arr.shape[0]
if size < num_of_splits:
return [arr[i:i + 1] for i in range(size)]
slice_len, rest = divmod(size, num_of_splits)
div_points = [0] + [(slice_len * index + min(index, rest) + slice_len +
(index < rest)) for index in range(num_of_splits)]
slices = [
arr[div_points[i]:div_points[i + 1]] for i in range(num_of_splits)
]
return slices
| 5,350,773 |
def randclust(SC, k):
""" cluster using random """
# generate labels.
labels = np.array([random.randint(0,k-1) for x in range(SC.shape[1])])
# compute the average.
S, cats = avg_cat(labels, SC)
# return it.
return S, labels, cats
| 5,350,774 |
def get_domains_by_name(kw, c, adgroup=False):
"""Searches for domains by a text fragment that matches the domain name (not the tld)"""
domains = []
existing = set()
if adgroup:
existing = set(c['adgroups'].find_one({'name': adgroup}, {'sites':1})['sites'])
for domain in c['domains'].find({}, {'domain': 1, 'alexa.rank.latest':1}):
try:
rank = domain['alexa']['rank']['latest']
domain_name = domain['domain'].replace('#', '.')
if kw in domain_name:
if domain_name not in existing:
domains.append({
"domain": domain_name,
"rank": rank
})
except KeyError:
pass
return domains[:50]
| 5,350,775 |
def SetCommandFlag(device, engine_ip, engine_port):
"""Set up adb Chrome command line flags
Args:
device: (str) Serial number of device we should use.
engine_ip: (str) Blimp engine IP address.
engine_port: (str) Port on the engine.
"""
cmd_helper.GetCmdStatusAndOutput([
os.path.join(SRC_PATH, 'build', 'android',
'adb_chrome_public_command_line'),
'--device=' + str(device),
'--enable-blimp',
'--engine-ip=' + engine_ip,
'--engine-port=' + engine_port,
'--engine-transport=tcp',
'-v=0',
'--vmodule=*blimp*=1',
'--blimp-client-token-path=' + _CLIENT_TOKEN_PATH])
| 5,350,776 |
def read_bbgt(filename):
"""
Read ground truth from bbGt file.
See Piotr's Toolbox for details
"""
boxes = []
with open(filename,"r") as f:
signature = f.readline()
if not signature.startswith("% bbGt version=3"):
raise ValueError("Wrong file signature")
rects = []
ignore = []
labels = []
for line in f:
elms = line.strip().split()
assert len(elms) == 12, "Invalid file"
lbl = elms[0]
rect = tuple(map(float, elms[1:5]))
ign = int(elms[10])
rects.append(rect)
ignore.append(ign)
labels.append(lbl)
if not rects:
rects = np.empty((0,4),"f")
ignore = np.empty(0,"i")
labels = np.empty(0,"<U1")
boxes = bbox_list(np.array(rects,"f"),
format=RectFormat.XYWH,
ignore=np.array(ignore,"i"),
labels=np.array(labels))
return boxes
| 5,350,777 |
async def test_aliases_gateway(mocker):
"""Возврат данных в виде списка."""
json = [
{"secid": "OGKB", "isin": "1-02-65105-D"},
{"secid": "OGK2", "isin": "1-02-65105-D"},
{"secid": "OGK2-001D", "isin": "1-02-65105-D-001D"},
]
outer_call = mocker.patch.object(moex.aiomoex, "find_securities", return_value=json)
fake_session = mocker.Mock()
loader = moex.AliasesGateway(fake_session)
assert await loader.__call__("1-02-65105-D") == ["OGKB", "OGK2"]
outer_call.assert_called_once_with(fake_session, "1-02-65105-D", columns=("secid", "isin"))
| 5,350,778 |
def measure(G, wire, get_cb_delay = False, meas_lut_access = False):
"""Calls HSPICE to obtain the delay of the wire.
Parameters
----------
G : nx.MultiDiGraph
The routing-resource graph.
wire : str
Wire type.
get_cb_delay : Optional[bool], default = False
Determines the position of the wire and the connection block and then calls
>>meas_local_wire.py<< to obtain the delay from the wire to a LUT input pin.
Returns
-------
float
Delay.
"""
#------------------------------------------------------------------------#
def run():
"""Runs HSPICE and parses the delay."""
with open(netlist_filename, "w") as outf:
outf.write(conv_nx_to_spice(net, meas_lut_access = meas_lut_access))
hspice_call = os.environ["HSPICE"] + " %s > %s" % (netlist_filename, hspice_dump)
os.system(hspice_call)
scale_dict = {'f' : 1e-15, 'p' : 1e-12, 'n' : 1e-9}
with open(hspice_dump, "r") as inf:
lines = inf.readlines()
#os.system("rm " + hspice_dump)
td_dict = {}
get_td = lambda l : round(float(l.split()[1][:-1]), 1) * scale_dict[l.split()[1][-1]]
get_tap = lambda l : wire + '_' + l.split('=', 1)[0].split('_', 1)[1]
for line in lines:
if "tfall=" in line:
tfall = get_td(line)
elif "trise=" in line:
trise = get_td(line)
elif meas_lut_access:
if "tfall_ble_mux" in line or "trise_ble_mux" in line:
td = get_td(line)
if td < 0:
print "Negative time!"
raise ValueError
try:
td_dict["ble_mux"] = 0.5 * (td_dict["ble_mux"] + td)
except:
td_dict.update({"ble_mux" : td})
elif wire[0] == 'V':
if "tfall_tap" in line or "trise_tap" in line:
tap = get_tap(line)
td = get_td(line)
if td < 0:
print "Negative time!"
raise ValueError
try:
td_dict[tap] = 0.5 * (td_dict[tap] + td)
except:
td_dict.update({tap : td})
if trise < 0 or tfall < 0:
print "Negative time!"
raise ValueError
if wire[0] == 'V':
td_dict.update({"whole" : 0.5 * (trise + tfall)})
if meas_lut_access:
td_dict.update({"lut_access" : 0.5 * (trise + tfall) - td_dict["ble_mux"]})
return td_dict
if wire[0] == 'V':
return td_dict
return 0.5 * (trise + tfall)
#------------------------------------------------------------------------#
netlist_filename = "sim_global_%s_%s.sp" % (args.arc_name, wire)
hspice_dump = "hspice_%s_%s.dump" % (args.arc_name, wire)
if meas_lut_access:
net = meas_lut_access_delay(G)
return run()
else:
pins, all_sizes = stack_muxes(G, get_pins = True)
source_dict = {}
for mux in pins:
if wire in mux and mux.startswith("ble_%d_" % NEUTRAL_BLE):
if ROBUSTNESS_LEVEL == 0:
source = mux
if get_cb_delay:
return get_netlist(G, wire, source, get_cb_delay = True)
net = get_netlist(G, wire, source)
return run()
key = mux.split("_tap")[0]
offset = pins[mux]['o'][0 if wire[0] == 'V' else 1]
deg = 0
for fanout in G:
if fanout.startswith(key):
deg += G.in_degree(fanout) + G.out_degree(fanout)
source_dict.update({key : {"mux" : mux, "deg" : deg, "offset" : offset}})
sorted_keys = sorted(source_dict, key = lambda s : source_dict[s]["deg"]\
* abs(source_dict[s]["offset"]))
if ROBUSTNESS_LEVEL == 1 or get_cb_delay:
#NOTE: Connection-block delays are very robust to changing the multiplexer as they usually
#assume only one or two columns, immediately next to the crossbar. Hence, the x-offset is
#less varialbe. Also, the load is within the cluster itself. If there is any variation in
#multiplexer sizes, that is more of an artifact of parametrized architecture generation.
#Median fanin should be a good representative in this case.
        source = source_dict[sorted_keys[len(source_dict) // 2]]["mux"]
if get_cb_delay:
return get_netlist(G, wire, source, get_cb_delay = True)
net = get_netlist(G, wire, source)
return run()
td_dicts = []
for source_key in sorted_keys:
source = source_dict[source_key]["mux"]
net = get_netlist(G, wire, source)
td_dicts.append(run())
if ROBUSTNESS_LEVEL == 3:
potential_targets = [u for u, attrs in net.nodes(data = True) if attrs.get("potential_target", False)]
for i, u in enumerate(potential_targets):
relabeling_dict = {}
if u == 't':
continue
relabeling_dict.update({'t' : "prev_t_%d" % i})
relabeling_dict.update({u : 't'})
net = nx.relabel_nodes(net, relabeling_dict)
td_dicts.append(run())
if (wire[0] == 'H' and not meas_lut_access) or get_cb_delay:
return sum(td_dicts) / len(td_dicts)
for v in td_dicts[0]:
for td_dict in td_dicts[1:]:
td_dicts[0][v] += td_dict[v]
td_dicts[0][v] /= len(td_dicts)
return td_dicts[0]
| 5,350,779 |
def direction_to(front,list_of_others,what="average") :
"""
Compute the direction vector towards *some other entities*.
Parameters
----------
front : :py:class:`front.Front`
Front to be used as the origin (starting point) of the direction \
vector
list_of_others : list
List created by ***get_enity*** LINK
what : string
String options:
- 'nearest': compute the direction vector to the nearest \
point in the list_of_others.
- 'average': default. TODO. Currently returns a list of direction \
vectors
- 'all': Return a list of all direction vectors
"""
if len(list_of_others) == 0 :
# shouldn't this be array([0,0,0])??? No items, null vector
#return np.array([0,0,0])
return None # handle the error somewhere else
pos = front.xyz
vecs = []
smallest_vec = np.array([100000000,100000000,100000000])
for loc in list_of_others :
        vec = loc - pos  # vector from the front position to the other entity
vecs.append(vec)
# THIS SHOULD BE MEMORIZED / TABULATED ++++++++++++++++++++ <-----
if np.sqrt(np.sum((loc-pos)**2)) < np.sqrt(np.sum((smallest_vec)**2)) :
smallest_vec = vec
if what == "nearest" :
return smallest_vec
else :
return vecs
| 5,350,780 |
def live_ferc_db(request):
"""Use the live FERC DB or make a temporary one."""
return request.config.getoption("--live_ferc_db")
| 5,350,781 |
async def get_eth_hash(timestamp: int) -> Optional[str]:
"""Fetches next Ethereum blockhash after timestamp from API."""
try:
this_block = w3.eth.get_block("latest")
except Exception as e:
logger.error(f"Unable to retrieve latest block: {e}")
return None
if this_block["timestamp"] < timestamp:
logger.error(
f"Timestamp {timestamp} is older than current "
"block timestamp {this_block['timestamp']}"
)
return None
block_num = block_num_from_timestamp(timestamp)
if block_num is None:
logger.warning("Unable to retrieve block number from Etherscan API")
return None
try:
block = w3.eth.get_block(block_num)
except Exception as e:
logger.error(f"Unable to retrieve block {block_num}: {e}")
return None
return str(block["hash"].hex())
| 5,350,782 |
def resize_opencv(method, *args, **kwargs):
"""Direct arguments to one of the resize functions.
Parameters
----------
method
One among 'crop', 'cover', 'contain', 'width', 'height' or 'thumbnail'
image
Numpy array
size
Size object with desired size
"""
method = f"resize_{method}_opencv"
valid_methods = [
x for x in globals().keys() if x.endswith("opencv") and x != "resize_opencv"
]
LOG.info("Resizing with %s()", method)
try:
return getattr(sys.modules[__name__], method)(*args, **kwargs)
except AttributeError:
LOG.critical(
f"Invalid method '{method}'; should be one of {', '.join(valid_methods)}"
)
| 5,350,783 |
def delete_credential(credentials):
"""
    Delete a credential from the credentials list.
"""
credentials.delete_credentials()
| 5,350,784 |
def extract_url_dataset(dataset,msg_flag=False):
"""
Given a dataset identifier this function extracts the URL for the page where the actual raw data resides.
"""
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
import time
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
dataset_dict={}
baseurl='https://archive.ics.uci.edu/ml/datasets/'
url = baseurl+dataset
try:
uh= urllib.request.urlopen(url, context=ctx)
html =uh.read().decode()
soup=BeautifulSoup(html,'html5lib')
if soup.text.find("does not appear to exist")!=-1:
if msg_flag:
print(f"{dataset} not found")
return None
else:
for link in soup.find_all('a'):
if link.attrs['href'].find('machine-learning-databases')!=-1:
a=link.attrs['href']
a=a[2:]
dataurl="https://archive.ics.uci.edu/ml/"+str(a)
#print(dataurl)
return str(dataurl)
#dataurls.append(dataurl)
# After finishing the for-loop with a-tags, the first dataurl is added to the dictionary
#dataset_dict['dataurl']=dataurls[0]
    except Exception:
#print("Could not retrieve")
return None
| 5,350,785 |
def test__repeated_median(repeated_median):
"""
(1-5)/1 ; (6-5)/2 ; (72-5)/3 => 0.5
(1-5)/-1 ; (6-1)/1 ; (72-1)/3 => 5
(5-6)/-2 ; (1-6)/1 ; (72-6)/2 => 5
(5-72)/-3 ; (1-72)/-2 ; (6-72)/1 => something (23.67)
-------
overall median of (0.5, 5, 5 and something) is (5+5)/2 = 5
"""
x, y, *_ = repeated_median
assert repeated_median_slope(x, y) == 5.0
| 5,350,786 |
def test_encrypt_and_decrypt_one(benchmark: BenchmarkFixture) -> None:
"""Benchmark encryption and decryption run together."""
primitives.encrypt = nacl.bindings.crypto_aead_xchacha20poly1305_ietf_encrypt
primitives.decrypt = nacl.bindings.crypto_aead_xchacha20poly1305_ietf_decrypt
def encrypt_and_decrypt() -> bytes:
token = version2.encrypt(MESSAGE, KEY, FOOTER)
return version2.decrypt(token, KEY, FOOTER)
plain_text = benchmark(encrypt_and_decrypt)
assert plain_text == MESSAGE
| 5,350,787 |
def MPI_ITOps(mintime = 5, maxtime = 20, cap = 60):
"""
Returns a costOfLaborValue object suitable to attach to a sim or other event
Time is in hours
"""
timeDist = LogNormalValue(maxtime, mintime, cap)
costDist = LogNormalValue(235, 115, 340)
team = costOfLaborValue("IT I&O Team", timeDist, costDist)
return team
| 5,350,788 |
def load_private_wallet(path):
"""
Load a json file with the given path as a private wallet.
"""
d = json.load(open(path))
blob = bytes.fromhex(d["key"])
return BLSPrivateHDKey.from_bytes(blob)
| 5,350,789 |
def format_parameters(parameters: str) -> str:
"""
Receives a key:value string and retuns a dictionary string ({"key":"value"}). In the process strips trailing and
leading spaces.
:param parameters: The key-value-list
:return:
"""
if not parameters:
return '{}'
pairs = []
for item in parameters.split(','):
try:
key, value = item.split(':')
except ValueError:
raise ValueError(f"Got unexpected parameters {item}.")
pairs.append((key.strip(), value.strip()))
return json.dumps(dict(pairs))
| 5,350,790 |
def downvote_question(current_user, question_id):
"""Endpoint to downvote a question"""
error = ""
status = 200
response = {}
question = db.get_single_question(question_id)
if not question:
error = "That question does not exist!"
status = 404
elif db.downvote_question(current_user[0], question_id) is False:
error = "You have already downvoted!"
status = 400
else:
db.downvote_question(current_user[0], question_id)
votes = db.get_votes(question_id)
que_details = db.get_question_details(question_id)
data = {
"meetup": que_details[0],
"title": que_details[1].strip(),
"body": que_details[2].strip(),
"votes": votes[0]
}
status = 200
if error:
response.update({"status": status, "error": error})
return jsonify(response), status
response.update({"status": status, "data": data})
return jsonify(response), status
| 5,350,791 |
def test_updating_with_layer_change(make_napari_viewer, monkeypatch):
"""Test that the dialog text updates when the active layer is changed."""
viewer = make_napari_viewer()
view = viewer.window.qt_viewer
# turn off showing the dialog for test
monkeypatch.setattr(QtAboutKeyBindings, 'show', lambda *a: None)
view.show_key_bindings_dialog()
dialog = view.findChild(QtAboutKeyBindings)
# add an image layer
viewer.add_image(np.random.random((5, 5, 10, 15)))
# capture dialog text after active_layer events
active_img_layer_text = dialog.textEditBox.toHtml()
    dialog.update_active_layer()  # force an update to the dialog
# check that the text didn't update without a change in the active layer
assert dialog.textEditBox.toHtml() == active_img_layer_text
# add a shape layer (different keybindings)
viewer.add_shapes(None, shape_type='polygon')
# check that the new layer is the active_layer
assert viewer.active_layer == viewer.layers[1]
# capture dialog text after active_layer events
active_shape_layer_text = dialog.textEditBox.toHtml()
# check that the text has changed for the new key bindings
assert active_shape_layer_text != active_img_layer_text
dialog.update_active_layer() # force an update to dialog
# check that the text didn't update without a change in the active layer
assert dialog.textEditBox.toHtml() == active_shape_layer_text
| 5,350,792 |
def get_new_generation(generation: GEN, patterns: PATTERNS) -> GEN:
"""Mutate current generation and get the next one."""
new_generation: GEN = dict()
plant_ids = generation.keys()
min_plant_id = min(plant_ids)
max_plant_id = max(plant_ids)
for i in range(min_plant_id - 2, max_plant_id + 2):
pattern = get_pattern(generation, i)
if patterns.get(pattern, Pot.EMPTY) is Pot.PLANT:
new_generation[i] = Pot.PLANT
return new_generation
| 5,350,793 |
def ell2tm(latitude, longitude, longitude_CM, ellipsoid = 'GRS80'):
"""
Convert ellipsoidal coordinates to 3 degree Transversal Mercator
projection coordinates
Input:
latitude: latitude of a point in degrees
longitude: longitude of a point in degrees
longitude_CM: central meridian in degrees
ellipsoid: name of ellipsoid in string format
Output:
Easting, Northing [unit:meters]
"""
Phi = _np.deg2rad(latitude) # degree to radian
Lambda = _np.deg2rad(longitude) # degree to radian
Lambda_CM = _np.deg2rad(longitude_CM) # degree to radian
dlambda = Lambda - Lambda_CM
# -----------------------------------------------------------------------------
# Define Ellipsoid
ell = _ellipsoid(ellipsoid)
# -----------------------------------------------------------------------------
# Some parameters
N = ell.a/_np.sqrt(1-ell.e1**2*_np.sin(Phi)**2)
t = _np.tan(Phi)
n = ell.e2 * _np.cos(Phi)
# -----------------------------------------------------------------------------
# Easting Computation
easting = N*(dlambda*_np.cos(Phi)+((dlambda**3*_np.cos(Phi)**3)/6)*(1-t**2+n**2) +
((dlambda**5*_np.cos(Phi)**5)/120)*(5-18*t**2+t**4+14*n**2-58*t**2*n**2+13*n**4+4*n**6-64*n**4*t**2-24*n**6*t**2) +
((dlambda**7*_np.cos(Phi)**7)/5040)*(61-479*t**2+179*t**4-t**6))
easting += 500000 # false easting
# -----------------------------------------------------------------------------
    # Meridian Arc Computation
A0 = 1 - ell.e1**2/4 - (3/64)*ell.e1**4 - (5/256)*ell.e1**6 - (175/16384)*ell.e1**8
A2 = (3/8) * (ell.e1**2 + ell.e1**4/4 + (15/128)*ell.e1**6 - (455/4096)*ell.e1**8)
A4 = (15/256) * (ell.e1**4 + (3/4)*ell.e1**6 - (77/128)*ell.e1**8)
A6 = (35/3072) * (ell.e1**6 - (41/32)*ell.e1**8)
A8 = (-315/131072) * ell.e1**8
S_phi = ell.a * ( A0 * Phi - A2*_np.sin(2*Phi) + A4*_np.sin(4*Phi) - A6*_np.sin(6*Phi) + A8*_np.sin(8*Phi))
# -----------------------------------------------------------------------------
# Northing Computation
northing = S_phi + N * ( (dlambda**2/2) * _np.sin(Phi) * _np.cos(Phi) + (dlambda**4/24) * _np.sin(Phi) * _np.cos(Phi)**3 * (5 - t**2 + 9*n**2 + 4*n**4) +
(dlambda**6/720) * _np.sin(Phi) * _np.cos(Phi)**5 * (61 - 58*t**2 + t**4 + 270*n**2 - 330*t**2*n**2 + 445*n**4 + 324*n**6 - 680*n**4*t**2 + 88*n**8 -
600*n**6*t**2 - 192*n**8*t**2) + (dlambda**8/40320) * _np.sin(Phi) * _np.cos(Phi)**7 * (1385 - 311*t**2 + 543*t**4 - t**6))
return easting, northing
| 5,350,794 |
def Format_Phone(Phone):
"""Function to Format a Phone Number into (999)-999 9999)"""
Phone = str(Phone)
return f"({Phone[0:3]}) {Phone[3:6]}-{Phone[6:10]}"
| 5,350,795 |
def aStarSearch(problem, heuristic=nullHeuristic):
"""Search the node that has the lowest combined cost and heuristic first."""
"*** YOUR CODE HERE ***"
    startState = problem.getStartState()
if problem.isGoalState(startState):
return []
# Each element in the fringe stores the state and the cost to reach it.
fringe = util.PriorityQueue()
fringe.push(startState, 0 + heuristic(startState, problem))
# Each pair in itemsInFringe stores a state and the list of actions
# required to reach it. States are added in itemsInFringe when they are
# added to the fringe. The states are removed from itemsInFringe when
# they get removed from the fringe.
itemsInFringe = {startState: []}
    visitedStates = set()
while not fringe.isEmpty():
currState = fringe.pop()
actionsToCurrState = itemsInFringe[currState]
del itemsInFringe[currState]
costOfActionsToCurrState = problem.getCostOfActions(actionsToCurrState)
if problem.isGoalState(currState):
return actionsToCurrState
visitedStates.add(currState)
for successor, action, stepCost in problem.getSuccessors(currState):
heuristicCostToSuccessor = heuristic(successor, problem)
newCostToSuccessor = costOfActionsToCurrState + stepCost + \
heuristicCostToSuccessor
newActionsToSuccessor = actionsToCurrState + [action]
if successor not in visitedStates:
fringe.update(successor, newCostToSuccessor)
if successor in itemsInFringe and \
problem.getCostOfActions(itemsInFringe[successor]) + \
heuristicCostToSuccessor <= newCostToSuccessor:
# If successor is already in itemsInFringe, only update the
# cost if the current cost is greater than the new cost.
continue
itemsInFringe[successor] = newActionsToSuccessor
# Goal not found, so no action.
return []
| 5,350,796 |
def _stack_add_equal_dataset_attributes(merged_dataset, datasets, a=None):
"""Helper function for vstack and hstack to find dataset
    attributes common to a set of datasets, and add them to the output.
    Note: by default this function does nothing because testing for equality
    may be messy for certain types; to override, a value should be assigned
    to the ``a`` argument.
Parameters
----------
merged_dataset: Dataset
the output dataset to which attributes are added
datasets: tuple of Dataset
Sequence of datasets to be stacked. Only attributes present
in all datasets and with identical values are put in
merged_dataset
a: {'unique','drop_nonunique','uniques','all'} or True or False or None (default: None).
Indicates which dataset attributes from datasets are stored
in merged_dataset. If an int k, then the dataset attributes from
datasets[k] are taken. If 'unique' then it is assumed that any
attribute common to more than one dataset in datasets is unique;
if not an exception is raised. If 'drop_nonunique' then as 'unique',
except that exceptions are not raised. If 'uniques' then, for each
attribute, any unique value across the datasets is stored in a tuple
in merged_datasets. If 'all' then each attribute present in any
dataset across datasets is stored as a tuple in merged_datasets;
missing values are replaced by None. If None (the default) then no
attributes are stored in merged_dataset. True is equivalent to
'drop_nonunique'. False is equivalent to None.
"""
if a is None or a is False:
# do nothing
return
elif a is True:
a = 'drop_nonunique'
if not datasets:
# empty - so nothing to do
return
if type(a) is int:
base_dataset = datasets[a]
for key in base_dataset.a.keys():
merged_dataset.a[key] = base_dataset.a[key].value
return
allowed_values = ['unique', 'uniques', 'drop_nonunique', 'all']
    if a not in allowed_values:
raise ValueError("a should be an int or one of "
"%r" % allowed_values)
# consider all keys that are present in at least one dataset
all_keys = set.union(*[set(dataset.a.keys()) for dataset in datasets])
def _contains(xs, y, comparator=all_equal):
for x in xs:
if comparator(x, y):
return True
return False
for key in all_keys:
add_key = True
values = []
for i, dataset in enumerate(datasets):
if not key in dataset.a:
if a == 'all':
values.append(None)
continue
value = dataset.a[key].value
if a in ('drop_nonunique', 'unique'):
if not values:
values.append(value)
elif not _contains(values, value):
if a == 'unique':
raise DatasetError("Not unique dataset attribute value "
" for %s: %s and %s"
% (key, values[0], value))
else:
add_key = False
break
elif a == 'uniques':
if not _contains(values, value):
values.append(value)
elif a == 'all':
values.append(value)
else:
raise ValueError("this should not happen: %s" % a)
if add_key:
if a in ('drop_nonunique', 'unique'):
merged_dataset.a[key] = values[0]
else:
merged_dataset.a[key] = tuple(values)
| 5,350,797 |
def _rstrip_inplace(array):
"""
Performs an in-place rstrip operation on string arrays. This is necessary
since the built-in `np.char.rstrip` in Numpy does not perform an in-place
calculation.
"""
# The following implementation convert the string to unsigned integers of
# the right length. Trailing spaces (which are represented as 32) are then
# converted to null characters (represented as zeros). To avoid creating
# large temporary mask arrays, we loop over chunks (attempting to do that
# on a 1-D version of the array; large memory may still be needed in the
# unlikely case that a string array has small first dimension and cannot
# be represented as a contiguous 1-D array in memory).
dt = array.dtype
if dt.kind not in 'SU':
raise TypeError("This function can only be used on string arrays")
# View the array as appropriate integers. The last dimension will
# equal the number of characters in each string.
bpc = 1 if dt.kind == 'S' else 4
dt_int = "{0}{1}u{2}".format(dt.itemsize // bpc, dt.byteorder, bpc)
b = array.view(dt_int, np.ndarray)
# For optimal speed, work in chunks of the internal ufunc buffer size.
bufsize = np.getbufsize()
# Attempt to have the strings as a 1-D array to give the chunk known size.
# Note: the code will work if this fails; the chunks will just be larger.
if b.ndim > 2:
try:
b.shape = -1, b.shape[-1]
except AttributeError: # can occur for non-contiguous arrays
pass
for j in range(0, b.shape[0], bufsize):
c = b[j:j + bufsize]
# Mask which will tell whether we're in a sequence of trailing spaces.
mask = np.ones(c.shape[:-1], dtype=bool)
# Loop over the characters in the strings, in reverse order. We process
# the i-th character of all strings in the chunk at the same time. If
# the character is 32, this corresponds to a space, and we then change
# this to 0. We then construct a new mask to find rows where the
# i-th character is 0 (null) and the i-1-th is 32 (space) and repeat.
for i in range(-1, -c.shape[-1], -1):
mask &= c[..., i] == 32
c[..., i][mask] = 0
mask = c[..., i] == 0
return array
| 5,350,798 |
def compute_fixpoint_0(graph, max_value):
"""
Computes the fixpoint obtained by the symbolic version of the backward algorithm for safety games.
Starts from the antichain of the safe set and works backwards using controllable predecessors.
The maximum value for the counters is a parameter to facilitate the incremental algorithm.
:param graph:
:type graph:
:param max_value:
:type max_value:
:return:
:rtype:
"""
    # whether we want to print the sets during computation
toPrint = False
# get the values to create the create the antichain of maximal elements of the safe set
nbr_functions, nbr_counters_per_function = compute_counters_sizes_0(graph)
start_antichain = Antichain(comparator_generalized_0, intersector_generalized_0)
# create the antichain of maximal elements of the safe set
# every counter in every tuple has the maximal value
for node in graph.get_nodes():
temp = [node]
for func in range(0, nbr_functions):
temp.append(nbr_counters_per_function[func] * [max_value])
start_antichain.insert(temp)
if (toPrint):
print("Start antichain : " + str(start_antichain) + "\n")
antichain1 = start_antichain
cpre1 = Cpre(start_antichain, 1, graph, nbr_functions, max_value)
if (toPrint):
print("CPre_1 of start antichain: " + str(cpre1) + "\n")
cpre0 = Cpre(start_antichain, 0, graph, nbr_functions, max_value)
if (toPrint):
print("CPre_0 of start antichain: " + str(cpre0) + "\n")
# we know the elements of cpre0 and cpre1 to be incomparable. Union of the two antichains can be done through
# simple extend
cpre0.incomparable_elements.extend(cpre1.incomparable_elements)
if (toPrint):
print("Union of CPre_0 and CPre_1 " + str(cpre0) + "\n")
antichain2 = antichain1.intersection(cpre0)
if (toPrint):
print("Inter of start and previous union " + str(antichain2) + "\n")
nb_iter = 0
# while we have not obtained the fixpoint
while not antichain1.compare(antichain2):
nb_iter += 1
antichain1 = antichain2
cpre1 = Cpre(antichain1, 1, graph, nbr_functions, max_value)
if (toPrint):
print("ITER " + str(nb_iter) + " CPre 1 of prev " + str(cpre1) + "\n")
cpre0 = Cpre(antichain1, 0, graph, nbr_functions, max_value)
if (toPrint):
print("ITER " + str(nb_iter) + " CPre 0 of prev " + str(cpre0) + "\n")
temp = cpre0.union(cpre1)
if (toPrint):
print("ITER " + str(nb_iter) + " Union of Pre 0 and Pre 1 " + str(temp) + "\n")
antichain2 = antichain1.intersection(temp)
if (toPrint):
print("ITER " + str(nb_iter) + " final set " + str(antichain2) + "\n")
return antichain1
| 5,350,799 |