| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
---|---|
def get_high(pair, path="https://api.kraken.com/0/public"):
""" Get the last 24h high price of `pair`.
Parameters
----------
pair : str
Code of the requested pair(s). Comma-delimited if several pairs.
path : str
Path of the exchange to request.
Returns
-------
float or dict
Last 24h high price(s).
"""
return _get_ticker(pair, 'h', path)
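# A minimal sketch (not part of the original entry) of the `_get_ticker` helper
# that `get_high` above relies on, assuming Kraken's public Ticker endpoint where
# field 'h' holds [today, last 24h] high prices. The real helper in the source
# project may differ.
import requests

def _get_ticker(pair, key, path="https://api.kraken.com/0/public"):
    """Return the last-24h value of field `key` for one or several pairs."""
    result = requests.get(path + "/Ticker", params={"pair": pair}).json()["result"]
    values = {name: float(data[key][1]) for name, data in result.items()}
    # Single pair -> bare float, several pairs -> dict keyed by pair name
    return next(iter(values.values())) if len(values) == 1 else values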
| 5,354,000 |
def SIx():
"""
Reads in future LENS SI-x data
Returns
----------
leafmean : array, leaf indices (ens x year x lat x lon)
latmean : array, last freeze indices (ens x year x lat x lon)
lstfrz : list, last freeze indices per ensemble member
lat : array of latitudes
lon : array of longitudes
"""
directory = '/volumes/eas-shared/ault/ecrl/spring-indices/data/'
versions = ['%03d' % i for i in range(2, 31)]  # ensemble members '002'..'030'
leaf=[]
lstfrz = []
for version in versions:
years = 'b.e11.BRCP85C5CNBDRD.f09_g16.%s.cam.h.SI-x.2006-2080.nc' % version
filename = directory + years
values = Dataset(filename)
lon = values.variables['lon'][189:240]
lat = values.variables['lat'][:32]
lstfrz_index = values.variables['lstfrz_index'][:,:32,189:240]
leaf_index = values.variables['leaf_index'][:,:32,189:240]
values.close()
leaf.append(leaf_index)
lstfrz.append(lstfrz_index)
latmean = np.asarray(lstfrz)
leafmean = np.asarray(leaf)
print('Done! 1')
return leafmean, latmean, lstfrz, lat, lon
| 5,354,001 |
def test_destroy_invalid_proxy():
""" Test if we can destroy an invalid proxy """
result = toxiproxy.destroy("invalid_proxy")
assert result is False
| 5,354,002 |
def test_pinned_task_recovers_on_host():
"""Tests that when a pinned task gets killed, it recovers on the node it was pinned to."""
app_def = apps.sleep_app()
app_id = app_def["id"]
host = common.ip_other_than_mom()
common.pin_to_host(app_def, host)
client = marathon.create_client()
client.add_app(app_def)
common.deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
common.kill_process_on_host(host, '[s]leep')
common.deployment_wait(service_id=app_id)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def check_for_new_task():
new_tasks = client.get_tasks(app_id)
assert tasks[0]['id'] != new_tasks[0]['id'], "The task did not get killed: {}".format(tasks[0]['id'])
assert new_tasks[0]['host'] == host, \
"The task got restarted on {}, but it was supposed to stay on {}".format(new_tasks[0]['host'], host)
check_for_new_task()
| 5,354,003 |
def insert_nhl_ids(tds, players):
"""
Insert nhl ids for each player into the according team's html roster
representation.
"""
# modifying each of the specified table cells
for td in tds:
# retrieving player's jersey number from current table cell
no = int(td.text_content())
# trying to retrieve player id from player dictionary
try:
nhl_id = players[no]
# otherwise retrieving player id from database
except KeyError:
# retrieving player position and full name from current table cell
position, name = td.xpath("following-sibling::*//text()")[:2]
# splitting up full name into first and last name
last_name, first_name = [x.strip() for x in name.split(",")]
# finding player by first and last name as well as position in db
nhl_id = retrieve_player_id(last_name, first_name, position)
# TODO: find player on website and create it if not found in database
# creating a span element using the attribute 'nhl_id' with the
# current player's id as value
span = etree.Element("span", nhl_id=str(nhl_id))
# inserting newly created span element into the document tree
td.insert(0, span)
| 5,354,004 |
def make_truncnorm_gen_with_bounds(mean, std, low_bound, hi_bound):
"""
low_bound and hi_bound are in the same units as mean and std
"""
assert hi_bound > low_bound
clipped_mean = min(max(mean, low_bound), hi_bound)
if clipped_mean == low_bound:
low_sigma = -0.01 * std
hi_sigma = (hi_bound - clipped_mean) / std
elif clipped_mean == hi_bound:
low_sigma = (low_bound - clipped_mean) / std
hi_sigma = 0.01 * std
else:
low_sigma = (low_bound - clipped_mean) / std
hi_sigma = (hi_bound - clipped_mean) / std
return scipy.stats.truncnorm(low_sigma, hi_sigma,
loc=clipped_mean, scale=std)
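# Illustrative usage (assumes scipy is imported as in the function above): the
# returned frozen distribution is a truncated normal restricted to the bounds.
#   gen = make_truncnorm_gen_with_bounds(mean=5.0, std=3.0, low_bound=0.0, hi_bound=10.0)
#   gen.rvs(size=5)   # five draws, all within [0, 10]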
| 5,354,005 |
def resolve_item_from_loan(item_pid):
"""Resolve the item referenced in loan based on its PID type."""
from invenio_app_ils.ill.api import BORROWING_REQUEST_PID_TYPE
from invenio_app_ils.ill.proxies import current_ils_ill
from invenio_app_ils.items.api import ITEM_PID_TYPE
from invenio_app_ils.proxies import current_app_ils
if item_pid["type"] == ITEM_PID_TYPE:
rec_cls = current_app_ils.item_record_cls
elif item_pid["type"] == BORROWING_REQUEST_PID_TYPE:
rec_cls = current_ils_ill.borrowing_request_record_cls
else:
from invenio_app_ils.errors import UnknownItemPidTypeError
raise UnknownItemPidTypeError(pid_type=item_pid["type"])
return rec_cls.get_record_by_pid(item_pid["value"])
| 5,354,006 |
def mkdir(d):
"""make a directory if it doesn't already exist"""
if not os.path.exists(d):
try:
os.makedirs(d)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
| 5,354,007 |
def lascolor(strPathInLAS, strPathOutLAS, strPathTif, strAdlSwitches = None):
""" Function lascolor
args:
strPathInLAS = input LAS file
strPathOutLAS = output LAS file
strPathTif = Tif source of RGB values
strAdlSwitches = optional additional switches
Command Syntax:
"""
strSwitches = ''
if strAdlSwitches:
strSwitches = strSwitches + ' ' + strAdlSwitches
lstCMD = [strPathLtInstall + os.sep + 'lascolor',
'-i ' + strPathInLAS.strip(),
'-o ' + strPathOutLAS,
'-image ' + strPathTif,
strSwitches]
strCMD = ' '.join(lstCMD)
return strCMD
| 5,354,008 |
def delta(s1, s2):
""" Find the difference in characters between s1 and s2.
Complexity: O(n), n - length of s1 or s2 (they have the same length).
Returns:
dict, format {extra:[], missing:[]}
extra: list, letters in s2 but not in s1
missing: list, letters in s1 but not in s2
"""
letters = {}
for c in s1:
if c not in letters:
letters[c] = 1
else:
letters[c] += 1
extra = [] # letters which are in s2 but not in s1
for c in s2:
if c not in letters:
extra.append(c)
else:
letters[c] -=1
missing = [] # letters which are in s1 but not in s2
for (letter, count) in letters.items():
if count > 0:
missing.append(letter)
return {'extra': extra, 'missing': missing}
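# Illustrative example: delta("abcd", "abce") returns
# {'extra': ['e'], 'missing': ['d']}.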
| 5,354,009 |
def test_get_involved_cs_handles_error(github, test_client):
"""
If error happens when generating the 'get_involved_cs' page, the view
should handle it and still display most of the content. The issues
section should contain an error message with some useful links
"""
def get_issues(self, *args, **kwargs):
raise RuntimeError('Ouch!')
github.get_issues = get_issues
response = test_client.get('/zapojse/')
html = extract_issues_html(response.get_data(as_text=True))
assert response.status_code == 500
message = "DIV with the 'issues-error' class isn't present in the HTML"
assert 'issues-error' in html, message
message = "Link to alternative issues listing isn't present in the HTML"
assert 'https://github.com/pyvec/zapojse/issues' in html, message
url = '{base_url}?title={title}&body={body}'.format(
base_url='https://github.com/pyvec/python.cz/issues/new',
title=url_quote_plus('Nefunguje Zapoj se'),
body=url_quote_plus('RuntimeError: Ouch!'),
)
assert url in html, "URL for filing a bug report isn't present in the HTML"
| 5,354,010 |
def mean_predictive_value(targets, preds, cm=None, w=None, adjusted=False):
"""
:purpose:
Calculates the mean predictive value between a discrete target and pred array
:params:
targets, preds : discrete input arrays, both of shape (n,)
cm : if you have previously calculated a confusion matrix, pass it here to save the computation.
set as None, which makes the function calculate the confusion matrix
w : weights at each index of true and pred. array of shape (n,)
if no w is set, it is initialized as an array of ones
such that it will have no impact on the output
adjusted : bool. if true, adjust the output for chance (making 0 the worst
and 1 the best score). defaults to false
:returns:
mean_predictive_value : float, the mean predictive value of the targets and preds array
:example:
>>> from fastdist import fastdist
>>> import numpy as np
>>> true = np.random.RandomState(seed=0).randint(2, size=10000)
>>> pred = np.random.RandomState(seed=1).randint(2, size=10000)
>>> fastdist.mean_predictive_value(true, pred)
0.49030739883826424
by saskra
"""
w = init_w(w, len(targets))
if cm is None:
cm = confusion_matrix(targets, preds, w=w)
n = cm.shape[0]
diag, columns_sums = np.zeros(n), np.zeros(n)
for i in range(n):
diag[i] = cm[i][i] # sum of the diagonal = true results
for j in range(n):
columns_sums[j] += cm[i][j] # sums of the columns = predictions per class
class_div = diag / columns_sums # fraction of true results among the predicted ones per class
div_mean = 0
for i in range(n):
div_mean += class_div[i]
div_mean /= n # mean fraction of true results among the predicted ones
if adjusted:
div_mean -= 1 / n
div_mean /= 1 - 1 / n
return div_mean
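# Equivalent vectorized form of the loops above (illustrative; the explicit
# loops are presumably kept for numba compatibility in fastdist):
#   class_div = np.diag(cm) / cm.sum(axis=0)
#   div_mean = class_div.mean()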
| 5,354,011 |
def delta(phase,inc, ecc = 0, omega=0):
"""
Compute the distance center-to-center between planet and host star.
___
INPUT:
phase: orbital phase, in units of the orbital period (converted to radians internally)
inc: inclination of the system in radians
OPTIONAL INPUT:
ecc: orbital eccentricity (default 0, circular orbit)
omega: argument of periastron in radians (default 0)
OUTPUT:
distance center-to-center, double-float number.
___
"""
phase = 2*np.pi*phase
if ecc == 0 and omega == 0:
delta = np.sqrt(1-(np.cos(phase)**2)*(np.sin(inc)**2))
else:
delta = (1.-ecc**2.)/(1.-ecc*np.sin(phase-omega))* np.sqrt((1.-(np.cos(phase))**2.*(np.sin(inc))**2))
return delta
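# Quick sanity check (circular orbit): at quadrature, phase=0.25 with an edge-on
# inclination gives delta(0.25, np.pi/2) == 1.0, i.e. one orbital radius between
# the centers (distances are presumably in units of the semi-major axis).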
| 5,354,012 |
def preconfigureExternalDeps(cfgCtx):
"""
Configure external dependencies.
Stores the configuration of dependency producers on cfgCtx.zmdepconfs, ready to use
in the ZenMake command 'configure' but not in others.
"""
resultRules = []
bconfManager = cfgCtx.bconfManager
allTasks = cfgCtx.allTasks
rootdir = bconfManager.root.rootdir
buildtype = bconfManager.root.selectedBuildType
for bconf in bconfManager.configs:
deps = _handleTasksWithDeps(bconf, allTasks)
if not deps:
continue
depConfs = bconf.edeps
for depName in deps:
depConf = depConfs[depName]
depConf['name'] = depName
depRootDir = depConf.get('rootdir')
if depRootDir is None:
msg = "Dependency %r has no 'rootdir'." % depName
raise error.ZenMakeConfError(msg, confpath = bconf.path)
_detectZenMakeProjectRules(depConf, buildtype)
rules = depConf.get('rules', {})
for ruleName, ruleParams in rules.items():
ruleParams = _initRule(ruleName, ruleParams, rootdir, depRootDir)
if not ruleParams.get('cmd'):
msg = "Dependency %r: parameter 'cmd' is empty" % depName
msg += " for rule %r." % ruleName
log.warn(msg)
continue
ruleParams['$from-deps'] = [depConf]
resultRules.append(ruleParams)
cfgCtx.zmdepconfs = _dispatchRules(resultRules)
| 5,354,013 |
def count_mementos():
""" Helper function to count mementos from timemap in 11/2016 """
directory = "data/timemaps/"
print("For the month of November 2016")
for filename in os.listdir(directory):
with open(directory + filename) as f:
resp = json.load(f)
counter = 0
for i, uri_m in enumerate(resp):
datetime_val = uri_m[1]
if datetime_val.startswith("201611"):
counter += 1
print("URI {} has {} mementos".format(
resp[1][0], counter))
| 5,354,014 |
def theoritical_spectrum(peptide_sequence):
"""Returns the theoritical spectrum of a given amino acid sequence.
INPUT :
peptide_sequence: string. The peptide sequence to get its theoritical spectrum
OUTPUT:
.: List. The theoritical spectrum of the given peptide sequence.
"""
linear_kmers = []
cyclic_kmers = []
for i in range(len(peptide_sequence)):
for j in range(i,len(peptide_sequence)):
linear_kmers.append(peptide_sequence[i:j+1])
for i in range(2,len(peptide_sequence)):
for j in range(i-1):
cyclic_kmers.append(peptide_sequence[i:len(peptide_sequence)]+peptide_sequence[0:j+1])
kmers = linear_kmers+cyclic_kmers
return sorted(list(map(get_molecular_weight,kmers)))
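# A minimal sketch (not part of the original entry) of the `get_molecular_weight`
# helper assumed above, using the standard integer residue mass table commonly
# used in theoretical-spectrum exercises; the original project may use exact
# monoisotopic masses instead.
INTEGER_MASS = {
    'G': 57, 'A': 71, 'S': 87, 'P': 97, 'V': 99, 'T': 101, 'C': 103,
    'I': 113, 'L': 113, 'N': 114, 'D': 115, 'K': 128, 'Q': 128, 'E': 129,
    'M': 131, 'H': 137, 'F': 147, 'R': 156, 'Y': 163, 'W': 186,
}

def get_molecular_weight(peptide):
    """Sum of residue masses for a peptide string."""
    return sum(INTEGER_MASS[aa] for aa in peptide)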
| 5,354,015 |
def is_amazon(source_code):
"""
Method checks whether a given book is a physical book or an ebook giveaway for a linked Amazon account.
:param source_code:
:return:
"""
for line in source_code:
if "Your Amazon Account" in line:
return True
return False
| 5,354,016 |
def transdecodeToPeptide(sample_name, output_dir, rerun_rules, sample_dir,
mets_or_mags = "mets", transdecoder_orf_size = 100,
nt_ext = ".fasta", pep_ext = ".faa", run_transdecoder = False):
"""
Use TransDecoder to convert input nucleotide metatranscriptomic sequences to peptide sequences.
"""
if (not run_transdecoder):
return 0
print("Running TransDecoder for sample " + str(sample_name) + "...", flush = True)
os.system("mkdir -p " + os.path.join(output_dir, mets_or_mags, "transdecoder"))
if (os.path.isfile(os.path.join(output_dir, mets_or_mags,
sample_name + pep_ext))) & (not rerun_rules):
print("TransDecoder file already detected for sample " +
str(sample_name) + "; will not re-run step.", flush = True)
return 0
elif (os.path.isfile(os.path.join(sample_dir, sample_name + pep_ext))) & (not rerun_rules):
print("Protein files detected for sample in sample directory; " +
"will not TransDecode.", flush = True)
os.system("cp " + os.path.join(sample_dir, sample_name + pep_ext) + " " +
os.path.join(output_dir, mets_or_mags, sample_name + pep_ext))
return 0
TD_log = open(os.path.join(output_dir,"log","transdecoder_longorfs_" + sample_name + ".log"), "w+")
TD_err = open(os.path.join(output_dir,"log","transdecoder_longorfs_" + sample_name + ".err"), "w+")
if (not os.path.isfile(os.path.join(sample_dir, sample_name + nt_ext))):
print("File: " + os.path.join(sample_dir, sample_name + nt_ext) + " was called by TransDecoder and "
"does not exist. Check for typos.")
sys.exit(1)
rc1 = subprocess.Popen(["TransDecoder.LongOrfs", "-t", os.path.join(sample_dir, sample_name + nt_ext),
"-m", str(transdecoder_orf_size)], stdout = TD_log, stderr = TD_err).wait()
TD_log.close()
TD_err.close()
TD_log = open(os.path.join(output_dir,"log","transdecoder_predict_" + sample_name + ".log"), "w+")
TD_err = open(os.path.join(output_dir,"log","transdecoder_predict_" + sample_name + ".err"), "w+")
rc2 = subprocess.Popen(["TransDecoder.Predict", "-t", os.path.join(sample_dir, sample_name + nt_ext),
"--no_refine_starts"], stdout = TD_log, stderr = TD_err).wait()
#rc2 = p2.returncode
TD_log.close()
TD_err.close()
if (rc1 + rc2) != 0:
print("TransDecoder did not complete successfully for sample " +
str(sample_name) + ". Check <output_dir>/log/ folder for details.")
sys.exit(1)
merged_name = sample_name + nt_ext
os.system("mkdir -p " + os.path.join(output_dir, mets_or_mags))
os.system("mkdir -p " + os.path.join(output_dir, mets_or_mags, "transdecoder"))
os.replace(merged_name + ".transdecoder.pep", os.path.join(output_dir, mets_or_mags,
sample_name + pep_ext))
os.replace(merged_name + ".transdecoder.cds", os.path.join(output_dir, mets_or_mags,
"transdecoder", sample_name +
".fasta.transdecoder.cds"))
os.replace(merged_name + ".transdecoder.gff3", os.path.join(output_dir, mets_or_mags,
"transdecoder", sample_name +
".fasta.transdecoder.gff3"))
os.replace(merged_name + ".transdecoder.bed", os.path.join(output_dir, mets_or_mags,
"transdecoder", sample_name +
".fasta.transdecoder.bed"))
#shutil.rmtree
os.system("rm -rf " + merged_name + "*.transdecoder_dir*")
return rc1 + rc2
| 5,354,017 |
def fechaTurmaSeNaoTemAlunoForm(modelo, y, p, turmasPermitidas):
"""Fecha as turmas que nao possuem alunos de formulario."""
for t in turmasPermitidas:
alunosForm = [y[k][t] for k in y]
modelo.solver.Add(p[t] <= sum(alunosForm))
| 5,354,018 |
def test_convert_invalid(schema, value, exception):
"""
GIVEN invalid schema and expected exception
WHEN convert is called with the schema
THEN the expected exception is raised.
"""
with pytest.raises(exception):
utility_base.to_dict.simple.convert(schema=schema, value=value)
| 5,354,019 |
def data_dim(p):
""" Return the dimensionality of the dataset """
dataset_class = DATASETS[p.dataset]
return dataset_class(p).get_in_dim()
| 5,354,020 |
def create_supervised_evaluator(model, metrics=None,
                                device=None, non_blocking=False,
                                prepare_batch=_prepare_batch,
                                output_transform=lambda x, y, y_pred: (y_pred, y,)):
"""
Factory function for creating an evaluator for supervised models.
Args:
model (`torch.nn.Module`): the model to train.
metrics (dict of str - :class:`~ignite.metrics.Metric`): a map of metric names to Metrics.
device (str, optional): device type specification (default: None).
Applies to both model and batches.
non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
output_transform (callable, optional): function that receives 'x', 'y', 'y_pred' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` which fits
output expected by metrics. If you change it you should use `output_transform` in metrics.
Note: `engine.state.output` for this engine is defined by `output_transform` parameter and is
a tuple of `(batch_pred, batch_y)` by default.
Returns:
Engine: an evaluator engine with supervised inference function.
"""
metrics = metrics or {}
if device:
model.to(device)
def _inference(engine, batch):
model.eval()
with torch.no_grad():
# z is optional (e.g. task ids)
x, y, *z = prepare_batch(batch, device=device, non_blocking=non_blocking)
y_pred = model(*(x, *z))
# if hasattr(model, 'arch_sampler'):
# ent = model.arch_sampler.entropy().mean()
return output_transform(x, y, y_pred)
engine = Engine(_inference)
for name, metric in metrics.items():
metric.attach(engine, name)
return engine
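# Hypothetical usage sketch (assumes a trained `model`, a `val_loader`, and
# ignite's built-in Accuracy metric):
#   from ignite.metrics import Accuracy
#   evaluator = create_supervised_evaluator(model, metrics={'acc': Accuracy()})
#   state = evaluator.run(val_loader)
#   print(state.metrics['acc'])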
| 5,354,021 |
def historical_earning_calendar(
apikey: str, symbol: str, limit: int = DEFAULT_LIMIT
) -> typing.Optional[typing.List[typing.Dict]]:
"""
Query FMP /historical/earning_calendar/ API.
Note: Between the "from" and "to" parameters the maximum time interval can be 3 months.
:param apikey: Your API key.
:param symbol: Company ticker.
:param limit: Number of rows to return.
:return: A list of dictionaries.
"""
path = f"historical/earning_calendar/{symbol}"
query_vars = {
"apikey": apikey,
"symbol": symbol,
"limit": limit,
}
return __return_json_v3(path=path, query_vars=query_vars)
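# Hypothetical usage sketch (requires a valid FMP API key):
#   events = historical_earning_calendar(apikey="YOUR_API_KEY", symbol="AAPL", limit=10)
#   # each element is a dict with fields such as 'date', 'symbol' and EPS figures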
| 5,354,022 |
def muteblockdelmenu():
"""
Submenu to hide away all the unpalatable things.
Arguments:
none
User input:
The command to execute.
"""
choice = input("""
| mute/block/delete |
delp = delete a post
muteu = mute a user
unmuteu = unmute a user
mutec = mute a channel
unmutec = unmute a channel
[return] = back
""")
if choice == "delp":
deletepost()
elif choice == "mutec":
mutechannel()
elif choice == "muteu":
muteuser()
elif choice == "unmutec":
unmutechannel()
elif choice == "unmuteu":
unmuteuser()
| 5,354,023 |
def config_section_data():
"""Produce the default configuration section for app.config,
when called by `resilient-circuits config [-c|-u]`
"""
config_data = u"""[fn_query_tor_network]
base_url = https://onionoo.torproject.org/details
#The Flag can be 'Running','Exit' for more information on flag settings - https://metrics.torproject.org/onionoo.html
flag = Exit
# The data fields should be comma separated and no space should be given in between each fields
data_fields = exit_addresses,or_addresses,host_name"""
return config_data
| 5,354,024 |
def test_PearsonCorrlation_MIP_AlgoTesting_3p1():
"""
Results from 2019_MIP_Algo_Testing/PearsonCorrelation
var1 vs var2
Pearson's r -0.006
p-value 0.867
95% CI Upper 0.067
95% CI Lower -0.079
"""
logging.info("---------- TEST : Pearson Correlation MIP_Algo_Testing_3p1")
data = [
{
"name" : "x",
"value": "var1"
},
{
"name" : "y",
"value": "var2"
},
{ "name": "pathology",
"value":"dementia"
},
{
"name" : "dataset",
"value": "data_pr1"
},
{
"name" : "filter",
"value": ""
},
]
headers = {'Content-type': 'application/json', "Accept": "text/plain"}
r = requests.post(endpointUrl, data=json.dumps(data), headers=headers)
result = json.loads(r.text)
check_result(
result['result'][0]['data'][0], 'var1 ~ var2', -0.006, 0.867, -0.079, 0.067
)
| 5,354,025 |
def show_object_id_by_date(
move_data,
create_features=True,
kind=None,
figsize=(21, 9),
return_fig=True,
save_fig=True,
name='shot_points_by_date.png',
):
"""
Generates four visualizations based on datetime features:
- Bar chart of trajectories by day period
- Bar chart of trajectories by day of the week
- Line chart of trajectories by date
- Line chart of trajectories by hours of the day.
Parameters
----------
move_data : pymove.core.MoveDataFrameAbstract subclass.
Input trajectory data.
create_features : bool, optional, default True.
If False, the datetime features generated for plotting are removed from move_data afterwards.
kind: list or None
Determines the kinds of each plot
figsize : tuple, optional, default (21,9).
Represents dimensions of figure.
return_fig : bool, optional, default True.
Represents whether or not to return the generated figure.
save_fig : bool, optional, default True.
Represents whether or not to save the generated picture.
name : String, optional, default 'shot_points_by_date.png'.
Represents name of a file.
Returns
-------
matplotlib.pyplot.figure or None
The generated picture.
References
----------
https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.plot.html
"""
if kind is None:
kind = ['bar', 'bar', 'line', 'line']
fig, ax = plt.subplots(2, 2, figsize=figsize)
move_data.generate_date_features()
move_data.generate_hour_features()
move_data.generate_time_of_day_features()
move_data.generate_day_of_the_week_features()
move_data.groupby([PERIOD])[TRAJ_ID].nunique().plot(
subplots=True, kind=kind[0], rot=0, ax=ax[0][0], fontsize=12
)
move_data.groupby([DAY])[TRAJ_ID].nunique().plot(
subplots=True, kind=kind[1], ax=ax[0][1], rot=0, fontsize=12
)
move_data.groupby([DATE])[TRAJ_ID].nunique().plot(
subplots=True,
kind=kind[2],
grid=True,
ax=ax[1][0],
rot=90,
fontsize=12,
)
move_data.groupby([HOUR])[TRAJ_ID].nunique().plot(
subplots=True, kind=kind[3], grid=True, ax=ax[1][1], fontsize=12
)
if not create_features:
move_data.drop(columns=[DATE, HOUR, PERIOD, DAY], inplace=True)
if save_fig:
fig.savefig(fname=name)
if return_fig:
return fig
| 5,354,026 |
def raw_escape(pattern, unix=None, raw_chars=True):
"""Apply raw character transform before applying escape."""
return _wcparse.escape(util.norm_pattern(pattern, False, raw_chars, True), unix=unix, pathname=True, raw=True)
| 5,354,027 |
def interpolate_effective_area_per_energy_and_fov(
effective_area,
grid_points,
target_point,
min_effective_area=1. * u.Unit('m2'),
method='linear',
):
"""
Takes a grid of effective areas for a bunch of different parameters
and interpolates (log) effective areas to given value of those parameters
Parameters
----------
effective_area: np.array of astropy.units.Quantity[area]
grid of effective area, of shape (n_grid_points, n_fov_offset_bins, n_energy_bins)
grid_points: np.array
list of parameters corresponding to effective_area, of shape (n_grid_points, n_interp_dim)
target_point: np.array
values of parameters for which the interpolation is performed, of shape (n_interp_dim)
min_effective_area: astropy.units.Quantity[area]
Minimum value of effective area to be considered for interpolation
method: 'linear', 'nearest', 'cubic'
Interpolation method
Returns
-------
aeff_interp: astropy.units.Quantity[area]
Interpolated Effective area array with shape (n_energy_bins, n_fov_offset_bins)
"""
# get rid of units
effective_area = effective_area.to_value(u.m**2)
min_effective_area = min_effective_area.to_value(u.m**2)
# remove zeros and log it
effective_area[effective_area < min_effective_area] = min_effective_area
effective_area = np.log(effective_area)
# interpolation
aeff_interp = griddata(grid_points, effective_area, target_point, method=method).T
# exp it and set to zero too low values
aeff_interp = np.exp(aeff_interp)
aeff_interp[aeff_interp < min_effective_area * 1.1] = 0 # 1.1 to correct for numerical uncertainty and interpolation
return u.Quantity(aeff_interp, u.m**2, copy=False)
| 5,354,028 |
def extract_edge(stats:np.ndarray, idxs_upper:np.ndarray, runner:int, max_index:int, maximum_offset:float, iso_charge_min:int = 1, iso_charge_max:int = 6, iso_mass_range:int=5)->list:
"""Extract edges.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
idxs_upper (np.ndarray): Upper index for comparing.
runner (int): Index.
max_index (int): Unused.
maximum_offset (float): Maximum offset when comparing edges.
iso_charge_min (int, optional): Minimum isotope charge. Defaults to 1.
iso_charge_max (int, optional): Maximum isotope charge. Defaults to 6.
iso_mass_range (float, optional): Mass search range. Defaults to 5.
Returns:
list: List of edges.
"""
edges = []
mass1 = stats[runner, 0]
delta_mass1 = stats[runner, 1]
for j in range(runner+1, idxs_upper[runner]):
mass2 = stats[j, 0]
if np.abs(mass2 - mass1) <= maximum_offset:
delta_mass2 = stats[j, 1]
for charge in range(iso_charge_min, iso_charge_max + 1):
if check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge, iso_mass_range):
edges.append((runner, j))
break
return edges
| 5,354,029 |
def _neq_attr(node, attr, gens, container):
"""
Calcs fitness based on the fact that node's target shall not have an attr
with a certain value.
"""
trg_nd = container.nodes[gens[node]]
if attr[0] in trg_nd and attr[1] == trg_nd[attr[0]]:
return 10.1
return 0.0
| 5,354,030 |
def test_query_works_with_string(fredo):
"""Str is allowed in query"""
_, query = fredo.parse_query('test')
assert 'test' == query
| 5,354,031 |
def init_call_probabilities(size, calls_per_hrs):
"""Precomputes table of boolean call probabilities shared for all users."""
global _rand_bool, _rand_bool_init, _rand_bool_num, _rand_bool_prob
_rand_bool_prob = float(calls_per_hrs) / 3600.0
_rand_bool = np.random.rand(size) < _rand_bool_prob
_rand_bool_init = True
_rand_bool_num = size
| 5,354,032 |
def getItem( user, list, itempk ):
"""
Get a single item from a list.
:param user: user who owns list
:param list: list containing item
:param itempk: private key of item
:return: item or None
"""
itemType = list.itemType
item = None
if itemType == 'Item':
item = Item.objects.get( pk=itempk, list=list, user=user )
elif itemType == 'Link':
item = Link.objects.get( pk=itempk, list=list, user=user )
elif itemType == 'Book':
item = Book.objects.get( pk=itempk, list=list, user=user )
elif itemType == 'Show' or itemType == 'Movie':
item = Video.objects.get( pk=itempk, list=list, user=user )
return item
| 5,354,033 |
def set_user_to_vendor(sender, instance, **kwargs):
"""Set the user is_vendor attribut to True when VendorProfile is saved."""
instance.user.is_vendor = True
instance.user.save()
| 5,354,034 |
def metric_divergence(neighborhood_vectors: np.ndarray, dL: float, polarity: int) -> float:
"""
Calculates the divergence of a sampling volume neighborhood.
Note: For JIT to work, this must be declared at the top level.
@param neighborhood_vectors: Sampling volume neighborhood vectors (six 3D vectors)
@param dL: Length element
@param polarity: Polarity filter (-1: Keep values <= 0; 0: Keep all values; +1: Keep values >= 0)
"""
dxp = neighborhood_vectors[0][0]
dxn = neighborhood_vectors[3][0]
dyp = neighborhood_vectors[1][1]
dyn = neighborhood_vectors[4][1]
dzp = neighborhood_vectors[2][2]
dzn = neighborhood_vectors[5][2]
value = (dxp - dxn + dyp - dyn + dzp - dzn) / 2 / dL
if polarity == -1:
if value > 0:
return np.NaN
else:
return -value # Keep divergence positive, especially for use as alpha metric
elif polarity == +1:
if value < 0:
return np.NaN
else:
return value
else:
return value
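# Sanity check (illustrative): sampling the field F(r) = r at the six axis
# neighbors of a point reproduces div(x, y, z) = 3 with this finite difference.
#   p, dL = np.array([1.0, 2.0, 3.0]), 0.1
#   offsets = np.array([[dL, 0, 0], [0, dL, 0], [0, 0, dL],
#                       [-dL, 0, 0], [0, -dL, 0], [0, 0, -dL]])
#   metric_divergence(p + offsets, dL, 0)   # -> 3.0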
| 5,354,035 |
def parse_args():
"""
Parse command line arguments.
Parameters:
None
Returns:
parser arguments
"""
parser = argparse.ArgumentParser(description='LeNet model')
optional = parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
required.add_argument('--dataset',
dest='dataset',
help='Choice of dataset to train model',
choices=['mnist', 'cifar10'],
default=None)
optional.add_argument('--print_model',
dest='print_model',
help='Print LeNet model',
action='store_true')
optional.add_argument('--train_model',
dest='train_model',
help='Train LeNet on MNIST',
action='store_true')
optional.add_argument('-s', '--save_weights',
dest='save_weights',
help='Save the trained weights',
default=None)
optional.add_argument('-w', '--weights',
dest='weights',
help='Path to weights (hdf5) file',
default=None)
optional.add_argument('-e', '--epochs',
dest='epochs',
help='Number of epochs for training',
type=int,
default=20)
optional.add_argument('--data_augmentation',
dest='data_augmentation',
help='Use data augmentations for input',
action='store_true')
optional.add_argument('--viz_training',
dest='viz_training',
help='Visualize the training curve',
action='store_true')
parser._action_groups.append(optional)
return parser.parse_args()
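# Hypothetical invocation of the script defining parse_args() above (the script
# name is an assumption):
#   python lenet.py --dataset mnist --train_model -e 30 -s weights.hdf5 --viz_training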
| 5,354,036 |
def tag(request):
"""
Add/Remove tag to email
"""
if request.is_ajax():
mail = request.POST.get("mail")
tag = request.POST.get("tag")
op = request.POST.get("op")
mail = get_object_or_404(Mail, pk=mail)
if op == "ADD":
mail.tags.add(tag)
elif op == "REMOVE":
mail.tags.remove(tag)
else:
raise Http404("404")
return JsonResponse({"ok": True})
raise Http404("404")
| 5,354,037 |
def sid_to_smiles(sid):
"""Takes an SID and prints the associated SMILES string."""
substance = pc.Substance.from_sid(sid)
cid = substance.standardized_cid
compound = pc.get_compounds(cid)[0]
return compound.isomeric_smiles
| 5,354,038 |
def view_static(request, **kwargs):
"""Outputs static page."""
template = kwargs.get('template', None)
if not template:
raise Http404
template = '.'.join([template, 'html'])
title = kwargs.get('title', 'static page')
img = kwargs.get('img', 'bgag.jpg')
return render_to_response(template, {
'is_mobile': request.user_agent.is_mobile,
'page_title': title,
'menu': MenuItem.active().order_by('order_id'),
'page_img': img,
})
| 5,354,039 |
def start_call(called_ident, skicall):
"""When a call is initially received this function is called.
Unless you want to divert to another page, this function should return called_ident which
would typically be the ident of a Responder or Template page dealing with the call.
If a ServeFile exception is raised, which contains a pathlib.Path object of a local server
file then that server file will be sent to the client. In this case, the end_call function
will not be called."""
# To serve a directory of static files, you can map a url to a server directory with the
# skicall.map_url_to_server method, which returns pathlib.Path objects, and then
# raise a ServeFile exception, which causes the file to be served. For example:
# servedfile = skicall.map_url_to_server("images", "/home/user/thisproject/imagefiles")
# if servedfile:
# raise ServeFile(servedfile)
# Of particular interest at this point are the attributes:
# skicall.received_cookies is a dictionary of cookie name:values received from the client
# skicall.call_data is a dictionary which you can set with your own data and, as skicall is
# passed on to the submit_data and end_call functions defined below, can be used to pass
# data to these functions.
# Normally you would return called_ident, which is the page being called, or None to cause a
# page not found error, or another ident (project, pagenumber) to divert the call to another page.
return called_ident
| 5,354,040 |
def getmemory():
"""
Returns the memory limit for data arrays (in MB).
"""
return NX_MEMORY
| 5,354,041 |
def get_object_syncing_state():
""" Get a dictionary mapping which object trackers are active.
The dictionary contains name:bool pairs that can be fed back into
the func:`set_object_syncing_state()` function.
"""
states = {
"selection": bool(this._on_selection_changed_cb_id),
"duplicate": bool(this._on_before_duplicate_cb_id),
"name": bool(this._on_name_changed_cb_id),
"existence": bool(this._on_object_deleted_cb_id),
"relationship": bool(this._on_parent_changed_cb_id),
}
return states
| 5,354,042 |
def determine_nohit_score(cons, invert):
"""
Determine the value in the matrix assigned to nohit given SeqFindr options
:param cons: whether the Seqfindr run is using mapping consensus data
or not
:param invert: whether the Seqfindr run is inverting (missing hits to
be shown as black bars.
:type cons: None or boolean
:type invert: boolean
:returns: the value defined as no hit in the results matrix
"""
if cons is None:
nohit = 0.5
else:
nohit = 1.0
if invert:
nohit = nohit*-1.0
return nohit
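# Illustrative values: determine_nohit_score(None, False) -> 0.5, while
# determine_nohit_score(True, True) -> -1.0 (mapping consensus, inverted).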
| 5,354,043 |
def handle_requests():
"""
This function handle requests.
"""
while True:
requests_batch = []
while not (
len(requests_batch) > BATCH_SIZE or
(len(requests_batch) > 0 and time.time() - requests_batch[0]['time'] > BATCH_TIMEOUT)
):
try:
requests_batch.append(requests_queue.get(timeout=CHECK_INTERVAL))
except Empty:
continue
texts = [request['input']['text'] for request in requests_batch]
accents = [request['input']['accent'] for request in requests_batch]
speeds = [request['input']['speed'] for request in requests_batch]
sampling_rates = [request['input']['sr'] for request in requests_batch]
try:
sentence = texts[0]
accent = accents[0]
speed = speeds[0]
sampling_rate = sampling_rates[0]
request = requests_batch[0]
data = inference(model_text2mel,
model_mel2audio,
denoiser,
sentence,
accent,
speed,
sampling_rate)
request['output'] = data
except Exception:
request['output'] = "Fail"
continue
| 5,354,044 |
def changesettings():
"""
Changes global settings.
Arguments:
none
User input:
The setting to change:
retrievecount and channelcount = the number of posts fetched from the server. No input validation.
"""
global retrievecount, channelcount
choice = input(("""
| settings |
gc = change general count? ({0})
cc = change channel count? ({1})
[return] = back
""").format(str(retrievecount), str(channelcount)))
# Set global retrieve count
rcount = retrievecount
if choice == "gc":
print("-general count is currently", retrievecount)
rcount = input("*Please change, to (>0)? ")
if int(rcount) > 0:
retrievecount = int(rcount)
print("-general count is now", retrievecount, "posts")
# Set channels retrieve count
ccount = channelcount
if choice == "cc":
print("-channel count is currently", channelcount)
ccount = input("*Please change, to (>0)? ")
if int(ccount) > 0:
channelcount = int(ccount)
print("-channel count is now", channelcount, "posts")
# Every time settings menu is used, save to "ppconfig.ini" file:
config["USER"]["retrievecount"] = str(retrievecount)
config["USER"]["channelcount"] = str(channelcount)
with open("ppconfig.ini", "w") as configfile:
config.write(configfile)
print("-settings saved\n")
| 5,354,045 |
def estimate_csd(lfp, coord_electrode, sigma, method='standard', diam=None,
h=None, sigma_top=None, tol=1E-6, num_steps=200,
f_type='identity', f_order=None):
"""
Estimates current source density (CSD) from local field potential (LFP)
recordings from multiple depths of the cortex.
Parameters
----------
lfp : neo.AnalogSignalArray
LFP signals from which CSD is estimated.
coord_electrode : Quantity array
Depth of evenly spaced electrode contact points.
sigma : Quantity float
Conductivity of tissue.
method : string
CSD estimation method, either of 'standard': the standard
double-derivative method, 'delta': delta-iCSD method, 'step':
step-iCSD method, 'spline': spline-iCSD method. Default is 'standard'
diam : Quantity float
Diameter of the assumed circular planar current sources centered at
each contact, required by iCSD methods (= 'delta', 'step',
'spline'). Default is `None`.
h : float or np.ndarray * quantity.Quantity
assumed thickness of the source cylinders at all or each contact
sigma_top : Quantity float
Conductivity on top of tissue. When set to `None`, the same value as
sigma: is used. Default is `None`.
tol : float
Tolerance of numerical integration, required by step- and
spline-iCSD methods. Default is 1E-6.
num_steps : int
Number of data points for the spatially upsampled LFP/CSD data,
required by spline-iCSD method. Default is 200.
f_type : string
Type of spatial filter used for smoothing of the result, either of
'boxcar' (uses `scipy.signal.boxcar()`), 'hamming' (
`scipy.signal.hamming()`), 'triangular' (`scipy.signal.triang()`),
'gaussian' (`scipy.signal.gaussian`), 'identity' (no smoothing is
applied). Default is 'identity'.
f_order : float tuple
Parameters to be passed to the scipy.signal function associated with
the specified filter type.
Returns
-------
tuple : (csd, csd_filtered)
csd : neo.AnalogSignalArray
Estimated CSD
csd_filtered : neo.AnalogSignalArray
Estimated CSD, spatially filtered
Example
-------
import numpy as np
import matplotlib.pyplot as plt
from scipy import io
import quantities as pq
import neo
import icsd
#loading test data
test_data = io.loadmat('test_data.mat')
#prepare lfp data for use, by changing the units to SI and append
#quantities, along with electrode geometry and conductivities
lfp_data = test_data['pot1'] * 1E-3 * pq.V # [mV] -> [V]
z_data = np.linspace(100E-6, 2300E-6, 23) * pq.m # [m]
diam = 500E-6 * pq.m # [m]
sigma = 0.3 * pq.S / pq.m # [S/m] or [1/(ohm*m)]
sigma_top = 0. * pq.S / pq.m # [S/m] or [1/(ohm*m)]
lfp = neo.AnalogSignalArray(lfp_data.T, sampling_rate=2.0*pq.kHz)
# Input dictionaries for each method
params = {}
params['delta'] = {
'method': 'delta',
'lfp' : lfp,
'coord_electrode' : z_data,
'diam' : diam, # source diameter
'sigma' : sigma, # extracellular conductivity
'sigma_top' : sigma, # conductivity on top of cortex
}
params['step'] = {
'method': 'step',
'lfp' : lfp,
'coord_electrode' : z_data,
'diam' : diam,
'sigma' : sigma,
'sigma_top' : sigma,
'tol' : 1E-12, # Tolerance in numerical integration
}
params['spline'] = {
'method': 'spline',
'lfp' : lfp,
'coord_electrode' : z_data,
'diam' : diam,
'sigma' : sigma,
'sigma_top' : sigma,
'num_steps' : 201, # Spatial CSD upsampling to N steps
'tol' : 1E-12,
}
params['standard'] = {
'method': 'standard',
'lfp' : lfp,
'coord_electrode' : z_data,
'sigma' : sigma,
}
#plot LFP signal
fig, axes = plt.subplots(len(params)+1, 1, figsize=(6, 8))
ax = axes[0]
im = ax.imshow(lfp.magnitude.T, origin='upper', vmin=-abs(lfp).max(),
vmax=abs(lfp).max(), cmap='jet_r', interpolation='nearest')
ax.axis(ax.axis('tight'))
cb = plt.colorbar(im, ax=ax)
cb.set_label('LFP (%s)' % lfp_data.dimensionality.string)
ax.set_xticklabels([])
ax.set_title('LFP')
ax.set_ylabel('ch #')
i_ax = 1
for method, param in params.items():
ax = axes[i_ax]
i_ax += 1
csd = icsd.estimate_csd(**param)
im = ax.imshow(csd.magnitude.T, origin='upper', vmin=-abs(csd).max(),
vmax=abs(csd).max(), cmap='jet_r',
interpolation='nearest')
ax.axis(ax.axis('tight'))
ax.set_title(method)
cb = plt.colorbar(im, ax=ax)
cb.set_label('CSD (%s)' % csd.dimensionality.string)
ax.set_xticklabels([])
ax.set_ylabel('ch #')
plt.show()
"""
supported_methods = ('standard', 'delta', 'step', 'spline')
icsd_methods = ('delta', 'step', 'spline')
if method not in supported_methods:
print("Pamareter `method` must be either of {}".format(
", ".join(supported_methods)))
raise ValueError
elif method in icsd_methods and diam is None:
print("Parameter `diam` must be specified for iCSD methods: {}".format(
", ".join(icsd_methods)))
raise ValueError
if not isinstance(lfp, neo.AnalogSignalArray):
print('Parameter `lfp` must be neo.AnalogSignalArray')
raise TypeError
if f_type != 'identity' and f_order is None:
print("The order of {} filter must be specified".format(f_type))
raise ValueError
lfp_pqarr = lfp.magnitude.T * lfp.units
if sigma_top is None:
sigma_top = sigma
arg_dict = {'lfp': lfp_pqarr,
'coord_electrode': coord_electrode,
'sigma': sigma,
'f_type': f_type,
'f_order': f_order,
}
if method == 'standard':
csd_estimator = StandardCSD(**arg_dict)
else:
arg_dict['diam'] = diam
arg_dict['sigma_top'] = sigma_top
if method == 'delta':
csd_estimator = DeltaiCSD(**arg_dict)
else:
arg_dict['tol'] = tol
if method == 'step':
arg_dict['h'] = h
csd_estimator = StepiCSD(**arg_dict)
else:
arg_dict['num_steps'] = num_steps
csd_estimator = SplineiCSD(**arg_dict)
csd_pqarr = csd_estimator.get_csd()
csd_pqarr_filtered = csd_estimator.filter_csd(csd_pqarr)
csd = neo.AnalogSignalArray(csd_pqarr.T, t_start=lfp.t_start,
sampling_rate=lfp.sampling_rate)
csd_filtered = neo.AnalogSignalArray(csd_pqarr_filtered.T, t_start=lfp.t_start,
sampling_rate=lfp.sampling_rate)
return csd, csd_filtered
| 5,354,046 |
def library_get_monomer_desc(res_name):
"""Loads/caches/returns the monomer description objec MonomerDesc
for the given monomer residue name.
"""
assert isinstance(res_name, str)
try:
return MONOMER_RES_NAME_CACHE[res_name]
except KeyError:
pass
mon_desc = library_construct_monomer_desc(res_name)
if mon_desc is None:
return None
MONOMER_RES_NAME_CACHE[res_name] = mon_desc
return mon_desc
| 5,354,047 |
def parse_single_sequence_example(
serialized, context_features=None, sequence_features=None,
example_name=None, name=None):
# pylint: disable=line-too-long
"""Parses a single `SequenceExample` proto.
Parses a single serialized [`SequenceExample`](https://www.tensorflow.org/code/tensorflow/core/example/example.proto)
proto given in `serialized`.
This op parses a serialize sequence example into a tuple of dictionaries
mapping keys to `Tensor` and `SparseTensor` objects respectively.
The first dictionary contains mappings for keys appearing in
`context_features`, and the second dictionary contains mappings for keys
appearing in `sequence_features`.
At least one of `context_features` and `sequence_features` must be provided
and non-empty.
The `context_features` keys are associated with a `SequenceExample` as a
whole, independent of time / frame. In contrast, the `sequence_features` keys
provide a way to access variable-length data within the `FeatureList` section
of the `SequenceExample` proto. While the shapes of `context_features` values
are fixed with respect to frame, the frame dimension (the first dimension)
of `sequence_features` values may vary between `SequenceExample` protos,
and even between `feature_list` keys within the same `SequenceExample`.
`context_features` contains `VarLenFeature` and `FixedLenFeature` objects.
Each `VarLenFeature` is mapped to a `SparseTensor`, and each `FixedLenFeature`
is mapped to a `Tensor`, of the specified type, shape, and default value.
`sequence_features` contains `VarLenFeature` and `FixedLenSequenceFeature`
objects. Each `VarLenFeature` is mapped to a `SparseTensor`, and each
`FixedLenSequenceFeature` is mapped to a `Tensor`, each of the specified type.
The shape will be `(T,) + df.dense_shape` for `FixedLenSequenceFeature` `df`, where
`T` is the length of the associated `FeatureList` in the `SequenceExample`.
For instance, `FixedLenSequenceFeature([])` yields a scalar 1-D `Tensor` of
static shape `[None]` and dynamic shape `[T]`, while
`FixedLenSequenceFeature([k])` (for `int k >= 1`) yields a 2-D matrix `Tensor`
of static shape `[None, k]` and dynamic shape `[T, k]`.
Each `SparseTensor` corresponding to `sequence_features` represents a ragged
vector. Its indices are `[time, index]`, where `time` is the `FeatureList`
entry and `index` is the value's index in the list of values associated with
that time.
`FixedLenFeature` entries with a `default_value` and `FixedLenSequenceFeature`
entries with `allow_missing=True` are optional; otherwise, we will fail if
that `Feature` or `FeatureList` is missing from any example in `serialized`.
`example_name` may contain a descriptive name for the corresponding serialized
proto. This may be useful for debugging purposes, but it has no effect on the
output. If not `None`, `example_name` must be a scalar.
Args:
serialized: A scalar (0-D Tensor) of type string, a single binary
serialized `SequenceExample` proto.
context_features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values. These features are associated with a
`SequenceExample` as a whole.
sequence_features: A `dict` mapping feature keys to
`FixedLenSequenceFeature` or `VarLenFeature` values. These features are
associated with data within the `FeatureList` section of the
`SequenceExample` proto.
example_name: A scalar (0-D Tensor) of strings (optional), the name of
the serialized proto.
name: A name for this operation (optional).
Returns:
A tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s.
The first dict contains the context key/values.
The second dict contains the feature_list key/values.
Raises:
ValueError: if any feature is invalid.
"""
# pylint: enable=line-too-long
if not (context_features or sequence_features):
raise ValueError("Missing features.")
(context_sparse_keys, context_sparse_types, context_dense_keys,
context_dense_types, context_dense_defaults,
context_dense_shapes) = _features_to_raw_params(
context_features, [VarLenFeature, FixedLenFeature])
(feature_list_sparse_keys, feature_list_sparse_types,
feature_list_dense_keys, feature_list_dense_types,
feature_list_dense_defaults,
feature_list_dense_shapes) = _features_to_raw_params(
sequence_features, [VarLenFeature, FixedLenSequenceFeature])
return _parse_single_sequence_example_raw(
serialized, context_sparse_keys, context_sparse_types,
context_dense_keys, context_dense_types, context_dense_defaults,
context_dense_shapes, feature_list_sparse_keys,
feature_list_sparse_types, feature_list_dense_keys,
feature_list_dense_types, feature_list_dense_shapes,
feature_list_dense_defaults, example_name, name)
| 5,354,048 |
def getBase64PNGImage(pD, cmapstr, logfloor_quantile=0):
"""
Get an image as a base64 string
"""
D = np.array(pD)
if logfloor_quantile > 0:
floor = np.quantile(pD.flatten(), logfloor_quantile)
D = np.log(D + floor)
c = plt.get_cmap(cmapstr)
D = D-np.min(D)
D = np.round(255.0*D/np.max(D))
C = c(np.array(D, dtype=np.int32))
scipy.misc.imsave("temp.png", C)
b = getBase64File("temp.png")
os.remove("temp.png")
return "data:image/png;base64, " + b
| 5,354,049 |
def iter_objects(inst, exp_type):
"""Iterate over all descriptor objects accessible from inst. Yields
(name, obj) tuples."""
seen = set()
# Note: The visibility rules here aren't exactly right, but I
# think it's fine for our purposes. The difference only comes up
# if the user is dynamically adding things to instances.
yield from iter_objects_from_type(inst.__class__, exp_type, seen)
yield from iter_objects_from_instance(inst, exp_type, seen)
| 5,354,050 |
def method_dispatcher(*args, **kwargs):
"""Try to dispatch to the right HTTP method handler.
If an HTTP method isn't on the approved list, defer
to the error handler. Otherwise, the HTTP Method is
processed by the appropriate handler.
:param args: Expect arguments in format (http_method, url).
:param kwargs: Optional arguments like HTTP headers, cookies and etc.
:returns: Result from the handler.
:rtype: func
"""
http_method, url = args
if http_method not in constants.HTTP_METHOD_NAMES:
raise HTTPMethodNotSupportedError(http_method)
handler = getattr(sys.modules[__name__], http_method)
return handler(*args, **kwargs)
| 5,354,051 |
def filter_pathways(
pathways,
source=None,
target=None,
compounds=[],
enzymes=[],
context={},
):
"""
Yield pathways that meet filtering conditions.
The effect of the filtering parameters is explained below. Pathways that
repeatedly consume and produce a compound that is not ignored (see
chebi.IGNORED_COMPOUNDS) are filtered out automatically.
Parameters
----------
pathways : iterable
Lists of Rhea ID strings.
source : string
ChEBI ID of pathway's source compound. Filters pathways that
have reactions producing source.
target : string
ChEBI ID of pathway's target compound. Filters pathways that
have reactions consuming target.
compounds : iterable
CheBI ID strings. Filters pathways that don't have all
compounds.
enzymes : iterable
EC number strings. Filters pathways that don't have all enzymes.
context : dict
Key reaction_ecs maps to a dict of Rhea ID string keys to EC
number string list values.
Key stoichiometrics maps to a dict of Rhea ID string keys to
a list of dicts of substrates and products.
Yields
------
tuple
Pathway reaction tuples that meet the filtering conditions of
given arguments.
"""
reaction_ecs = context['reaction_ecs']
stoichiometrics = context['stoichiometrics']
ignored = context.get('ignored', set())
for pathway in pathways:
compounds_pw = set(['any'])
enzymes_pw = set()
for i, reaction in enumerate(pathway):
substrates = set(stoichiometrics[reaction][0])
products = set(stoichiometrics[reaction][1])
if target in substrates:
break
elif source in products:
break
elif i >= 2:
prepre_s = set(stoichiometrics[pathway[i - 2]][0])
pre_p = set(stoichiometrics[pathway[i - 1]][1])
discard_substrates_1 = substrates & (prepre_s - ignored)
discard_substrates_2 = substrates & (pre_p - ignored)
if discard_substrates_1 and discard_substrates_2:
break
prepre_p = set(stoichiometrics[pathway[i - 2]][1])
pre_s = set(stoichiometrics[pathway[i - 1]][0])
discard_products_1 = products & (prepre_p - ignored)
discard_products_2 = products & (pre_s - ignored)
if discard_products_1 and discard_products_2:
break
compounds_pw.update(substrates)
compounds_pw.update(products)
try:
enzymes_pw.update(reaction_ecs[reaction])
except KeyError:
pass
else:
if not set(enzymes) <= enzymes_pw:
continue
elif not set(compounds) <= compounds_pw:
continue
yield tuple(pathway)
| 5,354,052 |
def add_song_to_playlist(result):
"""
Add a song from the search results to the playlist. If the playlist already contains the song it will not be added
again
:param result:
"""
result_track = result["track"]
result_score = result["score"]
result_string = result_track["artist"] + " - " + result_track["title"] + " with score " + (
"%.2f" % result_score)
logging.info(result_string)
song_id = result_track["nid"]
if song_id not in playlist_song_ids:
api.add_songs_to_playlist(playlist_id, song_id)
playlist_song_ids.append(song_id)
logging.debug("Successfully added song")
stats['songs_added'] += 1
else:
logging.debug("Playlist already contains song, skipping")
stats['songs_skipped'] += 1
| 5,354,053 |
def _seqfix(ref_seq, seq, comp_len, rev):
""" Fill or trim a portion of the beginning of a sequence relative to a
reference sequence
Args:
ref_seq (str): reference sequence e.g. germline gene
seq (str): sequence to compare to reference
comp_len (int): length of subsequence to compare, e.g. necessary to
exclude the CDR3
rev (bool): whether to reverse the sequences for J gene
filling / trimming
Returns:
seq_fixed (str): sequence filled / trimmed as necessary
"""
if rev:
ref_comp = ref_seq[::-1][:comp_len]
seq_comp = seq[::-1][:comp_len]
else:
ref_comp = ref_seq[:comp_len]
seq_comp = seq[:comp_len]
ref_aligned, seq_aligned = global_pw_align(ref_comp, seq_comp)
# replace N's in seq if present
seq_aligned = _replace_Ns_with_ref(ref_aligned, seq_aligned)
if ref_aligned.startswith('-'):
# need to trim sequence
fixed = _trim_extra_nt(ref_aligned, seq_aligned)
elif seq_aligned.startswith('-'):
# need to fill sequence
fixed = _fill_missing_nt(ref_aligned, seq_aligned)
else:
fixed = seq_aligned
if rev:
seq_fixed = seq[:-comp_len] + fixed[::-1]
else:
seq_fixed = fixed + seq[comp_len:]
return seq_fixed.replace('-', '')
| 5,354,054 |
def burn_volume_func(func_below, func_above, volume, surface_raster, height_to_z, below=False, ignore_nan=False, inclusive=False):
"""
Reusable function, not for end user. Process parts of a xyz volume given a surface, below or above the intersection of the volume with the surface
"""
dim_x,dim_y,dim_z=volume.shape
z_index_max = dim_z-1
# TODO if surface_raster.shape[0] != dim_x or surface_raster.shape[1] != dim_y
for x in np.arange(0,dim_x,1):
for y in np.arange(0,dim_y,1):
# From the original code I had retrieved something I cannot understand (why 30??)
# erode_until=-(surface_raster.astype(int)-30)[x,y]
dem_height = surface_raster[x,y]
if np.isnan(dem_height):
if not ignore_nan:
volume[x,y,:]=np.nan
else:
z_height = height_to_z(dem_height)
z_height = min(z_index_max, max(0.0, z_height))
z_height = int(round(z_height))
zh_nan = z_height
if below:
if inclusive:
zh_nan = zh_nan + 1
zh_nan = min(z_index_max, max(0.0, zh_nan))
func_below(volume, x, y, zh_nan)
else:
if not inclusive:
zh_nan = zh_nan + 1
zh_nan = min(z_index_max, max(0.0, zh_nan))
func_above(volume, x, y, zh_nan)
| 5,354,055 |
def get_dedup_tokens(logits_batch: torch.Tensor) \
-> Tuple[torch.Tensor, torch.Tensor]:
"""Converts a batch of logits into the batch most probable tokens and their probabilities.
Args:
logits_batch (Tensor): Batch of logits (N x T x V).
Returns:
Tuple: Deduplicated tokens. The first element is a tensor (token indices) and the second element
is a tensor (token probabilities)
"""
logits_batch = logits_batch.softmax(-1)
out_tokens, out_probs = [], []
for i in range(logits_batch.size(0)):
logits = logits_batch[i]
max_logits, max_indices = torch.max(logits, dim=-1)
max_logits = max_logits[max_indices!=0]
max_indices = max_indices[max_indices!=0]
cons_tokens, counts = torch.unique_consecutive(
max_indices, return_counts=True)
out_probs_i = torch.zeros(len(counts), device=logits.device)
ind = 0
for i, c in enumerate(counts):
max_logit = max_logits[ind:ind + c].max()
out_probs_i[i] = max_logit
ind = ind + c
out_tokens.append(cons_tokens)
out_probs.append(out_probs_i)
out_tokens = pad_sequence(out_tokens, batch_first=True, padding_value=0.).long()
out_probs = pad_sequence(out_probs, batch_first=True, padding_value=0.)
return out_tokens, out_probs
| 5,354,056 |
def install(
name=None,
testnr=None,
identity=None,
delete=False,
mount=True,
email=None,
words=None,
server=False,
zerotier=False,
pull=True,
secret=None,
explorer=None,
code_update_force=False,
):
"""
create the 3bot container and install jumpscale inside
identity is the name of your threebot
arguments left empty will be asked interactively
testnr: if not Null the identity will become: $your3botname$testnr.test,
secret for that container will be test
email will be also predefined, and you will become admin automatically in the 3bot
words should be retrieved from 3bot connect app to be used for encryption
"""
delete = core.IT.Tools.bool(delete)
mount = core.IT.Tools.bool(mount)
if code_update_force:
pull = True
if identity:
if identity != args.identity and args.identity:
args.reset()
args.identity = identity
if email:
args.email = email
if words:
args.words = words
if secret:
args.secret = secret
if testnr:
testnr = int(testnr)
identity_you = _containers._identity_ask(identity)
email = f"test{testnr}@{identity_you}"
identity_you = identity_you.split(".", 1)[0]
identity = f"{identity_you}{testnr}.test"
name = f"test{testnr}"
c = _containers.get(
identity=identity,
name=name,
delete=delete,
mount=mount,
email=email,
pull=pull,
code_update_force=code_update_force,
words=words,
secret=secret,
explorer=explorer,
)
if zerotier:
addr = c.zerotier_connect()
print(f" - CONNECT TO YOUR 3BOT ON: https://{addr}:4000/")
if server:
_server(c)
| 5,354,057 |
def com(im):
"""
Compute the center of mass of im.
Expects that im is leveled (ie zero-centered). Ie, a pure noise image should have zero mean.
Sometimes this is improved if you square the im first com(im**2)
Returns:
y, x in array form.
"""
im = np.nan_to_num(im)
mass = np.sum(im)
ry = (
np.arange(im.shape[0]) + 0.5
) # 0.5 because we want the mass of a pixel at the center of the pixel
rx = np.arange(im.shape[1]) + 0.5
y = np.sum(ry * np.sum(im, axis=1))
x = np.sum(rx * np.sum(im, axis=0))
return utils.np_safe_divide(np.array([y, x]), mass)
| 5,354,058 |
def merge_tiffs(root):
"""
merge folds
"""
mergefolder = os.path.join(root, 'merged')
if not os.path.exists(mergefolder):
os.makedirs(mergefolder)
prob_files = {f for f in os.listdir(root) if os.path.splitext(f)[
1] in ['.tif', '.tiff']}
unfolded = {f[6:] for f in prob_files if f.startswith('fold')}
if not unfolded:
unfolded = prob_files
for prob_file in tqdm.tqdm(unfolded):
probs = []
for fold in range(5):
prob = gdal.Open(os.path.join(root, 'fold{}_'.format(
fold) + prob_file), gdal.GA_ReadOnly)
geotrans = prob.GetGeoTransform()
prob_arr = prob.ReadAsArray()
probs.append(prob_arr)
prob_arr = np.mean(probs, axis=0)
res_path_geo = os.path.join(root, 'merged', prob_file)
driver = gdal.GetDriverByName('GTiff')
outRaster = driver.Create(
res_path_geo, prob_arr.shape[1], prob_arr.shape[0], 1, gdal.GDT_Float32)
outRaster.SetGeoTransform(geotrans)
CopyDatasetInfo(prob, outRaster)
outband = outRaster.GetRasterBand(1)
outband.WriteArray(prob_arr)
outRaster.FlushCache()
| 5,354,059 |
def get_fitting_custom_pipeline():
"""
Pipeline looking like this
lagged -> custom -> lasso
"""
lagged_node = PrimaryNode('lagged')
lagged_node.custom_params = {'window_size': 50}
# For the custom node, initial-approximation params and the fit/predict model functions must be supplied
custom_node = SecondaryNode('custom', nodes_from=[lagged_node])
custom_node.custom_params = {'alpha': 5,
'model_predict': custom_ml_model_imitation_predict,
'model_fit': custom_ml_model_imitation_fit}
node_final = SecondaryNode('lasso', nodes_from=[custom_node])
pipeline = Pipeline(node_final)
return pipeline
| 5,354,060 |
def get_regularizable_variables(scope):
"""
Get *all* regularizable variables in the scope.
:param scope: scope to filter variables by
    :return: the variables registered in the REGULARIZABLE_VARS collection for that scope
"""
return tf.get_collection(REGULARIZABLE_VARS, scope)
| 5,354,061 |
def TestRapiInstanceReplaceDisks(instance):
"""Test replacing instance disks via RAPI"""
if not IsDiskReplacingSupported(instance):
print qa_logging.FormatInfo("Instance doesn't support disk replacing,"
" skipping test")
return
fn = _rapi_client.ReplaceInstanceDisks
_WaitForRapiJob(fn(instance.name,
mode=constants.REPLACE_DISK_AUTO, disks=[]))
_WaitForRapiJob(fn(instance.name,
mode=constants.REPLACE_DISK_SEC, disks="0"))
| 5,354,062 |
def check_state(seat):
"""Check whether the rules may be applied on given seat.
    If no neighbouring seat is occupied, add it to the list of seats to occupy.
    If four or more neighbouring seats are occupied, add it to the list of seats to release."""
occupied = 0
neighbours = find_neighbours(seat)
for neighbour in neighbours:
if not is_empty(neighbour):
occupied += 1
if occupied >= 4:
to_release.append(seat)
elif occupied == 0:
to_occupy.append(seat)
| 5,354,063 |
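check_state above leans on module-level helpers and work lists that are not shown; below is a minimal sketch of what they could look like for a '.', 'L', '#' seating grid (the grid contents and helper bodies are assumptions, not the original implementation), assuming the function above is in scope.
grid = ["L.L",
        "LLL",
        "L.L"]
to_release, to_occupy = [], []

def find_neighbours(seat):
    r, c = seat
    return [(r + dr, c + dc)
            for dr in (-1, 0, 1) for dc in (-1, 0, 1)
            if (dr, dc) != (0, 0)
            and 0 <= r + dr < len(grid) and 0 <= c + dc < len(grid[0])]

def is_empty(seat):
    r, c = seat
    return grid[r][c] != '#'

check_state((1, 1))
print(to_occupy)   # [(1, 1)] -- no occupied neighbours, so the seat gets occupied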
def load(midi_path: str, config: dict):
"""
    returns the train, validation, and test `tf.data.Dataset`s, each yielding `(input_seq, target_seq)`
    pairs built from the midi files under `midi_path`, plus the midi tokenizer.
    """
batch_size = config.get('batch_size', None)
# get midi files
filenames = tf.random.shuffle(glob.glob(f'{midi_path}/**/*.midi', recursive=True))
# get train, validation, and test sizes
train_split, test_split = config.get('train_size', None), config.get('test_size', None)
train_split = int(train_split * len(filenames))
test_split = int(test_split * len(filenames))
    val_split = len(filenames) - (train_split + test_split)  # remaining files go to validation
# split filenames to train, test, split
midi_ds, midi_tokenizer = _create_dataset(
filenames=filenames,
inp_len=config.get('inp_len', None),
tar_len=config.get('tar_len', None),
velocity_bins=config.get('velocity_bins', None),
rest_resolution=config.get('rest_resolution', None))
train_ds = midi_ds.take(train_split)
    val_ds = midi_ds.skip(train_split)  # skip the training portion of the full dataset
test_ds = val_ds.skip(val_split)
val_ds = val_ds.take(val_split)
return (_optimize_dataset(train_ds.padded_batch(batch_size)),
_optimize_dataset(val_ds.padded_batch(batch_size)),
_optimize_dataset(test_ds.padded_batch(batch_size)),
midi_tokenizer)
| 5,354,064 |
def update_config(from_cli, from_file):
"""Merge the two given dictionaries
Updates the first dict with items from second if they're not already
defined"""
from_cli.update(
{
key: val
for key, val in from_file.items()
# Keep item if it is not already defined
if key not in from_cli or from_cli[key] is None
}
)
| 5,354,065 |
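Assuming the update_config above is in scope, a small illustration of the merge rule: CLI values win unless they are missing or None, in which case the config-file value fills the gap.
from_cli = {"host": "example.org", "port": None}
from_file = {"host": "localhost", "port": 8080, "debug": True}

update_config(from_cli, from_file)
print(from_cli)   # {'host': 'example.org', 'port': 8080, 'debug': True}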
def test_page_title_specified_two_pages(tmp_path, param):
"""For parameters that require that only one page is in config - make sure exception is raised if there are
more pages in the config"""
config_file = mk_tmp_file(
tmp_path,
key_to_update="pages.page2",
value_to_update={"page_title": "Page2", "page_file": "page2.txt"},
)
_ = runner.invoke(
app,
[
"--config",
str(config_file),
f"--{param.replace('_', '-')}",
"Default name",
"validate",
],
)
assert _.exit_code == 1
assert "Please specify them in the config." in _.stdout
| 5,354,066 |
def git(command):
""" Run the given git command.
Args:
command (str): Complete git command with or without the binary name.
"""
command = command.split()
command = ["git"] + command if command[0] != "git" else command
run(command, stdout=PIPE, stderr=PIPE, check=True)
| 5,354,067 |
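The snippet above relies on run and PIPE from subprocess, which a self-contained version would need to import; a hedged usage sketch follows (it requires the git binary on PATH and must be run inside a repository, since check=True raises on failure).
from subprocess import run, PIPE

git("status")                 # runs: git status
git("git log -1 --oneline")   # a leading "git" is accepted and not duplicated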
async def test_form_create_entry_without_auth(hass):
"""Test that the user step without auth works."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == SOURCE_USER
assert result["errors"] == {}
with patch(
"homeassistant.components.nam.NettigoAirMonitor.async_check_credentials",
return_value=DEVICE_CONFIG,
), patch(
"homeassistant.components.nam.NettigoAirMonitor.async_get_mac_address",
return_value="aa:bb:cc:dd:ee:ff",
), patch(
"homeassistant.components.nam.async_setup_entry", return_value=True
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
VALID_CONFIG,
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "10.10.2.3"
assert result["data"]["host"] == "10.10.2.3"
assert len(mock_setup_entry.mock_calls) == 1
| 5,354,068 |
def _parse_docstring(doc):
"""Extract documentation from a function's docstring."""
if doc is None:
return _Doc('', '', {}, [])
# Convert Google- or Numpy-style docstrings to RST.
# (Should do nothing if not in either style.)
# use_ivar avoids generating an unhandled .. attribute:: directive for
# Attribute blocks, preferring a benign :ivar: field.
cfg = Config(napoleon_use_ivar=True)
doc = str(GoogleDocstring(doc, cfg))
doc = str(NumpyDocstring(doc, cfg))
with _sphinx_common_roles():
tree = docutils.core.publish_doctree(
# Disable syntax highlighting, as 1) pygments is not a dependency
# 2) we don't render with colors and 3) SH breaks the assumption
# that literal blocks contain a single text element.
doc, settings_overrides={'syntax_highlight': 'none'})
class Visitor(NodeVisitor):
optional = [
'document', 'docinfo',
'field_list', 'field_body',
'literal', 'problematic',
# Introduced by our custom passthrough handlers, but the Visitor
# will recurse into the inner text node by itself.
'TextElement',
]
def __init__(self, document):
super().__init__(document)
self.paragraphs = []
self.start_lines = []
self.params = defaultdict(dict)
self.raises = []
self._current_paragraph = None
self._indent_iterator_stack = []
self._indent_stack = []
def _do_nothing(self, node):
pass
def visit_paragraph(self, node):
self.start_lines.append(node.line)
self._current_paragraph = []
def depart_paragraph(self, node):
text = ''.join(self._current_paragraph)
text = ''.join(self._indent_stack) + text
self._indent_stack = [
' ' * len(item) for item in self._indent_stack]
text = text.replace('\n', '\n' + ''.join(self._indent_stack))
self.paragraphs.append(text)
self._current_paragraph = None
visit_block_quote = visit_doctest_block = visit_paragraph
depart_block_quote = depart_doctest_block = depart_paragraph
def visit_Text(self, node):
self._current_paragraph.append(node)
depart_Text = _do_nothing
def visit_emphasis(self, node):
self._current_paragraph.append('\033[3m') # *foo*: italic
def visit_strong(self, node):
self._current_paragraph.append('\033[1m') # **foo**: bold
def visit_title_reference(self, node):
self._current_paragraph.append('\033[4m') # `foo`: underlined
def _depart_markup(self, node):
self._current_paragraph.append('\033[0m')
depart_emphasis = depart_strong = depart_title_reference = \
_depart_markup
def visit_rubric(self, node):
self.visit_paragraph(node)
def depart_rubric(self, node):
# Style consistent with "usage:", "positional arguments:", etc.
self._current_paragraph[:] = [
(t.lower() if t == t.title() else t) + ':'
for t in self._current_paragraph]
self.depart_paragraph(node)
def visit_literal_block(self, node):
text, = node
self.start_lines.append(node.line)
self.paragraphs.append(
re.sub('^|\n', r'\g<0> ', text)) # indent
raise SkipNode
def visit_bullet_list(self, node):
self._indent_iterator_stack.append(
(node['bullet'] + ' ' for _ in range(len(node))))
def depart_bullet_list(self, node):
self._indent_iterator_stack.pop()
def visit_enumerated_list(self, node):
enumtype = node['enumtype']
fmt = {('(', ')'): 'parens',
('', ')'): 'rparen',
('', '.'): 'period'}[node['prefix'], node['suffix']]
start = node.get('start', 1)
enumerators = [Body(None).make_enumerator(i, enumtype, fmt)[0]
for i in range(start, start + len(node))]
width = max(map(len, enumerators))
enumerators = [enum.ljust(width) for enum in enumerators]
self._indent_iterator_stack.append(iter(enumerators))
def depart_enumerated_list(self, node):
self._indent_iterator_stack.pop()
def visit_list_item(self, node):
self._indent_stack.append(next(self._indent_iterator_stack[-1]))
def depart_list_item(self, node):
self._indent_stack.pop()
def visit_field(self, node):
field_name_node, field_body_node = node
field_name, = field_name_node
parts = field_name.split()
if len(parts) == 2:
doctype, name = parts
# docutils>=0.16 represents \* as \0* in the doctree.
name = name.lstrip('*\0')
elif len(parts) == 3:
doctype, type_, name = parts
name = name.lstrip('*\0')
if doctype not in _PARAM_TYPES:
raise SkipNode
if 'type' in self.params[name]:
raise ValueError('type defined twice for {}'.format(name))
self.params[name]['type'] = type_
else:
raise SkipNode
if doctype in _PARAM_TYPES:
doctype = 'param'
if doctype in _TYPE_NAMES:
doctype = 'type'
if doctype in ['param', 'type'] and doctype in self.params[name]:
raise ValueError(
'{} defined twice for {}'.format(doctype, name))
visitor = Visitor(self.document)
field_body_node.walkabout(visitor)
if doctype in ['param', 'type']:
self.params[name][doctype] = ''.join(visitor.paragraphs)
elif doctype in ['raises']:
self.raises.append(name)
raise SkipNode
def visit_comment(self, node):
self.paragraphs.append(comment_token)
# Comments report their line as the *end* line of the comment.
self.start_lines.append(
node.line - node.children[0].count('\n') - 1)
raise SkipNode
def visit_system_message(self, node):
raise SkipNode
comment_token = object()
visitor = Visitor(tree)
tree.walkabout(visitor)
tuples = {name: _Param(values.get('param'), values.get('type'))
for name, values in visitor.params.items()}
if visitor.paragraphs:
text = []
for start, paragraph, next_start in zip(
visitor.start_lines,
visitor.paragraphs,
visitor.start_lines[1:] + [0]):
if paragraph is comment_token:
continue
text.append(paragraph)
# Insert two newlines to separate paragraphs by a blank line.
# Actually, paragraphs may or may not already have a trailing
# newline (e.g. text paragraphs do but literal blocks don't) but
# argparse will strip extra newlines anyways. This means that
# extra blank lines in the original docstring will be stripped, but
# this is less ugly than having a large number of extra blank lines
# arising e.g. from skipped info fields (which are not rendered).
# This means that list items are always separated by blank lines,
# which is an acceptable tradeoff for now.
text.append('\n\n')
parsed = _Doc(text[0], ''.join(text), tuples, visitor.raises)
else:
parsed = _Doc('', '', tuples, visitor.raises)
return parsed
| 5,354,069 |
def extract_remove_outward_edges_filter(exceptions_from_removal):
"""
    This creates a closure that goes through the list of tuples to explicitly state which edges are allowed to leave the first element of each tuple.
    Each tuple that is passed in has two members. The first member is a string representing a single node whose outgoing edges will be stated explicitly. The second member is the list of nodes that are in its child set.
    If that child set is empty, every outgoing edge of the node is removed.
    This covers both barren_nodes and explicit_parent_offspring.
"""
def remove_outward_edges_filter(G):
graph = G.copy()
list_of_parents = [x[0] for x in exceptions_from_removal if len(x[1]) > 0]
list_of_barrens = [x[0] for x in exceptions_from_removal if len(x[1]) == 0]
for barren in list_of_barrens:
graph.remove_edges_from([edge for edge in graph.edges() if edge[0] == barren])
for parent in list_of_parents:
current_edges = graph.out_edges(parent)
valid_edges = [(x[0],y) for x in exceptions_from_removal if x[0] == parent for y in x[1]]
graph.remove_edges_from([edge for edge in current_edges if edge not in valid_edges])
return graph
return remove_outward_edges_filter
| 5,354,070 |
def test_query_request_validator_username_present_but_empty():
"""Tests the QueryRequestValidator class with empty username.
Expects validation failure and correct error message."""
request = create_mock_request(master_password='abcd',
master_key='1234',
query_type=QUERY_GET_SECRETS_FOR_DOMAIN_AND_USERNAME_TYPE,
domain='some_domain',
username='')
dbclient = create_mock_dbclient_with_master_collection(master_password='abcd',
master_key='1234')
valid, message = QueryRequestValidator(request, dbclient,
acceptable_query_type=[
QUERY_GET_SECRETS_FOR_DOMAIN_AND_USERNAME_TYPE,
QUERY_SEARCH_BY_DOMAIN_AND_USERNAME_TYPE
]).isValid()
assert valid == False
assert message == ERROR_USERNAME_SPECIFIED_BUT_BLANK
| 5,354,071 |
def ln_addTH(x : torch.Tensor, beta : torch.Tensor) -> torch.Tensor:
"""
out = x + beta[None, :, None]
"""
return x + beta[None, :, None]
| 5,354,072 |
def drude2(tags, e, p):
"""dielectric function according to Drude theory for fitting"""
return drude(e, p[0], p[1], p[2], p[3])
| 5,354,073 |
def ParseFile(path):
"""Parse function names and comments from a .h path.
Returns mapping from function name to comment.
"""
result = {}
with open(path, 'r') as fp:
lines = fp.readlines()
i = 0
n = len(lines)
while i < n:
line = lines[i]
m = MCRE.match(line)
if m and not m.group('rest') and not m.group('params'):
# Looks like a function definition. Consume all adjacent following
# comment lines.
name = m.group('name')
tmpl = m.group('tmpl')
params = m.group('params')
if tmpl is not None:
name += '<%s>' % tmpl
if params is not None:
name += '(%s)' % params
# print '%3d: %s' % (i+1, m.groupdict())
comments = []
i += 1
while i < n:
m = CRE.match(lines[i])
if not m: break
comments.append(m.group('line'))
i += 1
result[name] = comments
else:
i += 1
return result
| 5,354,074 |
def get_records(data: Dict[_Expr, Dict], column_order):
"""Output data as a list of records"""
def cell_callback(expr, i, val, spreadsheet_data):
spreadsheet_data[-1].append(val)
return spreadsheet_data
def row_callback(spreadsheet_data):
spreadsheet_data[-1] = tuple(spreadsheet_data[-1])
spreadsheet_data.append([])
return spreadsheet_data
out = [[]]
out = print_analyses_v2(data, column_order, cell_callback, row_callback, out)
return out[:-1]
| 5,354,075 |
def support_mask_to_label(support_masks, n_way, k_shot, num_points):
"""
Args:
support_masks: binary (foreground/background) masks with shape (n_way, k_shot, num_points)
"""
support_masks = support_masks.view(n_way, k_shot*num_points)
support_labels = []
for n in range(support_masks.shape[0]):
support_mask = support_masks[n, :] #(k_shot*num_points)
support_label = torch.zeros_like(support_mask)
mask_index = torch.nonzero(support_mask).squeeze(1)
support_label= support_label.scatter_(0, mask_index, n+1)
support_labels.append(support_label)
support_labels = torch.stack(support_labels, dim=0)
support_labels = support_labels.view(n_way, k_shot, num_points)
return support_labels.long()
| 5,354,076 |
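A shape and label check for the conversion above (assumes PyTorch and the function in scope): foreground points of way n are labelled n+1, background stays 0.
import torch

n_way, k_shot, num_points = 2, 1, 4
masks = torch.tensor([[[0, 1, 1, 0]],    # way 0 -> label 1 where mask == 1
                      [[1, 0, 0, 1]]])   # way 1 -> label 2 where mask == 1
labels = support_mask_to_label(masks, n_way, k_shot, num_points)
print(labels.shape)   # torch.Size([2, 1, 4])
print(labels[0])      # tensor([[0, 1, 1, 0]])
print(labels[1])      # tensor([[2, 0, 0, 2]])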
def cal_covered_users(positions, heat_map, radius):
"""
:param positions: $k$ positions array of !!!(y, x)!!!
:param heat_map: grid data with count
:param radius: 0(1 grid), 1(8 grids), 2(25 grids)
:return: coverage score
"""
row_num, col_num = heat_map.shape
mask = np.zeros(heat_map.shape, dtype=int)
for position in positions:
center_x = position[1]
center_y = position[0]
max_x = center_x + radius if center_x + radius < col_num else col_num - 1
min_x = center_x - radius if center_x - radius >= 0 else 0
max_y = center_y + radius if center_y + radius < row_num else row_num - 1
min_y = center_y - radius if center_y - radius >= 0 else 0
for x in range(min_x, max_x + 1):
for y in range(min_y, max_y + 1):
mask[y, x] = 1
return np.sum(np.multiply(mask, heat_map))
| 5,354,077 |
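A tiny worked example of the coverage score above (numpy assumed, function in scope): radius 1 around the centre of a 3x3 grid covers every cell, radius 0 only the centre cell.
import numpy as np

heat_map = np.array([[1, 2, 0],
                     [0, 3, 1],
                     [4, 0, 2]])
print(cal_covered_users([(1, 1)], heat_map, radius=1))   # 13 (whole grid)
print(cal_covered_users([(1, 1)], heat_map, radius=0))   # 3  (centre cell only)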
def certificate_from_file(
filename: Union[str, pathlib.Path],
format=OpenSSL.crypto.FILETYPE_PEM,
) -> TS.X509:
"""Load an X509 certificate from ``filename``.
:param filename: The path to the certificate on disk.
:param format: The format of the certificate, from :doc:`OpenSSL:api/crypto`.
"""
with open(filename, 'r') as handle:
return certificate_from_string(handle.read(), format)
| 5,354,078 |
def import_syllable_csv(corpus_context, call_back=None, stop_check=None):
"""
Import a syllable from csv file
Parameters
----------
corpus_context: :class:`~polyglotdb.corpus.syllabic.SyllabicContext`
the corpus to load into
"""
import time
speakers = corpus_context.speakers
if call_back is not None:
call_back('Importing syllables...')
call_back(0, len(speakers))
try:
corpus_context.execute_cypher('CREATE CONSTRAINT ON (node:syllable) ASSERT node.id IS UNIQUE')
except neo4j.exceptions.ClientError as e:
if e.code != 'Neo.ClientError.Schema.EquivalentSchemaRuleAlreadyExists':
raise
try:
corpus_context.execute_cypher('CREATE CONSTRAINT ON (node:syllable_type) ASSERT node.id IS UNIQUE')
except neo4j.exceptions.ClientError as e:
if e.code != 'Neo.ClientError.Schema.EquivalentSchemaRuleAlreadyExists':
raise
try:
corpus_context.execute_cypher('CREATE INDEX ON :syllable(begin)')
except neo4j.exceptions.ClientError as e:
if e.code != 'Neo.ClientError.Schema.EquivalentSchemaRuleAlreadyExists':
raise
try:
corpus_context.execute_cypher('CREATE INDEX ON :syllable(prev_id)')
except neo4j.exceptions.ClientError as e:
if e.code != 'Neo.ClientError.Schema.EquivalentSchemaRuleAlreadyExists':
raise
try:
corpus_context.execute_cypher('CREATE INDEX ON :syllable(end)')
except neo4j.exceptions.ClientError as e:
if e.code != 'Neo.ClientError.Schema.EquivalentSchemaRuleAlreadyExists':
raise
try:
corpus_context.execute_cypher('CREATE INDEX ON :syllable(label)')
except neo4j.exceptions.ClientError as e:
if e.code != 'Neo.ClientError.Schema.EquivalentSchemaRuleAlreadyExists':
raise
try:
corpus_context.execute_cypher('CREATE INDEX ON :syllable_type(label)')
except neo4j.exceptions.ClientError as e:
if e.code != 'Neo.ClientError.Schema.EquivalentSchemaRuleAlreadyExists':
raise
for i, s in enumerate(speakers):
if stop_check is not None and stop_check():
return
if call_back is not None:
call_back('Importing syllables for speaker {} of {} ({})...'.format(i, len(speakers), s))
call_back(i)
discourses = corpus_context.get_discourses_of_speaker(s)
for d in discourses:
path = os.path.join(corpus_context.config.temporary_directory('csv'),
'{}_{}_syllable.csv'.format(re.sub(r'\W', '_', s), d))
if corpus_context.config.debug:
print('Importing syllables for speaker {} in discourse {}, using import file {}'.format(s, d, path))
# If on the Docker version, the files live in /site/proj
if os.path.exists('/site/proj') and not path.startswith('/site/proj'):
csv_path = 'file:///site/proj/{}'.format(make_path_safe(path))
else:
csv_path = 'file:///{}'.format(make_path_safe(path))
begin = time.time()
nucleus_statement = '''
USING PERIODIC COMMIT 2000
LOAD CSV WITH HEADERS FROM "{path}" as csvLine
MATCH (n:{phone_name}:{corpus}:speech {{id: csvLine.vowel_id}})-[r:contained_by]->(w:{word_name}:{corpus}:speech)
SET n :nucleus, n.syllable_position = 'nucleus'
'''
statement = nucleus_statement.format(path=csv_path,
corpus=corpus_context.cypher_safe_name,
word_name=corpus_context.word_name,
phone_name=corpus_context.phone_name)
corpus_context.execute_cypher(statement)
if corpus_context.config.debug:
print('Nucleus definition took {} seconds.'.format(time.time() - begin))
begin = time.time()
node_statement = '''
USING PERIODIC COMMIT 2000
LOAD CSV WITH HEADERS FROM "{path}" as csvLine
MERGE (s_type:syllable_type:{corpus} {{id: csvLine.type_id}})
ON CREATE SET s_type.label = csvLine.label
WITH s_type, csvLine
CREATE (s:syllable:{corpus}:speech {{id: csvLine.id, prev_id: csvLine.prev_id,
label: csvLine.label,
begin: toFloat(csvLine.begin), end: toFloat(csvLine.end)}}),
(s)-[:is_a]->(s_type)
'''
statement = node_statement.format(path=csv_path,
corpus=corpus_context.cypher_safe_name)
corpus_context.execute_cypher(statement)
if corpus_context.config.debug:
print('Syllable node creation took {} seconds.'.format(time.time() - begin))
begin = time.time()
rel_statement = '''
USING PERIODIC COMMIT 2000
LOAD CSV WITH HEADERS FROM "{path}" as csvLine
MATCH (n:{phone_name}:{corpus}:speech:nucleus {{id: csvLine.vowel_id}})-[:contained_by]->(w:{word_name}:{corpus}:speech),
(s:syllable:{corpus}:speech {{id: csvLine.id}})
WITH n, w, s
CREATE (s)-[:contained_by]->(w),
(n)-[:contained_by]->(s)
'''
statement = rel_statement.format(path=csv_path,
corpus=corpus_context.cypher_safe_name,
word_name=corpus_context.word_name,
phone_name=corpus_context.phone_name)
corpus_context.execute_cypher(statement)
if corpus_context.config.debug:
print('Hierarchical relationship creation took {} seconds.'.format(time.time() - begin))
begin = time.time()
rel_statement = '''
USING PERIODIC COMMIT 2000
LOAD CSV WITH HEADERS FROM "{path}" as csvLine
MATCH (n:{phone_name}:{corpus}:speech:nucleus {{id: csvLine.vowel_id}}),
(s:syllable:{corpus}:speech {{id: csvLine.id}}),
(n)-[:spoken_by]->(sp:Speaker),
(n)-[:spoken_in]->(d:Discourse)
WITH sp, d, s
CREATE (s)-[:spoken_by]->(sp),
(s)-[:spoken_in]->(d)
'''
statement = rel_statement.format(path=csv_path,
corpus=corpus_context.cypher_safe_name,
word_name=corpus_context.word_name,
phone_name=corpus_context.phone_name)
corpus_context.execute_cypher(statement)
if corpus_context.config.debug:
print('Spoken relationship creation took {} seconds.'.format(time.time() - begin))
begin = time.time()
prev_rel_statement = '''
USING PERIODIC COMMIT 2000
LOAD CSV WITH HEADERS FROM "{path}" as csvLine
MATCH (s:syllable:{corpus}:speech {{id: csvLine.id}})
with csvLine, s
MATCH (prev:syllable {{id:csvLine.prev_id}})
CREATE (prev)-[:precedes]->(s)
'''
statement = prev_rel_statement.format(path=csv_path,
corpus=corpus_context.cypher_safe_name,
word_name=corpus_context.word_name,
phone_name=corpus_context.phone_name)
corpus_context.execute_cypher(statement)
if corpus_context.config.debug:
print('Precedence relationship creation took {} seconds.'.format(time.time() - begin))
begin = time.time()
del_rel_statement = '''
USING PERIODIC COMMIT 2000
LOAD CSV WITH HEADERS FROM "{path}" as csvLine
MATCH (n:{phone_name}:{corpus}:speech:nucleus {{id: csvLine.vowel_id}})-[r:contained_by]->(w:{word_name}:{corpus}:speech)
DELETE r
'''
statement = del_rel_statement.format(path=csv_path,
corpus=corpus_context.cypher_safe_name,
word_name=corpus_context.word_name,
phone_name=corpus_context.phone_name)
corpus_context.execute_cypher(statement)
if corpus_context.config.debug:
print('Phone-word relationship deletion took {} seconds.'.format(time.time() - begin))
begin = time.time()
onset_statement = '''
USING PERIODIC COMMIT 2000
LOAD CSV WITH HEADERS FROM "{path}" as csvLine
MATCH (n:{phone_name}:nucleus:{corpus}:speech)-[:contained_by]->(s:syllable:{corpus}:speech {{id: csvLine.id}})-[:contained_by]->(w:{word_name}:{corpus}:speech)
WITH csvLine, s, w, n
OPTIONAL MATCH
(onset:{phone_name}:{corpus} {{id: csvLine.onset_id}}),
onspath = (onset)-[:precedes*1..10]->(n)
with n, w,s, csvLine, onspath
UNWIND (case when onspath is not null then nodes(onspath)[0..-1] else [null] end) as o
OPTIONAL MATCH (o)-[r:contained_by]->(w)
with n, w,s, csvLine, [x in collect(o) WHERE x is not NULL| x] as ons,
[x in collect(r) WHERE x is not NULL | x] as rels
FOREACH (o in ons | SET o :onset, o.syllable_position = 'onset')
FOREACH (o in ons | CREATE (o)-[:contained_by]->(s))
FOREACH (r in rels | DELETE r)
'''
statement = onset_statement.format(path=csv_path,
corpus=corpus_context.cypher_safe_name,
word_name=corpus_context.word_name,
phone_name=corpus_context.phone_name)
corpus_context.execute_cypher(statement)
if corpus_context.config.debug:
print('Onset hierarchical relationship creation took {} seconds.'.format(time.time() - begin))
begin = time.time()
coda_statment = '''
USING PERIODIC COMMIT 2000
LOAD CSV WITH HEADERS FROM "{path}" as csvLine
MATCH (n:nucleus:{corpus}:speech)-[:contained_by]->(s:syllable:{corpus}:speech {{id: csvLine.id}})-[:contained_by]->(w:{word_name}:{corpus}:speech)
WITH csvLine, s, w, n
OPTIONAL MATCH
(coda:{phone_name}:{corpus} {{id: csvLine.coda_id}}),
codapath = (n)-[:precedes*1..10]->(coda)
WITH n, w, s, codapath
UNWIND (case when codapath is not null then nodes(codapath)[1..] else [null] end) as c
OPTIONAL MATCH (c)-[r:contained_by]->(w)
WITH n, w,s, [x in collect(c) WHERE x is not NULL | x] as cod,
[x in collect(r) WHERE x is not NULL | x] as rels
FOREACH (c in cod | SET c :coda, c.syllable_position = 'coda')
FOREACH (c in cod | CREATE (c)-[:contained_by]->(s))
FOREACH (r in rels | DELETE r)
'''
statement = coda_statment.format(path=csv_path,
corpus=corpus_context.cypher_safe_name,
word_name=corpus_context.word_name,
phone_name=corpus_context.phone_name)
corpus_context.execute_cypher(statement)
if corpus_context.config.debug:
print('Coda hierarchical relationship creation took {} seconds.'.format(time.time() - begin))
os.remove(path)
| 5,354,079 |
def redistributeHitsForNode(node, hits, rank):
"""
recursive call used by redistributeHits
    if the node's rank is not the requested rank and any children have hits,
    redistribute the node's hits to those children
"""
if rank is not None and rank == node.rank:
logger.debug("Node %s has rank %s, skipping" % (node.name, rank))
return
nodeHits = hits.get(node, [])
if not isinstance(nodeHits, list):
nodeHits = [nodeHits, ]
hits[node] = nodeHits
logger.warn("Hit for %s was a list: %s" % (node.name, nodeHits[0]))
nodeHitCount = len(nodeHits)
# check children for hits
childCounts = {}
total = 0
for child in node.children:
if child is node:
# root is sometimes a child of itself!
continue
# add up all hits to child and its children and on up the tree
kidHits = getTotalHits(child, hits)
if kidHits > 0:
total += kidHits
childCounts[child] = kidHits
logger.debug("Child hits: %s" % (childCounts))
if nodeHitCount != 0:
if total > 0:
# redistribute
logger.debug(
"Redistributing %d hits from %s to %s" %
(nodeHitCount, node.name, [
n.name for n in childCounts.keys()]))
logger.debug(str(childCounts))
remainders = {}
for child in sorted(
childCounts.keys(),
key=lambda kid: childCounts[kid],
reverse=True):
logger.debug("---\n%s\n----" % (nodeHits))
# calculate number of hits for this child (as a float)
newKidHits = nodeHitCount * childCounts[child] / float(total)
logger.debug(
"Add %f = %d * %d / %d new hits to %s" %
(newKidHits, nodeHitCount, childCounts[child],
total, child.name))
if child not in hits:
# special case where child has children with hits, but no
# hits itself
hits[child] = []
# move hits one at a time to child until remainder <1
while newKidHits >= 1 and len(nodeHits) > 0:
nextHit = nodeHits.pop(0)
logger.debug(
"nkh: %f child: %s hit: %s" %
(newKidHits, child.name, nextHit))
hits[child].append(nextHit)
newKidHits -= 1
remainders[child] = newKidHits
# sort children by remainder size and assign remaining hits in that
# order
logger.debug(
"%d hits left. Remainders: %s" %
(len(nodeHits), remainders))
mostDeserving = sorted(
remainders.keys(),
key=lambda kid: remainders[kid],
reverse=True)
while len(nodeHits) > 0:
hits.get(mostDeserving.pop(0), []).append(nodeHits.pop(0))
    # Now call this method on all the children recursively
for child in childCounts.keys():
redistributeHitsForNode(child, hits, rank)
| 5,354,080 |
def get_welcome_response(session):
"""
Welcome the user to my python skill
"""
card_title = "Welcome"
speech_output = "Welcome to my python skill. You can search for GitHub repositories. "
# If the user either does not reply to the welcome message or says something
# that is not understood, they will be prompted again with this text.
reprompt_text = "Ask me to search GitHub for a repository. "
session_attributes = session.get('attributes', {})
speechlet_response = build_speechlet_response(
card_title,
speech_output,
reprompt_text
)
return build_response(session_attributes, speechlet_response)
| 5,354,081 |
def password_dialog(title, message):
"""
Show a Gtk password dialog.
:param str title:
:param str message:
:returns: the password or ``None`` if the user aborted the operation
:rtype: str
:raises RuntimeError: if Gtk can not be properly initialized
"""
Gtk = require_Gtk()
builder = Gtk.Builder.new()
builder.add_from_string(dialog_definition)
dialog = builder.get_object('entry_dialog')
label = builder.get_object('message')
entry = builder.get_object('entry')
dialog.set_title(title)
label.set_label(message)
dialog.show_all()
response = yield Dialog(dialog)
dialog.hide()
if response == Gtk.ResponseType.OK:
yield Return(entry.get_text())
else:
yield Return(None)
| 5,354,082 |
def addFiles(path, tar, exclude_func):
""" Add files in path to tar """
for root, dirs, files in os.walk(path):
files.sort() # sorted page revs may compress better
for name in files:
path = os.path.join(root, name)
if exclude_func(path):
continue
tar.add(path)
| 5,354,083 |
def rotate_quaternion ( angle, axis, old ):
"""Returns a quaternion rotated by angle about axis relative to old quaternion."""
import numpy as np
# Note that the axis vector should be normalized and we test for this
# In general, the old quaternion need not be normalized, and the same goes for the result
# although in our applications we only ever use unit quaternions (to represent orientations)
assert old.size==4, 'Error in old quaternion dimension'
assert axis.size==3, 'Error in axis dimension'
assert np.isclose (np.sum(axis**2),1.0), 'axis normalization error {} {} {}'.format(*axis)
# Standard formula for rotation quaternion, using half angles
rot = np.sin(0.5*angle) * axis
rot = np.array([np.cos(0.5*angle),rot[0],rot[1],rot[2]],dtype=np.float_)
e = quatmul ( rot, old ) # Apply rotation to old quaternion
return e
| 5,354,084 |
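In equation form, the half-angle construction used above is (a transcription of the code, with \otimes standing for the quaternion product implemented by quatmul):
r = \left(\cos\tfrac{\theta}{2},\; \sin\tfrac{\theta}{2}\,\hat{n}\right), \qquad \lVert\hat{n}\rVert = 1
e = r \otimes e_{\mathrm{old}}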
def apogeeSpectroReduxDirPath(dr=None):
"""
NAME:
apogeeSpectroReduxDirPath
PURPOSE:
returns the path of the spectro dir
INPUT:
dr= return the path corresponding to this data release
OUTPUT:
path string
HISTORY:
2014-11-25 - Written - Bovy (IAS)
"""
if dr is None: dr= _default_dr()
if dr.lower() == 'current':
return os.path.join(_APOGEE_DATA,'apogeework',
'apogee','spectro','redux')
else:
return os.path.join(_APOGEE_DATA,'dr%s' % dr,
'apogee','spectro','redux')
| 5,354,085 |
def rochepot_dl(x, y, z, q):
"""
Dimensionless Roche potential (:math:`\\Phi_n`, synchronous rotation)
More massive component (:math:`m_1`) is centered at (x,y,z) = (0,0,0). Less massive
component (:math:`m_2`) is at (1,0,0). The unit of length is the distance between
the objects. Both objects are in the x,y plane (x-axis along the connecting line and
z perpendicular to the orbital plane).
Parameters
----------
x, y, z : float or array
Location(s) at which to calculate the potential. Unit
of length is the distance between the masses m1 and m2.
q : float
Mass ratio (0 <= m2/m1 <= 1)
Returns
-------
Potential : float or array
The potential at the specified location(s)
"""
_checkq(q)
r1, r2 = _r1r2_dl(x, y, z)
p = 2/((1+q)*r1) + 2*q/((1+q)*r2) + (x - q/(1+q))**2 + y**2
return p
| 5,354,086 |
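Written out, the dimensionless potential returned above is (with m1 at the origin, m2 at (1, 0, 0), and r1, r2 the distances to them, as in the code):
\Phi_n(x, y, z) = \frac{2}{(1+q)\,r_1} + \frac{2q}{(1+q)\,r_2} + \left(x - \frac{q}{1+q}\right)^{2} + y^{2}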
def health_check(config):
"""
Tests the API to ensure it is working.
"""
itglue = ITGlue(config['api_key'], config['itglue_host'])
try:
itglue._make_request('organizations', {})
return True
    except Exception:
        return False
| 5,354,087 |
def capture_image(resolution=(1024, 768), size=(320, 240), sleep=2):
"""
Captures image from raspberry pi camera
resolution -- resolution of capture
size -- size of output
sleep -- sleep time in seconds
"""
stream = io.BytesIO()
with picamera.PiCamera() as camera:
#camera.led = False
camera.resolution = resolution
camera.start_preview()
time.sleep(sleep)
camera.capture(stream, format='jpeg', resize=size)
# "Rewind" the stream to the beginning so we can read its content
stream.seek(0)
image = Image.open(stream)
return image
| 5,354,088 |
def MLVR(XDATA,YDATA,xreference=0,residual=1,xlabel='',ylabel='',title='',alpha = 0.01,iters = 1000,plot=1):
"""Does Multivariant Linear Regression
properties:
XDATA = The Feature Dataframe
YDATA = The Target Dataframe
xreference = 1/0 -> The column index in XDATA for ploting graph
xlabel = Label for X in Graph
ylabel = Label for Y in Graph
title = title for graph]
alpha = Learning rate for model
iters = the number of iteration to train the model
"""
XDATA.conv_type('float',change_self=True)
xpure = XDATA[xreference]
XDATA.normalize(change_self=True)
YDATA.conv_type('float',change_self=True)
ypure = YDATA.tolist[0]
YDATA.normalize(change_self=True)
X=XDATA
y=YDATA
df =DataFrame()
ones = df.new(X.shape[0],1,elm=1.)
X = df.concat(ones,X,axis=1)
theta = DataFrame().new(1,length(X.columns),elm=0.)
def computeCost(X,y,theta):
dot_product = DataFrame().dot(X,theta.T)
return float( ( (dot_product - y)**2 ).sum(axis=0) )/(2 * X.shape[0])
def gradientDescent(X,y,theta,iters,alpha):
#cost = np.zeros(iters)
cost = []
for i in range(iters):
dot_product = DataFrame().dot(X,theta.T)
derivative = DataFrame(dataframe = [[(alpha/X.shape[0])]]) * ( X*(dot_product - y) ).sum(axis = 0 )
theta = theta - derivative
cost.append( computeCost(X, y, theta) ) #cost[i] = computeCost(X, y, theta)
return theta,cost
def print_equation(g):
stra = "Estimated equation, y = %s"%g[0]
g0 = g[0]
del g[0]
for c in range(length(g)):
stra += " + %s*x%s"%(g[c],c+1)
print(stra)
def predict_li(XDATA,g):
g0 = g[0]
del g[0]
y_pred = []
for row in range(XDATA.shape[0]):
suma = 0
suma += sum(list_multiplication( g , XDATA.row(row) ) )
yres = g0 + suma
y_pred.append(yres)
return y_pred
g,cost = gradientDescent(X,y,theta,iters,alpha)
finalCost = computeCost(X,y,g)
#g = g.T
g = g.two2oneD()
print("Thetas = %s"%g) #print("cost = ",cost)
print("finalCost = %s" % finalCost)
gN = g[:]
print_equation(gN)
gN = g[:]
y_pred = predict_li(XDATA,gN)
y_PRED = reference_reverse_normalize(ypure,y_pred)
emin,emean,emax = minResidual(ypure , y_PRED),meanResidual(ypure , y_PRED),maxResidual(ypure , y_PRED)
print("Min,Mean,Max residual = %s, %s, %s"%( emin,emean,emax ) )
print("Residual Min - Max Range = %s"%(emax-emin))
print("Residual range percentage = %s" %((emax-emin)/(max(ypure) - min(ypure))) )
print("Residual mean percentage = %s" %(emean/ArithmeticMean(ypure)) )
    #-- If finalCost is lowest, the mean residual (mean error distance) will also be lowest
#y_pred = [g[0] + g[1]*my_data[0][c] + g[2]*my_data[1][c] for c in range(my_data.shape[0])]
y_actual = YDATA.tolist[0]
x = XDATA[xreference]
if plot == 1:
fig, ax = plt.subplots()
ax.plot(numpy.arange(iters), cost, 'r')
ax.set_xlabel('Iterations')
ax.set_ylabel('Cost')
ax.set_title('Error vs. Training Epoch')
plt.show()
x_a, y_a = give_time_series(xpure,y_PRED)#give_time_series(x,y_pred)
plt.plot(x_a,y_a,color='r',marker='.',label='Prediction')
x_a, y_a = give_time_series(xpure,ypure)#give_time_series(x,y_actual)
plt.plot(x_a,y_a,color='g',marker='.',label='Real')
if residual == 1:
plot_error_distance(xpure,y_PRED,ypure)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.legend()
plt.show()
else:
print('plot off')
return finalCost
| 5,354,089 |
def test_modifier_with_nested_modifier():
"""Test instantiation of potential forms that has a modifier that has a nested modifier in its argument list"""
k = u"A"
v = u"zero >=1.0 sum(nested(constant 1.0 >2 zero), buck 10.0 0.1 32.0)"
parser = ConfigParser(io.StringIO())
potential_forms = [
PMT(u'nested', [PFI(u'constant', [1.0], MRD(u'>',0.0),
PFI(u'zero', [], MRD(u'>', 2), None))], MRD(u'>', 0.0), None),
PFI(u'buck', [10.0, 0.1, 32.0], MRD(u'>', 0.0), None)]
expect = PFI(u'zero', [], MRD(u'>', 0.0),
PMT(u'sum', potential_forms, MRD(u'>=', 1.0), None))
expect = PairPotentialTuple(species = k, potential_form_instance = expect)
actual = parser._parse_multi_range(k, v)
assert DeepDiff(expect, actual) == {}
| 5,354,090 |
def FilterAndTagWrapper(target, dontRemoveTag=False):
"""\
    Returns a component that wraps a target component, tagging all traffic
    going into its inbox, and filtering any traffic coming out of its outbox
    by that same unique id.
"""
if dontRemoveTag:
Filter = FilterButKeepTag
else:
Filter = FilterTag
return Graphline( TAGGER = UidTagger(),
FILTER = Filter(),
TARGET = target,
linkages = {
("TARGET", "outbox") : ("FILTER", "inbox"), # filter data coming from target
("FILTER", "outbox") : ("self", "outbox"),
("TAGGER", "uid") : ("FILTER", "uid"), # ensure filter uses right uid
("self", "inbox") : ("TAGGER", "inbox"), # tag data going to target
("TAGGER", "outbox") : ("TARGET", "inbox"),
("self", "control") : ("TARGET", "control"), # shutdown signalling path
("TARGET", "signal") : ("TAGGER", "control"),
("TAGGER", "signal") : ("FILTER", "control"),
("FILTER", "signal") : ("self", "signal"),
},
)
| 5,354,091 |
def print_progress_bar(count: int) -> None:
"""Print progress bar in 20% increments.
Args:
count (int): Number of 1000 query chunks.
"""
# left side of progress bar
progress_bar = '['
# Fill in progress bar
for i in range(0, count):
progress_bar += '#'
# Fill in remaining slots with an underscore
while len(progress_bar) <= PROGRESS_BAR_LENGTH:
progress_bar += '_'
# Right side of progress bar
progress_bar += ']'
print(f'PROGRESS: {progress_bar}')
| 5,354,092 |
def format_time(seconds: Union[int, float]) -> str:
"""Convert the seconds to human readable string with days, hours, minutes and seconds."""
s = int(np.rint(seconds))
if s < 60:
return "{0}s".format(s)
elif s < 60 * 60:
return "{0}m {1:02}s".format(s // 60, s % 60)
elif s < 24 * 60 * 60:
return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60)
else:
return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60)
| 5,354,093 |
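A quick check of the formatting thresholds above (numpy assumed, as in the function itself):
import numpy as np

print(format_time(45))       # 45s
print(format_time(125))      # 2m 05s
print(format_time(3725))     # 1h 02m 05s
print(format_time(90000))    # 1d 01h 00m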
def run_model(df, i, name, gscv, calibrate=True):
"""Given customercode values in dict_folds,
1. create balanced dataset
2. split into train, test sets
3. run grid search
4. get probability scores
5. calibrate as directed
6. find optimal cutoff from precision-recall
7. return predictions, data, scores
"""
df_undersampled = pd.concat([
        df.query("target==0").sample(frac=0.3, random_state=0),
df.query("target==1")
])
X = df_undersampled.drop("target", axis=1).copy()
y = df_undersampled.loc[:, "target"].copy()
X_tr, X_te, y_tr, y_te = train_test_split(X, y, train_size=0.7, stratify=y)
model = gscv.fit(X_tr, y_tr)
# Probabilities
y_scores = model.predict_proba(X_te)[:, 1]
if calibrate:
sigmoid = CalibratedClassifierCV(model, cv=2, method="sigmoid")
sigmoid.fit(X_tr, y_tr)
y_probs = sigmoid.predict_proba(X_te)[:, 1]
else:
y_probs = np.array(y_scores)
# Cutoff
p, r, t = precision_recall_curve(y_te, y_probs, pos_label=1)
df_pr = (pd.DataFrame(data=zip(p, r, t),
columns=["precision", "recall", "threshold"])
.set_index("threshold"))
cutoff = (pd.Series(data=np.abs(df_pr["precision"] - df_pr["recall"]),
index=df_pr.index)
.idxmin()
.round(2))
# Predictions
y_pred = (y_probs >= cutoff).astype(int)
dict_data = {
"X_tr": X_tr,
"X_te": X_te,
"y_tr": y_tr,
"y_te": y_te,
"y_scores": y_scores,
"y_probs": y_probs,
"y_pred": y_pred,
}
dict_scores = {
"precision": precision_score(y_te, y_pred),
"recall": recall_score(y_te, y_pred),
}
payload = {
"name": name,
"model": model,
"data": dict_data,
"scores": dict_scores
}
return payload
| 5,354,094 |
def launch_process(a_project, a_routine, a_work_text, a_done_text,
a_focus_on_error=False, a_always_focus=False,
post_routine=None):
"""
Standard launch for eiffel tools buffer management. It create/show
the eiffel tools buffer in a window, launch `a_routine' with the
eiffel tools buffer window, and get the focus back to the working
window (if needed).
    `a_project' The currently opened Eiffel project, used to check the
        success of `a_routine'.
`a_routine' The routine to launch.
`a_work_text' Text to help the user to know what is happening
`a_done_text' Text to print in the status bar when finish
    `a_focus_on_error' Focus the Eiffel tools window if an error occurred.
`a_always_focus' Always focus the Eiffel tools window on exit.
"""
if a_project:
save_current_window_and_open_tools_window()
set_tools_window_text(a_work_text)
tools_buffer_number = get_tools_buffer_number()
tools_buffer = environment.window(tools_buffer_number, True)
environment.manual_fold()
a_routine(tools_buffer)
if post_routine:
post_routine()
set_tools_window_text(a_done_text)
if not(a_always_focus or (a_focus_on_error and a_project.has_error())):
select_saved_window()
else:
print("No Eiffel project opened")
| 5,354,095 |
def prepare_runkos(main_dir, discard_file=None):
"""Identify the positions with variations between 0.2 to 0.8 in the training population
and calculate the mean and std for the variation.
"""
    THRESHOLD_DROPNA = 32  # at least 32 columns must have a non-NaN value.
file_count = 0
list_of_dfs = []
list_dfs_var = []
file_csv_list = []
for file_csv in os.listdir(main_dir):
# Ignore the file that is given as the validation dataset
if discard_file and file_csv == discard_file:
continue
file_path = "%s/%s" % (main_dir, file_csv)
dataframe = []
dataframe = pd.read_csv(file_path)
# Ignore Insertions by getting only the first entity of the nucleotide at each row
dataframe['mut'] = dataframe.apply(lambda row: row['mut'][0], axis=1)
# Ignore insertion and deletions by merging the rows with equal pos and mut
dataframe = dataframe.groupby(['pos', 'mut']).sum().reset_index()
dataframe_pivot = []
dataframe_pivot = pd.pivot_table(dataframe, index='pos', columns='mut', values='freq', aggfunc='sum')
# Rename columns
dataframe_pivot.rename(lambda x: "%s_%s" % (x, file_count), axis='columns', inplace=True)
list_dfs_var.append(dataframe_pivot.copy())
file_count += 1
file_csv_list.append(file_csv)
df_merged = reduce(lambda x, y: pd.merge(x, y, how='outer', right_index=True, left_index=True),
list_dfs_var)
df_merged_filtered = df_merged.dropna(thresh=THRESHOLD_DROPNA)
df_mean_std = calc_mean_std_population(df_merged_filtered, discard_file)
return df_mean_std, list_of_dfs
| 5,354,096 |
def extract_date(db):
"""Extract Release Date from metadata and convert it into YYYY MM format"""
date_pattern = 'releaseDate\":(\d{9,10})'
def format_date(x):
"""Takes epoch time as argument and returns date in YYYY MM format"""
date = re.search(date_pattern, x)
if date:
val = pd.to_datetime(date.group(1), unit='s')
val = val.strftime('%Y %b')
return val
else:
return 'No Date'
db['date'] = db['meta'].apply(format_date)
db = db.drop('meta', axis=1)
return db
| 5,354,097 |
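The '%Y %b' formatting above yields strings like '2021 Jan' rather than a numeric month; a one-line illustration with pandas (the timestamp is just an example value):
import pandas as pd

ts = pd.to_datetime(1609459200, unit='s')   # 2021-01-01 00:00:00
print(ts.strftime('%Y %b'))                 # 2021 Jan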
def set_provisioner_data(response, metrics, pod_name, pod_details):
""" Update provisioner related metrics"""
# Data from provisioner is the source of truth
# Update only those metrics.storages data which is present in
# provisioner, rest will remain with default values.
storage_data_from_csi = response.json()["storages"]
for index, storage in enumerate(metrics.storages):
try:
if storage["name"] == storage_data_from_csi[index]["name"]:
storage.update(storage_data_from_csi[index])
except IndexError:
# skip comparing rest of storages in metrics[default],
# since storage_data_from_csi has reached its end,
# and it contains no more data from healthy storage-pools
logging.debug(logf(
"Reached end of list of storages from csi. Skip comparing the rest."
))
break
metrics.provisioner.update({"pod_name": pod_name})
metrics.provisioner.update(response.json()["pod"])
metrics.provisioner.update(pod_details)
| 5,354,098 |
def split_test_image(aa):
"""
Separate image created by mk_test_image into x,y components
"""
if aa.dtype.kind == 'f':
y = np.round((aa % 1)*1024)
x = np.floor(aa)
else:
nshift = (aa.dtype.itemsize*8)//2
mask = (1 << nshift) - 1
y = aa & mask
x = aa >> nshift
return x, y
| 5,354,099 |