content (string, lengths 22–815k) | id (int64, 0–4.91M)
---|---
def main():
"""
This file creates a csv file by going through each directory in java_files, placing a specific label for it (1 or 0)
and then writing it to a csv file
"""
current_dir = os.getcwd()
cat_directory = os.path.join(current_dir, 'java_files')
os.chdir(cat_directory)
values = []
for directory in os.listdir():
if directory == 'license':
label = 1
else:
label = 0
directory_path = os.path.join(cat_directory, directory)
os.chdir(directory_path)
for txt_file in os.listdir():
with open(directory_path + '/' + txt_file, 'r', encoding='utf-8') as file:
data = file.read()
values.append({'comment_block_text': data, 'label': label})
os.chdir('../../../../all_files_generated/csv_files')
current_dir = os.getcwd()
file_dir = os.path.join(current_dir, 'data_java.csv')
fields = ['comment_block_text', 'label']
# write to csv file
with open(file_dir, 'w', encoding='utf-8', newline='') as output_csv:
output_writer = csv.DictWriter(output_csv, fieldnames=fields)
output_writer.writeheader()
for item in values:
output_writer.writerow(item)
| 5,348,700 |
def main():
"""Tools to create, manage and convert M3U and XSPF playlists"""
| 5,348,701 |
def assert_almost_equal(
actual: Tuple[numpy.float64, numpy.float64, float, float],
desired: Tuple[numpy.float64, numpy.float64, float, float],
decimal: int,
):
"""
usage.statsmodels: 1
"""
...
| 5,348,702 |
def statistical_features(ds, exclude_col_names: list = [],
feature_names=['mean', 'median', 'stddev', 'variance', 'max', 'min', 'skew',
'kurt', 'sqr']):
"""
Compute statistical features.
Args:
ds (DataStream): Windowed/grouped DataStream object
exclude_col_names list(str): name of the columns on which features should not be computed
        feature_names list(str): names of the features. Supported features are ['mean', 'median', 'stddev', 'variance', 'max', 'min', 'skew',
                                 'kurt', 'sqr', 'zero_cross_rate']
    Returns:
        DataStream object with all the existing data columns and the computed statistical features
"""
    # copy before extending so the mutable default argument (and the caller's list) is never modified
    exclude_col_names = list(exclude_col_names) + ["timestamp", "localtime", "user", "version"]
data = ds._data._df.drop(*exclude_col_names)
df_column_names = data.columns
basic_schema = StructType([
StructField("timestamp", TimestampType()),
StructField("localtime", TimestampType()),
StructField("user", StringType()),
StructField("version", IntegerType()),
StructField("start_time", TimestampType()),
StructField("end_time", TimestampType())
])
features_list = []
for cn in df_column_names:
for sf in feature_names:
features_list.append(StructField(cn + "_" + sf, FloatType(), True))
features_schema = StructType(basic_schema.fields + features_list)
def calculate_zero_cross_rate(series):
"""
How often the signal changes sign (+/-)
"""
series_mean = np.mean(series)
series = [v - series_mean for v in series]
zero_cross_count = (np.diff(np.sign(series)) != 0).sum()
return zero_cross_count / len(series)
def get_sqr(series):
sqr = np.mean([v * v for v in series])
return sqr
@pandas_udf(features_schema, PandasUDFType.GROUPED_MAP)
def get_stats_features_udf(df):
results = []
timestamp = df['timestamp'].iloc[0]
localtime = df['localtime'].iloc[0]
user = df['user'].iloc[0]
version = df['version'].iloc[0]
start_time = timestamp
end_time = df['timestamp'].iloc[-1]
df.drop(exclude_col_names, axis=1, inplace=True)
if "mean" in feature_names:
df_mean = df.mean()
df_mean.index += '_mean'
results.append(df_mean)
if "median" in feature_names:
df_median = df.median()
df_median.index += '_median'
results.append(df_median)
if "stddev" in feature_names:
df_stddev = df.std()
df_stddev.index += '_stddev'
results.append(df_stddev)
if "variance" in feature_names:
df_var = df.var()
df_var.index += '_variance'
results.append(df_var)
if "max" in feature_names:
df_max = df.max()
df_max.index += '_max'
results.append(df_max)
if "min" in feature_names:
df_min = df.min()
df_min.index += '_min'
results.append(df_min)
if "skew" in feature_names:
df_skew = df.skew()
df_skew.index += '_skew'
results.append(df_skew)
if "kurt" in feature_names:
df_kurt = df.kurt()
df_kurt.index += '_kurt'
results.append(df_kurt)
if "sqr" in feature_names:
df_sqr = df.apply(get_sqr)
df_sqr.index += '_sqr'
results.append(df_sqr)
output = pd.DataFrame(pd.concat(results)).T
basic_df = pd.DataFrame([[timestamp, localtime, user, int(version), start_time, end_time]],
columns=['timestamp', 'localtime', 'user', 'version', 'start_time', 'end_time'])
return basic_df.assign(**output)
# check if datastream object contains grouped type of DataFrame
if not isinstance(ds._data, GroupedData):
raise Exception(
"DataStream object is not grouped data type. Please use 'window' operation on datastream object before running this algorithm")
data = ds._data.apply(get_stats_features_udf)
return DataStream(data=data, metadata=Metadata())
| 5,348,703 |
def second_order_difference(t, y):
""" Calculate the second order difference.
Args:
t: ndarray, the list of the three independent variables
y: ndarray, three values of the function at every t
Returns:
double: the second order difference of given points
"""
    # calculate the first order difference
first_order_difference = (y[1:] - y[:-1]) / (t[1:] - t[:-1])
return (first_order_difference[1] - first_order_difference[0]) / (t[2] - t[0])
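# A minimal worked example, assuming numpy is importable (the function itself relies on
# array arithmetic): for t = [0, 1, 2] and y = t**2 = [0, 1, 4], the first-order
# differences are [1, 3], so the result is (3 - 1) / (2 - 0) = 1.0, i.e. the
# second-order divided difference (half of the true second derivative, 2).
import numpy as np
print(second_order_difference(np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0, 4.0])))  # 1.0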
| 5,348,704 |
def timeDelay( gpsTime, rightAscension, declination, unit, det1, det2 ):
"""
timeDelay( gpsTime, rightAscension, declination, unit, det1, det2 )
Calculates the time delay in seconds between the detectors
'det1' and 'det2' (e.g. 'H1') for a sky location at (rightAscension
and declination) which must be given in certain units
    ('radians' or 'degree'). The time is passed as GPS time.
A positive time delay means the GW arrives first at 'det2', then at 'det1'.
Example:
antenna.timeDelay( 877320548.000, 355.084,31.757, 'degree','H1','L1')
0.0011604683260994519
Given these values, the signal arrives first at detector L1,
    and 1.16 ms later at H1
"""
# check the input arguments
if unit =='radians':
ra_rad = rightAscension
de_rad = declination
elif unit =='degree':
ra_rad = rightAscension/180.0*pi
de_rad = declination/180.0*pi
else:
raise ValueError("Unknown unit %s" % unit)
# check input values
if ra_rad<0.0 or ra_rad> 2*pi:
raise ValueError( "ERROR. right ascension=%f "\
"not within reasonable range."\
% (rightAscension))
if de_rad<-pi or de_rad> pi:
raise ValueError( "ERROR. declination=%f not within reasonable range."\
% (declination))
if det1 == det2:
return 0.0
gps = lal.LIGOTimeGPS( gpsTime )
x1 = lalsimulation.DetectorPrefixToLALDetector(det1).location
x2 = lalsimulation.DetectorPrefixToLALDetector(det2).location
timedelay = lal.ArrivalTimeDiff(list(x1), list(x2), ra_rad, de_rad, gps)
return timedelay
| 5,348,705 |
def RngBinStr(n):
"""
    Takes an int which represents the length of the final binary number.
    Returns a string which represents a number in binary where each char was randomly generated and has length n.
"""
num = ""
for i in range(n):
if rng.random() < 0.5:
num += "0"
else:
num += "1"
return num
| 5,348,706 |
def get_bdbox_from_heatmap(heatmap, threshold=0.2, smooth_radius=20):
"""
Function to extract bounding boxes of objects in heatmap
Input :
Heatmap : matrix extracted with GradCAM.
    threshold : value defining which heatmap values are considered; increasing it increases the size of the bounding boxes.
smooth_radius : radius on which each pixel is blurred.
Output :
returned_objects : List of bounding boxes, N_objects * [ xmin, xmax, ymin, ymax, width, height ]
"""
    # If the heatmap is all zeros, initialize a default bounding box that wraps the entire image
xmin = 0
xmax = heatmap.shape[1]
ymin = 0
ymax = heatmap.shape[0]
width = xmax-xmin
height = ymax-ymin
returned_objects = []
# Count if there is any "hot" value on the heatmap
count = (heatmap > threshold).sum()
# Blur the image to have continuous regions
heatmap = ndimage.uniform_filter(heatmap, smooth_radius)
# Threshold the heatmap with 1 for values > threshold and 0 else
thresholded = np.where(heatmap > threshold, 1, 0)
# Apply morphological filter to fill potential holes in the heatmap
thresholded = ndimage.morphology.binary_fill_holes(thresholded)
    # Detect all independent objects in the image
labeled_image, num_features = ndimage.label(thresholded)
objects = ndimage.measurements.find_objects(labeled_image)
    # Loop over each detected object (if any) and append it to the output list
if count > 0:
for obj in objects:
x = obj[1]
y = obj[0]
xmin = x.start
xmax = x.stop
ymin = y.start
ymax = y.stop
width = xmax-xmin
height = ymax-ymin
returned_objects.append([xmin, xmax, ymin, ymax, width, height])
else:
returned_objects.append([xmin, xmax, ymin, ymax, width, height])
return returned_objects
| 5,348,707 |
def _checkSequenceError(string, start, expected):
"""
Checks that the string starts with the expected sequence number.
    Args:
        string: the response string to check
        start: the expected leading sequence the string must start with
        expected: the expected sequence value, reported in the error message
"""
if not string.startswith(start):
raise Exception(
"wanted sequence (0x%s), got 0x%02x" %
(expected, _extractFirstInt(string))
)
| 5,348,708 |
def get_request(request_id, to_json=False, session=None):
"""
Get a request or raise a NoObject exception.
:param request_id: The id of the request.
:param to_json: return json format.
:param session: The database session in use.
    :raises NoObject: If no request is found.
:returns: Request.
"""
try:
query = session.query(models.Request).with_hint(models.Request, "INDEX(REQUESTS REQUESTS_SCOPE_NAME_IDX)", 'oracle')\
.filter(models.Request.request_id == request_id)
ret = query.first()
if not ret:
return None
else:
if to_json:
return ret.to_dict_json()
else:
return ret.to_dict()
except sqlalchemy.orm.exc.NoResultFound as error:
raise exceptions.NoObject('request request_id: %s cannot be found: %s' % (request_id, error))
| 5,348,709 |
def gen_multi_correlated(N, n, c_mat, p_arr, use_zscc=False, verify=False, test_sat=False, pack_output=True, print_stat=False):
"""Generate a set of bitstreams that are correlated according to the supplied correlation matrix"""
#Test if the desired parameters are satisfiable
sat_result = corr_sat(N, n, c_mat, p_arr, for_gen=True, print_stat=print_stat, use_zscc=use_zscc)
if not sat_result:
if print_stat:
print("SCC MATRIX NOT SATISFIABLE")
return test_sat #Don't fail the test if we were intending to check correlation satisfiability
sat = sat_result[0]
if not test_sat and not sat:
if print_stat:
print("SCC MATRIX NOT SATISFIABLE")
return False
Dij = sat_result[1]
N_arr = sat_result[2]
if print_stat:
print(c_mat)
print(p_arr)
#Perform the generation
bs_arr = np.zeros((n,N), dtype=np.uint8)
def gmc_rec(i):
"""Recursive portion of gen_multi_correlated"""
nonlocal N, n, N_arr, Dij, bs_arr
if i == n-1:
sentinel = 's'
last_cand = next(next_cand(N, N_arr[i], Dij, bs_arr, i), sentinel)
if last_cand is not sentinel:
bs_arr[i, :] = last_cand
return True
else:
return False
else:
for cand in next_cand(N, N_arr[i], Dij, bs_arr, i):
bs_arr[i, :] = cand
if gmc_rec(i+1):
return True
return False
gmc_result = gmc_rec(0)
if not test_sat and not gmc_result:
if print_stat:
print("GEN_MULTI_CORRELATED FAILED: Couldn't find a valid solution")
return False
if test_sat:
if gmc_result != sat:
print("Generation result: '{}' did not match scc sat result: '{}'. Corr mat: \n{}. p arr: {}" \
.format(gmc_result, sat, c_mat, p_arr))
return False
else:
print("SCC SAT TEST PASS. Corr mat: \n{}. p arr: {}".format(c_mat, p_arr))
#Verify the generation
if print_stat:
print(bs_arr)
if verify and gmc_result:
cmat_actual = bs.get_corr_mat(bs_arr, bs_len=N, use_zscc=use_zscc)
if np.any(np.abs(cmat_actual - c_mat) > 1e-3):
if print_stat:
print("GEN_MULTI_CORRELATED FAILED: Resulting SCC Matrix doesn't match: \n {} \n should be \n {}"
.format(cmat_actual, c_mat))
return False
for idx, bs_i in enumerate(bs_arr):
p_actual = bs.bs_mean(np.packbits(bs_i), bs_len=N)
if np.any(np.abs(p_actual - p_arr[idx]) > 1e-3):
if print_stat:
print("GEN_MULTI_CORRELATED FAILED: Resulting probability is incorrect: {} (should be {})".format(p_actual, p_arr[idx]))
return False
if print_stat:
print("GEN_MULTI_CORRELATED PASS")
if pack_output:
return True, np.packbits(bs_arr, axis=1)
else:
return True, bs_arr
| 5,348,710 |
def rxzero_traj_eval_grad(parms, t_idx):
"""
Analytical gradient for evaluated trajectory with respect to the log-normal parameters
It is expected to boost the optimization performance when the parameters are high-dimensional...
"""
v_amp_array = np.array([rxzero_vel_amp_eval(parm, t_idx) for parm in parms])
phi_array = np.array([rxzero_normal_Phi_eval(parm, t_idx) for parm in parms])
v_amp_grad_array = np.array([np.vstack([rxzero_vel_amp_eval_grad(parm[0:4], t_idx).T, np.zeros((2, len(t_idx)))]).T for parm in parms])
phi_grad_array = np.array([rxzero_normal_Phi_eval_grad(parm, t_idx) for parm in parms])
v_x_grad = np.concatenate([(v_amp_grad_array[parm_idx].T * np.cos(phi_array[parm_idx]) - v_amp_array[parm_idx] * np.sin(phi_array[parm_idx]) * phi_grad_array[parm_idx].T).T for parm_idx in range(len(parms))], axis=1)
v_y_grad = np.concatenate([(v_amp_grad_array[parm_idx].T * np.sin(phi_array[parm_idx]) + v_amp_array[parm_idx] * np.cos(phi_array[parm_idx]) * phi_grad_array[parm_idx].T).T for parm_idx in range(len(parms))], axis=1)
dt = t_idx[1] - t_idx[0]
pos_x_grad = np.cumsum(v_x_grad, axis=0) * dt
pos_y_grad = np.cumsum(v_y_grad, axis=0) * dt
return np.array([pos_x_grad, pos_y_grad]), np.array([v_x_grad, v_y_grad])
| 5,348,711 |
def exec_geoprocessing_model():
"""算法模型试运行测试
根据算法模型的guid标识,算法模型的输入参数,运行算法模型
---
tags:
- system_manage_api/geoprocessing_model
parameters:
- in: string
name: guid
type: string
required: true
description: 流程模型的guid
- in: array
name: param
type: array
required: true
description: 算法模型的初始化参数
responses:
200:
description: 算法模型运行的结果,结果数组
schema:
properties:
geoprocessing_model_result:
type: object
description: 结果数组,[{"function_name":"","value":""},{},...]
500:
description: 服务运行错误,异常信息
schema:
properties:
errMessage:
type: string
description: 异常信息,包括异常信息的类型
traceMessage:
type: string
description: 异常更加详细的信息,包括异常的位置
"""
try:
# exe_functinons_param = {}
# exe_functinons_already = {}
# exe_functinons_result = {}
# param_dic = {x["guid"]: x["default_value"] for x in list(request.json.get('param', []))}
        # #Fetch all function information for this algorithm model from the database by its guid,
        # #including module names, function names, parameter names, etc.
# pg_helper = PgHelper()
# records = pg_helper.query_datatable(
# '''select module_name,function_name,parameter_name,guid,
# from_module_name,from_function_name,from_name
# from gy_geoprocessing_model_node
# where geoprocessing_model_guid=%s''', (request.json.get('guid', None),))
# for x in records:
# if not (x["module_name"], x["function_name"]) in exe_functinons_param:
# exe_functinons_param[(x["module_name"], x["function_name"])] = {}
# exe_functinons_already[(x["module_name"], x["function_name"])] = False
# if x["guid"] in param_dic:
# exe_functinons_param[(x["module_name"], x["function_name"])][x["parameter_name"]] = param_dic[x["guid"]]
# else:
# exe_functinons_param[(x["module_name"], x["function_name"])][x["parameter_name"]] = None
# exe_functinons_result[(x["from_module_name"], x["from_function_name"], x["from_name"])] = (x["module_name"], x["function_name"],
# x["parameter_name"])
# flag_loop = True
# latest_result = {}
# while flag_loop:
# flag_loop = False
        # #Loop over every function
# for key_f in exe_functinons_param:
        # #The function has already been run
# if exe_functinons_already[key_f]:
# continue
        # #If none of a function's parameter values is None, run the function
# func_exeable = True
# for key_p in exe_functinons_param[key_f]:
# if exe_functinons_param[key_f][key_p] is None:
# func_exeable = False
# flag_loop = True
# break
        # #Run the function
# if func_exeable:
# latest_result = {}
# exe_functinons_already[key_f] = True
# temp_result = geoprocessing_algorithm.__dict__[key_f[0]].__dict__[key_f[1]](**exe_functinons_param[key_f])
        # #Assign the results to the corresponding parameters
# for key_re in temp_result:
# if key_f + (key_re,) in exe_functinons_result:
# exe_functinons_param[exe_functinons_result[key_f +
# (key_re,)][:-1]][exe_functinons_result[key_f +
# (key_re,)][-1]] = temp_result[key_re]
# latest_result[key_f] = temp_result
        # #Parse the results of the most recent run and return them to the front end
# ret_string = ""
# for key_f in latest_result:
# for x in geoprocessing_algorithm.__dict__[key_f[0]].__dict__[key_f[1]].__annotations__["return"]:
# if x["name_en"] in latest_result[key_f]:
# ret_string = ret_string + x["name_zh_cn"] + ":" + str(latest_result[key_f][x["name_en"]]) + "\n"
# return jsonify({"geoprocessing_model_result": ret_string}), 200
return jsonify({}), 200
except Exception as exception:
return jsonify({"errMessage": repr(exception), "traceMessage": traceback.format_exc()}), 500
| 5,348,712 |
def perform_extra_url_query(url):
"""Performs a request to the URL supplied
Arguments:
url {string} -- A URL directing to another page of results from the NASA API
Returns:
Response object -- The response received from the NASA API
"""
response = requests.request("GET", url)
check_query_was_successful(response)
return response
| 5,348,713 |
def problem_fact_property(fact_type: Type) -> Callable[[Callable[[], List]],
Callable[[], List]]:
"""Specifies that a property on a @planning_solution class is a problem fact.
A problem fact must not change during solving (except through a ProblemFactChange event). The constraints in a
ConstraintProvider rely on problem facts for ConstraintFactory.from(Class).
Do not annotate planning entities as problem facts: they are automatically available as facts for
ConstraintFactory.from(Class).
"""
def problem_fact_property_function_mapper(getter_function: Callable[[], Any]):
ensure_init()
from org.optaplanner.optapy import PythonWrapperGenerator # noqa
from org.optaplanner.core.api.domain.solution import \
ProblemFactProperty as JavaProblemFactProperty
getter_function.__optapy_return = get_class(fact_type)
getter_function.__optaplannerPlanningEntityCollectionProperty = {
'annotationType': JavaProblemFactProperty
}
return getter_function
return problem_fact_property_function_mapper
| 5,348,714 |
def _load_method_arguments(name, argtypes, args):
"""Preload argument values to avoid freeing any intermediate data."""
if not argtypes:
return args
if len(args) != len(argtypes):
raise ValueError(f"{name}: Arguments length does not match argtypes length")
return [
arg if hasattr(argtype, "_type_") else argtype.from_param(arg)
for (arg, argtype) in zip(args, argtypes)
]
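# A minimal usage sketch with standard ctypes types (an assumption; the real callers in
# this library are not shown here). Simple types such as c_int and c_char_p define
# _type_, so their values pass through unchanged.
from ctypes import c_int, c_char_p
print(_load_method_arguments("demo", [c_int, c_char_p], [7, b"hello"]))  # [7, b'hello']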
| 5,348,715 |
def crash_document_add(key=None):
"""
POST: api/vX/crash/<application_key>
add a crash document by web service
"""
if 'Content-Type' not in request.headers or request.headers['Content-Type'].find('multipart/form-data') < 0:
return jsonify({ 'success': False, 'message': 'input error' })
reports = request.files.getlist('reports')
if reports:
ds = DocumentService()
for report in reports:
documents = json.loads(report.read())
if not isinstance(documents, list): documents = [documents]
for document in documents:
result, msg = ds.add_document(key, document, DocumentModel.crash)
if not result:
# error
return abort(417, {'message': msg})
# success
return jsonify({'success': True, 'message': None})
# no reports
return abort(400, {'message': 'input error'})
| 5,348,716 |
def log_data(model, action, before, after, instance):
"""Logs mutation signals for Favourite and Category models
Args:
model(str): the target class of the audit-log: favourite or category
action(str): the type of mutation to be logged: create, update, delete
before(dict): the previous value of the data mutated
after(dict): the new value of the data mutated
instance(object): the favourite or category instance being mutated
Returns:
object: instance of AuditLog created for the mutation
"""
log = {
'model': model,
'action': action,
'date': timezone.now(),
'before': before,
'after': after,
'resource_id': instance.id
}
return AuditLog.objects.create(**log)
| 5,348,717 |
def do_train(args):
"""
Train the model using the provided arguments.
"""
# Assumption: it is cheap to store all the data in text form in
# memory (it's only about 144mb)
_, X, y = load_data_raw(args.input)
X_train, y_train, X_val, y_val = split_data(X, y, args.dev_split)
# Assumption: word vector model will also easily fit in memory.
wvecs = WordVectorModel.from_file(args.wvecs, False, '*UNKNOWN*')
# Typical values are 50, 50
input_shape = (1,args.n_words, wvecs.dim)
output_shape = len(LABELS)
# Build model
model = build_model(args, input_shape=input_shape, output_shape=output_shape, output_type=args.output_type)
# Training data on the other hand will not. Each input instance is
# 50x50 matrix with 8bytes per value: that's about 20kb.
# Assuming we want to store only about 500mb in memory at a time,
# that means we want at most 25k items in a batch.
# Typically minibatches of 32-128 are probably ok. Let's keep it
# that way?
for epoch in range(args.n_epochs):
log("== Training model, epoch {}", epoch)
scorer = Scorer(model)
for xy in tqdm(grouper(args.batch_size, zip(X_train, y_train))):
X_batch, y_batch = zip(*xy)
X_batch, y_batch = wvecs.embed_sentences(X_batch), array(make_one_hot(y_batch, len(LABELS)))
score = model.train_on_batch(X_batch, y_batch)
scorer.update(score, len(X_batch))
log("=== train error: {}", scorer)
scorer = Scorer(model)
for xy in tqdm(grouper(args.batch_size, zip(X_val, y_val))):
X_batch, y_batch = zip(*xy)
X_batch, y_batch = wvecs.embed_sentences(X_batch), array(make_one_hot(y_batch, len(LABELS)))
score = model.test_on_batch(X_batch, y_batch)
scorer.update(score, len(X_batch))
log("=== val error: {}", scorer)
## Save the model
save_model(model, args.model, args.weights)
#ys_ = model.predict(X_train_)
#run_model(X_train, y_train, ys_)
| 5,348,718 |
def download(url, local_filename, chunk_size=1024 * 10):
"""Download `url` into `local_filename'.
:param url: The URL to download from.
:type url: str
:param local_filename: The local filename to save into.
:type local_filename: str
:param chunk_size: The size to download chunks in bytes (10Kb by default).
:type chunk_size: int
:rtype: str
:returns: The path saved to.
"""
    # stream the response so the file is fetched lazily in chunks rather than loaded into memory at once
    response = requests.get(url, stream=True)
with open(local_filename, 'wb') as fp:
for chunk in response.iter_content(chunk_size=chunk_size):
if chunk:
fp.write(chunk)
return fp.name
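# A minimal usage sketch; the URL below is only a placeholder, not an endpoint from the
# original project. Guarded so it only runs when the module is executed directly.
if __name__ == "__main__":
    saved_path = download("https://example.com/archive.zip", "archive.zip")
    print(saved_path)  # "archive.zip"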
| 5,348,719 |
def local_action_StillOnGroup(arg=None):
"""{"group": "Playback - Group", "schema": {"type": "object", "title": "Args", "properties": {
"group": {"type": "number", "order": 2, "title": "Group"}}}}"""
query = 'G%sST\r' % arg['group']
queue.request(lambda: udp.send(query),
lambda resp: handleReqResp('StillOnGroup', resp))
| 5,348,720 |
def main(args=None):
"""Command line interface.
:param list args: command line options (defaults to sys.argv)
:returns: exit code
:rtype: int
"""
parser = ArgumentParser(
prog='baseline',
description='Overwrite script with baseline update.')
parser.add_argument(
'path', nargs='*',
help='module or directory path')
parser.add_argument(
'-w', '--walk', action='store_true',
help='recursively walk directories')
args = parser.parse_args(args)
paths = args.path or ['.']
paths = [path for pattern in paths for path in glob(pattern)]
if args.walk:
for dirpath in (p for p in paths if os.path.isdir(p)):
for root, _dirs, files in os.walk(dirpath):
paths += (os.path.join(root, filename) for filename in files)
else:
for dirpath in (p for p in paths if os.path.isdir(p)):
paths += (os.path.join(dirpath, pth) for pth in os.listdir(dirpath))
update_paths = [
os.path.abspath(p) for p in paths if p.lower().endswith(UPDATE_EXT)]
if update_paths:
script_paths = [pth[:-len(UPDATE_EXT)] + '.py' for pth in update_paths]
print('Found updates for:')
for path in script_paths:
print(' ' + os.path.relpath(path))
print()
try:
input('Hit [ENTER] to update, [Ctrl-C] to cancel ')
except KeyboardInterrupt:
print()
print('Update canceled.')
else:
print()
for script_path, update_path in zip(script_paths, update_paths):
with open(update_path) as update:
new_content = update.read()
with open(script_path, 'w') as script:
script.write(new_content)
os.remove(update_path)
print(
os.path.relpath(update_path) +
' -> ' +
os.path.relpath(script_path))
return 0
| 5,348,721 |
def tokenize(data, tok="space", lang="en"):
"""Tokenize text data.
There are 5 tokenizers supported:
- "space": split along whitespaces
- "char": split in characters
- "13a": Official WMT tokenization
- "zh": Chinese tokenization (See ``sacrebleu`` doc)
- "moses": Moses tokenizer (you can specify lthe language).
Uses the `sacremoses <https://github.com/alvations/sacremoses>`_
Args:
data (list, str): String or list (of lists...) of strings.
tok (str, optional): Tokenization. Defaults to "space".
lang (str, optional): Language (only useful for the moses tokenizer).
Defaults to "en".
Returns:
list, str: Tokenized data
"""
if tok is "space":
def tokenizer(x): return x.split()
elif tok is "char":
def tokenizer(x): return list(x)
elif tok is "13a":
def tokenizer(x): return sacrebleu.tokenize_13a(x).split(" ")
elif tok is "zh":
def tokenizer(x): return sacrebleu.tokenize_zh(x).split(" ")
elif tok is "moses":
moses_tok = sacremoses.MosesTokenizer(lang=lang)
def tokenizer(x): return moses_tok.tokenize(x)
else:
raise ValueError(f"Unknown tokenizer {tok}")
return _tokenize(data, tokenizer)
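# A self-contained sketch of the recursive-apply step that the module's _tokenize helper
# (not shown in this snippet) is assumed to perform: apply `tokenizer` to a string, or
# map it over arbitrarily nested lists of strings.
def _tokenize_sketch(data, tokenizer):
    if isinstance(data, str):
        return tokenizer(data)
    return [_tokenize_sketch(d, tokenizer) for d in data]

print(_tokenize_sketch(["hello world", ["a b c"]], lambda x: x.split()))
# [['hello', 'world'], [['a', 'b', 'c']]]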
| 5,348,722 |
def ift2(x, dim=(-2, -1)):
"""
    Computes the inverse 2D fast Fourier transform, applying fftshift before and after the transform so the result is consistent with the ftAxis conventions
Parameters
----------
x: (ndarray) the array on which the FFT should be done
dim: the axis (or a tuple of axes) over which is done the FFT (default is the last of the array)
    Returns
    -------
    out: ndarray
        the inverse Fourier transformed array, same shape as x
See Also
--------
ftAxis, ftAxis_time, ift, ft2, ift2
"""
assert isinstance(x, np.ndarray)
if hasattr(dim, '__iter__'):
for d in dim:
if not isinstance(d, int):
raise TypeError(
'elements in dim should be an integer specifying the array dimension over which to do the calculation')
assert d <= len(x.shape)
else:
if not isinstance(dim, int):
raise TypeError(
'elements in dim should be an integer specifying the array dimension over which to do the calculation')
assert dim <= len(x.shape)
out = np.fft.fftshift(np.fft.ifft2(np.fft.fftshift(x, axes=dim)), axes=dim)
return out
| 5,348,723 |
def print_eval_info(train_losses, train_metrics, eval_losses, eval_metrics):
"""Pretty prints model evaluation results
"""
    # raise unless all four arguments are dicts
    if not (isinstance(train_losses, dict)
            and isinstance(train_metrics, dict)
            and isinstance(eval_losses, dict)
            and isinstance(eval_metrics, dict)):
        raise TypeError('Parameters `losses` and `metrics` should be '
                        'a dict {"task_id": value}.')
df = pd.DataFrame({
'train losses': pd.Series(train_losses),
'train metrics': pd.Series(train_metrics),
'eval losses': pd.Series(eval_losses),
'eval metrics': pd.Series(eval_metrics)
})
df.index.name = 'task_ids'
print(colored('\n [evaluations]:', 'cyan'))
table_str = tabulate(df, headers='keys', tablefmt='simple')
table_str = ' ' + table_str.replace('\n', '\n ')
print(table_str)
| 5,348,724 |
def chi2_test_independence(prediction_files: list, confidence_level: float):
"""Given a list of prediction files and a required confidence level,
return whether the sentiment probability is independent on which prediction
file it comes from.
Returns True if the sentiment probability is independent of source."""
df = generate_sentiment_counts_multiple_files(prediction_files)
observed = df[:-1].drop(columns='row_sum')
expected = np.outer(df['row_sum'][:-1],
df.loc['col_sum'][:-1]) / df.loc['col_sum']['row_sum']
expected = pd.DataFrame(expected)
expected.columns = df.columns[:-1]
expected.index = df.index[:-1]
chi2_stats = ((observed - expected)**2 / expected).sum().sum()
    # degrees of freedom for a chi-square test of independence: (rows - 1) * (columns - 1)
    degs_of_freedom = (len(observed) - 1) * (len(observed.iloc[0]) - 1)
critical_value = chi2.ppf(q=confidence_level, df=degs_of_freedom)
p_value = 1 - chi2.cdf(x=chi2_stats, df=degs_of_freedom)
LOGGER.info(
f"chi2_stats = {chi2_stats}, critical_value = {critical_value}, p_value = {p_value:.10f}"
)
return p_value > (1 - confidence_level)
| 5,348,725 |
def fetch_all_device_paths():
"""
Return all device paths inside worker nodes
Returns:
list : List containing all device paths
"""
path = os.path.join(constants.EXTERNAL_DIR, "device-by-id-ocp")
clone_repo(constants.OCP_QE_DEVICEPATH_REPO, path)
os.chdir(path)
logger.info("Running script to fetch device paths...")
run_cmd("ansible-playbook devices_by_id.yml")
with open("local-storage-block.yaml") as local_storage_block:
local_block = yaml.load(local_storage_block, Loader=yaml.FullLoader)
dev_paths = local_block["spec"]["storageClassDevices"][0]["devicePaths"]
logger.info(f"All devices are {dev_paths}")
os.chdir(constants.TOP_DIR)
shutil.rmtree(path)
return dev_paths
| 5,348,726 |
def test_roles__2(zcmlS):
"""The calendar visitor role is registered as an visitor role."""
assert has_visitor_role(['icemac.ab.calendar.Visitor'])
| 5,348,727 |
def align(fastq_file, pair_file, index_dir, names, align_dir, data):
"""Perform piped alignment of fastq input files, generating sorted, deduplicated BAM.
"""
umi_ext = "-cumi" if "umi_bam" in data else ""
out_file = os.path.join(align_dir, "{0}-sort{1}.bam".format(dd.get_sample_name(data), umi_ext))
num_cores = data["config"]["algorithm"].get("num_cores", 1)
rg_info = "rgid={rg} rgpl={pl} rgpu={pu} rgsm={sample}".format(**names)
pair_file = pair_file if pair_file else ""
final_file = None
if data.get("align_split"):
# BBMap does not accept input fastq streams
raise ValueError("bbmap is not compatible with alignment splitting, set `align_split: false`")
pair_arg = "in2=%s" % pair_file if pair_file else ""
if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)):
with postalign.tobam_cl(data, out_file, pair_file != "") as (tobam_cl, tx_out_file):
if index_dir.endswith(("/ref", "/ref/")):
index_dir = os.path.dirname(index_dir)
# sam=1.3 required for compatibility with strelka2
cmd = ("bbmap.sh sam=1.3 mdtag=t {rg_info} path={index_dir} in1={fastq_file} "
"{pair_arg} out=stdout.sam | ")
do.run(cmd.format(**locals()) + tobam_cl, "bbmap alignment: %s" % dd.get_sample_name(data))
data["work_bam"] = out_file
return data
| 5,348,728 |
async def insert(cls:"PhaazeDatabase", WebRequest:Request, DBReq:DBRequest) -> Response:
""" Used to insert a new entry into a existing container """
# prepare request for a valid insert
try:
DBInsertRequest:InsertRequest = InsertRequest(DBReq)
return await performInsert(cls, DBInsertRequest)
except (MissingIntoField, InvalidContent, ContainerNotFound, ContainerBroken, SysLoadError, SysStoreError) as e:
res = dict(
code = e.code,
status = e.status,
msg = e.msg()
)
return cls.response(status=e.code, body=json.dumps(res))
except Exception as ex:
return await cls.criticalError(ex)
| 5,348,729 |
def eulerAngleXYZ(t123, unit=np.pi/180., dtype=np.float32):
"""
::
In [14]: eulerAngleXYZ([45,0,0])
Out[14]:
array([[ 1. , 0. , 0. , 0. ],
[-0. , 0.7071, 0.7071, 0. ],
[ 0. , -0.7071, 0.7071, 0. ],
[ 0. , 0. , 0. , 1. ]], dtype=float32)
In [15]: eulerAngleXYZ([0,45,0])
Out[15]:
array([[ 0.7071, 0. , -0.7071, 0. ],
[-0. , 1. , 0. , 0. ],
[ 0.7071, -0. , 0.7071, 0. ],
[ 0. , 0. , 0. , 1. ]], dtype=float32)
In [16]: eulerAngleXYZ([0,0,45])
Out[16]:
array([[ 0.7071, 0.7071, 0. , 0. ],
[-0.7071, 0.7071, 0. , 0. ],
[ 0. , -0. , 1. , 0. ],
[ 0. , 0. , 0. , 1. ]], dtype=float32)
In [11]: extractEulerAnglesXYZ(eulerAngleXYZ([45,0,0]))
Out[11]: array([ 45., 0., 0.], dtype=float32)
In [12]: extractEulerAnglesXYZ(eulerAngleXYZ([0,45,0]))
Out[12]: array([ 0., 45., -0.], dtype=float32)
In [13]: extractEulerAnglesXYZ(eulerAngleXYZ([0,0,45]))
Out[13]: array([ 0., 0., 45.], dtype=float32)
https://github.com/g-truc/glm/blob/master/glm/gtx/euler_angles.inl
::
template<typename T>
GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXYZ
(
T const & t1,
T const & t2,
T const & t3
)
{
T c1 = glm::cos(-t1);
T c2 = glm::cos(-t2);
T c3 = glm::cos(-t3);
T s1 = glm::sin(-t1);
T s2 = glm::sin(-t2);
T s3 = glm::sin(-t3);
mat<4, 4, T, defaultp> Result;
Result[0][0] = c2 * c3;
Result[0][1] =-c1 * s3 + s1 * s2 * c3;
Result[0][2] = s1 * s3 + c1 * s2 * c3;
Result[0][3] = static_cast<T>(0);
Result[1][0] = c2 * s3;
Result[1][1] = c1 * c3 + s1 * s2 * s3;
Result[1][2] =-s1 * c3 + c1 * s2 * s3;
Result[1][3] = static_cast<T>(0);
Result[2][0] =-s2;
Result[2][1] = s1 * c2;
Result[2][2] = c1 * c2;
Result[2][3] = static_cast<T>(0);
Result[3][0] = static_cast<T>(0);
Result[3][1] = static_cast<T>(0);
Result[3][2] = static_cast<T>(0);
Result[3][3] = static_cast<T>(1);
return Result;
}
"""
a = np.asarray(t123, dtype=dtype)
a *= unit
t1 = a[0]
t2 = a[1]
t3 = a[2]
c1 = np.cos(-t1);
c2 = np.cos(-t2);
c3 = np.cos(-t3);
s1 = np.sin(-t1);
s2 = np.sin(-t2);
s3 = np.sin(-t3);
Result = np.eye(4, dtype=dtype);
Result[0][0] = c2 * c3;
Result[0][1] =-c1 * s3 + s1 * s2 * c3;
Result[0][2] = s1 * s3 + c1 * s2 * c3;
Result[0][3] = 0;
Result[1][0] = c2 * s3;
Result[1][1] = c1 * c3 + s1 * s2 * s3;
Result[1][2] =-s1 * c3 + c1 * s2 * s3;
Result[1][3] = 0;
Result[2][0] =-s2;
Result[2][1] = s1 * c2;
Result[2][2] = c1 * c2;
Result[2][3] = 0;
Result[3][0] = 0;
Result[3][1] = 0;
Result[3][2] = 0;
Result[3][3] = 1;
return Result;
| 5,348,730 |
def is_on_cooldown(data):
""" Checks to see if user is on cooldown. Based on Castorr91's Gamble"""
# check if command is on cooldown
cooldown = Parent.IsOnCooldown(ScriptName, CGSettings.Command)
user_cool_down = Parent.IsOnUserCooldown(ScriptName, CGSettings.Command, data.User)
caster = Parent.HasPermission(data.User, "Caster", "")
if (cooldown or user_cool_down) and caster is False and not CGSettings.CasterCD:
if CGSettings.UseCD:
cooldownDuration = Parent.GetCooldownDuration(ScriptName, CGSettings.Command)
userCDD = Parent.GetUserCooldownDuration(ScriptName, CGSettings.Command, data.User)
if cooldownDuration > userCDD:
m_CooldownRemaining = cooldownDuration
message = CGSettings.OnCoolDown.format(data.UserName, m_CooldownRemaining)
SendResp(data, CGSettings.Usage, message)
else:
m_CooldownRemaining = userCDD
message = CGSettings.OnUserCoolDown.format(data.UserName, m_CooldownRemaining)
SendResp(data, CGSettings.Usage, message)
return True
elif (cooldown or user_cool_down) and CGSettings.CasterCD:
if CGSettings.UseCD:
cooldownDuration = Parent.GetCooldownDuration(ScriptName, CGSettings.Command)
userCDD = Parent.GetUserCooldownDuration(ScriptName, CGSettings.Command, data.User)
if cooldownDuration > userCDD:
m_CooldownRemaining = cooldownDuration
message = CGSettings.OnCoolDown.format(data.UserName, m_CooldownRemaining)
SendResp(data, CGSettings.Usage, message)
else:
m_CooldownRemaining = userCDD
message = CGSettings.OnUserCoolDown.format(data.UserName, m_CooldownRemaining)
SendResp(data, CGSettings.Usage, message)
return True
return False
| 5,348,731 |
def human_readable_size(num):
"""
To show size as 100K, 100M, 10G instead of
showing in bytes.
"""
for s in reversed(SYMBOLS):
power = SYMBOLS.index(s)+1
if num >= 1024**power:
value = float(num) / (1024**power)
return '%.1f%s' % (value, s)
# if size less than 1024 or human readable not required
return '%s' % num
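# A minimal check, assuming SYMBOLS is the usual module-level suffix list ordered from
# smallest to largest unit (the real constant is not shown in this snippet).
SYMBOLS = ['K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
print(human_readable_size(512))           # '512'
print(human_readable_size(10 * 1024**2))  # '10.0M'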
| 5,348,732 |
def sum_2_level_dict(two_level_dict):
"""Sum all entries in a two level dict
Parameters
----------
two_level_dict : dict
Nested dict
Returns
-------
tot_sum : float
Number of all entries in nested dict
"""
'''tot_sum = 0
for i in two_level_dict:
for j in two_level_dict[i]:
tot_sum += two_level_dict[i][j]
'''
tot_sum = 0
for _, j in two_level_dict.items():
tot_sum += sum(j.values())
return tot_sum
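# A quick worked example: 1 + 2 + 3 = 6.
print(sum_2_level_dict({'a': {'x': 1, 'y': 2}, 'b': {'z': 3}}))  # 6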
| 5,348,733 |
def file_ref(name):
"""Helper function for getting paths to testing spectra."""
file = os.path.join(os.path.dirname(test_analyzer.__file__),
"test_analyzer", name)
return file
| 5,348,734 |
def q_values_from_q_func(q_func, num_grid_cells, state_bounds, action_n):
"""Computes q value tensor from a q value function
Args:
q_func (funct): function from state to q value
num_grid_cells (int): number of grid_cells for resulting q value tensor
state_bounds (list of tuples): state bounds for resulting q value
tensor
action_n (int): number of actions in action space
Returns:
np.ndarray: q value tensor
"""
q_values = np.zeros(num_grid_cells + (action_n,))
it = np.nditer(q_values, flags=['multi_index'])
while not it.finished:
qs = q_func(
index_to_state(
num_grid_cells, state_bounds=state_bounds,
discrete=it.multi_index[:-1]
)
)
q_values[it.multi_index] = qs[0]
it.iternext()
return q_values
| 5,348,735 |
def list_keypairs(k5token, project_id, region):
"""Summary - list K5 project keypairs
Args:
k5token (TYPE): valid regional domain scoped token
project_id (TYPE): Description
region (TYPE): K5 region
Returns:
TYPE: http response object
Deleted Parameters:
userid(TYPE): K5 user id
"""
try:
serverURL = 'https://compute.' + region + \
'.cloud.global.fujitsu.com/v2/' + project_id + '/os-keypairs'
response = requests.get(serverURL,
headers={
'X-Auth-Token': k5token,
'Content-Type': 'application/json',
'Accept': 'application/json'})
return response
except:
return ("\nUnexpected error:", sys.exc_info())
| 5,348,736 |
def check_ip(ip):
"""
Check whether the IP is valid or not.
Args:
IP (str): IP to check
Raises:
None
Returns:
bool: True if valid, else False
"""
ip = ip.strip()
if re.match(r'^(?:(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])'
                r'(\.(?!$)|$)){4}$', ip):
return True
else:
return False
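# A couple of quick checks of the octet-range regex above.
print(check_ip("192.168.1.1"))   # True
print(check_ip("999.168.1.1"))   # False (999 is not a valid octet)
print(check_ip(" 10.0.0.1 "))    # True (leading/trailing whitespace is stripped)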
| 5,348,737 |
def _stream_lines(blob: bytes) -> Iterator[bytes]:
"""
Split bytes into lines (newline (\\n) character) on demand.
>>> iter = _stream_lines(b"foo\\nbar\\n")
>>> next(iter)
b'foo'
>>> next(iter)
b'bar'
>>> next(iter)
Traceback (most recent call last):
...
StopIteration
>>> iter = _stream_lines(b"\\x00")
>>> next(iter)
b'\\x00'
:param blob: the bytes to split.
:return: a generated list of lines.
"""
start = 0
def _index(needle: bytes) -> Optional[int]:
try:
return blob.index(needle, start)
except ValueError:
return None
line_index = _index(b"\n")
while line_index is not None:
yield blob[start:line_index]
start = line_index + 1
line_index = _index(b"\n")
# Deal with blobs that do not end in a newline.
if start < len(blob):
yield blob[start:]
| 5,348,738 |
async def fetch_all_organizations(session: ClientSession) -> Dict:
"""Fetch all organizations from organization-catalog."""
url = f"{Config.org_cat_uri()}/organizations"
org_list = await fetch_json_data(url, None, session)
return {org["organizationId"]: org for org in org_list} if org_list else dict()
| 5,348,739 |
def create_splits_random(df: pd.DataFrame, val_frac: float,
test_frac: float = 0.,
test_split: Optional[set[tuple[str, str]]] = None,
) -> dict[str, list[tuple[str, str]]]:
"""
Args:
df: pd.DataFrame, contains columns ['dataset', 'location', 'label']
each row is a single image
assumes each image is assigned exactly 1 label
val_frac: float, desired fraction of dataset to use for val set
test_frac: float, desired fraction of dataset to use for test set,
must be 0 if test_split is given
test_split: optional set of (dataset, location) tuples to use as test
split
Returns: dict, keys are ['train', 'val', 'test'], values are lists of locs,
where each loc is a tuple (dataset, location)
"""
if test_split is not None:
assert test_frac == 0
train_frac = 1. - val_frac - test_frac
targets = {'train': train_frac, 'val': val_frac, 'test': test_frac}
# merge dataset and location into a single string '<dataset>/<location>'
df['dataset_location'] = df['dataset'] + '/' + df['location']
# create DataFrame of counts. rows = locations, columns = labels
loc_label_counts = (df.groupby(['label', 'dataset_location']).size()
.unstack('label', fill_value=0))
num_locs = len(loc_label_counts)
# label_count: label => number of examples
# loc_count: label => number of locs containing that label
label_count = loc_label_counts.sum()
loc_count = (loc_label_counts > 0).sum()
best_score = np.inf # lower is better
best_splits = None
for _ in tqdm(range(10_000)):
# generate a new split
num_train = int(num_locs * (train_frac + np.random.uniform(-.03, .03)))
if test_frac > 0:
num_val = int(num_locs * (val_frac + np.random.uniform(-.03, .03)))
else:
num_val = num_locs - num_train
permuted_locs = loc_label_counts.index[np.random.permutation(num_locs)]
split_to_locs = {'train': permuted_locs[:num_train],
'val': permuted_locs[num_train:num_train + num_val]}
if test_frac > 0:
split_to_locs['test'] = permuted_locs[num_train + num_val:]
# score the split
score = 0.
for split, locs in split_to_locs.items():
split_df = loc_label_counts.loc[locs]
target = targets[split]
# SSE for # of images per label (with 2x weight)
crop_frac = split_df.sum() / label_count
score += 2 * ((crop_frac - target) ** 2).sum()
# SSE for # of locs per label
loc_frac = (split_df > 0).sum() / loc_count
score += ((loc_frac - target) ** 2).sum()
if score < best_score:
tqdm.write(f'New lowest score: {score}')
best_score = score
best_splits = split_to_locs
assert best_splits is not None
split_to_locs = {
s: sorted(locs.map(lambda x: tuple(x.split('/', maxsplit=1))))
for s, locs in best_splits.items()
}
if test_split is not None:
split_to_locs['test'] = test_split
return split_to_locs
| 5,348,740 |
def ha_close(close,high,low,open, n=2, fillna=False):
"""Relative Strength Index (RSI)
Compares the magnitude of recent gains and losses over a specified time
period to measure speed and change of price movements of a security. It is
primarily used to attempt to identify overbought or oversold conditions in
the trading of an asset.
https://www.investopedia.com/terms/r/rsi.asp
Args:
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
    indicator = Heikin_Ashi(close=df[close], high=df[high], low=df[low], open=df[open], n=n, fillna=fillna)
return indicator.ha_close()
| 5,348,741 |
def PLUGIN_ENTRY():
"""
Required plugin entry point for IDAPython Plugins.
"""
return funcref_t()
| 5,348,742 |
def chao1_var_no_doubletons(singles, chao1):
"""Calculates chao1 variance in absence of doubletons.
From EstimateS manual, equation 7.
chao1 is the estimate of the mean of Chao1 from the same dataset.
"""
s = float(singles)
return s*(s-1)/2 + s*(2*s-1)**2/4 - s**4/(4*chao1)
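# A worked numeric example of the formula above: with 5 singletons and a Chao1 estimate
# of 10, the variance is 5*4/2 + 5*9**2/4 - 5**4/(4*10) = 95.625.
print(chao1_var_no_doubletons(5, 10))  # 95.625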
| 5,348,743 |
def test_join_query_forward_by_series(columns, dtstart, delta, N):
"""Read data in forward direction"""
begin = dtstart
end = dtstart + delta*(N + 1)
timedelta = end - begin
query_params = {
"output": { "format": "csv" },
"order-by": "series"
}
query = att.make_join_query(columns, begin, end, **query_params)
queryurl = "http://{0}:{1}/api/query".format(HOST, HTTPPORT)
response = urlopen(queryurl, json.dumps(query))
exp_ts = begin
exp_value = 0
iterations = 0
expected_tags = [
"tag2=B",
"tag2=C",
"tag2=D",
]
bsize = count_elements("col1", "tag2", "B", begin, end)
csize = count_elements("col1", "tag2", "C", begin, end)
dsize = count_elements("col1", "tag2", "D", begin, end)
series_sizes = [
bsize,
bsize + csize,
bsize + csize + dsize,
]
nseries = len(expected_tags)
print("Test #3 - read forward, order by series")
prev_tag = None
reset_ix = 0
for line in response:
try:
columns = line.split(',')
tagline = columns[0].strip()
timestamp = att.parse_timestamp(columns[1].strip())
values = [float(it.strip()) for it in columns[2:]]
tagix = 0
while iterations >= series_sizes[tagix]:
tagix += 1
exp_tags = expected_tags[tagix]
if prev_tag != tagline:
exp_ts = begin + delta*reset_ix
exp_value = reset_ix
prev_tag = tagline
reset_ix += 1
for value in values:
att.check_values(exp_tags, tagline, 'ENDS', exp_ts, timestamp, exp_value*1.0, value, iterations)
exp_ts += nseries*delta
exp_value += nseries
iterations += 1
except:
print("Error at line: {0}".format(line))
raise
# Check that we received all values
if iterations != N:
raise ValueError("Expect {0} data points, get {1} data points".format(N, iterations))
print("Test #3 - passed")
| 5,348,744 |
def inBarrel(chain, index):
"""
Establish if the outer hit of a muon is in the barrel region.
"""
if abs(chain.muon_outerPositionz[index]) < 108:
return True
| 5,348,745 |
def test_image_display(argv):
"""Test image display on client machine.
Usage: python client_service_test.py [host:port] [image file path]
host: IP address of client machine.
port: gRPC service port. (see _CLIENT_SERVICE_GRPC_PORT in main.py)
image file path: a local path to an image file.
"""
target = argv[1]
image_path = argv[2]
grpc_channel = grpc.insecure_channel(target)
stub = client_pb2_grpc.RemoteServiceStub(grpc_channel)
stub.DisplayImage(client_pb2.Image(image_path=image_path))
raw_input('Press ENTER to continue.')
stub.DisplayOff(empty_pb2.Empty())
| 5,348,746 |
def load_pretrained_embeddings(pretrained_fname: str) -> np.array:
"""
Load float matrix from one file
"""
logging.log(logging.INFO, "Loading pre-trained embedding file: %s" % pretrained_fname)
# TODO: np.loadtxt refuses to work for some reason
# pretrained_embeddings = np.loadtxt(self.args.word_embedding_file, usecols=range(1, word_embedding_size+1))
pretrained_embeddings = []
with open(pretrained_fname, 'r') as f:
for line in f:
embedding = [float(s) for s in line.split()[1:]]
pretrained_embeddings.append(embedding)
pretrained_embeddings = np.array(pretrained_embeddings)
pretrained_embeddings /= np.std(pretrained_embeddings)
return pretrained_embeddings
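# A minimal round-trip check using a tiny temporary file in the whitespace-separated
# "word v1 v2 ..." format that the parser above expects (logging and numpy are assumed
# to be imported at module level, as the function itself uses them).
if __name__ == "__main__":
    import os
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".vec", delete=False) as tmp:
        tmp.write("cat 1.0 2.0\ndog 3.0 4.0\n")
    emb = load_pretrained_embeddings(tmp.name)
    print(emb.shape)  # (2, 2); values are scaled by the global standard deviation
    os.remove(tmp.name)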
| 5,348,747 |
def text(message: Text,
default: Text = "",
validate: Union[Validator,
Callable[[Text], bool],
None] = None, # noqa
qmark: Text = DEFAULT_QUESTION_PREFIX,
style: Optional[Style] = None,
path_autocomplete=False,
exec_autocomplete=False,
custom_autocomplete=None,
** kwargs: Any) -> Question:
"""Prompt the user to enter a free text message.
This question type can be used to prompt the user for some text input.
Args:
message: Question text
default: Default value will be returned if the user just hits
enter.
validate: Require the entered value to pass a validation. The
value can not be submited until the validator accepts
it (e.g. to check minimum password length).
This can either be a function accepting the input and
returning a boolean, or an class reference to a
subclass of the prompt toolkit Validator class.
qmark: Question prefix displayed in front of the question.
By default this is a `?`
style: A custom color and style for the question parts. You can
configure colors as well as font types for different elements.
Returns:
Question: Question instance, ready to be prompted (using `.ask()`).
"""
merged_style = merge_styles([DEFAULT_STYLE, style])
validator = build_validator(validate)
def get_prompt_tokens():
return [("class:qmark", qmark),
("class:question", ' {} '.format(message))]
promptArgs = dict({
'style': merged_style,
'validator': validator,
'complete_style': CompleteStyle.READLINE_LIKE,
})
if path_autocomplete:
promptArgs['completer'] = PathCompleter(
expanduser=True, delimiters=' \t\n;,')
elif exec_autocomplete:
promptArgs['completer'] = ExecutableCompleter(delimiters=' \t\n;,')
elif custom_autocomplete is not None and len(custom_autocomplete):
promptArgs['completer'] = WordCompleter(
custom_autocomplete, ignore_case=True, sentence=True)
p = PromptSession(get_prompt_tokens,
**promptArgs,
**kwargs)
p.default_buffer.reset(Document(default))
return Question(p.app)
| 5,348,748 |
def nav_bar(context):
"""
Define an active tab for the navigation bar
"""
home_active = ''
about_active = ''
detail_active = ''
list_active = ''
logout_active = ''
signup_active = ''
login_active = ''
friends_active = ''
snippets_active = ''
request = context['request']
url_name = resolve(request.path_info).url_name
if url_name == 'home':
home_active = 'active'
elif url_name == 'about':
about_active = 'active'
elif url_name == 'detail':
detail_active = 'active'
elif url_name == 'list':
list_active = 'active'
elif url_name == 'friends':
friends_active = 'active'
elif url_name == 'account_logout':
logout_active = 'active'
elif url_name == 'account_signup':
signup_active = 'active'
elif url_name == 'account_login':
login_active = 'active'
elif url_name == 'snippets' or url_name == 'snippet':
snippets_active = 'active'
return {
'request': request,
'home_active': home_active,
'about_active': about_active,
'detail_active': detail_active,
'list_active': list_active,
'friends_active': friends_active,
'logout_active': logout_active,
'signup_active': signup_active,
'login_active': login_active,
'snippets_active': snippets_active,
}
| 5,348,749 |
def setup_models(basedir, name, lc=True):
"""
Setup model container for simulation
Parameters
----------
basedir : string
Base directory
name : string
Name of source component
Returns
-------
models : `~gammalib.GModels()`
Model container
"""
# Initialise model container
models = gammalib.GModels()
# Extract binary component
binaries = gammalib.GModels(basedir+'/1dc/models/model_galactic_binaries.xml')
binary = binaries[name]
# Optionally remove lightcurve
if not lc:
binary.temporal(gammalib.GModelTemporalConst())
# Append binary to model container
models.append(binary)
# Append background model to container
models.extend(gammalib.GModels(basedir+'/1dc/models/model_bkg.xml'))
# Return model container
return models
| 5,348,750 |
def create_csm(image):
"""
Given an image file create a Community Sensor Model.
Parameters
----------
image : str
The image filename to create a CSM for
Returns
-------
model : object
A CSM sensor model (or None if no associated model is available.)
"""
isd = csmapi.Isd(image)
plugins = csmapi.Plugin.getList()
for plugin in plugins:
num_models = plugin.getNumModels()
for model_index in range(num_models):
model_name = plugin.getModelName(model_index)
if plugin.canModelBeConstructedFromISD(isd, model_name):
return plugin.constructModelFromISD(isd, model_name)
| 5,348,751 |
def walk(obj, path='', skiphidden=True):
"""Returns a recursive iterator over all Nodes starting from
findnode(obj, path).
If skiphidden is True (the default) then structure branches starting with
an underscore will be ignored.
"""
node = findnode(obj, path)
return walknode(node, skiphidden)
| 5,348,752 |
def setThermalMode(host, args, session):
"""
Set thermal control mode
@param host: string, the hostname or IP address of the bmc
@param args: contains additional arguments used for setting the thermal
control mode
@param session: the active session to use
@param args.zone: the zone to set the mode on
@param args.mode: the mode to enable
@return: Session object
"""
url = "https://" + host + "/xyz/openbmc_project/control/thermal/" + \
args.zone + "/attr/Current"
# Check args.mode against supported modes using `getThermalMode` output
modes = getThermalMode(host, args, session)
modes = os.linesep.join([m for m in modes.splitlines() if m])
modes = modes.replace("\n", ";").strip()
modesDict = dict(m.split(': ') for m in modes.split(';'))
sModes = ''.join(s for s in modesDict['Supported Modes'] if s not in '[ ]')
if args.mode.casefold() not in \
(m.casefold() for m in sModes.split(',')) or not args.mode:
result = ("Unsupported mode('" + args.mode + "') given, " +
"select a supported mode: \n" +
getThermalMode(host, args, session))
return result
data = '{"data":"' + args.mode + '"}'
try:
res = session.get(url, headers=jsonHeader, verify=False, timeout=30)
except(requests.exceptions.Timeout):
return(connectionErrHandler(args.json, "Timeout", None))
except(requests.exceptions.ConnectionError) as err:
return connectionErrHandler(args.json, "ConnectionError", err)
except(requests.exceptions.RequestException) as err:
return connectionErrHandler(args.json, "RequestException", err)
if (data and res.status_code != 404):
try:
res = session.put(url, headers=jsonHeader,
data=data, verify=False,
timeout=30)
except(requests.exceptions.Timeout):
return(connectionErrHandler(args.json, "Timeout", None))
except(requests.exceptions.ConnectionError) as err:
return connectionErrHandler(args.json, "ConnectionError", err)
except(requests.exceptions.RequestException) as err:
return connectionErrHandler(args.json, "RequestException", err)
if res.status_code == 403:
return "The specified thermal control zone(" + args.zone + ")" + \
" does not exist"
return res.text
else:
return "Setting thermal control mode(" + args.mode + ")" + \
" not supported or operation not available"
| 5,348,753 |
def odl():
"""操作数据层"""
pass
| 5,348,754 |
def to_string(class_name):
"""
Magic method that is used by the Metaclass created for Itop object.
"""
string = "%s : { " % type(class_name)
    for attribute, value in class_name.__dict__.items():
string += "%s : %s, " % (attribute, value)
string += "}"
return string
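# A small usage sketch with a stand-in class (the real Itop wrapper classes are not part
# of this snippet); instance attributes are read from __dict__.
class _DemoItopObject:
    def __init__(self):
        self.name = "srv01"
        self.status = "active"

print(to_string(_DemoItopObject()))
# e.g. <class '__main__._DemoItopObject'> : { name : srv01, status : active, }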
| 5,348,755 |
def mnist_10K_cluster(dataset_dir: Path) -> bool:
"""
Abstract:
The MNIST database of handwritten digits with 784 features.
It can be split in a training set of the first 60,000 examples,
and a test set of 10,000 examples
Source:
Yann LeCun, Corinna Cortes, Christopher J.C. Burges
http://yann.lecun.com/exdb/mnist/
Clustering task. n_classes = 10.
mnist x clustering dataset (10000, 785)
"""
dataset_name = 'mnist_10K_cluster'
os.makedirs(dataset_dir, exist_ok=True)
nrows_train, dtype = 10000, np.float32
X, y = fetch_openml(name='mnist_784', return_X_y=True,
as_frame=True, data_home=dataset_dir)
y = y.astype(int)
logging.info(f'{dataset_name} is loaded, started parsing...')
x_train = np.ascontiguousarray(X.values[:nrows_train, 1:], dtype=dtype)
y_train = np.ascontiguousarray(y.values[:nrows_train], dtype=dtype)
filename = f'{dataset_name}.npy'
data = np.concatenate((x_train, y_train[:, None]), axis=1)
np.save(os.path.join(dataset_dir, filename), data)
logging.info(f'dataset {dataset_name} is ready.')
return True
| 5,348,756 |
def molefraction_2_pptv(n):
"""Convert mixing ratio units from mole fraction to parts per
thousand by volume (pptv)
INPUTS
n: mole fraction (moles per mole air)
OUTPUTS
q: mixing ratio in parts per trillion by volume (pptv)
"""
# - start with COS mixing ratio n as mole fraction:
# (n mol COS) / (mol air)
# convert to mixing ratio as volumetric fraction
# = (n * 6.023 * 10^23 molecules COS) / (6.023 * 10^23 molecules air)
# = (q molecules COS) / (1000 molecules air)
# q is mixing ratio in pptv, n is mole fraction
# solve for q --> 1000n = q
# therefore pptv = 1000 * mole fraction
q = 1e3 * n
return(q)
| 5,348,757 |
def is_valid_y(y, warning=False, throw=False, name=None):
"""
"""
y = np.asarray(y, order='c')
valid = True
try:
if len(y.shape) != 1:
if name:
raise ValueError(('Condensed distance matrix \'%s\' must '
'have shape=1 (i.e. be one-dimensional).')
% name)
else:
raise ValueError('Condensed distance matrix must have shape=1 '
'(i.e. be one-dimensional).')
n = y.shape[0]
d = int(np.ceil(np.sqrt(n * 2)))
if (d * (d - 1) / 2) != n:
if name:
raise ValueError(('Length n of condensed distance matrix '
'\'%s\' must be a binomial coefficient, i.e.'
'there must be a k such that '
'(k \\choose 2)=n)!') % name)
else:
raise ValueError('Length n of condensed distance matrix must '
'be a binomial coefficient, i.e. there must '
'be a k such that (k \\choose 2)=n)!')
except Exception as e:
if throw:
raise
if warning:
warnings.warn(str(e))
valid = False
return valid
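# Two quick checks: length 3 is a valid condensed form (3 = 3 choose 2), length 4 is not.
print(is_valid_y(np.array([1.0, 2.0, 3.0])))        # True
print(is_valid_y(np.array([1.0, 2.0, 3.0, 4.0])))   # False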
| 5,348,758 |
def yaml_parse(yamlstr):
"""Parse a yaml string"""
try:
# PyYAML doesn't support json as well as it should, so if the input
# is actually just json it is better to parse it with the standard
# json parser.
return json.loads(yamlstr)
except ValueError:
yaml.SafeLoader.add_multi_constructor(
"!", intrinsics_multi_constructor)
return yaml.safe_load(yamlstr)
| 5,348,759 |
def csl_item_from_pubmed_article(article):
"""
article is a PubmedArticle xml element tree
https://github.com/citation-style-language/schema/blob/master/csl-data.json
"""
csl_item = collections.OrderedDict()
if not article.find("MedlineCitation/Article"):
raise NotImplementedError("Unsupported PubMed record: no <Article> element")
title = article.findtext("MedlineCitation/Article/ArticleTitle")
if title:
csl_item["title"] = title
volume = article.findtext("MedlineCitation/Article/Journal/JournalIssue/Volume")
if volume:
csl_item["volume"] = volume
issue = article.findtext("MedlineCitation/Article/Journal/JournalIssue/Issue")
if issue:
csl_item["issue"] = issue
page = article.findtext("MedlineCitation/Article/Pagination/MedlinePgn")
if page:
csl_item["page"] = page
journal = article.findtext("MedlineCitation/Article/Journal/Title")
if journal:
csl_item["container-title"] = journal
journal_short = article.findtext("MedlineCitation/Article/Journal/ISOAbbreviation")
if journal_short:
csl_item["container-title-short"] = journal_short
issn = article.findtext("MedlineCitation/Article/Journal/ISSN")
if issn:
csl_item["ISSN"] = issn
date_parts = extract_publication_date_parts(article)
if date_parts:
csl_item["issued"] = {"date-parts": [date_parts]}
authors_csl = list()
authors = article.findall("MedlineCitation/Article/AuthorList/Author")
for author in authors:
author_csl = collections.OrderedDict()
given = author.findtext("ForeName")
if given:
author_csl["given"] = given
family = author.findtext("LastName")
if family:
author_csl["family"] = family
authors_csl.append(author_csl)
if authors_csl:
csl_item["author"] = authors_csl
for id_type, key in ("pubmed", "PMID"), ("pmc", "PMCID"), ("doi", "DOI"):
xpath = f"PubmedData/ArticleIdList/ArticleId[@IdType='{id_type}']"
value = article.findtext(xpath)
if value:
csl_item[key] = value.lower() if key == "DOI" else value
abstract = article.findtext("MedlineCitation/Article/Abstract/AbstractText")
if abstract:
csl_item["abstract"] = abstract
csl_item["URL"] = f"https://www.ncbi.nlm.nih.gov/pubmed/{csl_item['PMID']}"
csl_item["type"] = "article-journal"
return csl_item
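
# A heavily trimmed usage sketch. The XML below is a hypothetical PubmedArticle
# fragment, and the call assumes csl_item_from_pubmed_article() and the
# extract_publication_date_parts() helper it relies on are importable from the
# same module (and that the helper tolerates a record without an explicit date).
import xml.etree.ElementTree as ET

example_record = """
<PubmedArticle>
  <MedlineCitation>
    <Article>
      <ArticleTitle>An example article</ArticleTitle>
      <Journal><Title>Journal of Examples</Title></Journal>
      <AuthorList>
        <Author><ForeName>Ada</ForeName><LastName>Lovelace</LastName></Author>
      </AuthorList>
    </Article>
  </MedlineCitation>
  <PubmedData>
    <ArticleIdList>
      <ArticleId IdType="pubmed">12345678</ArticleId>
    </ArticleIdList>
  </PubmedData>
</PubmedArticle>
"""

csl = csl_item_from_pubmed_article(ET.fromstring(example_record))
# Expected keys include "title", "container-title", "author", "PMID" and "URL".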
| 5,348,760 |
def image(resource: celtypes.MapType) -> celtypes.Value:
"""
Reach into C7N to get the image details for this EC2 or ASG resource.
Minimally, the creation date is transformed into a CEL timestamp.
    We may want to slightly generalize this to json_to_cel() the entire Image object.
The following may be usable, but it seems too complex:
::
C7N.filter.prefetch_instance_images(C7N.policy.resources)
image = C7N.filter.get_instance_image(resource["ImageId"])
return json_to_cel(image)
.. todo:: Refactor C7N
Provide the :py:class:`InstanceImageBase` mixin in a :py:class:`CELFilter` class.
We want to have the image details in the new :py:class:`CELFilter` instance.
"""
# Assuming the :py:class:`CELFilter` class has this method extracted from the legacy filter.
    # Requires that the policy already did this: C7N.filter.prefetch_instance_images([resource]) to
# populate cache.
image = C7N.filter.get_instance_image(resource)
if image:
creation_date = image["CreationDate"]
image_name = image["Name"]
else:
creation_date = "2000-01-01T01:01:01.000Z"
image_name = ""
return json_to_cel(
{"CreationDate": dateutil.parser.isoparse(creation_date), "Name": image_name}
)
| 5,348,761 |
def unphase_uvw(ra, dec, uvw):
"""
Calculate unphased uvws/positions from phased ones in an icrs or gcrs frame.
This code expects phased uvws or positions in the same frame that ra/dec
are in (e.g. icrs or gcrs) and returns unphased ones in the same frame.
Parameters
----------
ra : float
Right ascension of phase center.
dec : float
Declination of phase center.
uvw : ndarray of float
Phased uvws or positions relative to the array center,
shape (Nlocs, 3).
Returns
-------
unphased_uvws : ndarray of float
Unphased uvws or positions relative to the array center,
shape (Nlocs, 3).
"""
if uvw.ndim == 1:
uvw = uvw[np.newaxis, :]
return _utils._unphase_uvw(
np.float64(ra), np.float64(dec), np.ascontiguousarray(uvw, dtype=np.float64),
)
| 5,348,762 |
def sender_msg_to_array(msg):
"""
    Parse a list argument as returned by the L{array_to_msg} function of this
    module, prefixed with the sender UUID (bytes) and the data name, and return
    the pieces describing the numpy array contained in the message.
    @param msg: a list as returned by L{array_to_msg}, with the sender UUID
        and data name prepended
    @rtype: tuple
    @return: (uuid, data_name, dtype, shape, bin_msg) for the array contained
        in the message
    """
[_dtype, _shape, _bin_msg] = msg_to_array(msg[2:])
_uuid = uuid.UUID(bytes=msg[0])
_data_name = msg[1].decode()
return (_uuid, _data_name, _dtype, _shape, _bin_msg)
| 5,348,763 |
def find_shortest_path(node):
"""Finds shortest path from node to it's neighbors"""
next_node,next_min_cost=node.get_min_cost_neighbor()
if str(next_node)!=str(node):
return find_shortest_path(next_node)
else:
return node
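
# A hypothetical sketch of the node interface the greedy walk above expects: each
# node provides get_min_cost_neighbor() returning a (node, cost) pair, and nodes
# are compared via str(). The Node class below is an assumption for illustration.
class Node:
    def __init__(self, name, min_cost_neighbor=None, min_cost=0):
        self.name = name
        self._neighbor = min_cost_neighbor
        self._cost = min_cost

    def get_min_cost_neighbor(self):
        # A node with no cheaper neighbour returns itself, which stops the walk.
        if self._neighbor is None:
            return self, 0
        return self._neighbor, self._cost

    def __str__(self):
        return self.name

c = Node("C")                            # terminal node: its cheapest "neighbour" is itself
b = Node("B", min_cost_neighbor=c, min_cost=2)
a = Node("A", min_cost_neighbor=b, min_cost=1)
print(find_shortest_path(a))             # prints C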
| 5,348,764 |
def cast_array_to_feature(array: pa.Array, feature: "FeatureType", allow_number_to_str=True):
"""Cast an array to the arrow type that corresponds to the requested feature type.
For custom features like Audio or Image, it takes into account the "cast_storage" methods
they defined to enable casting from other arrow types.
Args:
array (pa.Array): the PyArrow array to cast
feature (FeatureType): the target feature type
allow_number_to_str (bool, default ``True``): Whether to allow casting numbers to strings.
Defaults to True.
Raises:
pa.ArrowInvalidError: if the arrow data casting fails
        TypeError: if the target type is not supported, e.g.
            - if a field is missing
            - if casting from numbers to strings and allow_number_to_str is False
Returns:
pa.Array: the casted array
"""
from .features import Sequence, get_nested_type
_c = partial(cast_array_to_feature, allow_number_to_str=allow_number_to_str)
if isinstance(array, pa.ExtensionArray):
array = array.storage
if hasattr(feature, "cast_storage"):
return feature.cast_storage(array)
elif pa.types.is_struct(array.type):
# feature must be a dict or Sequence(subfeatures_dict)
if isinstance(feature, Sequence) and isinstance(feature.feature, dict):
feature = {
name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items()
}
if isinstance(feature, dict) and set(field.name for field in array.type) == set(feature):
arrays = [_c(array.field(name), subfeature) for name, subfeature in feature.items()]
return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null())
elif pa.types.is_list(array.type):
# feature must be either [subfeature] or Sequence(subfeature)
if isinstance(feature, list):
return pa.ListArray.from_arrays(array.offsets, _c(array.values, feature[0]))
elif isinstance(feature, Sequence):
if feature.length > -1:
if feature.length * len(array) == len(array.values):
return pa.FixedSizeListArray.from_arrays(_c(array.values, feature.feature), feature.length)
else:
return pa.ListArray.from_arrays(array.offsets, _c(array.values, feature.feature))
elif pa.types.is_fixed_size_list(array.type):
# feature must be either [subfeature] or Sequence(subfeature)
if isinstance(feature, list):
return pa.ListArray.from_arrays(array.offsets, _c(array.values, feature[0]))
elif isinstance(feature, Sequence):
if feature.length > -1:
if feature.length * len(array) == len(array.values):
return pa.FixedSizeListArray.from_arrays(_c(array.values, feature.feature), feature.length)
else:
offsets_arr = pa.array(range(len(array) + 1), pa.int32())
return pa.ListArray.from_arrays(offsets_arr, _c(array.values, feature.feature))
if pa.types.is_null(array.type):
return array_cast(array, get_nested_type(feature), allow_number_to_str=allow_number_to_str)
elif not isinstance(feature, (Sequence, dict, list, tuple)):
return array_cast(array, feature(), allow_number_to_str=allow_number_to_str)
raise TypeError(f"Couldn't cast array of type\n{array.type}\nto\n{feature}")
| 5,348,765 |
def check_key_match(config_name):
"""
Check key matches
@param config_name: Name of WG interface
@type config_name: str
@return: Return dictionary with status
"""
data = request.get_json()
private_key = data['private_key']
public_key = data['public_key']
return jsonify(f_check_key_match(private_key, public_key, config_name))
| 5,348,766 |
def main(argv=None):
"""Command line interface."""
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description='Converts Jupyter Notebooks to Atlassian Confluence pages using nbconvert',
epilog="Collects credentials from the following locations:\n"
"1. CONFLUENCE_USERNAME and CONFLUENCE_PASSWORD environment variables\n"
"2. ~/.nbconflux file in the format username:password\n"
"3. User prompts")
parser.add_argument('notebook', type=str, help='Path to local notebook (ipynb)')
parser.add_argument('url', type=str, help='URL of Confluence page to update')
parser.add_argument('--exclude-toc', action='store_true', help='Do not generate a table of contents')
parser.add_argument('--exclude-ipynb', action='store_true', help='Do not attach the notebook to the page')
parser.add_argument('--exclude-style', action='store_true', help='Do not include the Jupyter base stylesheet')
parser.add_argument('--include-mathjax', action='store_true', help='Enable MathJax on the page')
parser.add_argument('--extra-labels', nargs='+', type=str, help='Additional labels to add to the page')
args = parser.parse_args(argv or sys.argv[1:])
username = os.getenv('CONFLUENCE_USERNAME')
password = os.getenv('CONFLUENCE_PASSWORD')
    cookies_env = os.getenv('CONFLUENCE_COOKIES')
    cookies = json.loads(cookies_env) if cookies_env else None
cfg = os.path.expanduser('~/.nbconflux')
# Prefer credentials in environment variables
if cookies:
print('Using cookies from environment variables {}'.format(json.dumps(cookies)))
if username and password:
print('Using credentials for {} from environment variables'.format(username))
elif os.path.isfile(cfg):
# Fallback on credentials in a well known file location
with open(cfg) as f:
segs = f.read().strip().split(':', 1)
if len(segs) == 2:
username = segs[0]
password = segs[1]
print('Using credentials for {} from configuration file'.format(username))
# Prompt the user for missing credentials
if username is None:
current = getpass.getuser()
current = current[2:] if current.startswith('p-') else current
username = input('Confluence username ({}): '.format(current))
# Use the current username if the user doesn't enter anything
if not username.strip():
username = current
if password is None:
password = getpass.getpass('Confluence password: ')
notebook_to_page(args.notebook, args.url, username, password,
generate_toc=not args.exclude_toc, attach_ipynb=not args.exclude_ipynb,
enable_style=not args.exclude_style, enable_mathjax=args.include_mathjax,
extra_labels=args.extra_labels, cookies=cookies)
| 5,348,767 |
def delete_user_group(request, group_id, *args, **kwargs):
"""This one is not really deleting the group object, rather setting the active status
to False (delete) which can be later restored (undelete) )"""
try:
hydroshare.set_group_active_status(request.user, group_id, False)
messages.success(request, "Group delete was successful.")
except PermissionDenied:
messages.error(request, "Group delete errors: You don't have permission to delete"
" this group.")
return HttpResponseRedirect(request.META['HTTP_REFERER'])
| 5,348,768 |
def _generate_room_square(dungen: DungeonGenerator, room_data: RoomConceptData) -> RoomConcept:
"""
Generate a square-shaped room.
"""
map_width = dungen.map_data.width
map_height = dungen.map_data.height
# ensure not bigger than the map
room_width = min(dungen.rng.randint(room_data.min_width, room_data.max_width), map_width)
room_height = min(dungen.rng.randint(room_data.min_height, room_data.max_height), map_height)
# populate area with floor categories
tile_categories: List[List[TileCategoryType]] = []
for x in range(room_width):
tile_categories.append([])
for y in range(room_height):
tile_categories[x].append(TileCategory.FLOOR)
# convert to room
room = RoomConcept(tile_categories=tile_categories, design="square", key=room_data.key)
return room
| 5,348,769 |
def query(request):
"""
响应前端返回的数据并进行相应的推荐
:param request:
:return:
"""
content = {}
if request.method=='POST':
datatype = json.loads(request.body.decode('utf-8')) #得到前端返回的数据
province_all = datatype['all']
current_loc = datatype['currentLocation'] # 得到当前的省份,以这份省份为基准点算出经纬度
if province_all == 'true': #如果判断是全国的话,就是会直接将全国的数据返回
provinces_loc = province #
provinces_loc_sorted = province
else:
provinces_loc = datatype['regions'] # 得到所需要的大学
distance = []
for i in range(len(provinces_loc)):
distance.append(cal_distance(current_loc, provinces_loc[i]))
provinces_loc_sorted = [x['pos'] for y, x in sorted(zip(distance, provinces_loc))] # 排序后的省份,按照距离来进行排序
colleges = []
rank = (datatype['rank']) #获得排名
category = datatype['category'] #获得类别
if len(rank) == 0 or len(category) == 0 or len(provinces_loc) == 0 :#如果有一个是空则返回500状态码
return JsonResponse({
'status_code':500
})
else:
rank = float(rank)#将rank变成float型来方便判断
temp=[]
data={}
col_majors = {}
if category=='理科': #判断是否为理科
pdir = os.path.dirname(os.getcwd()) # 获取父目录
file = os.path.join(pdir, 'python14\江苏理科字典.pt')
predicted_data = torch.load(file) #导入理科的预测的排名
for province_loc in provinces_loc_sorted:
colleges_carrier = models.Colleges.objects.filter(provinceID=province.index(province_loc))#找到学校
for college in colleges_carrier:
major_carrier = models.Majors.objects.filter(provinceID=4,collegeID=college.collegeID, categoryID=2)#找到专业
if len(major_carrier)!= 0:
majors_ranks = predicted_data[college.collegeName]#从预测数据中找到rank
for major_rank in majors_ranks:
possibility = cal_possibility(float(rank), float(major_rank['rank']),
float(major_rank['cov'])) # 计算相应的概率
if possibility>=0.2:#设置了一个推荐的阈值来控制推荐数量
data['major'] = major_rank['major']
data['rank'] = major_rank['rank']
data['possibility'] = round(possibility*5,3)
temp.append(data.copy())
if len(temp) !=0:#如果不为空则可以代表有推荐的学校和专业
col_majors[college.collegeName]=temp[:]
colleges.append(college.collegeName)
temp.clear()
if category == '文科':
pdir = os.path.dirname(os.getcwd()) # 获取父目录
file = os.path.join(pdir, 'python14\江苏文科字典.pt')
predicted_data = torch.load(file)
for province_loc in provinces_loc_sorted:
colleges_carrier = models.Colleges.objects.filter(provinceID=province.index(province_loc))
for college in colleges_carrier:
major_carrier = models.Majors.objects.filter(provinceID=4,collegeID=college.collegeID,categoryID=1)
if len(major_carrier)!=0:
majors_ranks = predicted_data[college.collegeName]
for major_rank in majors_ranks:
possibility = cal_possibility(float(rank), float(major_rank['rank']),
float(major_rank['cov']))
if possibility>=0.2:
data['major'] = major_rank['major']
data['rank'] = major_rank['rank']
data['possibility'] = round(possibility*5,3)
temp.append(data.copy())
if len(temp) != 0:
col_majors[college.collegeName] = temp[:]
colleges.append(college.collegeName)
temp.clear()
content={
'colleges':colleges,
'status_code':200,
'col_majors':col_majors
}
return JsonResponse(content)
| 5,348,770 |
def str_is_float(value):
"""Test if a string can be parsed into a float.
:returns: True or False
"""
try:
_ = float(value)
return True
except ValueError:
return False
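
# A quick demonstration, assuming the str_is_float function above is importable.
assert str_is_float("3.14")
assert str_is_float("-2e5")        # scientific notation parses as a float
assert not str_is_float("abc")
assert not str_is_float("")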
| 5,348,771 |
def get_from_cache(url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False):
#for bert-based-uncased, url is https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json
#also, cache_dir is the following: /Users/msanatkar/.cache/torch/transformers
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
#TRANSFORMERS_CACHE is equal to /Users/msanatkar/.cache/torch/transformers
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
#sys.version_info[0] returns the Python version
if sys.version_info[0] == 2 and not isinstance(cache_dir, str):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
#cache_dir is equal to /Users/msanatkar/.cache/torch/transformers
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
        #url for BERT starts with https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json so it doesn't satisfy this if condition
etag = s3_etag(url, proxies=proxies)
else:
try:
response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            #the head method makes a HEAD request to a webpage and returns only the HTTP headers
if response.status_code != 200:
#response code 200 refers to an OK response and no error
etag = None
else:
etag = response.headers.get("ETag")
#The ETag HTTP response header is an identifier for a specific version of a resource. It lets caches be more efficient and save bandwidth,
#as a web server does not need to resend a full response if the content has not changed
#ETage for bert-base-uncased is 74d4f96fdabdd865cbdbe905cd46c1f1
except (EnvironmentError, requests.exceptions.Timeout):
etag = None
if sys.version_info[0] == 2 and etag is not None:
etag = etag.decode('utf-8')
filename = url_to_filename(url, etag)
#etag for bert-base-uncased is 74d4f96fdabdd865cbdbe905cd46c1f1 and url is the following:
#https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json
    #filename will be a str that is the concatenation of the hash code of the url path and the hash code of the etag str
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
#cache_dir is equal to /Users/msanatkar/.cache/torch/transformers
#cache_path for bert-base-uncased is the following:
#/Users/msanatkar/.cache/torch/transformers/4dad0251492946e18ac39290fcfe91b89d370fee250efe9521476438fe8ca185.bf3b9ea126d8c0001ee8a1e8b92229871d06d36d8808208cc2449280da87785c
# If we don't have a connection (etag is None) and can't identify the file
# try to get the last downloaded one
if not os.path.exists(cache_path) and etag is None:
#here, in this if condition, we are saying if cache_path doesn't exist which means that we never downloaded this json config file in .cache before
        #and we do not have access to the internet (which is confirmed by etag being None), then we try to fall back to the latest downloaded one
matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*')
        #os.listdir will return all the files and directories in cache_dir which is /Users/msanatkar/.cache/torch/transformers
#in above, fnmatch returns a sublist of files returned by listdir that matches the hash-based filename corresponding to this config json file
matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files))
#in above, we only choose those files that do not end with ".json". It seems that for every encoder model, there exist two files in .cache
        #one of them is a json file which will be the json config file describing the architecture of that model and the other one does not end with
        #json and must contain the weights of the network
if matching_files:
cache_path = os.path.join(cache_dir, matching_files[-1])
if resume_download:
        #resume_download is for those cases where for some reason the downloading process of the files was interrupted before and here we want to resume the
#download instead of starting from scratch
incomplete_path = cache_path + '.incomplete'
@contextmanager
def _resumable_file_manager():
with open(incomplete_path,'a+b') as f:
yield f
os.remove(incomplete_path)
temp_file_manager = _resumable_file_manager
if os.path.exists(incomplete_path):
resume_size = os.stat(incomplete_path).st_size
else:
resume_size = 0
else:
temp_file_manager = tempfile.NamedTemporaryFile
#here, temp_file_manager will be a temporary file that later on when the download is complete can be moved to the actual cache folder
resume_size = 0
    #below, we download the config file either if we didn't download it before or if the option force_download is True. Note: we never enable
#force_download because we are not crazy!
if not os.path.exists(cache_path) or force_download:
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
logger.info("%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
#for huggingface files, they don't start with s3
if resume_download:
logger.warn('Warning: resumable downloads are not implemented for "s3://" urls')
s3_get(url, temp_file, proxies=proxies)
else:
#http_get downloads the file and writes its content into temp_file
http_get(url, temp_file, proxies=proxies, resume_size=resume_size)#resume_size will be zero if we didn't enable resume option
#here, url refer to a json config file .json
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
#flush method ensures that all the buffered data, are written into file
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
            #I believe cache_path here doesn't end with .json. In particular, if you look into .cache/torch/transformers, there are a bunch
            #of different resources, all of which have similar names hash(model_name).hash(url) with no .json suffix. Some of these files are
            #simply json config files of models and the others could be other resources like the weights files. The json files inside the cache folder
            #represent the url path of the resource as well as the etag version. Below, you can find how this json meta file is created!
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
output_string = json.dumps(meta)
if sys.version_info[0] == 2 and isinstance(output_string, str):
output_string = unicode(output_string, 'utf-8') # The beauty of python 2
meta_file.write(output_string)
logger.info("removing temp file %s", temp_file.name)
return cache_path
| 5,348,772 |
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Ruckus Unleashed from a config entry."""
try:
ruckus = await hass.async_add_executor_job(
Ruckus,
entry.data[CONF_HOST],
entry.data[CONF_USERNAME],
entry.data[CONF_PASSWORD],
)
except ConnectionError as error:
raise ConfigEntryNotReady from error
coordinator = RuckusUnleashedDataUpdateCoordinator(hass, ruckus=ruckus)
await coordinator.async_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
hass.data[DOMAIN][entry.entry_id] = {
COORDINATOR: coordinator,
UNDO_UPDATE_LISTENERS: [],
}
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
return True
| 5,348,773 |
def get_user_plugins_grouped(get_allowed_plugin_uids_func,
get_registered_plugins_grouped_func,
registry,
user,
sort_items=True):
"""Get user plugins grouped.
:param callable get_allowed_plugin_uids_func:
:param callable get_registered_plugins_grouped_func:
:param fobi.base.BaseRegistry registry: Subclass of
``fobi.base.BaseRegistry`` instance.
:param django.contrib.auth.models.User user:
:param bool sort_items:
:return dict:
"""
ensure_autodiscover()
if not RESTRICT_PLUGIN_ACCESS or getattr(user, 'is_superuser', False):
return get_registered_plugins_grouped_func()
registered_plugins = {}
allowed_plugin_uids = get_allowed_plugin_uids_func(user)
for uid, plugin in registry._registry.items():
if uid in allowed_plugin_uids:
if PY3:
plugin_name = force_text(plugin.name, encoding='utf-8')
plugin_group = force_text(plugin.group, encoding='utf-8')
else:
plugin_name = force_text(
plugin.name, encoding='utf-8'
).encode('utf-8')
plugin_group = force_text(
plugin.group, encoding='utf-8'
).encode('utf-8')
if plugin_group not in registered_plugins:
registered_plugins[plugin_group] = []
registered_plugins[plugin_group].append((uid, plugin_name))
if sort_items:
for key, prop in registered_plugins.items():
prop.sort()
return registered_plugins
| 5,348,774 |
def show_frames(frames, freq = 12):
""" This function receives a list of frames and plays them back at the given
frequency.
"""
for frame in frames:
cv2.imshow('frame',frame)
cv2.waitKey(round(1000/freq))
| 5,348,775 |
def setup_container_system_config(basedir, mountdir, dir_modes):
"""Create a minimal system configuration for use in a container.
@param basedir: The directory where the configuration files should be placed (bytes)
@param mountdir: The base directory of the mount hierarchy in the container (bytes).
@param dir_modes: All directory modes in the container.
"""
# If overlayfs is not used for /etc, we need additional bind mounts
# for files in /etc that we want to override, like /etc/passwd
symlinks_required = determine_directory_mode(dir_modes, b"/etc") != DIR_OVERLAY
etc = os.path.join(basedir, b"etc")
if not os.path.exists(etc):
os.mkdir(etc)
for file, content in CONTAINER_ETC_FILE_OVERRIDE.items():
# Create "basedir/etc/file"
util.write_file(content, etc, file)
if symlinks_required:
# Create bind mount to "mountdir/etc/file"
make_bind_mount(
os.path.join(etc, file),
os.path.join(mountdir, b"etc", file),
private=True,
)
os.symlink(b"/proc/self/mounts", os.path.join(etc, b"mtab"))
# Bind bounds for symlinks are not possible, so do nothing for "mountdir/etc/mtab".
# This is not a problem because most systems have the correct symlink anyway.
if not os.path.isdir(mountdir.decode() + CONTAINER_HOME):
logging.warning(
"Home directory in container should be %(h)s but this directory "
"cannot be created due to directory mode of parent directory. "
"It is recommended to use '--overlay-dir %(p)s' or '--hidden-dir %(p)s' "
"and overwrite directory modes for subdirectories where necessary.",
{"h": CONTAINER_HOME, "p": os.path.dirname(CONTAINER_HOME)},
)
| 5,348,776 |
def calc_randnm7(reg_dict, mlx75027):
"""
Calculate the RANDMN7 register value
Parameters
----------
reg_dict : dict
The dictionary that contains all the register information
mlx75027 : bool
Set to True if using the MLX75027 sensor, False
if using the MLX75026 sensor.
Returns
----------
randnm7 : int
The randnm7 register value
"""
# print("calc_randnm7()")
speed = calc_speed(reg_dict, mlx75027)
hmax = calc_hmax(reg_dict, mlx75027, speed=speed)
pretime_enabled = np.any(
reg_dict["Px_PREHEAT"][2] | reg_dict["Px_PREMIX"][2])
if pretime_enabled:
px_pretime = calc_pretime(reg_dict, mlx75027)
# As noted in 7.12. can be calculated as: 1070 + HMAX * FLOOR( ((Px_PRETIME(in us)−11.13) / HMAX )* 120), with Px_PRETIME >= 11.13
if px_pretime >= 11.13:
randnm7 = 1070 + hmax * np.floor(((px_pretime-11.13)/hmax) * 120)
else:
randnm7 = 1070
else:
randnm7 = 1070
return int(randnm7)
| 5,348,777 |
def get_orientation(pose, ori):
"""Generate an orientation vector from yaw/pitch/roll angles in radians."""
yaw, pitch, roll = pose
c1 = np.cos(-yaw)
s1 = np.sin(-yaw)
c2 = np.cos(-pitch)
s2 = np.sin(-pitch)
c3 = np.cos(-roll)
s3 = np.sin(-roll)
Ryaw = np.array([[c1, s1, 0], [-s1, c1, 0], [0, 0, 1]])
Rpitch = np.array([[c2, 0, -s2], [0, 1, 0], [s2, 0, c2]])
Rroll = np.array([[1, 0, 0], [0, c3, s3], [0, -s3, c3]])
R = np.dot(Ryaw, np.dot(Rpitch, Rroll))
n = np.dot(R, ori)
return n
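
# A numeric sanity check, assuming the get_orientation function above is importable.
# With the sign conventions used above, a pure yaw of 90 degrees rotates the
# reference direction [1, 0, 0] to approximately [0, 1, 0].
import numpy as np

pose = (np.pi / 2, 0.0, 0.0)      # yaw = 90 degrees, no pitch or roll
ori = np.array([1.0, 0.0, 0.0])   # reference orientation vector
print(np.round(get_orientation(pose, ori), 6))   # approximately [0. 1. 0.]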
| 5,348,778 |
def draw_deformation(source_image, grid, grid_size = 12):
"""
source_image: PIL image object
    grid: the sampling grid, shape (1, H, W, 2) with coordinates in [-1, 1]
    grid_size: the number of grid lines to draw in each direction
"""
im = copy.deepcopy(source_image)
d = ImageDraw.Draw(im)
H,W = source_image.size
dist =int(H/grid_size)
for i in range(grid_size):
step = int(dist*i)
d.line(list(zip((grid[0,step,:,0].numpy()+1)/2*H, (grid[0,step,:,1].numpy()+1)/2*H)),fill = 255,width=1)
d.line(list(zip((grid[0,:,step,0].numpy()+1)/2*H, (grid[0,:,step,1].numpy()+1)/2*H)),fill = 255,width=1)
return im
| 5,348,779 |
def dial_socket(port, host='localhost'):
    """
    Connect to the socket created by the
    server instance on the specified host and port
    """
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
return sock
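
# A usage sketch with a hypothetical port (9000 is a placeholder); a server must
# already be listening there, and the caller is responsible for closing the socket.
import contextlib

with contextlib.closing(dial_socket(9000)) as sock:
    sock.sendall(b"ping\n")
    reply = sock.recv(1024)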
| 5,348,780 |
def test_tamper_mutate_compress(logger):
"""
Tests that compress is handled right if its enabled
"""
backup = copy.deepcopy(actions.tamper.ACTIVATED_PRIMITIVES)
actions.tamper.ACTIVATED_PRIMITIVES = ["compress"]
try:
tamper = actions.tamper.TamperAction(None)
assert tamper.parse("TCP:flags:corrupt", logger)
tamper._mutate_tamper_type()
assert tamper.tamper_type == "compress"
assert tamper.tamper_proto_str == "DNS"
assert tamper.field == "qd"
packet = layers.packet.Packet(IP()/TCP()/DNS()/DNSQR())
packet2 = tamper.tamper(packet, logger)
assert packet2 == packet
finally:
actions.tamper.ACTIVATED_PRIMITIVES = backup
| 5,348,781 |
def analyseClassificationCoefficients(X: pd.DataFrame,
y: pd.Series,
D_learning_results: pd.DataFrame,
outputPath: str) -> dict:
"""
This function evaluates the importance coefficients of the input features of a model
Args:
X (pd.DataFrame): Input pandas dataFrame.
        y (pd.Series): Input pandas series with the target label.
        D_learning_results (pd.DataFrame): Results dataframe obtained from a grid search (analytics.learning.grids).
outputPath (str): Output filename path to save the results.
Returns:
        dict: Dictionary of matplotlib figures produced, keyed by model name and plot type.
"""
output_figures = {}
# define the confusion matrix
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)
for index, row in D_learning_results.iterrows():
y_pred = row['MODEL'].predict(x_test)
cm = confusion_matrix(y_test, y_pred)
# plot the confusion matrix
fig = plt.figure(figsize=(9, 9))
ax = fig.gca()
sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square=True, cmap='Blues_r')
plt.ylabel('Actual label')
plt.xlabel('Predicted label')
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]))
ax.set_xticklabels(labels=row['MODEL'].classes_, rotation=45)
ax.set_yticklabels(labels=row['MODEL'].classes_, rotation=45)
all_sample_title = 'Accuracy Score: {0}'.format(np.round(row['SCORE_TEST'], 2))
plt.title(f"Model: {row['MODEL_NAME']}, {all_sample_title}", size=15)
output_figures[f"{row['MODEL_NAME']}_confusionMatrix"] = fig
# analyse output for QDA
if row['MODEL_NAME'] == 'quadratic_discriminant_analysis':
# Print the mean for each class
# create a dataframe with one row for each feature of X
features_list = list(X.columns)
            # extract coefficients (resume from here)
fig = plt.figure(figsize=(12, 10))
means = row['MODEL'].means_
means_scaled = scale(means)
plt.imshow(means_scaled, cmap='bwr')
ax = fig.gca()
# set xticks
ax.set_xticks(range(0, len(features_list)))
ax.set_xticklabels(features_list, rotation=90)
# set yticks
ax.set_yticks(range(0, len(row['MODEL'].classes_)))
ax.set_yticklabels(row['MODEL'].classes_, rotation=45)
plt.colorbar()
plt.xlabel('Feature name')
plt.ylabel('Classes')
plt.title('QDA means per class')
output_figures[f"{row['MODEL_NAME']}_means"] = fig
# analyse output for LDA
elif row['MODEL_NAME'] == 'linear_discriminant_analysis':
# Print coefficients
# create a dataframe with one row for each feature of X
features_list = list(X.columns)
            # extract coefficients (resume from here)
fig = plt.figure(figsize=(12, 10))
coefficients = row['MODEL'].coef_
coefficients_scaled = scale(coefficients)
plt.imshow(coefficients_scaled, cmap='bwr')
ax = fig.gca()
# set xticks
ax.set_xticks(range(0, len(features_list)))
ax.set_xticklabels(features_list, rotation=90)
# set yticks
ax.set_yticks(range(0, len(row['MODEL'].classes_)))
ax.set_yticklabels(row['MODEL'].classes_, rotation=45)
plt.colorbar()
plt.xlabel('Feature name')
plt.ylabel('Classes')
plt.title('LDA coefficients')
output_figures[f"{row['MODEL_NAME']}_coefficients"] = fig
# analyse output for logistic regression
elif row['MODEL_NAME'] == 'logistic_regression':
# Print coefficients
# create a dataframe with one row for each feature of X
features_list = list(X.columns)
            # extract coefficients (resume from here)
fig = plt.figure(figsize=(12, 10))
coefficients = row['MODEL'].coef_
coefficients_scaled = scale(coefficients)
plt.imshow(coefficients_scaled, cmap='bwr')
ax = fig.gca()
# set xticks
ax.set_xticks(range(0, len(features_list)))
ax.set_xticklabels(features_list, rotation=90)
# set yticks
ax.set_yticks(range(0, len(row['MODEL'].classes_)))
ax.set_yticklabels(row['MODEL'].classes_, rotation=45)
plt.colorbar()
plt.xlabel('Feature name')
plt.ylabel('Classes')
plt.title('Logistic regression coefficients')
output_figures[f"{row['MODEL_NAME']}_coefficients"] = fig
elif row['MODEL_NAME'] == 'naive bayes':
# Print coefficients
# create a dataframe with one row for each feature of X
features_list = list(X.columns)
# print variance
fig = plt.figure(figsize=(12, 10))
coefficients = row['MODEL'].sigma_
coefficients_scaled = scale(coefficients)
plt.imshow(coefficients_scaled, cmap='bwr')
ax = fig.gca()
# set xticks
ax.set_xticks(range(0, len(features_list)))
ax.set_xticklabels(features_list, rotation=90)
# set yticks
ax.set_yticks(range(0, len(row['MODEL'].classes_)))
ax.set_yticklabels(row['MODEL'].classes_, rotation=45)
plt.colorbar()
plt.xlabel('Feature name')
plt.ylabel('Classes')
plt.title('Naive bayes sigma')
output_figures[f"{row['MODEL_NAME']}_sigma"] = fig
# print mean
fig = plt.figure(figsize=(12, 10))
coefficients = row['MODEL'].theta_
coefficients_scaled = scale(coefficients)
plt.imshow(coefficients_scaled, cmap='bwr')
ax = fig.gca()
# set xticks
ax.set_xticks(range(0, len(features_list)))
ax.set_xticklabels(features_list, rotation=90)
# set yticks
ax.set_yticks(range(0, len(row['MODEL'].classes_)))
ax.set_yticklabels(row['MODEL'].classes_, rotation=45)
plt.colorbar()
plt.xlabel('Feature name')
plt.ylabel('Classes')
plt.title('Naive bayes theta')
output_figures[f"{row['MODEL_NAME']}_theta"] = fig
elif row['MODEL_NAME'] == 'decision tree':
# Print coefficients
# create a dataframe with one row for each feature of X
features_list = list(X.columns)
# print variance
fig = plt.figure(figsize=(12, 10))
coefficients = row['MODEL'].feature_importances_
# coefficients_scaled = scale(coefficients)
plt.bar(features_list, coefficients)
ax = fig.gca()
# set xticks
# ax.set_xticks(range(0,len(features_list)))
ax.set_xticklabels(features_list, rotation=45)
plt.xlabel('Feature name')
plt.ylabel('Feature importance')
plt.title('Decision tree Gini importance')
output_figures[f"{row['MODEL_NAME']}_Gini"] = fig
# save the decision tree
dotfile = open(f"{outputPath}//dt.dot", 'w')
tree.export_graphviz(row['MODEL'],
out_file=dotfile,
feature_names=features_list,
class_names=row['MODEL'].classes_,
rounded=True,
proportion=False,
precision=2,
filled=True)
dotfile.close()
# http://webgraphviz.com/
else:
print(f"{row['MODEL_NAME']}, model not considered")
return output_figures
| 5,348,782 |
def centerfreq_to_bandnum(center_freq, norm_freq, nth_oct):
"""Returns band number from given center frequency."""
return nth_oct * np.log2(center_freq / norm_freq)
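
# A worked example, assuming the function above is importable: with a 1 kHz
# reference and third-octave bands (nth_oct = 3), a 2 kHz centre frequency is
# one octave up, i.e. band number 3.
print(centerfreq_to_bandnum(2000.0, 1000.0, 3))   # 3.0
print(centerfreq_to_bandnum(1000.0, 1000.0, 3))   # 0.0 (the reference band)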
| 5,348,783 |
def crossval_model(
estimator: BaseEstimator,
X: pd.DataFrame,
y: Union[pd.Series, pd.DataFrame],
evaluators: Sequence[Evaluator],
cv: Optional[
Union[int, BaseCrossValidator]
] = None, # defaults to KFold(n_splits=5)
random_state: Optional[Union[int, np.random.RandomState]] = None,
stratify: Optional[Union[np.ndarray, pd.Series]] = None,
n_jobs=1,
) -> Sequence[Evaluator]:
"""
Evaluate a model using cross validation.
A list of evaluators determines what other metrics, such as feature
importance and partial dependence are computed
"""
# Run various checks and prepare the evaluators
random_state = check_random_state(random_state)
cv = 5 if cv is None else cv
if isinstance(cv, int):
cv = KFold(n_splits=cv, shuffle=True, random_state=random_state)
cross_val_split_generator = cv.split(X, stratify)
evalutors_evaluations = _repeatedly_evaluate_model(
estimator=estimator,
X=X,
y=y,
train_test_indices_generator=cross_val_split_generator,
evaluators=evaluators,
use_group_cv=False,
random_state=random_state,
name_for_logging="Cross validate",
n_jobs=n_jobs,
)
_set_evaluators_evaluations(evalutors_evaluations)
return evalutors_evaluations
| 5,348,784 |
def download_json(name, url, root_path):
"""abstract function to download a json file"""
download_path = os.path.join(root_path, name + '.json')
if not os.path.exists(download_path):
download = requests.get(url)
content = download.json()
with open(download_path, 'w') as output:
json.dump(content, output)
logging.info('file saved with success')
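
# A usage sketch with a hypothetical URL and output directory (both placeholders);
# the target directory is assumed to exist, and the file is only fetched if
# <root_path>/<name>.json is not already present.
download_json(
    name="example_payload",
    url="https://example.com/api/payload.json",
    root_path="/tmp/downloads",
)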
| 5,348,785 |
def measure_dist(positions,weights,v_ref,side = False):
"""
    Plot the mouse point cloud and let the user measure distances by clicking pairs of points.
    side defaults to False (top view) but can be True, in which case the cut is
    through the major axis of the mouse (determined by v_ref).
"""
# simplest trick is to just rotate all points so the reference
# direction is perpendicular to x
v_ref = np.append(v_ref,0)
angle_with_x = angle_between(np.array([1.,0,0]),v_ref)
RR = rotate_body_model(0,0,-angle_with_x)
positions = (RR @ positions.T).T - v_ref
if side:
xx,yy = positions[:,0],positions[:,2]
else:
xx,yy = positions[:,0],positions[:,1]
#top view
plt.figure()
plt.scatter(xx,yy,c= weights/np.max(weights),s = 5)
# plt.xlim([-.05,.1])
# plt.ylim([0,.15])
ax = plt.gca
plt.axes().set_aspect('equal', 'datalim')
plt.title('click center of hip, then mid, then head of mouse!')
w,h = 570,800
plt.get_current_fig_manager().window.setGeometry(1920-w-10,60,w,h)
click_points = np.asanyarray(plt.ginput(0))
    if click_points.shape[0] % 2 != 0:
print('missing a point')
click_points = click_points[:-1,:]
n_clicks = click_points.shape[0]
start_points = click_points[np.arange(n_clicks)%2==0,:]
end_points = click_points[np.arange(n_clicks)%2==1,:]
n_points = start_points.shape[0]
plt.figure()
plt.scatter(xx,yy,c= weights/np.max(weights),s = 5)
for s,e in zip(start_points,end_points):
plt.plot([s[0],e[0]],[s[1],e[1]],'o-')
dist = np.linalg.norm(end_points-start_points,axis = 1)
leg_list = [str(np.round(d,decimals = 3))+" m" for d in dist]
plt.legend(leg_list)
plt.xlabel("x [m]")
plt.ylabel("y [m]")
plt.title('distance in meters')
# plt.xlim([-.05,.1])
# plt.ylim([0,.15])
ax = plt.gca
plt.axes().set_aspect('equal', 'datalim')
timestr = time.strftime("%Y%m%d-%H%M%S")
plt.savefig('/home/chrelli/git/3d_sandbox/mycetrack0p4/measurements/'+timestr+'.png')
plt.show()
w,h = 570,800
plt.get_current_fig_manager().window.setGeometry(1920-w-10,60,w,h)
return dist
| 5,348,786 |
def _get_draft_comments(request, issue, preview=False):
"""Helper to return objects to put() and a list of draft comments.
If preview is True, the list of objects to put() is empty to avoid changes
to the datastore.
Args:
request: Django Request object.
issue: Issue instance.
preview: Preview flag (default: False).
Returns:
2-tuple (put_objects, comments).
"""
comments = []
tbd = []
# XXX Should request all drafts for this issue once, now we can.
for patchset in issue.patchset_set.order('created'):
ps_comments = list(models.Comment.gql(
'WHERE ANCESTOR IS :1 AND author = :2 AND draft = TRUE',
patchset, request.user))
if ps_comments:
patches = dict((p.key(), p) for p in patchset.patch_set)
for p in patches.itervalues():
p.patchset = patchset
for c in ps_comments:
c.draft = False
# Get the patch key value without loading the patch entity.
# NOTE: Unlike the old version of this code, this is the
# recommended and documented way to do this!
pkey = models.Comment.patch.get_value_for_datastore(c)
if pkey in patches:
patch = patches[pkey]
c.patch = patch
if not preview:
tbd.append(ps_comments)
patchset.update_comment_count(len(ps_comments))
tbd.append(patchset)
ps_comments.sort(key=lambda c: (c.patch.filename, not c.left,
c.lineno, c.date))
comments += ps_comments
return tbd, comments
| 5,348,787 |
def activate_model(cfg):
"""Activate the dynamic parts."""
cfg["fake"] = cfg["fake"]()
return cfg
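
# A minimal demonstration, assuming the activate_model function above is importable:
# the "fake" entry starts out as a zero-argument factory and is replaced by the
# object it builds.
cfg = {"fake": lambda: {"weights": [0.1, 0.2]}}
cfg = activate_model(cfg)
print(cfg["fake"])   # {'weights': [0.1, 0.2]}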
| 5,348,788 |
def generate_tsv_gen(rows, le='\n'):
"""
Generate tab-separated value output from a list of dicts.
The keys of the dict will be used as column headings, and
are assumed to be identical for all rows.
"""
header_string = None
for row in rows:
headers = []
fields = []
for header, value in row.items():
if(header_string is None):
headers.append(header)
if(not isinstance(value, basestring)):
value = str(value)
fields.append(value)
if(header_string is None):
header_string = '\t'.join(headers) + le;
yield header_string
yield '\t'.join(fields) + le;
| 5,348,789 |
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["boxcar, 10, 9"]
):
"""
usage.scipy: 2
"""
...
| 5,348,790 |
def convert_to_number(string):
"""
Tries to cast input into an integer number, returning the
number if successful and returning False otherwise.
"""
try:
number = int(string)
return number
    except (ValueError, TypeError):
return False
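
# A quick demonstration, assuming the convert_to_number function above is importable.
# Note that a failed parse returns False, which callers must distinguish from a
# legitimate parsed 0 (which is also falsy).
assert convert_to_number("42") == 42
assert convert_to_number("abc") is False
assert convert_to_number("0") == 0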
| 5,348,791 |
def _ts_value(position, counts, exposure, background, kernel, norm, flux_estimator):
"""Compute TS value at a given pixel position.
Uses approach described in Stewart (2009).
Parameters
----------
position : tuple (i, j)
Pixel position.
counts : `~numpy.ndarray`
Counts image
background : `~numpy.ndarray`
Background image
exposure : `~numpy.ndarray`
Exposure image
kernel : `astropy.convolution.Kernel2D`
Source model kernel
    norm : `~numpy.ndarray`
        Norm image. The flux value at the given pixel position is used as
        starting value for the minimization.
    flux_estimator : flux estimator object
        Estimator whose ``run`` method is applied to the dataset built at the
        given position.
Returns
-------
TS : float
TS value at the given pixel position.
"""
dataset = SimpleMapDataset.from_arrays(
counts=counts,
background=background,
exposure=exposure,
kernel=kernel,
position=position,
norm=norm,
)
return flux_estimator.run(dataset)
| 5,348,792 |
def mean(nums: List) -> float:
"""
Find mean of a list of numbers.
Wiki: https://en.wikipedia.org/wiki/Mean
>>> mean([3, 6, 9, 12, 15, 18, 21])
12.0
>>> mean([5, 10, 15, 20, 25, 30, 35])
20.0
>>> mean([1, 2, 3, 4, 5, 6, 7, 8])
4.5
>>> mean([])
Traceback (most recent call last):
...
ValueError: List is empty
"""
if not nums:
raise ValueError("List is empty")
return sum(nums) / len(nums)
| 5,348,793 |
def test_MergeFang_ZeroOffTime():
"""Merger detects the off time gap and creates separate molecules.
"""
merger = proc.Merge(mergeRadius = 25,
tOff = 1,
statsComputer = proc.MergeFang())
pathToTestData = testDataRoot / Path('processor_test_files/merge.csv')
with open(str(pathToTestData), mode = 'r') as inFile:
df = pd.read_csv(inFile, comment = '#')
mergedDF = merger(df)
# Due to the smaller gap-time, there should be three tracks, not two
assert_equal(len(mergedDF), 3)
| 5,348,794 |
def test_fetch_returns_lst():
"""
GIVEN fetch()
WHEN is called
THEN should return list
"""
return_lst = fetch()
assert type(return_lst) == list
| 5,348,795 |
def post_3d(post_paths, labels, colours, linestyles, contour_levels_sig, x_label=None, y_label=None, z_label=None,
x_lims=None, y_lims=None, z_lims=None, smooth_xy=None, smooth_xz=None, smooth_yz=None, smooth_x=None,
smooth_y=None, smooth_z=None, print_areas=False, save_path=None):
"""
Produce triangle plot showing multiple 3D posteriors, each as output by plot_utils.get_3d_post.
Args:
post_paths (list): List of paths to 3D posterior .npz files, each as output by plot_utils.get_3d_post.
labels (list): List of legend labels, one for each posterior grid.
colours (list): List of colours, one for each posterior grid.
linestyles (list): List of linestyles, one for each posterior grid.
contour_levels_sig (list): List of confidence regions to plot in ascending order, e.g. [1, 3].
x_label (str, optional): X-axis label - default None, i.e. no label.
y_label (str, optional): Y-axis label - default None, i.e. no label.
z_label (str, optional): Z-axis label - default None, i.e. no label.
x_lims ((float, float), optional): X-axis limits - default None, limits set automatically.
y_lims ((float, float), optional): Y-axis limits - default None, limits set automatically.
z_lims ((float, float), optional): Z-axis limits - default None, limits set automatically.
smooth_xy (list, optional): List of kernel standard deviations for Gaussian smoothing in the x-y plane, one for
each posterior grid, or None for no smoothing (default None).
smooth_xz (list, optional): List of kernel standard deviations for Gaussian smoothing in the x-z plane, one for
each posterior grid, or None for no smoothing (default None).
smooth_yz (list, optional): List of kernel standard deviations for Gaussian smoothing in the y-z plane, one for
each posterior grid, or None for no smoothing (default None).
smooth_x (list, optional): List of kernel standard deviations for Gaussian smoothing of the 1D x posterior, one
for each posterior grid, or None for no smoothing (default None).
smooth_y (list, optional): List of kernel standard deviations for Gaussian smoothing of the 1D y posterior, one
for each posterior grid, or None for no smoothing (default None).
smooth_z (list, optional): List of kernel standard deviations for Gaussian smoothing of the 1D z posterior, one
for each posterior grid, or None for no smoothing (default None).
print_areas (bool, optional): If True, print relative areas/widths of the different posteriors. Note that
smoothing can affect these results, so for reliable results smoothing should be
switched off to extract relative areas, and then smoothing values should be set to
preserve unsmoothed relative areas. Default False.
save_path (str, optional): Path to save figure to, if supplied. If not supplied, figure is displayed.
"""
# Load unnormalised 3D posteriors
post_grids = []
for post_idx, post_path in enumerate(post_paths):
print(f'Loading {post_idx + 1} / {len(post_paths)}')
with np.load(post_path) as data:
x_grid_tmp = data['x_grid']
y_grid_tmp = data['y_grid']
z_grid_tmp = data['z_grid']
post_grids.append(data['post_grid'])
# Check grids consistent
if post_idx == 0:
x_grid, y_grid, z_grid = x_grid_tmp, y_grid_tmp, z_grid_tmp
else:
assert np.array_equal(x_grid, x_grid_tmp)
assert np.array_equal(y_grid, y_grid_tmp)
assert np.array_equal(z_grid, z_grid_tmp)
# Form 1D & 2D grids
print('Forming 1D & 2D grids')
x = x_grid[:, 0, 0]
y = y_grid[0, :, 0]
z = z_grid[0, 0, :]
xy_x, xy_y = np.meshgrid(x, y, indexing='ij')
xz_x, xz_z = np.meshgrid(x, z, indexing='ij')
yz_y, yz_z = np.meshgrid(y, z, indexing='ij')
# Calculate integration elements
print('Calculating integration elements')
dx = x[1] - x[0]
dy = y[1] - y[0]
dz = z[1] - z[0]
assert np.allclose(np.diff(x), dx)
assert np.allclose(np.diff(y), dy)
assert np.allclose(np.diff(z), dz)
dxdy = dx * dy
dxdz = dx * dz
dydz = dy * dz
dxdydz = dx * dy * dz
# Normalise 3D posteriors
print('Normalising')
post_grids = [post_grid / (np.sum(post_grid) * dxdydz) for post_grid in post_grids]
assert all([np.isclose(np.sum(post_grid) * dxdydz, 1) for post_grid in post_grids])
# Marginalise to get 2D posteriors
print('Marginalising 3D -> 2D')
posts_xy = [np.sum(post_grid, axis=2) * dz for post_grid in post_grids]
posts_xz = [np.sum(post_grid, axis=1) * dy for post_grid in post_grids]
posts_yz = [np.sum(post_grid, axis=0) * dx for post_grid in post_grids]
assert all([np.isclose(np.sum(post_xy) * dxdy, 1) for post_xy in posts_xy])
assert all([np.isclose(np.sum(post_xz) * dxdz, 1) for post_xz in posts_xz])
assert all([np.isclose(np.sum(post_yz) * dydz, 1) for post_yz in posts_yz])
# Marginalise again to get 1D posteriors
print('Marginalising 2D -> 1D')
posts_x = [np.sum(post_xy, axis=1) * dy for post_xy in posts_xy]
posts_y = [np.sum(post_xy, axis=0) * dx for post_xy in posts_xy]
posts_z = [np.sum(post_xz, axis=0) * dx for post_xz in posts_xz]
assert all([np.isclose(np.sum(post_x) * dx, 1) for post_x in posts_x])
assert all([np.isclose(np.sum(post_y) * dy, 1) for post_y in posts_y])
assert all([np.isclose(np.sum(post_z) * dz, 1) for post_z in posts_z])
# Additional marginalisation checks
print('Checking normalisation')
assert all([np.allclose(post_x, np.sum(post_xz, axis=1) * dz) for post_x, post_xz in zip(posts_x, posts_xz)])
assert all([np.allclose(post_y, np.sum(post_yz, axis=1) * dz) for post_y, post_yz in zip(posts_y, posts_yz)])
assert all([np.allclose(post_z, np.sum(post_yz, axis=0) * dy) for post_z, post_yz in zip(posts_z, posts_yz)])
assert all([np.allclose(post_x, np.sum(p_3d, axis=(1, 2)) * dydz) for post_x, p_3d in zip(posts_x, post_grids)])
assert all([np.allclose(post_y, np.sum(p_3d, axis=(0, 2)) * dxdz) for post_y, p_3d in zip(posts_y, post_grids)])
assert all([np.allclose(post_z, np.sum(p_3d, axis=(0, 1)) * dxdy) for post_z, p_3d in zip(posts_z, post_grids)])
# Apply smoothing
if smooth_xy is not None:
posts_xy = [ndimage.gaussian_filter(post_xy, [sig, sig / 2.]) for post_xy, sig in zip(posts_xy, smooth_xy)]
if smooth_xz is not None:
posts_xz = [ndimage.gaussian_filter(post_xz, sig) for post_xz, sig in zip(posts_xz, smooth_xz)]
if smooth_yz is not None:
posts_yz = [ndimage.gaussian_filter(post_yz, sig) for post_yz, sig in zip(posts_yz, smooth_yz)]
if smooth_x is not None:
posts_x = [ndimage.gaussian_filter(post_x, sig) for post_x, sig in zip(posts_x, smooth_x)]
if smooth_y is not None:
posts_y = [ndimage.gaussian_filter(post_y, sig) for post_y, sig in zip(posts_y, smooth_y)]
if smooth_z is not None:
posts_z = [ndimage.gaussian_filter(post_z, sig) for post_z, sig in zip(posts_z, smooth_z)]
# Convert 2D & 1D posteriors to confidence levels
print('Converting to confidence levels')
confs_xy = [gcl_post.post_to_conf(post_xy, dxdy) for post_xy in posts_xy]
confs_xz = [gcl_post.post_to_conf(post_xz, dxdz) for post_xz in posts_xz]
confs_yz = [gcl_post.post_to_conf(post_yz, dydz) for post_yz in posts_yz]
confs_x = [gcl_post.post_to_conf(post_x, dx) for post_x in posts_x]
confs_y = [gcl_post.post_to_conf(post_y, dy) for post_y in posts_y]
confs_z = [gcl_post.post_to_conf(post_z, dz) for post_z in posts_z]
# Extract out relative widths and areas
contour_levels = [0.] + [scipy.special.erf(contour_level / np.sqrt(2)) for contour_level in contour_levels_sig]
if print_areas:
print('Note that smoothing should be switched off to extract unbiased relative areas, and smoothing should be '
'set such that relative areas are preserved')
def count_points_within_outermost_contour(conf_grid):
return np.count_nonzero(conf_grid < contour_levels[-1])
rel_areas_xy = list(map(count_points_within_outermost_contour, confs_xy))
print('Relative areas x-y:', np.divide(rel_areas_xy, max(rel_areas_xy)))
rel_areas_xz = list(map(count_points_within_outermost_contour, confs_xz))
print('Relative areas x-z:', np.divide(rel_areas_xz, max(rel_areas_xz)))
rel_areas_yz = list(map(count_points_within_outermost_contour, confs_yz))
print('Relative areas y-z:', np.divide(rel_areas_yz, max(rel_areas_yz)))
rel_widths_x = list(map(count_points_within_outermost_contour, confs_x))
print('Relative widths x:', np.divide(rel_widths_x, max(rel_widths_x)))
rel_widths_y = list(map(count_points_within_outermost_contour, confs_y))
print('Relative widths y:', np.divide(rel_widths_y, max(rel_widths_y)))
rel_widths_z = list(map(count_points_within_outermost_contour, confs_z))
print('Relative widths z:', np.divide(rel_widths_z, max(rel_widths_z)))
# Plot everything
print('Plotting')
plt.rcParams.update({'font.size': 13})
plt.rcParams['axes.titlesize'] = 17
fig, axes = plt.subplots(nrows=3, ncols=3, sharex='col', figsize=(12.8, 8.6))
plt.subplots_adjust(left=.08, right=.97, bottom=.08, top=.97, wspace=0, hspace=0)
fill_colours = [[np.squeeze(matplotlib.colors.to_rgba_array(c, a)) for a in [0.3, 0.1, 0]] for c in colours]
# Row 0: x
for post_x, colour, fill, linestyle, label in zip(posts_x, colours, fill_colours, linestyles, labels):
axes[0, 0].plot(x, post_x, color=colour, ls=linestyle, lw=2, label=label)
axes[0, 0].fill_between(x, post_x, color=fill[1])
axes[0, 1].axis('off')
axes[0, 2].axis('off')
# Row 1: x vs y, y
for conf_xy, post_y, colour, fill, linestyle in zip(confs_xy, posts_y, colours, fill_colours, linestyles):
axes[1, 0].contour(xy_x, xy_y, conf_xy, levels=contour_levels, colors=colour, linestyles=[linestyle],
linewidths=2)
axes[1, 0].contourf(xy_x, xy_y, conf_xy, levels=contour_levels, colors=fill)
axes[1, 1].plot(y, post_y, color=colour, ls=linestyle, lw=2)
axes[1, 1].fill_between(y, post_y, color=fill[1])
axes[1, 2].axis('off')
# Row 2: x vs z, y vs z, z
for conf_xz, conf_yz, post_z, colour, fill, linestyle in zip(confs_xz, confs_yz, posts_z, colours, fill_colours,
linestyles):
axes[2, 0].contour(xz_x, xz_z, conf_xz, levels=contour_levels, colors=colour, linestyles=[linestyle],
linewidths=2)
axes[2, 0].contourf(xz_x, xz_z, conf_xz, levels=contour_levels, colors=fill)
axes[2, 1].contour(yz_y, yz_z, conf_yz, levels=contour_levels, colors=colour, linestyles=[linestyle],
linewidths=2)
axes[2, 1].contourf(yz_y, yz_z, conf_yz, levels=contour_levels, colors=fill)
axes[2, 2].plot(z, post_z, color=colour, ls=linestyle, lw=2)
axes[2, 2].fill_between(z, post_z, color=fill[1])
# Hide y ticks for 1D posteriors
axes[0, 0].tick_params(axis='y', which='both', left=False, labelleft=False)
axes[1, 1].tick_params(axis='y', which='both', left=False, labelleft=False)
axes[2, 2].tick_params(axis='y', which='both', left=False, labelleft=False)
# Add x ticks at top and bottom of 2D posteriors and at bottom of 1D posteriors
axes[0, 0].tick_params(axis='x', which='both', bottom=True, direction='in')
axes[1, 0].tick_params(axis='x', which='both', top=True, bottom=True, direction='in')
axes[2, 0].tick_params(axis='x', which='both', top=True, bottom=True, direction='inout', length=7.5)
axes[0, 1].tick_params(axis='x', which='both', bottom=True, direction='in')
axes[2, 1].tick_params(axis='x', which='both', top=True, bottom=True, direction='inout', length=7.5)
axes[2, 2].tick_params(axis='x', which='both', bottom=True, direction='inout', length=7.5)
# Add y ticks at left and right of 2D posteriors
axes[1, 0].tick_params(axis='y', which='both', left=True, direction='inout', length=7.5)
axes[1, 0].secondary_yaxis('right').tick_params(axis='y', which='both', right=True, direction='in',
labelright=False)
axes[2, 0].tick_params(axis='y', which='both', left=True, right=True, direction='inout', length=7.5)
axes[2, 1].tick_params(axis='y', which='both', left=True, right=True, labelleft=False, direction='in')
# Limits
axes[2, 0].set_xlim(x_lims)
axes[2, 1].set_xlim(y_lims)
axes[2, 2].set_xlim(z_lims)
axes[1, 0].set_ylim(y_lims)
axes[2, 0].set_ylim(z_lims)
axes[2, 1].set_ylim(z_lims)
# Fix overlapping z tick labels by removing every other tick
axes[2, 2].set_xticks(axes[2, 2].get_xticks()[1::2])
# Label axes
axes[2, 0].set_xlabel(x_label)
axes[2, 1].set_xlabel(y_label)
axes[2, 2].set_xlabel(z_label)
axes[1, 0].set_ylabel(y_label)
axes[2, 0].set_ylabel(z_label)
fig.align_ylabels()
# Title
axes[0, 0].annotate('Full Euclid-like mask', xy=(2.95, .95), xycoords='axes fraction', ha='right',
va='top', size=plt.rcParams['axes.titlesize'])
# Legend
leg_title = f'{min(contour_levels_sig)}\N{en dash}{max(contour_levels_sig)}$\\sigma$ confidence'
axes[0, 0].legend(loc='upper right', bbox_to_anchor=(3, .8), handlelength=4, frameon=False, title=leg_title)
if save_path is not None:
plt.savefig(save_path)
print('Saved ' + save_path)
else:
plt.show()
| 5,348,796 |
def det(m1: ndarray) -> float:
"""
Compute the determinant of a double precision 3x3 matrix.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/det_c.html
:param m1: Matrix whose determinant is to be found.
:return: The determinant of the matrix.
"""
m1 = stypes.to_double_matrix(m1)
return libspice.det_c(m1)
| 5,348,797 |
def path_to_xy(path: PointList) -> XYList:
"""Convert PointList to XYList"""
return [p.xy() for p in path]
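
# A hypothetical sketch of the point interface assumed above: any object with an
# xy() method works. The Point class below is an assumption for illustration.
from dataclasses import dataclass

@dataclass
class Point:
    x: float
    y: float

    def xy(self):
        return (self.x, self.y)

path = [Point(0.0, 0.0), Point(1.0, 2.0)]
print(path_to_xy(path))   # [(0.0, 0.0), (1.0, 2.0)]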
| 5,348,798 |
def apply_taint(state, addr, taint_id='', bits=PAGE_SIZE, var=None):
"""
Apply taint to a memory location
:param state: angr state
:param addr: memory address
:param taint_id: taint id
:param bits: number of bits
:param var: symbolic variable to store
:return:
"""
if var is None:
var = new_tainted_value(taint_id, bits)
# if not (isinstance(addr, int) or addr.concrete) and state.globals[SC]:
# # FIXME: Nilo, fix this
# raise RuntimeError("Nilo fix me!")
# #addr = self._get_target_concretization(self, addr, state)
state.memory.store(addr, var, inspect=False, disable_actions=True)
state.globals[TAINT_APPLIED] = True
| 5,348,799 |