content (string, lengths 22-815k) | id (int64, 0-4.91M)
---|---|
def add_logger_stdout(app):
"""Creates a StreamHandler logger"""
f = ContextFilter()
app.logger.addFilter(f)
stdout_handler = logging.StreamHandler(sys.stdout)
FORMAT = '%(asctime)s %(hostname)s {0} :%(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'.format('Tester')
formatter = logging.Formatter(FORMAT, datefmt='%Y-%m-%dT%H:%M:%S')
stdout_handler.setFormatter(formatter)
stdout_handler.setLevel(logging.INFO)
stdout_handler._name = 'StreamHandler'
app.logger.addHandler(stdout_handler)
| 5,349,000 |
def main():
"""
Performs the following actions:
1.) Reading configuration file.
2.) Connecting to database.
3.) Executing queries.
"""
rootLogger.info("Reading configuration files.")
config = configparser.ConfigParser()
config.read('dwh.cfg')
rootLogger.info("Connecting to database.")
conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['CLUSTER'].values()))
cur = conn.cursor()
drop_tables(cur, conn)
create_tables(cur, conn)
conn.close()
| 5,349,001 |
def softmax_mask(w: torch.Tensor,
dim=-1,
mask: torch.BoolTensor = None
) -> torch.Tensor:
"""
Masked softmax: entries of `w` set to -np.inf are masked out, or an explicit boolean mask can be given.
:param w: input scores/logits
:param dim: dimension along which to normalize
:param mask: optional boolean mask; True marks entries that take part in the softmax
:return: probabilities, with masked entries set to 0
"""
if mask is None:
mask = w != -np.inf
minval = torch.min(w[~mask]) # to avoid affecting torch.max
w1 = w.clone()
w1[~mask] = minval
# to prevent over/underflow
w1 = w1 - torch.max(w1, dim=dim, keepdim=True)[0]
w1 = torch.exp(w1)
p = w1 / torch.sum(w1 * mask.float(), dim=dim, keepdim=True)
p[~mask] = 0.
return p
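# Usage sketch (illustrative, not part of the original module): masked
# positions carry -np.inf scores and end up with probability exactly 0.
import numpy as np
import torch

scores = torch.tensor([[1.0, 2.0, -np.inf],
                       [0.5, -np.inf, 0.5]])
probs = softmax_mask(scores)  # mask is inferred from the -inf entries
assert torch.allclose(probs.sum(dim=-1), torch.ones(2))  # rows renormalize over unmasked entries
assert probs[0, 2] == 0 and probs[1, 1] == 0             # masked entries are zeroed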
| 5,349,002 |
def mocked_captcha_failed_call(app):
"""Monkey patch Captcha call in case of failure."""
token = app.config['dev_config']['secret_keys']['web']
httpretty.enable()
# mock the Google reCAPTCHA verification endpoint
httpretty.register_uri(httpretty.POST,
'https://www.google.com/recaptcha/api/siteverify',
data=dict(response='tokenforfailedcaptcha', secret=token),
body=json.dumps({"success": None}), content_type='application/json', status=200)
yield
# disable afterwards when not in use to avoid issues with the sockets
# reset states
httpretty.disable()
httpretty.reset()
| 5,349,003 |
def modular_nli_reader(resources_or_conf: Union[dict, SharedResources] = None):
"""Creates a Modular NLI reader instance. Model defined in config."""
from jack.readers.multiple_choice.shared import MultipleChoiceSingleSupportInputModule
from jack.readers.natural_language_inference.modular_nli_model import ModularNLIModel
from jack.readers.multiple_choice.shared import SimpleMCOutputModule
shared_resources = create_shared_resources(resources_or_conf)
input_module = MultipleChoiceSingleSupportInputModule(shared_resources)
model_module = ModularNLIModel(shared_resources)
output_module = SimpleMCOutputModule(shared_resources)
return TFReader(shared_resources, input_module, model_module, output_module)
| 5,349,004 |
def test_read_course_proto_correctness():
"""Test if read_course_proto() returns the right content with/without lab."""
course_raw_data = check_file_open('tests/test.json')
json_obj = course_raw_data['Test Data']
course_list = []
enrollment_dict = {}
index = 0
department_name = 'ACCT'
each_department = read_course_proto(json_obj, course_list, department_name, enrollment_dict)
assert each_department.deptName == 'ACCT'
assert len(each_department.courses) == 3
assert len(course_list) == 3
for i in range(len(each_department.courses)):
assert each_department.courses[i] == target_output.department_acct.courses[i]
for i in range(len(course_list)):
assert course_list[i] == target_output.courseList[i]
for key in enrollment_dict.keys():
assert enrollment_dict[key] == target_output.seats[index]
index += 1
course_list = []
department_name = 'ADMJ'
enrollment_dict = {}
each_department = read_course_proto(json_obj, course_list, department_name, enrollment_dict)
assert each_department.deptName == 'ADMJ'
assert len(each_department.courses) == 3
assert len(course_list) == 3
for i in range(len(each_department.courses)):
assert each_department.courses[i] == target_output.department_admj.courses[i]
for i in range(len(course_list)):
assert course_list[i] == target_output.courseList[i + 3]
for key in enrollment_dict.keys():
assert enrollment_dict[key] == target_output.seats[index]
index += 1
| 5,349,005 |
def test_qttexttospeech():
"""Test the qtpy.QtTextToSpeech namespace."""
from qtpy import QtTextToSpeech
assert QtTextToSpeech.QTextToSpeech is not None
assert QtTextToSpeech.QVoice is not None
if PYSIDE2:
assert QtTextToSpeech.QTextToSpeechEngine is not None
| 5,349,006 |
def move_box_and_gtt(n_targets=3,
time_limit=DEFAULT_TIME_LIMIT,
control_timestep=DEFAULT_CONTROL_TIMESTEP):
"""Loads `move_box_or_gtt` task."""
return _make_predicate_task(
n_boxes=1,
n_targets=n_targets,
include_gtt_predicates=True,
include_move_box_predicates=True,
max_num_predicates=2,
control_timestep=control_timestep,
time_limit=time_limit)
| 5,349,007 |
def get_body(data_dict, database_id, media_status, media_type):
"""
Build the JSON request body for the given media item.
:param media_type: media type (music / movie / book)
:param media_status: marking status (wish / doing / collected)
:param data_dict: scraped media information
:param database_id: target database id
:return: request body dict
"""
status = ""
music_status = ""
if media_status == MediaStatus.WISH.value:
status = "想看"
music_status = "想听"
elif media_status == MediaStatus.DO.value:
status = "在看"
music_status = "在听"
elif media_status == MediaStatus.COLLECT.value:
status = "看过"
music_status = "听过"
else:
status = ""
music_status = ""
log_detail.info(f"【RUN】- {media_type}数据信息整理为json格式")
rating = data_dict[MediaInfo.RATING_F.value]
# rating = float(rat) if rat == "" else 0
if media_type == MediaType.MUSIC.value:
body = {
"parent": {
"type": "database_id",
"database_id": f"{database_id}"
},
"properties": {
"音乐": {
"title": [{
"type": "text",
"text": {
"content": data_dict[MediaInfo.TITLE.value]
}
}]
},
"封面": {
"files": [{
"type": "external",
"name": data_dict[MediaInfo.IMG.value][-13:],
"external": {
"url": data_dict[MediaInfo.IMG.value]
}
}]
},
"表演者": {
"rich_text": [{
"type": "text",
"text": {
"content": data_dict[MediaInfo.PERFORMER.value]
}
}]
},
"发行时间": {
"select": {
"name": data_dict[MediaInfo.RELEASE_DATE.value][0:4]
}
},
"标记状态": {
"select": {
"name": f"{music_status}"
}
},
"豆瓣链接": {
"url": f"{data_dict[MediaInfo.URL.value]}"
}
}
}
# Rating
if data_dict[MediaInfo.RATING_F.value]:
rating_f = float(data_dict[MediaInfo.RATING_F.value])
tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value,
property_params=rating_f)
body["properties"]["评分"] = tmp_dict
# Number of ratings
if data_dict[MediaInfo.ASSESS.value]:
tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value,
property_params=data_dict[MediaInfo.ASSESS.value])
body["properties"]["评分人数"] = tmp_dict
return body
elif media_type == MediaType.MOVIE.value:
# Director / screenwriter / starring
text_director = ' / '.join(data_dict[MediaInfo.DIRECTOR.value])
text_screenwriter = ' / '.join(data_dict[MediaInfo.SCREENWRITER.value])
text_starring = ' / '.join(data_dict[MediaInfo.STARRING.value])
str_type = get_multi_select_body(data_dict[MediaInfo.MOVIE_TYPE.value])
json_type = json.loads(str_type)
str_c_or_r = get_multi_select_body(data_dict[MediaInfo.C_OR_R.value])
json_c_or_r = json.loads(str_c_or_r)
body = {
"parent": {
"type": "database_id",
"database_id": f"{database_id}"
},
"properties": {
"名字": {
"title": [{
"type": "text",
"text": {
"content": data_dict[MediaInfo.TITLE.value]
}
}]
},
"导演": {
"rich_text": [{
"type": "text",
"text": {
"content": text_director
}
}]
},
"编剧": {
"rich_text": [{
"type": "text",
"text": {
"content": text_screenwriter
}
}]
},
"主演": {
"rich_text": [{
"type": "text",
"text": {
"content": text_starring
}
}]
},
"类型": {
"multi_select": json_type
},
"国家地区": {
"multi_select": json_c_or_r
},
"IMDb": {
"url": f"https://www.imdb.com/title/{data_dict[MediaInfo.IMDB.value]}"
},
"标记状态": {
"select": {
"name": f"{status}"
}
},
"分类": {
"select": {
"name": f"{data_dict[MediaInfo.CATEGORIES.value]}"
}
},
"简介": {
"rich_text": [{
"type": "text",
"text": {
"content": data_dict[MediaInfo.RELATED.value]
}
}]
},
"封面": {
"files": [{
"type": "external",
"name": data_dict[MediaInfo.IMG.value][-15:],
"external": {
"url": data_dict[MediaInfo.IMG.value]
}
}]
},
"豆瓣链接": {
"url": f"{data_dict[MediaInfo.URL.value]}"
}
}
}
# Rating
if data_dict[MediaInfo.RATING_F.value]:
rating_f = float(data_dict[MediaInfo.RATING_F.value])
tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value,
property_params=rating_f)
body["properties"]["评分"] = tmp_dict
# Number of ratings
if data_dict[MediaInfo.ASSESS.value]:
tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value,
property_params=data_dict[MediaInfo.ASSESS.value])
body["properties"]["评分人数"] = tmp_dict
return body
elif media_type == MediaType.BOOK.value:
body = {
"parent": {
"type": "database_id",
"database_id": f"{database_id}"
},
"properties": {
"书名": {
"title": [{
"type": "text",
"text": {
"content": data_dict[MediaInfo.TITLE.value]
}
}]
},
"封面": {
"files": [{
"type": "external",
"name": data_dict[MediaInfo.IMG.value][-13:],
"external": {
"url": data_dict[MediaInfo.IMG.value]
}
}]
},
"作者": {
"rich_text": [{
"type": "text",
"text": {
"content": data_dict[MediaInfo.AUTHOR.value]
}
}]
},
"出版年份": {
"select": {
"name": data_dict[MediaInfo.PUB_DATE.value][0:4]
}
},
"标记状态": {
"select": {
"name": f"{status}"
}
},
"豆瓣链接": {
"url": f"{data_dict[MediaInfo.URL.value]}"
}
}
}
# ISBN
if data_dict[MediaInfo.ISBN.value]:
tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.URL.value,
property_params=data_dict[MediaInfo.ISBN.value])
body["properties"]["ISBN"] = tmp_dict
# Price
if data_dict[MediaInfo.PRICE.value]:
tmp_float = float(data_dict[MediaInfo.PRICE.value])
tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value,
property_params=tmp_float)
body["properties"]["价格"] = tmp_dict
# Rating
if data_dict[MediaInfo.RATING_F.value]:
rating_f = float(data_dict[MediaInfo.RATING_F.value])
tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value,
property_params=rating_f)
body["properties"]["评分"] = tmp_dict
# Number of ratings
if data_dict[MediaInfo.ASSESS.value]:
tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value,
property_params=data_dict[MediaInfo.ASSESS.value])
body["properties"]["评分人数"] = tmp_dict
# Page count
if data_dict[MediaInfo.PAGES.value]:
pages_num = int(data_dict[MediaInfo.PAGES.value])
tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.NUMBER.value,
property_params=pages_num)
body["properties"]["页数"] = tmp_dict
# Publisher
if data_dict[MediaInfo.PUBLISHER.value]:
tmp_dict = get_non_null_params_body(property_type=DatabaseProperty.SELECT.value,
property_params=data_dict[MediaInfo.PUBLISHER.value])
body["properties"]["出版社"] = tmp_dict
return body
| 5,349,008 |
def _find_full_periods(events, quantity, capacity):
"""Find the full periods."""
full_periods = []
used = 0
full_start = None
for event_date in sorted(events):
used += events[event_date]['quantity']
if not full_start and used + quantity > capacity:
full_start = event_date
elif full_start and used + quantity <= capacity:
full_periods.append((full_start, event_date))
full_start = None
return full_periods
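# Usage sketch with hypothetical reservation events (each date maps to a
# quantity delta); capacity and requested quantity are made-up numbers.
import datetime

events = {
    datetime.date(2021, 1, 1): {'quantity': 3},   # 3 units reserved
    datetime.date(2021, 1, 5): {'quantity': 3},   # only 4 units left: too few for a request of 5
    datetime.date(2021, 1, 9): {'quantity': -3},  # 3 units released again
}
print(_find_full_periods(events, quantity=5, capacity=10))
# -> [(datetime.date(2021, 1, 5), datetime.date(2021, 1, 9))]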
| 5,349,009 |
def crop_image(img, target_size, center):
""" crop_image """
height, width = img.shape[:2]
size = target_size
if center:
# use integer division so the slice indices below are ints
w_start = (width - size) // 2
h_start = (height - size) // 2
else:
w_start = random.randint(0, width - size)
h_start = random.randint(0, height - size)
w_end = w_start + size
h_end = h_start + size
img = img[h_start:h_end, w_start:w_end, :]
return img
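# Usage sketch (assuming an H x W x C image array; cv2/random imports are
# expected at module level in the original file).
import random
import numpy as np

img = np.zeros((256, 320, 3), dtype=np.uint8)
center_crop = crop_image(img, target_size=224, center=True)   # deterministic central crop
random_crop = crop_image(img, target_size=224, center=False)  # crop at a random offset
print(center_crop.shape, random_crop.shape)  # (224, 224, 3) (224, 224, 3)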
| 5,349,010 |
def _histogram(
data=None,
bins="freedman-diaconis",
p=None,
density=False,
kind="step",
line_kwargs={},
patch_kwargs={},
**kwargs,
):
"""
Make a plot of a histogram of a data set.
Parameters
----------
data : array_like
1D array of data to make a histogram out of
bins : int, array_like, or str, default 'freedman-diaconis'
If int or array_like, setting for `bins` kwarg to be passed to
`np.histogram()`. If 'exact', then each unique value in the
data gets its own bin. If 'integer', then integer data is
assumed and each integer gets its own bin. If 'sqrt', uses the
square root rule to determine number of bins. If
`freedman-diaconis`, uses the Freedman-Diaconis rule for number
of bins.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
density : bool, default False
If True, normalize the histogram. Otherwise, base the histogram
on counts.
kind : str, default 'step'
The kind of histogram to display. Allowed values are 'step' and
'step_filled'.
line_kwargs : dict
Any kwargs to be passed to p.line() in making the line of the
histogram.
patch_kwargs : dict
Any kwargs to be passed to p.patch() in making the fill of the
histogram.
kwargs : dict
All other kwargs are passed to bokeh.plotting.figure()
Returns
-------
output : Bokeh figure
Figure populated with histogram.
"""
if data is None:
raise RuntimeError("Input `data` must be specified.")
# Instantiate Bokeh plot if not already passed in
if p is None:
y_axis_label = kwargs.pop("y_axis_label", "density" if density else "count")
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 275
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 400
y_range = kwargs.pop("y_range", bokeh.models.DataRange1d(start=0))
p = bokeh.plotting.figure(y_axis_label=y_axis_label, y_range=y_range, **kwargs)
# Compute histogram
bins = _bins_to_np(data, bins)
e0, f0 = _compute_histogram(data, bins, density)
if kind == "step":
p.line(e0, f0, **line_kwargs)
if kind == "step_filled":
x2 = [e0.min(), e0.max()]
y2 = [0, 0]
p = fill_between(e0, f0, x2, y2, show_line=True, p=p, patch_kwargs=patch_kwargs)
return p
| 5,349,011 |
def select_path() -> None:
"""Opens popup prompting user to pick a directory. Fills 'path' entry widget with path."""
root.update()
directory = str(tkinter.filedialog.askdirectory(title= "Download Path Selection", parent= root))
root.update()
entry = find_widgets_by_name("path")
entry.delete(0, "end")
entry.insert(0, directory)
| 5,349,012 |
def write_test_report(project, test_rep_path):
"""
Given a project that has been evaluated (.evaluate()),
extract test data and write it to an HTML document at risk_rep_path
"""
with open(test_rep_path, 'w') as test_rep:
# test_table is a bunch of <tr> rows with <td> columns
test_table = create_task_table(project.tests)
test_rep.write("""<!DOCTYPE html>
<html lang="en">
<head>
<title>Test Report: {name}</title>
<style>{REPORT_CSS}</style>
</head>
<body>
<h1>Test Report: {name}</h1>
<p>Point of Contact: <code>{poc}</code></p>
<details class="shaded"><summary>Deliverables</summary>
{deliverable}
</details>
<br>
<p>Test duration: <code>{eval_duration}</code> (hours:minutes:seconds)</p>
<br>
<table>
<tr>
<th>Tests</th>
<th>Status</th>
<th>Description</th>
</tr>
{test_table}
</table>
<br>
<script>
function toggleRow(event, element) {{
if (event.target == element || event.target.parentNode == element) {{
element.getElementsByClassName('expanded-row-content')[0].classList.toggle('hide-row');
}}
}}
</script>
<h2>Warnings</h2>
<em>These are minor issues detected when saving the project description</em>
<pre>{warnings}</pre>
</body>
</html>
""".format(
REPORT_CSS=REPORT_CSS,
name=html.escape(project.name),
poc=html.escape(project.poc),
deliverable=project.deliverable.get_report_desc(),
eval_duration=datetime.timedelta(seconds=project.evaluation_duration_s),
test_table=test_table,
warnings=project.get_warning_lines(),
))
project.reports.append(test_rep_path)
| 5,349,013 |
def getGarbageBlock():
"""获取正在标记的众生区块
{
?block_id=
}
Returns json
{
"is_success":bool,
"data": {"id": ,
"election_period":
"beings_block_id":
"votes":
"vote_list":
"status":
"create_time":
"""
try:
beings_block_id = request.args.get("block_id")
res = blockOfGarbage.getGarbageBlockQueue(beings_block_id)
if res is None:
http_message = HttpMessage(is_success=False, data=res)
else:
http_message = HttpMessage(is_success=True, data=res)
return http_message.getJson()
except Exception as err:
print(err)
http_message = HttpMessage(is_success=False, data="参数错误")
return http_message.getJson()
| 5,349,014 |
def partition(items, low, high):
"""Return index `p` after in-place partitioning given items in range
`[low...high]` by choosing a pivot (TODO: document your method here) from
that range, moving pivot into index `p`, items less than pivot into range
`[low...p-1]`, and items greater than pivot into range `[p+1...high]`.
TODO: Running time: ??? O(n)
TODO: Memory usage: ??? O(1)
# TODO: Choose a pivot any way and document your method in docstring above
# TODO: Loop through all items in range [low...high]
# TODO: Move items less than pivot into front of range [low...p-1]
# TODO: Move items greater than pivot into back of range [p+1...high]
# TODO: Move pivot item into final position [p] and return index p
"""
divider = low #keeps track of the pivot index used for comparision
pivot = high #default pivot index
for i in range(low, high):
# Move items less than pivot into front of range [low...p-1]
# Move items greater than pivot into back of range [p+1...high]
if items[i] < items[pivot]: #this does the work
items[i], items[divider] = items[divider], items[i] # by moving the items less than
divider += 1 # and leaving items greater where they are
# Move pivot item into final position [p] and return index p
items[pivot], items[divider] = items[divider], items[pivot]
return divider
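# Usage sketch: Lomuto partition around the last element, plus a hypothetical
# quicksort driver showing how the function is typically used.
items = [9, 3, 7, 1, 8, 2]
p = partition(items, 0, len(items) - 1)
print(items, p)  # [1, 2, 7, 9, 8, 3] 1 -- everything left of index p is smaller, everything right is >= items[p]

def quicksort(items, low=0, high=None):
    """Sort items in place by recursively partitioning around a pivot."""
    if high is None:
        high = len(items) - 1
    if low < high:
        p = partition(items, low, high)
        quicksort(items, low, p - 1)
        quicksort(items, p + 1, high)

quicksort(items)
print(items)  # [1, 2, 3, 7, 8, 9]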
| 5,349,015 |
def describe(module):
""" Describe the module object passed as argument
including its classes and functions """
wi('[Module: %s]\n' % module.__name__)
indent()
count = 0
for name in dir(module):
obj = getattr(module, name)
if inspect.isclass(obj):
count += 1; describe_klass(obj)
elif (inspect.ismethod(obj) or inspect.isfunction(obj)):
count +=1 ; describe_func(obj)
elif inspect.isbuiltin(obj):
count += 1; describe_builtin(obj)
if count==0:
wi('(No members)')
dedent()
| 5,349,016 |
def cluster_confusion_matrix(pred_cluster:Dict, target_cluster:Dict) -> EvalUnit:
""" simulate confusion matrix
Args:
pred_cluster: Dict element: cluster_id (cluster_id from 0 to max_size)| predicted clusters
target_cluster: Dict element:cluster_id (cluster_id from 0 to max_size) | target clusters
Returns:
In order to return detailed data, It will return a EvalUnit,
"""
pred_elements = list(pred_cluster.keys())
target_elements = list(target_cluster.keys())
it = itertools.product(pred_elements,target_elements)
tp,fp,tn,fn = 0,0,0,0
for x,y in it:
if x != y:  # only compare pairs of distinct elements
# look up each element's cluster id in the predicted and target clusterings
x_cluster = pred_cluster[x]
x_cluster_ = target_cluster[x]
y_cluster = pred_cluster[y]
y_cluster_ = target_cluster[y]
if x_cluster == y_cluster and x_cluster_ == y_cluster_:
tp += 1
elif x_cluster != y_cluster and x_cluster_ != y_cluster_:
tn += 1
elif x_cluster == y_cluster and x_cluster_ != y_cluster_:
fp += 1
else:
fn +=1
return EvalUnit(tp,tn,fp,fn,'rand_index')
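# Usage sketch (hypothetical clusterings; EvalUnit is the module's own result
# type holding the pairwise tp/tn/fp/fn counts).
pred = {'a': 0, 'b': 0, 'c': 1, 'd': 1}   # predicted cluster id per element
gold = {'a': 0, 'b': 1, 'c': 1, 'd': 1}   # reference cluster id per element
unit = cluster_confusion_matrix(pred, gold)
# e.g. the pair (c, d) counts toward tp: same cluster in both the prediction and the reference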
| 5,349,017 |
def change_short(x, y, limit):
"""Return abs(y - x) as a fraction of x, but with a limit.
>>> x, y = 2, 5
>>> abs(y - x) / x
1.5
>>> change_short(x, y, 100)
1.5
>>> change_short(x, y, 1) # 1 is smaller than 1.5
1
>>> x = 0
>>> change_short(x, y, 100) # No error, even though abs(y - x) / x divides by 0!
100
"""
return limited(x, limit if (x == 0) else abs(y - x) / x, limit)
| 5,349,018 |
def main(E_USER):
"""This is the main menu of Checkcryption.
E_USER: the encrypted username, used for verification."""
while True: # The 'crypter' loop
print('\nWhat would you like to do?')
option = input('Type \'e\' to encrypt, \'v\' to verify, or \'q\' to quit: ')
if re.match('e', option.lower()): # encrypt data
print('\nEncryption menu:')
source, source_location = source_menu('e') # Get source data
dest = dest_menu('e', source_location) # Get destination location
del source_location
verify_menu(source, dest, E_USER, 'e') # Verify user and encrypt
continue
elif re.match('v', option.lower()): # decrypt data
print('\nVerification menu:')
source, source_location = source_menu('v') # Get source data
dest = dest_menu('v') # Get encrypted data
del source_location
verify_menu(source, dest, E_USER, 'v') # Verify user and decrypt
continue
elif re.match('q', option.lower()): # quit
break
else:
print('\nThat is not a valid option, please try again.')
continue
del E_USER
| 5,349,019 |
def generate_correlation_map(f, x_data, y_data, method='chisquare_spectroscopic', filter=None, resolution_diag=20, resolution_map=15, fit_args=tuple(), fit_kws={}, distance=5, npar=1):
"""Generates a correlation map for either the chisquare or the MLE method.
On the diagonal, the chisquare or loglikelihood is drawn as a function of one fixed parameter.
Refitting to the data each time gives the points on the line. A dashed line is drawn on these
plots, with the intersection with the plots giving the correct confidence interval for the
parameter. In solid lines, the interval estimated by the fitting routine is drawn.
On the offdiagonal, two parameters are fixed and the model is again fitted to the data.
The change in chisquare/loglikelihood is mapped to 1, 2 and 3 sigma contourmaps.
Parameters
----------
f: :class:`.BaseModel`
Instance of the model for which the contour map has to be generated.
x_data: array_like or list of array_likes
Data on the x-axis for the fit. Must be appropriate input for *f*.
y_data: array_like or list of array_likes
Data on the y-axis for the fit. Must be appropriate input for *f*.
Other parameters
----------------
method: {'chisquare', 'chisquare_spectroscopic', 'mle'}
Chooses between generating the map for the chisquare routine or for
the likelihood routine.
filter: list of strings
Only the parameters matching the names given in this list will be used
to generate the maps.
resolution_diag: int
Number of points for the line plot on each diagonal.
resolution_map: int
Number of points along each dimension for the meshgrids.
fit_kws: dictionary
Dictionary of keywords to pass on to the fitting routine.
npar: int
Number of parameters for which simultaneous predictions need to be made.
Influences the uncertainty estimates from the parabola."""
# Save the original goodness-of-fit and parameters for later use
mapping = {'chisquare_spectroscopic': (fitting.chisquare_spectroscopic_fit, 'chisqr_chi'),
'chisquare': (fitting.chisquare_fit, 'chisqr_chi'),
'mle': (fitting.likelihood_fit, 'likelihood_mle')}
func, attr = mapping.pop(method.lower(), (fitting.chisquare_spectroscopic_fit, 'chisqr_chi'))
title = '{}\n${}_{{-{}}}^{{+{}}}$'
title_e = '{}\n$({}_{{-{}}}^{{+{}}})e{}$'
fit_kws['verbose'] = False
fit_kws['hessian'] = False
to_save = {'mle': ('fit_mle', 'result_mle')}
to_save = to_save.pop(method.lower(), ('chisq_res_par', 'ndof_chi', 'redchi_chi'))
saved = [copy.deepcopy(getattr(f, attr)) for attr in to_save]
# func(f, x_data, y_data, *fit_args, **fit_kws)
orig_value = getattr(f, attr)
orig_params = copy.deepcopy(f.params)
state = fitting._get_state(f, method=method.lower())
ranges = {}
chifunc = lambda x: chi2.cdf(x, npar) - 0.682689492 # Calculate 1 sigma boundary
boundary = optimize.root(chifunc, npar).x[0] * 0.5 if method.lower() == 'mle' else optimize.root(chifunc, npar).x[0]
# Select all variable parameters, generate the figure
param_names = []
no_params = 0
for p in orig_params:
if orig_params[p].vary and (filter is None or any([f in p for f in filter])):
no_params += 1
param_names.append(p)
fig, axes, cbar = _make_axes_grid(no_params, axis_padding=0, cbar=no_params > 1)
# Make the plots on the diagonal: plot the chisquare/likelihood
# for the best fitting values while setting one parameter to
# a fixed value.
saved_params = copy.deepcopy(f.params)
function_kws = {'method': method.lower(), 'func_args': fit_args, 'func_kwargs': fit_kws}
function_kws['orig_stat'] = orig_value
for i in range(no_params):
params = copy.deepcopy(saved_params)
ranges[param_names[i]] = {}
# Set the y-ticklabels.
ax = axes[i, i]
ax.set_title(param_names[i])
if i == no_params-1:
if method.lower().startswith('chisquare'):
ax.set_ylabel(r'$\Delta\chi^2$')
else:
ax.set_ylabel(r'$\Delta\mathcal{L}$')
# Select starting point to determine error widths.
value = orig_params[param_names[i]].value
stderr = orig_params[param_names[i]].stderr
print(stderr)
stderr = stderr if stderr is not None else 0.01 * np.abs(value)
stderr = stderr if stderr != 0 else 0.01 * np.abs(value)
result_left, success_left = fitting._find_boundary(-stderr, param_names[i], boundary, f, x_data, y_data, function_kwargs=function_kws)
result_right, success_right = fitting._find_boundary(stderr, param_names[i], boundary, f, x_data, y_data, function_kwargs=function_kws)
success = success_left * success_right
ranges[param_names[i]]['left'] = result_left
ranges[param_names[i]]['right'] = result_right
if not success:
print("Warning: boundary calculation did not fully succeed for " + param_names[i])
right = np.abs(ranges[param_names[i]]['right'] - value)
left = np.abs(ranges[param_names[i]]['left'] - value)
params[param_names[i]].vary = False
left_val, right_val = max(value - distance * left, orig_params[param_names[i]].min), min(value + distance * right, orig_params[param_names[i]].max)
ranges[param_names[i]]['right_val'] = right_val
ranges[param_names[i]]['left_val'] = left_val
value_range = np.linspace(left_val, right_val, resolution_diag)
value_range = np.sort(np.append(value_range, np.array([value - left, value + right, value])))
chisquare = np.zeros(len(value_range))
# Calculate the new value, and store it in the array. Update the progressbar.
with tqdm.tqdm(value_range, desc=param_names[i], leave=True) as pbar:
for j, v in enumerate(value_range):
chisquare[j] = fitting.calculate_updated_statistic(v, param_names[i], f, x_data, y_data, **function_kws)
fitting._set_state(f, state, method=method.lower())
pbar.update(1)
# Plot the result
ax.plot(value_range, chisquare, color='k')
c = '#0093e6'
# Indicate the used interval.
ax.axvline(value + right, ls="dashed", color=c)
ax.axvline(value - left, ls="dashed", color=c)
ax.axvline(value, ls="dashed", color=c)
ax.axhline(boundary, color=c)
up = '{:.2ug}'.format(u.ufloat(value, right))
down = '{:.2ug}'.format(u.ufloat(value, left))
val = up.split('+')[0].split('(')[-1]
r = up.split('-')[1].split(')')[0]
l = down.split('-')[1].split(')')[0]
if 'e' in up or 'e' in down:
ex = up.split('e')[-1]
ax.set_title(title_e.format(param_names[i], val, l, r, ex))
else:
ax.set_title(title.format(param_names[i], val, l, r))
# Restore the parameters.
fitting._set_state(f, state, method=method.lower())
for i, j in zip(*np.tril_indices_from(axes, -1)):
params = copy.deepcopy(orig_params)
ax = axes[i, j]
x_name = param_names[j]
y_name = param_names[i]
if j == 0:
ax.set_ylabel(y_name)
if i == no_params - 1:
ax.set_xlabel(x_name)
right = ranges[x_name]['right_val']
left = ranges[x_name]['left_val']
x_range = np.append(np.linspace(left, right, resolution_map), orig_params[x_name].value)
x_range = np.sort(x_range)
right = ranges[y_name]['right_val']
left = ranges[y_name]['left_val']
y_range = np.append(np.linspace(left, right, resolution_map), orig_params[y_name].value)
y_range = np.sort(y_range)
X, Y = np.meshgrid(x_range, y_range)
Z = np.zeros(X.shape)
i_indices, j_indices = np.indices(Z.shape)
with tqdm.tqdm(i_indices.flatten(), desc=param_names[j]+' ' + param_names[i], leave=True) as pbar:
for k, l in zip(i_indices.flatten(), j_indices.flatten()):
x = X[k, l]
y = Y[k, l]
Z[k, l] = fitting.calculate_updated_statistic([x, y], [x_name, y_name], f, x_data, y_data, **function_kws)
fitting._set_state(f, state, method=method.lower())
pbar.update(1)
Z = -Z
npar = 1
bounds = []
for bound in [0.997300204, 0.954499736, 0.682689492]:
chifunc = lambda x: chi2.cdf(x, npar) - bound # Calculate 1 sigma boundary
bounds.append(-optimize.root(chifunc, npar).x[0])
# bounds = sorted([-number*number for number in np.arange(1, 9, .1)])
bounds.append(1)
if method.lower() == 'mle':
bounds = [b * 0.5 for b in bounds]
norm = mpl.colors.BoundaryNorm(bounds, invcmap.N)
contourset = ax.contourf(X, Y, Z, bounds, cmap=invcmap, norm=norm)
f.params = copy.deepcopy(orig_params)
if method.lower() == 'mle':
f.fit_mle = copy.deepcopy(orig_params)
else:
f.chisq_res_par
try:
cbar = plt.colorbar(contourset, cax=cbar, orientation='vertical')
cbar.ax.yaxis.set_ticks([0, 1/6, 0.5, 5/6])
cbar.ax.set_yticklabels(['', r'3$\sigma$', r'2$\sigma$', r'1$\sigma$'])
except:
pass
setattr(f, attr, orig_value)
for attr, value in zip(to_save, saved):
setattr(f, attr, copy.deepcopy(value))
for a in axes.flatten():
if a is not None:
for label in a.get_xticklabels()[::2]:
label.set_visible(False)
for label in a.get_yticklabels()[::2]:
label.set_visible(False)
return fig, axes, cbar
| 5,349,020 |
def _botocore_error_serializer_integration_test(
service: str,
action: str,
exception: ServiceException,
code: str,
status_code: int,
message: str,
):
"""
Performs an integration test for the error serialization using botocore as parser.
It executes the following steps:
- Load the given service (f.e. "sqs")
- Serialize the _error_ response with the appropriate serializer from the AWS Service Framework
- Parse the serialized error response using the botocore parser
- Checks that the metadata is correct (status code, requestID,...)
- Checks if the parsed error response content is correct
:param service: to load the correct service specification, serializer, and parser
:param action: to load the correct service specification, serializer, and parser
:param exception: which should be serialized and tested against
:param code: expected "code" of the exception (i.e. the AWS specific exception ID, f.e.
"CloudFrontOriginAccessIdentityAlreadyExists")
:param status_code: expected HTTP response status code
:param message: expected error message
:return: None
"""
# Load the appropriate service
service = load_service(service)
# Use our serializer to serialize the response
response_serializer = create_serializer(service)
serialized_response = response_serializer.serialize_error_to_response(
exception, service.operation_model(action)
)
# Use the parser from botocore to parse the serialized response
response_parser: ResponseParser = create_parser(service.protocol)
parsed_response = response_parser.parse(
serialized_response.to_readonly_response_dict(),
service.operation_model(action).output_shape,
)
# Check if the result is equal to the initial response params
assert "Error" in parsed_response
assert "Code" in parsed_response["Error"]
assert "Message" in parsed_response["Error"]
assert parsed_response["Error"]["Code"] == code
assert parsed_response["Error"]["Message"] == message
assert "ResponseMetadata" in parsed_response
assert "RequestId" in parsed_response["ResponseMetadata"]
assert len(parsed_response["ResponseMetadata"]["RequestId"]) == 52
assert "HTTPStatusCode" in parsed_response["ResponseMetadata"]
assert parsed_response["ResponseMetadata"]["HTTPStatusCode"] == status_code
| 5,349,021 |
async def async_write_text(path, text):
"""Asynchronous IO writes of text data `text` to disk on the file path `path`"""
async with aiofiles.open(path, "w", encoding="utf-8") as f:
await f.write(text)
| 5,349,022 |
def local_ip():
"""find out local IP, when running spark driver locally for remote cluster"""
ip = ((([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")] or [[(s.connect(
("8.8.8.8", 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) + ["no IP found"])[0])
return ip
| 5,349,023 |
def string_to_int_replacements(setting, item, for_property):
"""Maps keys to values from setting and item for replacing string
templates for settings which need to be converted to/from Strings.
"""
replacements = common_replacements(setting, item)
replacements.update({
'$string_to_int': string_to_int(item, property=for_property),
'$int_to_string': int_to_string(item)})
return replacements
| 5,349,024 |
def get_polytope(parser: ArgumentParser) -> None:
"""Adds arguments to parser for outputting polytope of single example"""
parser.add_argument("--word", type=str, help="Word representing the one relator", required=True)
| 5,349,025 |
def get_var_path(path):
"""
get the path stored in the 'app/data' directory
:param path: string
:return: string
"""
return os.path.join(VAR_DIR, path)
| 5,349,026 |
def solr_dt(instance, attribute, value):
"""A validator for ensuring a datetime string is Solr compatible."""
datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
| 5,349,027 |
def GitClone(
clone_url: str,
destination: pathlib.Path,
shallow: bool = False,
recursive: bool = False,
timeout: int = 3600,
) -> pathlib.Path:
"""Clone a repository from Github.
Args:
clone_url: The URL of the repo to clone.
destination: The output path. If this is already a non-empty directory, this
will fail.
shallow: Perform a shallow clone if set.
recursive: Clone submodules.
timeout: The maximum number of seconds to run a clone for before failing.
Returns:
The destination argument.
Raises:
RepoCloneFailed: On error.
"""
cmd = [
"timeout",
"-s9",
str(timeout),
"git",
"clone",
clone_url,
str(destination),
]
if shallow:
cmd += ["--depth", "1"]
if recursive:
cmd.append("--recursive")
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
raise RepoCloneFailed(f"Failed to clone repository: {clone_url}")
if not (destination / ".git").is_dir():
raise RepoCloneFailed(
f"Cloned repo `{clone_url}` but `{destination}/.git` not found"
)
return destination
| 5,349,028 |
def UnpackU32(buf, offset=0, endian='big'):
""" Unpack a 32-bit unsigned integer into 4 bytes.
Parameters:
buf - Input packed buffer.
offset - Offset in buffer.
endian - Byte order.
Return:
2-tuple of unpacked value, new buffer offset.
"""
try:
return (struct.unpack_from(_EndianCode[endian]+'I', buf, offset)[0],
offset+4)
except (KeyError, TypeError, DeprecationWarning, struct.error) as inst:
_UnpackException('u32', offset, endian, inst)
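# Usage sketch: read one big-endian uint32 from a byte buffer (assumes the
# module's _EndianCode table maps 'big' to the corresponding struct prefix).
buf = b'\x00\x00\x01\x00\xff'
value, offset = UnpackU32(buf, offset=0, endian='big')
print(value, offset)  # 256 4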
| 5,349,029 |
def get_reset_token(user, expires_sec=1800):
"""
Create a token for resetting the user's password
Args:
user:
expires_sec:
Returns:
token: string
"""
hash_token_password = Serializer(APP.config['SECRET_KEY'], expires_sec)
return hash_token_password.dumps({'user_name': user.user_name}).decode('utf-8')
| 5,349,030 |
def _on_error_resume_next(*sources: Union[Observable, Future]) -> Observable:
"""Continues an observable sequence that is terminated normally or
by an exception with the next observable sequence.
Examples:
>>> res = rx.on_error_resume_next(xs, ys, zs)
Returns:
An observable sequence that concatenates the source sequences,
even if a sequence terminates exceptionally.
"""
sources_ = iter(sources)
def subscribe(observer, scheduler=None):
scheduler = scheduler or current_thread_scheduler
subscription = SerialDisposable()
cancelable = SerialDisposable()
def action(scheduler, state=None):
try:
source = next(sources_)
except StopIteration:
observer.on_completed()
return
# Allow source to be a factory method taking an error
source = source(state) if callable(source) else source
current = rx.from_future(source) if is_future(source) else source
d = SingleAssignmentDisposable()
subscription.disposable = d
def on_resume(state=None):
scheduler.schedule(action, state)
d.disposable = current.subscribe_(observer.on_next, on_resume, on_resume, scheduler)
cancelable.disposable = scheduler.schedule(action)
return CompositeDisposable(subscription, cancelable)
return Observable(subscribe)
| 5,349,031 |
def convert_literal_tuple_string_index_to_tuple(
df: pd.DataFrame, comm_before_user: bool = True, vertex_to_int: bool = False):
"""
Converts a DataFrame's literal string index of form '(community, vertex)' to a tuple (community, vertex) index.
Changes input DataFrame inplace.
"""
# convert index to column
df.reset_index(level=0, inplace=True)
# evaluate tuple literal
df['evaluated_index'] = [
_index_tuple_literal_eval_with_ordering(
string=tup.index,
comm_before_user=comm_before_user,
vertex_to_int=vertex_to_int)
for tup
in df.itertuples()
]
# convert back to index
df.set_index('evaluated_index', inplace=True)
| 5,349,032 |
def prompt_for_value(field_name: str, field_type):
"""Promt the user to input a value for the parameter `field_name`."""
print_formatted_text(
f"No value found for field '{field_name}' of type '{field_type}'. "
"Please enter a value for this parameter:"
)
response = prompt("> ")
while response == "":
print_formatted_text(f"No input received, please enter a value:")
response = prompt("> ")
return parse_value_from_string(response)
| 5,349,033 |
def _send_kv_msg(sender, msg, recv_id):
"""Send kvstore message.
Parameters
----------
sender : ctypes.c_void_p
C sender handle
msg : KVStoreMsg
kvstore message
recv_id : int
receiver's ID
"""
if msg.type == KVMsgType.PULL:
tensor_id = F.zerocopy_to_dgl_ndarray(msg.id)
_CAPI_SenderSendKVMsg(
sender,
int(recv_id),
msg.type.value,
msg.rank,
msg.name,
tensor_id)
elif msg.type in (KVMsgType.INIT, KVMsgType.GET_SHAPE_BACK):
tensor_shape = F.zerocopy_to_dgl_ndarray(msg.shape)
_CAPI_SenderSendKVMsg(
sender,
int(recv_id),
msg.type.value,
msg.rank,
msg.name,
tensor_shape)
elif msg.type in (KVMsgType.IP_ID, KVMsgType.GET_SHAPE):
_CAPI_SenderSendKVMsg(
sender,
int(recv_id),
msg.type.value,
msg.rank,
msg.name)
elif msg.type in (KVMsgType.FINAL, KVMsgType.BARRIER):
_CAPI_SenderSendKVMsg(
sender,
int(recv_id),
msg.type.value,
msg.rank)
else:
tensor_id = F.zerocopy_to_dgl_ndarray(msg.id)
data = F.zerocopy_to_dgl_ndarray(msg.data)
_CAPI_SenderSendKVMsg(
sender,
int(recv_id),
msg.type.value,
msg.rank,
msg.name,
tensor_id,
data)
| 5,349,034 |
def move_j(joints, accel, vel):
"""
Function that returns UR script for linear movement in joint space.
Args:
joints: A list of 6 joint angles (double).
accel: tool acceleration in m/s^2
vel: tool speed in m/s
Returns:
script: UR script
"""
# TODO: Test
# TODO: Check acceleration and velocity are below set limit
joint_positions = _format_joint_positions(joints)
return "movej({}, a = {:.2f}, v = {:.2f})\n".format(joint_positions, abs(accel), abs(vel))
| 5,349,035 |
def create_dist_list(dist: str, param1: str, param2: str) -> list:
""" Creates a list with a special syntax describing a distribution
Syntax: [identifier, param1, param2 (if necessary)]
"""
dist_list: list = []
if dist == 'fix':
dist_list = ["f", float(param1)]
elif dist == 'binary':
dist_list = ["b", float(param1)]
elif dist == 'binomial':
dist_list = ["i", float(param1), float(param2)]
elif dist == 'normal':
dist_list = ["n", float(param1), float(param2)]
elif dist == 'uniform':
dist_list = ["u", float(param1), float(param2)]
elif dist == 'poisson':
dist_list = ["p", float(param1)]
elif dist == 'exponential':
dist_list = ["e", float(param1)]
elif dist == 'lognormal':
dist_list = ["l", float(param1), float(param2)]
elif dist == 'chisquare':
dist_list = ["c", float(param1)]
elif dist == 'standard-t':
dist_list = ["t", float(param1)]
return dist_list
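# Usage sketch: encode "a normal distribution with mean 5 and std 2" and
# "a Poisson distribution with rate 3" (param2 is ignored for one-parameter distributions).
print(create_dist_list('normal', '5', '2'))  # ['n', 5.0, 2.0]
print(create_dist_list('poisson', '3', ''))  # ['p', 3.0]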
| 5,349,036 |
def compute_arfgm(t, qd, p, qw=0.0):
"""computes relative humidity from temperature, pressure and specific humidty (qd)
This might be similar to https://unidata.github.io/MetPy/latest/api/generated/metpy.calc.relative_humidity_from_specific_humidity.html
algorithm from RemapToRemo addgm
**Arguments:**
*p:*
atmospheric pressure ([Pa], 3d)
*t:*
temperature fields ([K], 3d)
*qd:*
specific humidity fields ([kg/kg], 3d)
*qw:*
liquid water content ([kg/kg], 3d)
**Returns:**
*relhum:*
relative humidity ([%],3d)
"""
# return fgqd(fgee(t),p)
#gqd = np.where(t >= C.B3, fgqd(fgew(t), p), fgqd(fgee(t), p))
fge = np.where(t >= C.B3, fgew(t), fgee(t))
gqd = fgqd(fge, p)
relhum = qd / gqd
return np.where(relhum > 1.0, (gqd + qw) / gqd, (qd + qw) / gqd)
| 5,349,037 |
def getOutputMeshpath(path, meshtype=None):
"""Returns the folder path for mesh file export as specified in the GUI.
Phobos by default creates a directory 'meshes' in the export path and subsequently creates
sub-directories of "export/path/meshes" for every format, e.g. resulting in "export/path/mesh/obj"
for .obj file export.
Args:
path(str): export path root (set in the GUI)
meshtype(str, optional): a valid mesh type, otherwise the type set in the GUI is used (Default value = None)
Returns:
string: output path for meshes
"""
return os.path.join(path, 'meshes', meshtype if meshtype else getOutputMeshtype())
| 5,349,038 |
def load_data():
""" loads the data for this task
:return:
"""
fpath = 'images/ball.png'
radius = 70
Im = cv2.imread(fpath, 0).astype('float32')/255 # 0 .. 1
# we resize the image to speed-up the level set method
Im = cv2.resize(Im, dsize=(0, 0), fx=0.5, fy=0.5)
height, width = Im.shape
centre = (width // 2, height // 2)
Y, X = np.ogrid[:height, :width]
phi = radius - np.sqrt((X - centre[0]) ** 2 + (Y - centre[1]) ** 2)
return Im, phi
| 5,349,039 |
def iteration_recognition(iteration=3, lists=5) -> NoReturn:
"""
Based on ~ CSC2032: Tutorial 1.4.1/2.1.1 ~ Question 3
Displays an array partially sorted by insertion sort.
The original array is display with others that are not
the original array. The user must pick out the correct
array.
"""
# Init. obstacles, copy of list, and partial sorted version of list
p_answers = [] # p_answers -> possible answers
a = return_rarray(size=7)
og_list = a.copy()
partially_sorted_array = insertion_sort_test(a)[iteration]
# Populate p_answers with non-answers and answer
i, i2 = 0, 0 # while-loop prevents equal ptrs causing no swap
while (i == i2): i, i2 = randint(0, len(a) - 1), randint(0, len(a) - 1)
# Swap elements in the original array and display to confuse user
for i in range(lists):
temp_list = og_list.copy()
swap(temp_list, i, i2)
p_answers.append(temp_list)
# Change a value in each possible answer list to be sorted
for pa in p_answers:
if (randint(0,1)): pa[randint(1,len(pa)-1)] = randint(0, max(pa)+2)
# Sets at least one vector in p_answers to the actual answer
p_answers[randint(0, lists - 1)] = og_list
# Display question information to user
print(f'\nSuppose after {int(iteration)} outer iterations of insertion')
print('sort we have the following array:')
print(f'{partially_sorted_array}\n') # Display actual trace
print('Then which of the following arrays could have been the initial')
print('array insertion sort was applied to.\n')
for i, vec in enumerate(p_answers): print(f"{i + 1}) {list_to_string(vec)}")
# User input is taken and evaluated
try:
user_inp = int(input('\nEnter the number of any correct array: '))
if not(0 < user_inp <= len(p_answers)):
print('\n<Invalid input - question exited>\n')
return
except ValueError:
print('\n<Invalid input - question exited>\n')
return
try:
user_list = insertion_sort_test(p_answers[user_inp-1])[iteration]
if (user_list == partially_sorted_array):
print('<Answer: Correct!>\n')
else:
print('<Answer: Incorrect ._. >\n')
acceptable = []
for i,arr in enumerate(p_answers):
if (insertion_sort_test(arr)[iteration] == partially_sorted_array):
acceptable.append(i+1)
print(f"Acceptable Answer(s): {list_to_string(acceptable)}")
except IndexError:
print('<Answer: Incorrect ._. >\n')
s = 'Acceptable answers:'
for i, pa in enumerate(p_answers):
if (pa == og_list): s = s + ' ' + f"[{str(i)}]"
print(s)
| 5,349,040 |
def validate_args(args, dhis_version):
"""
Validate arguments. Raises Exception if invalid
:param args: the argparse arguments
:param dhis_version: DHIS 2 version as an integer (e.g. 32)
:return: None
"""
if args.extend:
if not args.groups and not args.public_access:
raise PKClientException("ArgumentError: Must supply user groups when extending sharing - check your argument (-g)")
else:
if not args.public_access or len(args.public_access) not in (1, 2):
raise PKClientException("ArgumentError: Must use -a METADATA [DATA] - max. 2 arguments")
if args.groups:
for group in args.groups:
try:
metadata_permission = group[1]
except IndexError:
raise PKClientException("ArgumentError: Missing User Group permission for METADATA access")
if metadata_permission not in access.keys():
raise PKClientException('ArgumentError: User Group permission for METADATA access not valid: "{}"'.format(metadata_permission))
if dhis_version >= NEW_SYNTAX:
try:
data_permission = group[2]
except IndexError:
pass
else:
if data_permission not in access.keys():
raise PKClientException(
'ArgumentError: User Group permission for DATA access not valid: "{}"'.format(
data_permission))
| 5,349,041 |
def create_response_body(input_json):
"""Create a json response with specific args of input JSON."""
city_name = str(input_json['name'])
country_code = str(input_json['sys']['country'])
temp_celsius = get_val_unit(input_json, 'main', 'temp', ' °C')
wind_speed = get_val_unit(input_json, 'wind', 'speed', ' m/s')
wind_deg = get_val_unit(input_json, 'wind', 'deg', ' deg')
cloudines = get_cloudines(input_json)
pressure = get_val_unit(input_json, 'main', 'pressure', ' hPa')
humidity_percent = get_val_unit(input_json, 'main', 'humidity', '%')
coord_lon = str(input_json['coord']['lon'])
coord_lat = str(input_json['coord']['lat'])
sunrise_hour = get_hour_time(input_json, 'sys', 'sunrise')
sunset_hour = get_hour_time(input_json, 'sys', 'sunset')
requested_time = get_datetime_from_unix(0, input_json, 'dt')
output_json = {
"location_name": f"{city_name}, {country_code}",
"temperature": temp_celsius,
"wind": f"{wind_speed}, {wind_deg}",
"cloudines": cloudines,
"pressure": pressure,
"humidity": humidity_percent,
"sunrise": sunrise_hour,
"sunset": sunset_hour,
"geo_coordinates": [coord_lat, coord_lon],
"requested_time": requested_time
}
return output_json
| 5,349,042 |
def get_GESLA_surge_filename(city):
"""Get the path to the file containing GESLA-2 surge data for ‘city’."""
if city == "Brest":
filename = "brest-brest-france-refmar_SkewSurges.txt"
elif city == "La Rochelle":
filename = "la_rochelle_la_palli-la_rochelle_la_palli-france-refmar_SkewSurges.txt"
elif city == "Saint-Jean-de-Luz":
filename = "saint_jean_de_luz_so-saint_jean_de_luz_so-france-refmar_SkewSurges.txt"
elif city == "Cherbourg":
filename = "cherbourg-cherbourg-france-refmar_SkewSurges.txt"
elif city == "Le Havre":
filename = "le_havre-le_havre-france-refmar_SkewSurges.txt"
elif city == "Newlyn":
# Choose one of the following files
# filename = "newlyn,_cornwall-294a-united_kingdom-uhslc_SkewSurges.txt" # 1915-2010, 2 MB
# filename = "newlyn-newlyn-glossdm-bodc_SkewSurges.txt" # 1916-1944, 600 kB
filename = "newlyn-p001-uk-bodc_SkewSurges.txt" # 1915-2014, 1.7 MB
elif city == "Vigo":
filename = "vigo-vigo-spain-ieo_SkewSurges.txt"
elif city == "La Coruna":
filename = "la_coruna-830a-spain-uhslc_SkewSurges.txt"
elif city == "Santander":
filename = "santander-sant-spain-ieo_SkewSurges.txt"
else:
raise ValueError("unknown city: " + repr(city))
return os.path.join(GESLA_DIR, filename)
| 5,349,043 |
def getAllTraj () :
""" get all trajectories from C.TRAJ_DIR """
def loadPickle (f) :
with open(osp.join(C.TRAJ_DIR, f), 'rb') as fd :
return pickle.load(fd)
return list(map(loadPickle, os.listdir(C.TRAJ_DIR)))
| 5,349,044 |
def _get_count(_khoros_object, _user_id, _object_type):
"""This function returns the count of a specific user object (e.g. ``albums``, ``followers``, etc.) for a user.
:param _khoros_object: The core :py:class:`khoros.Khoros` object
:type _khoros_object: class[khoros.Khoros]
:param _user_id: The User ID associated with the user
:type _user_id: int, str
:param _object_type: The type of object for which to get the count (e.g. ``albums``, ``followers``, etc.)
:returns: The user object count as an integer
:raises: :py:exc:`khoros.errors.exceptions.GETRequestError`
"""
_api_response = query_users_table_by_id(_khoros_object, f'{_object_type}.count(*)', _user_id)
return int(_api_response['data']['items'][0][_object_type]['count'])
| 5,349,045 |
def antiSymmetrizeSignal(y, symmetryStep):
"""
Discard the symmetric part of a signal by
taking the difference of the signal at x[n] and x[n + symmetry_step]
get your corresponding x data as x[0:len(y)/2]
Parameters
----------
y : array_like
numpy array or list of data values to antisymmetrize
symmetryStep : scalar
expected symmetry of the signal at x[n] occurs at x[n+symmetryStep]
Returns
----------
y_symmetrized : ndarray
numpy array of dimension size(y)/2 of the antisymmetrized data
"""
y = np.array(y)
s = np.zeros(len(y) // 2)
for idx in range(0, len(s)):
# (positive field - negative field)/2
s[idx] = (y[idx] - y[idx+symmetryStep])/2.-(y[0] - y[0+symmetryStep])/2.
return s
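# Usage sketch: a field sweep measured at +H (first half) and -H (second half);
# the even-in-field background cancels and the odd-in-field signal 0.1*H remains
# (up to the offset that pins s[0] to zero).
import numpy as np

H = np.linspace(0, 1, 50)
up = 0.5 * H**2 + 0.1 * H    # background (even in field) + signal (odd in field) at +H
down = 0.5 * H**2 - 0.1 * H  # same background, sign-flipped signal at -H
s = antiSymmetrizeSignal(np.concatenate([up, down]), symmetryStep=50)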
| 5,349,046 |
def lms_to_rgb(img):
"""
rgb_matrix = np.array(
[[0.0809444479, -0.130504409, 0.116721066],
[0.113614708, -0.0102485335, 0.0540193266],
[-0.000365296938, -0.00412161469, 0.693511405]
]
)
"""
rgb_matrix = np.array(
[[ 2.85831110e+00, -1.62870796e+00, -2.48186967e-02],
[-2.10434776e-01, 1.15841493e+00, 3.20463334e-04],
[-4.18895045e-02, -1.18154333e-01, 1.06888657e+00]]
)
return np.tensordot(img, rgb_matrix, axes=([2], [1]))
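# Usage sketch: the conversion applies a fixed 3x3 matrix along the last
# (channel) axis of an H x W x 3 LMS image.
import numpy as np

lms_img = np.random.rand(4, 4, 3)
rgb_img = lms_to_rgb(lms_img)
print(rgb_img.shape)  # (4, 4, 3)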
| 5,349,047 |
def merge(output, students, **kwargs):
"""Read lists of students downloaded from Xidian, merge them to create a unified list."""
students_all = {}
fields = [kwargs["name_xdu"], kwargs["sid_xdu"]]
for xlsfile in students:
data = load_xls(xlsfile)
name_col, sid_col = find_column_index(data[0], fields)
stus = value_dict(data, sid_col, name_col)
students_all = merge_students(students_all, stus)
l_sid, l_name = kwargs["sid_baidu"], kwargs["name_baidu"]
form = student_form(students_all, l_sid, l_name)
# write_csv(form, output.replace(".xls", ".csv"))
write_xls(form, output, kwargs["sheet_baidu"])
| 5,349,048 |
def timeit(func):
"""
Decorator that returns the total runtime of a function
@param func: function to be timed
@return: wrapper that returns the time taken in seconds (the wrapped function's return value is discarded)
"""
def wrapper(*args, **kwargs) -> float:
start = time.time()
func(*args, **kwargs)
total_time = time.time() - start
return total_time
return wrapper
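# Usage sketch: the decorated function reports its runtime instead of its
# normal return value.
import time

@timeit
def busy_wait(seconds):
    time.sleep(seconds)

elapsed = busy_wait(0.1)
print(f"took {elapsed:.3f}s")  # roughly 0.100s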
| 5,349,049 |
def merge_count(data1, data2):
"""Auxiliary method to merge the lengths."""
return data1 + data2
| 5,349,050 |
def orthogonal_procrustes(fixed, moving):
"""
Implements point based registration via the Orthogonal Procrustes method.
Based on Arun's method:
Least-Squares Fitting of two, 3-D Point Sets, Arun, 1987,
`10.1109/TPAMI.1987.4767965 <http://dx.doi.org/10.1109/TPAMI.1987.4767965>`_.
Also see `this <http://eecs.vanderbilt.edu/people/mikefitzpatrick/papers/2009_Medim_Fitzpatrick_TRE_FRE_uncorrelated_as_published.pdf>`_
and `this <http://tango.andrew.cmu.edu/~gustavor/42431-intro-bioimaging/readings/ch8.pdf>`_.
:param fixed: point set, N x 3 ndarray
:param moving: point set, N x 3 ndarray of corresponding points
:returns: 3x3 rotation ndarray, 3x1 translation ndarray, FRE
:raises: ValueError
"""
validate_procrustes_inputs(fixed, moving)
# This is what we are calculating
R = np.eye(3)
T = np.zeros((3, 1))
# Arun equation 4
p = np.ndarray.mean(moving, 0)
# Arun equation 6
p_prime = np.ndarray.mean(fixed, 0)
# Arun equation 7
q = moving - p
# Arun equation 8
q_prime = fixed - p_prime
# Arun equation 11
H = np.matmul(q.transpose(), q_prime)
# Arun equation 12
# Note: numpy factors h = u * np.diag(s) * v
svd = np.linalg.svd(H)
# Replace Arun Equation 13 with Fitzpatrick, chapter 8, page 470,
# to avoid reflections, see issue #19
X = _fitzpatricks_X(svd)
# Arun step 5, after equation 13.
det_X = np.linalg.det(X)
if det_X < 0 and np.all(np.flip(np.isclose(svd[1], np.zeros((3, 1))))):
# Don't yet know how to generate test data.
# If you hit this line, please report it, and save your data.
raise ValueError("Registration fails as determinant < 0"
" and no singular values are close enough to zero")
if det_X < 0 and np.any(np.isclose(svd[1], np.zeros((3, 1)))):
# Implement 2a in section VI in Arun paper.
v_prime = svd[2].transpose()
v_prime[0][2] *= -1
v_prime[1][2] *= -1
v_prime[2][2] *= -1
X = np.matmul(v_prime, svd[0].transpose())
# Compute output
R = X
tmp = p_prime.transpose() - np.matmul(R, p.transpose())
T[0][0] = tmp[0]
T[1][0] = tmp[1]
T[2][0] = tmp[2]
fre = compute_fre(fixed, moving, R, T)
return R, T, fre
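# Usage sketch: recover a known rigid transform between two corresponding
# 4 x 3 point sets (the module's validate_procrustes_inputs and compute_fre
# helpers are assumed to be available alongside this function).
import numpy as np

fixed = np.array([[0.0, 0.0, 0.0],
                  [1.0, 0.0, 0.0],
                  [0.0, 1.0, 0.0],
                  [0.0, 0.0, 1.0]])
theta = np.radians(30)
rot_z = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                  [np.sin(theta),  np.cos(theta), 0.0],
                  [0.0,            0.0,           1.0]])
moving = fixed @ rot_z.T + np.array([1.0, 2.0, 3.0])
R, T, fre = orthogonal_procrustes(fixed, moving)
# R @ moving[i] + T.ravel() is approximately fixed[i] for every point, and fre is close to 0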
| 5,349,051 |
def make_model(arch_params, patch_size):
""" Returns the model.
Used to select the model.
"""
return RDN(arch_params, patch_size)
| 5,349,052 |
def calc_error(frame, gap, method_name):
"""Calculate the error between the ground truth and the GAP prediction"""
frame.single_point(method_name=method_name, n_cores=1)
pred = frame.copy()
pred.run_gap(gap=gap, n_cores=1)
error = np.abs(pred.energy - frame.energy)
logger.info(f'|E_GAP - E_0| = {np.round(error, 3)} eV')
return error
| 5,349,053 |
def find_model(sender, model_name):
"""
Register new model to ORM
"""
MC = get_mc()
model = MC.get((MC.c.model_name==model_name) & (MC.c.uuid!=''))
if model:
model_inst = model.get_instance()
orm.set_model(model_name, model_inst.table_name, appname=__name__, model_path='')
return orm.__models__.get(model_name)
| 5,349,054 |
def list_subtitles(videos, languages, pool_class=ProviderPool, **kwargs):
"""List subtitles.
The `videos` must pass the `languages` check of :func:`check_video`.
All other parameters are passed onwards to the provided `pool_class` constructor.
:param videos: videos to list subtitles for.
:type videos: set of :class:`~subliminal.video.Video`
:param languages: languages to search for.
:type languages: set of :class:`~babelfish.language.Language`
:param pool_class: class to use as provider pool.
:type: :class:`ProviderPool`, :class:`AsyncProviderPool` or similar
:return: found subtitles per video.
:rtype: dict of :class:`~subliminal.video.Video` to list of :class:`~subliminal.subtitle.Subtitle`
"""
listed_subtitles = defaultdict(list)
# check videos
checked_videos = []
for video in videos:
if not check_video(video, languages=languages):
logger.info('Skipping video %r', video)
continue
checked_videos.append(video)
# return immediately if no video passed the checks
if not checked_videos:
return listed_subtitles
# list subtitles
with pool_class(**kwargs) as pool:
for video in checked_videos:
logger.info('Listing subtitles for %r', video)
subtitles = pool.list_subtitles(video, languages - video.subtitle_languages)
listed_subtitles[video].extend(subtitles)
logger.info('Found %d subtitle(s)', len(subtitles))
return listed_subtitles
| 5,349,055 |
def public_incursion_no_expires(url, request):
""" Mock endpoint for incursion.
Public endpoint without cache
"""
return httmock.response(
status_code=200,
content=[
{
"type": "Incursion",
"state": "mobilizing",
"staging_solar_system_id": 30003893,
"constellation_id": 20000568,
"infested_solar_systems": [
30003888,
],
"has_boss": True,
"faction_id": 500019,
"influence": 1
}
]
)
| 5,349,056 |
def test_get_rows_none_selected():
"""Assert that an error is raised when no rows are selected."""
atom = ATOMClassifier(X_idx, y_idx, index=True, random_state=1)
with pytest.raises(ValueError, match=r".*has to be selected.*"):
atom._get_rows(index=slice(1000, 2000))
| 5,349,057 |
def bad_topics():
""" Manage Inappropriate topics """
req = request.vars
view_info = {}
view_info['errors'] = []
tot_del = 0
if req.form_submitted:
for item in req:
if item[:9] == 'inapp_id_':
inapp_id = int(req[item])
db(db.zf_topic_inappropriate.id==inapp_id).update(read_flag=True)
tot_del += 1
topics = db((db.zf_topic_inappropriate.read_flag==False) & (db.zf_topic.id==db.zf_topic_inappropriate.topic_id)).select(db.zf_topic_inappropriate.ALL, db.zf_topic.title, orderby=~db.zf_topic_inappropriate.creation_date)
view_info.update({'removed': tot_del})
return dict(request=request, topics=topics, view_info=view_info)
else:
topics = db((db.zf_topic_inappropriate.read_flag==False) & (db.zf_topic.id==db.zf_topic_inappropriate.topic_id)).select(db.zf_topic_inappropriate.ALL, db.zf_topic.title, orderby=~db.zf_topic_inappropriate.creation_date)
return dict(request=request, topics=topics, view_info=view_info)
| 5,349,058 |
def run(tl_key, p_bans=tuple(), g_bans=tuple(), p_open=True, g_open=True, qr=2, enable_cfg=False, cfg_name="wxReply"):
"""
启动
:param tl_key: 图灵key
:param p_bans: 好友黑名单
:param g_bans: 群组黑名单
:param p_open: 开启自动回复
:param g_open: 开启群艾特回复
:param qr: 二维码类型
:param enable_cfg: 是否启用配置文件
:return:
"""
print("""
,---------. .-./`) ____ ,---. .--. .-'''-. .---. .---. .---.
\ \\ .-.') .' __ `. | \ | | / _ \| | |_ _| | ,_|
`--. ,---'/ `-' \/ ' \ \| , \ | | (`' )/`--'| | ( ' ) ,-./ )
| \ `-'`"`|___| / || |\_ \| |(_ o _). | '-(_{;}_)\ '_ '`)
:_ _: .---. _.-` || _( )_\ | (_,_). '. | (_,_) > (_) )
(_I_) | | .' _ || (_ o _) |.---. \ :| _ _--. | ( . .-'
(_(=)_) | | | _( )_ || (_,_)\ |\ `-' ||( ' ) | | `-'`-'|___
(_I_) | | \ (_ o _) /| | | | \ / (_{;}_)| | | \
'---' '---' '.(_,_).' '--' '--' `-...-' '(_,_) '---' `--------`
""")
global OPEN_CHAT, OPEN_GROUP, ENABLE_CFG, CFG_NAME
# config file name
CFG_NAME = cfg_name
# set the Tuling API key
tl_data['key'] = tl_key
ENABLE_CFG = enable_cfg
# configuration
if ENABLE_CFG:
# read the config
cfg = get_cfg()
if cfg:
p_open = cfg.get('p_open')
g_open = cfg.get('g_open')
p_bans = cfg.get('p_bans')
g_bans = cfg.get('g_bans')
# set the reply switches
OPEN_CHAT = p_open
OPEN_GROUP = g_open
# 配置itchat
itchat.auto_login(hotReload=True, enableCmdQR=qr, statusStorageDir=pkl_path)
# 默认黑名单
for ban in p_bans:
try:
add_p_ban(ban)
except Exception as ex:
print(ex)
continue
    # set the group blacklist
for ban in g_bans:
try:
add_g_ban(ban)
except Exception as ex:
print(ex)
continue
    # collect the list of official (subscription) accounts
    for ban in itchat.get_mps(update=True):
        m_ban.add(ban['UserName'])
    # hint message ("type the /菜单 (menu) command for help")
    send_to_file_helper('输入“/菜单”指令,获得帮助。')
    # write the configuration
    set_cfg(ENABLE_CFG)
    # periodically clear the message history
    t = threading.Thread(target=clear, args=(msgs,))
    t.setDaemon(True)
    t.start()
    # start the bot
itchat.run()
| 5,349,059 |
def test_items_Exception_for_elasticsearch_errs(monkeypatch):
"""An Elasticsearch error response other than a 400 results in a 500"""
monkeypatch.setattr(requests, 'post', mock_es_post_response_err)
# Simulate some unsuccessful status code from Elasticsearch, other than a
# 400 Bad Request. Say a 500 Server Error, or a 404.
sq = SearchQuery({'q': 'goodquery', 'from': 0, 'page': 1, 'page_size': 1})
with pytest.raises(Exception):
v2_handlers.items(sq)
| 5,349,060 |
def main_loop(): # noqa: C901
"""Contain all the logic for the app"""
menu = Menu(CONFIG)
files = get_files()
current_position = 0
quit_early = False
files_exhausted = False
while not (quit_early or files_exhausted):
filename = files[current_position]
files_message = "Current_file: {0} of {1}: {2}".format(current_position + 1, len(files),
os.path.split(filename)[1])
# message will be shown only when we are on the files tab
result = menu.run(message={'files': files_message})
if result == ("subdirs", "create_subdirs"):
create_subdirectories()
elif result == ("subdirs", "help"):
print_help()
elif result == ("subdirs", "quit"):
quit_early = True
elif result[0] == "files" and result[1] in ["interesting", "boring"]:
if not os.path.isdir(result[1]):
raise ValueError("Directory must be created first")
move_to_subdir(files[current_position], result[1])
print('File moved to {}'.format(result[1]))
current_position += 1
files_exhausted = current_position >= len(files)
elif result == ("files", "skip"):
current_position += 1
files_exhausted = current_position >= len(files)
else:
raise AssertionError("Unrecognized input, this should have been caught by Menu validator")
if files_exhausted:
print("All files done.")
else:
print("Program quit early.")
| 5,349,061 |
def get_min_id_for_repo_mirror_config():
"""
Gets the minimum id for a repository mirroring.
"""
return RepoMirrorConfig.select(fn.Min(RepoMirrorConfig.id)).scalar()
| 5,349,062 |
def check_series(
Z,
enforce_univariate=False,
allow_empty=False,
allow_numpy=True,
enforce_index_type=None,
):
"""Validate input data.
Parameters
----------
Z : pd.Series, pd.DataFrame
Univariate or multivariate time series
enforce_univariate : bool, optional (default=False)
If True, multivariate Z will raise an error.
    allow_empty : bool, optional (default=False)
        If False, an empty time index raises an error.
enforce_index_type : type, optional (default=None)
type of time index
Returns
-------
y : pd.Series, pd.DataFrame
Validated time series
Raises
------
ValueError, TypeError
If Z is an invalid input
"""
# Check if pandas series or numpy array
if not allow_numpy:
valid_data_types = tuple(
filter(lambda x: x is not np.ndarray, VALID_DATA_TYPES)
)
else:
valid_data_types = VALID_DATA_TYPES
if not isinstance(Z, valid_data_types):
raise TypeError(
f"Data must be a one of {valid_data_types}, but found type: {type(Z)}"
)
if enforce_univariate:
_check_is_univariate(Z)
# check time index
check_time_index(
Z.index, allow_empty=allow_empty, enforce_index_type=enforce_index_type
)
return Z
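# Usage sketch, assuming the helpers referenced above (VALID_DATA_TYPES,
# check_time_index, _check_is_univariate) are importable from this module, as
# in sktime-style validation code. A univariate pd.Series with a plain
# RangeIndex passes through unchanged; with enforce_univariate=True a
# multivariate DataFrame would raise instead.
import pandas as pd

y = pd.Series([1.0, 2.0, 3.0])
y_checked = check_series(y, enforce_univariate=True)
assert y_checked is y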
| 5,349,063 |
def save_json(data, source_airport, dest_airport, date):
"""
Saves the scraped airfares as a .json file.
Parameters
----------
data: List. Required.
A list containing all the flight information from the source airport to
the destination airport on the date specified.
source_airport: String. Required.
The source airport code we scraped data for.
dest_airport: String. Required.
The destination airport code we scraped data for.
date: String. Required.
The date we scraped data for. Must be in 'DD/MM/YYYY' format.
Returns
----------
None. The .json file is saved as
'<source_airport>_<dest_aiport>_<day>_<month>_<year>.json'
"""
dt = datetime.strptime(args["date"], "%d/%m/%Y")
fname = "{0}_{1}_{2}_{3}_{4}.json".format(args["source_airport"],
args["dest_airport"],
dt.day,
dt.month,
dt.year)
with open(fname,'w') as fp:
json.dump(scraped_data,fp,indent = 4)
print("Saved data to {0}".format(fname))
| 5,349,064 |
def __ensure_project_registered():
""" Ensure the project configured in project.yaml has been registered. """
client = get_ai_flow_client()
project_meta = client.get_project_by_name(current_project_config().get_project_name())
pp = {}
for k, v in current_project_config().items():
pp[k] = str(v)
if project_meta is None:
project_meta = client.register_project(name=current_project_config().get_project_name(),
properties=pp)
else:
project_meta = client.update_project(project_name=current_project_config().get_project_name(), properties=pp)
current_project_config().set_project_uuid(str(project_meta.uuid))
| 5,349,065 |
def B(s):
"""string to byte-string in
Python 2 (including old versions that don't support b"")
and Python 3"""
if type(s)==type(u""): return s.encode('utf-8') # Python 3
return s
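# Quick check of the helper above (Python 3 shown): text is encoded to UTF-8
# bytes, while byte-strings pass through unchanged.
assert B(u"caf\u00e9") == b"caf\xc3\xa9"
assert B(b"raw") == b"raw"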
| 5,349,066 |
def gather_tiling_strategy(data, axis):
"""Custom tiling strategy for gather op"""
strategy = list()
base = 0
for priority_value, pos in enumerate(range(len(data.shape) - 1, axis, -1)):
priority_value = priority_value + base
strategy.append(ct_util.create_constraint_on_tensor(tensor=data,
values=priority_value,
constraints=ct_util.TileConstraint.SET_PRIORITY,
tensor_pos=pos)[0])
return strategy
| 5,349,067 |
def fetch(
uri: str, auth: Optional[str] = None, endpoint: Optional[str] = None, **data
) -> OAuthResponse:
"""Perform post given URI with auth and provided data."""
req = requests.Request("POST", uri, data=data, auth=auth)
prepared = req.prepare()
timeout = time.time() + app.config["OAUTH_FETCH_TOTAL_TIMEOUT"]
retry = 0
result = _error(
errors.SERVER_ERROR, "An unknown error occurred talking to provider."
)
for i in range(app.config["OAUTH_FETCH_TOTAL_RETRIES"]):
prefix = "attempt #%d %s" % (i + 1, uri)
# TODO: Add jitter to backoff and/or retry after?
backoff = (2**i - 1) * app.config["OAUTH_FETCH_BACKOFF_FACTOR"]
remaining_timeout = timeout - time.time()
if (retry or backoff) > remaining_timeout:
app.logger.debug("Abort %s no timeout remaining.", prefix)
break
elif (retry or backoff) > 0:
app.logger.debug("Retry %s [sleep %.3f]", prefix, retry or backoff)
time.sleep(retry or backoff)
result, status, retry = _fetch(prepared, remaining_timeout, endpoint)
labels = {"endpoint": endpoint, "status": stats.status(status)}
stats.ClientRetryHistogram.labels(**labels).observe(i)
if status is not None and "error" in result:
error = result["error"]
error = app.config["OAUTH_FETCH_ERROR_TYPES"].get(error, error)
if error not in errors.DESCRIPTIONS:
error = "invalid_error"
stats.ClientErrorCounter.labels(error=error, **labels).inc()
if status is None:
pass # We didn't even get a response, so try again.
elif status not in app.config["OAUTH_FETCH_RETRY_STATUS_CODES"]:
break
elif "error" not in result:
break # No error reported so might as well return it.
app.logger.debug(
"Result %s [status %s] [retry after %s]", prefix, status, retry
)
# TODO: consider returning retry after time so it can be used.
return result
| 5,349,068 |
def redscreen(main_filename, back_filename):
"""
Implements the notion of "redscreening". That is,
the image in the main_filename has its "sufficiently"
    red pixels replaced with pixels from the corresponding x,y
location in the image in the file back_filename.
Returns the resulting "redscreened" image.
"""
image = SimpleImage(main_filename)
back = SimpleImage(back_filename)
for pixel in image:
average = (pixel.red + pixel.green + pixel.blue) // 3
# See if this pixel is "sufficiently" red
if pixel.red >= average * INTENSITY_THRESHOLD:
# If so, we get the corresponding pixel from the
# back image and overwrite the pixel in
# the main image with that from the back image.
x = pixel.x
y = pixel.y
image.set_pixel(x, y, back.get_pixel(x, y))
return image
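# Usage sketch. SimpleImage and INTENSITY_THRESHOLD are assumed to be the
# course-provided image helper and module-level constant used above; the file
# names are placeholders for two same-sized images.
combined = redscreen('subject_on_red.jpg', 'beach.jpg')
combined.show()  # assuming SimpleImage exposes a show() method for display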
| 5,349,069 |
def density_speed_conversion(N, frac_per_car=0.025, min_val=0.2):
"""
Fraction to multiply speed by if there are N nearby vehicles
"""
z = 1.0 - (frac_per_car * N)
# z = 1.0 - 0.04 * N
return max(z, min_val)
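# Worked example for the scaling above: with the default 2.5% slowdown per
# nearby vehicle, 10 vehicles give a factor of about 1 - 0.025*10 = 0.75,
# and the result is clamped so it never falls below min_val.
print(density_speed_conversion(10))    # ~0.75
print(density_speed_conversion(100))   # 0.2 (clamped at min_val)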
| 5,349,070 |
async def test_remove_all_outputs():
"""
GIVEN
ChannelMultiple with multiple output channels.
WHEN
All output channels are removed then a item is put on src channel.
EXPECT
Item is not copied to former output channels, but is removed from
src channel.
"""
src = create_channel()
out1 = create_channel()
out2 = create_channel()
m = create_multiple(src)
assert m.add_output(out1)
assert m.add_output(out2)
await asyncio.sleep(0.05)
m.remove_all_outputs()
x = 'x'
src.offer(x)
await asyncio.sleep(0.05)
assert src.empty()
assert out1.empty()
assert out2.empty()
src.close()
await asyncio.sleep(0.05)
| 5,349,071 |
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
service_principal = demisto.params().get('credentials').get('identifier')
secret = demisto.params().get('credentials').get('password')
# Remove trailing slash to prevent wrong URL path to service
server_url = demisto.params()['url'][:-1] \
if (demisto.params()['url'] and demisto.params()['url'].endswith('/')) else demisto.params()['url']
api_version = demisto.params().get('api_version')
verify_certificate = not demisto.params().get('insecure', False)
    # How much time before the first fetch to retrieve incidents
fetch_time = demisto.params().get('fetch_time', '3 days')
threat_status = argToList(demisto.params().get('threat_status'))
threat_type = argToList(demisto.params().get('threat_type'))
event_type_filter = demisto.params().get('events_type')
fetch_limit = 50
# Remove proxy if not set to true in params
proxies = handle_proxy()
LOG('Command being called is %s' % (demisto.command()))
try:
client = Client(server_url, api_version, verify_certificate, service_principal, secret, proxies)
if demisto.command() == 'test-module':
results = test_module(client, fetch_time, event_type_filter)
return_outputs(results, None)
elif demisto.command() == 'fetch-incidents':
integration_context = demisto.getIntegrationContext()
next_run, incidents, remained_incidents = fetch_incidents(
client=client,
last_run=demisto.getLastRun(),
first_fetch_time=fetch_time,
event_type_filter=event_type_filter,
threat_status=threat_status,
threat_type=threat_type,
limit=fetch_limit,
integration_context=integration_context
)
            # Save last_run, incidents, and remaining incidents into the integration context
demisto.setLastRun(next_run)
demisto.incidents(incidents)
# preserve context dict
integration_context['incidents'] = remained_incidents
demisto.setIntegrationContext(integration_context)
elif demisto.command() == 'proofpoint-get-events':
return_outputs(*get_events_command(client, demisto.args()))
except Exception as e:
return_error(str(e))
| 5,349,072 |
def connect_to_amqp(sysconfig):
"""
Connect to an AMQP Server, and return the connection and Exchange.
:param sysconfig: The slickqaweb.model.systemConfiguration.amqpSystemConfiguration.AMQPSystemConfiguration instance
to use as the source of information of how to connect.
:return: (connection, exchange) on success, exception on error
"""
assert isinstance(sysconfig, AMQPSystemConfiguration)
configuration = dict()
configuration['AMQP'] = dict()
if hasattr(sysconfig, 'hostname') and sysconfig.hostname is not None:
configuration['AMQP']['hostname'] = sysconfig.hostname
else:
raise AMQPConnectionError(message="No hostname defined for AMQP connection.")
if hasattr(sysconfig, 'port') and sysconfig.port is not None:
configuration['AMQP']['port'] = sysconfig.port
if hasattr(sysconfig, 'username') and sysconfig.username is not None:
configuration['AMQP']['username'] = sysconfig.username
if hasattr(sysconfig, 'password') and sysconfig.password is not None:
configuration['AMQP']['password'] = sysconfig.password
if hasattr(sysconfig, 'virtualHost') and sysconfig.virtualHost is not None:
configuration['AMQP']['virtual host'] = sysconfig.virtualHost
if hasattr(sysconfig, 'exchangeName') and sysconfig.exchangeName is not None:
configuration['AMQP']['exchange'] = sysconfig.exchangeName
else:
raise AMQPConnectionError(message="No exchange defined for AMQP connection.")
logger = logging.getLogger("slickqaweb.amqpcon.connect_to_amqp")
url = str.format("amqp://{hostname}:{port}", **dict(list(configuration['AMQP'].items())))
if 'virtual host' in configuration['AMQP'] and configuration['AMQP']['virtual host'] != '':
url = str.format("{}/{}", url, configuration['AMQP']['virtual host'])
logger.debug("AMQPConnection configured with url %s", url)
exchange = Exchange(configuration['AMQP'].get('exchange', "amqp.topic"), type='topic', durable=True)
logger.debug("AMQPConnection is using exchange %s", exchange)
connection = None
if 'username' in configuration['AMQP'] and 'password' in configuration['AMQP']:
username = configuration['AMQP']['username']
password = configuration['AMQP']['password']
logger.debug("Using username %s and password %s to connect to AMQP Broker", username, password)
connection = Connection(url, userid=username, password=password)
else:
connection = Connection(url)
    # connections typically connect on demand, but we want to flush out errors before proceeding
connection.connect()
exchange = exchange(connection)
exchange.declare()
return (connection, exchange)
| 5,349,073 |
def int_to_datetime(int_date, ds=None):
"""Convert integer date indices to datetimes."""
if ds is None:
return TIME_ZERO + int_date * np.timedelta64(1, 'D')
if not hasattr(ds, 'original_time'):
raise ValueError('Dataset with no original_time cannot be used to '
'convert ints to datetimes.')
first_int = ds.time.values[0]
delta_int = _get_delta(ds.time)
first_date = ds.original_time.values[0]
delta_date = _get_delta(ds.original_time)
return first_date + ((int_date - first_int) / delta_int) * delta_date
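# Usage sketch for the no-dataset branch, assuming TIME_ZERO is the module's
# day-zero np.datetime64 reference and numpy is imported as np (both used above).
day_indices = np.array([0, 1, 7])
print(int_to_datetime(day_indices))  # TIME_ZERO, TIME_ZERO + 1 day, TIME_ZERO + 7 days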
| 5,349,074 |
def show_plugin(name, path, user):
"""
Show a plugin in a wordpress install and check if it is installed
name
Wordpress plugin name
path
path to wordpress install location
user
user to run the command as
CLI Example:
.. code-block:: bash
salt '*' wordpress.show_plugin HyperDB /var/www/html apache
"""
ret = {"name": name}
resp = __salt__["cmd.shell"](
("wp --path={0} plugin status {1}").format(path, name), runas=user
).split("\n")
for line in resp:
if "Status" in line:
ret["status"] = line.split(" ")[-1].lower()
elif "Version" in line:
ret["version"] = line.split(" ")[-1].lower()
return ret
| 5,349,075 |
def test_trium2flat_idx():
"""Test the conversion from triangular upper index to flat array index."""
td = np.array([[1, 0], [0, 1], [0, 1]])
cm = BiCM(td)
assert cm.triumat2flat_idx(3, 5, 8) == 19
for k in range(45):
ij = cm.flat2triumat_idx(k, 10)
assert cm.triumat2flat_idx(ij[0], ij[1], 10) == k
for i in range(10):
for j in range(i + 1, 10):
k = cm.triumat2flat_idx(i, j, 10)
assert (i, j) == cm.flat2triumat_idx(k, 10)
| 5,349,076 |
def send_notification(request, type_id, receipient, message):
"""Method to send out notifications."""
try:
if type_id == 2:
# Get users from this Organization unit
title = "Child tranfer IN"
org_ids = [receipient]
person_ids = get_organization_persons(org_ids)
users = get_users(person_ids)
for user in users:
notify.send(user, recipient=user, verb=title,
description=message)
except Exception as e:
print ('Error sending notifications - %s' % (str(e)))
pass
| 5,349,077 |
def get_receiver_type(rinex_fname):
"""
Return the receiver type (header line REC # / TYPE / VERS) found
in *rinex_fname*.
"""
with open(rinex_fname) as fid:
for line in fid:
if line.rstrip().endswith('END OF HEADER'):
break
elif line.rstrip().endswith('REC # / TYPE / VERS'):
return line[20:40].strip()
raise ValueError('receiver type not found in header of RINEX file '
'{}'.format(rinex_fname))
| 5,349,078 |
def is_cyclone_phrase(phrase):
"""Returns whether all the space-delimited words in phrases are cyclone words
A phrase is a cyclone phrase if and only if all of its component words are
cyclone words, so we first split the phrase into words using .split(), and then
check if all of the words are cyclone words.
"""
return all([is_cyclone_word(word) for word in phrase.split()])
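# Usage sketch, assuming is_cyclone_word is defined elsewhere in this module as
# the docstring implies. Note the vacuous case: an empty phrase has no words,
# and all() of an empty list is True.
print(is_cyclone_phrase(""))               # True (vacuously)
print(is_cyclone_phrase("onion example"))  # depends on is_cyclone_word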
| 5,349,079 |
def _append_iwc_sensitivity(data_handler):
"""Calculates sensitivity of ice water content."""
iwc_sensitivity = _z_to_iwc(data_handler, 'Z_sensitivity')
data_handler.append_data(iwc_sensitivity, 'iwc_sensitivity')
| 5,349,080 |
def wl_to_wavenumber(wl, angular=False):
"""Given wavelength in meters, convert to wavenumber in 1/cm. The wave
number represents the number of wavelengths in one cm. If angular is true,
will calculate angular wavenumber. """
if angular:
wnum = (2*np.pi)/(wl*100)
else:
wnum = 1/(wl*100)
return wnum
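# Worked example (numpy assumed imported as np, as in the function above): a
# 500 nm wavelength gives 1/(500e-9 * 100) = 20000 wavelengths per cm, and the
# angular wavenumber is 2*pi times that.
print(wl_to_wavenumber(500e-9))                # 20000.0
print(wl_to_wavenumber(500e-9, angular=True))  # ~125663.7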
| 5,349,081 |
def easeOutCubic(n):
"""A cubic tween function that begins fast and then decelerates.
Args:
n (float): The time progress, starting at 0.0 and ending at 1.0.
Returns:
(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
"""
_checkRange(n)
n = n - 1
return n**3 + 1
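# Usage sketch, assuming _checkRange is the module's range validator used above.
# The tween decelerates: halfway through the time it is already 87.5% of the way
# along the line, since 1 + (0.5 - 1)**3 = 0.875.
for t in (0.0, 0.25, 0.5, 0.75, 1.0):
    print(t, easeOutCubic(t))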
| 5,349,082 |
def _evaluate_batch_beam(model, params, dico, batch, lengths, positions, langs, src_lens, trg_lens, \
gen_type, alpha, beta, gamma, dec_len, iter_mult, selected_pos, beam_size, length_penalty):
"""Run on one example"""
n_iter = dec_len * iter_mult
# log probabilities of previously present tokens at each position
vocab_size = len(dico)
mask_tok = dico.word2id[MASK_WORD]
total_topbeam_scores = np.array([0.] * beam_size)
batch = batch.repeat((1, beam_size))
lengths = lengths.repeat((beam_size))
positions = positions.repeat((1, beam_size))
langs = langs.repeat((1, beam_size))
for dec_iter in range(n_iter):
# predict the token depending on selected_pos
pred_mask = torch.zeros_like(batch).byte()
if gen_type == "src2trg":
pred_mask[src_lens[0] + selected_pos[dec_iter%dec_len] + 1, :] = 1
elif gen_type == "trg2src":
pred_mask[selected_pos[dec_iter%dec_len] + 1, :] = 1
# NOTE(Alex): shouldn't there be some masking here?
tensor = model('fwd', x=batch, lengths=lengths, positions=positions, langs=langs, causal=False)
# beam_size x |V|
scores_pred = model('predict_wo_targets', tensor=tensor, pred_mask=pred_mask)
        # get top_beam scores and tokens; need to take log softmax so scores are on same scale
log_probs_pred = torch.log_softmax(scores_pred, dim=-1)
# beam_size x beam_size
topbeam_scores, topbeam_toks = log_probs_pred.topk(beam_size, dim=-1)
### exception for first
if dec_iter == 0:
total_topbeam_scores = total_topbeam_scores + np.diagonal(topbeam_scores.cpu().numpy())# + selected_pos_scores
for i_beam, topbeam_tok in enumerate(torch.diagonal(topbeam_toks)):
# substitute that token in
if gen_type == "src2trg":
batch[src_lens[0] + selected_pos[dec_iter%dec_len] + 1][i_beam] = topbeam_tok
elif gen_type == "trg2src":
batch[selected_pos[dec_iter%dec_len] + 1][i_beam] = topbeam_tok
else:
sys.exit("something is wrong")
continue
### all iterations except first
# compute updated beam scores
topbeam_scores = topbeam_scores.cpu().numpy()
new_total_topbeam_scores = np.expand_dims(total_topbeam_scores, 1) + topbeam_scores
# sort and take beam_size highest
new_topbeam_tok_flat = new_total_topbeam_scores.reshape(-1).argsort()[-beam_size:]
# create clones of the tokens and scores so far so we don't overwrite when updating
batch_clone = batch.clone()
total_topbeam_scores_clone = copy.deepcopy(total_topbeam_scores)
# iterate over the highest scoring beams
for i_beam, topbeam_tok_flat in enumerate(new_topbeam_tok_flat):
topbeam_tok_row = int(np.floor(topbeam_tok_flat / beam_size))
topbeam_tok_col = int(topbeam_tok_flat % beam_size)
topbeam_tok = topbeam_toks[topbeam_tok_row][topbeam_tok_col]
batch[:,i_beam] = batch_clone[:, topbeam_tok_row]
total_topbeam_scores[i_beam] = total_topbeam_scores_clone[topbeam_tok_row] + \
topbeam_scores[topbeam_tok_row][topbeam_tok_col]
# substitute that token in
if gen_type == "src2trg":
batch[src_lens[0] + selected_pos[dec_iter%dec_len] + 1][i_beam] = topbeam_tok
elif gen_type == "trg2src":
batch[selected_pos[dec_iter%dec_len] + 1][i_beam] = topbeam_tok
else:
sys.exit("something is wrong")
return batch, total_topbeam_scores / (dec_len ** length_penalty)
| 5,349,083 |
def tetrahedron(a, b, c, d, depth, vertices, edges, surfaces, config):
"""
Recursive tetrahedron fractal generation.
ARGUMENTS:
a, b, c, d - tetrahedron vertices
    depth - how deep in iterations
vertices, edges, surfaces - mesh data lists
config - config tuple to generate from
"""
# if zero depth, final iteration, draw tetrahedron
if depth == 0:
draw_tetrahedron(a, b, c, d, vertices, edges, surfaces)
else:
# create sub-tetrahedrons and iterate over them
tetrahedrons = generate_sub_tetrahedrons(a, b, c, d)
for index, item in enumerate(tetrahedrons):
depth_iter = depth-1 # helper variable
# setup next tetrahedron according to config
if index in config[0]:
if index in config[1]:
depth_iter = 0
# recursive call
tetrahedron(item[0], item[1], item[2], item[3],
depth_iter, vertices, edges, surfaces, config)
| 5,349,084 |
def _fix_entries(entries):
"""recursive function to collapse entries into correct format"""
cur_chapter_re, chapter_entry = None, None
new_entries = []
for entry in entries:
title, doxy_path, subentries = entry
if subentries is not None:
new_subentries = _fix_entries(subentries)
new_entries.append([title, doxy_path, new_subentries])
elif cur_chapter_re and cur_chapter_re.match(title):
chapter_entry[2].append(entry)
else:
new_entries.append(entry)
chapter_match = CHAPTER_RE.match(title)
if chapter_match:
cur_chapter_re = re.compile(
chapter_match.group('num') + r'\.\d+:')
chapter_entry = entry
chapter_entry[-1] = []
else:
cur_chapter_re, chapter_entry = None, None
return new_entries
| 5,349,085 |
def _check_bulk_delete(attempted_pairs, result):
"""
Checks if the RCv3 bulk delete command was successful.
"""
response, body = result
if response.code == 204: # All done!
return body
errors = []
non_members = pset()
for error in body["errors"]:
match = _SERVER_NOT_A_MEMBER_PATTERN.match(error)
if match is not None:
pair = match.groupdict()
non_members = non_members.add(
(normalize_lb_id(pair["lb_id"]), pair["server_id"]))
continue
match = _LB_INACTIVE_PATTERN.match(error)
if match is not None:
errors.append(LBInactive(match.group("lb_id")))
continue
match = _LB_DOESNT_EXIST_PATTERN.match(error)
if match is not None:
del_lb_id = normalize_lb_id(match.group("lb_id"))
# consider all pairs with this LB to be removed
removed = [(lb_id, node_id) for lb_id, node_id in attempted_pairs
if lb_id == del_lb_id]
non_members |= pset(removed)
continue
match = _SERVER_DOES_NOT_EXIST.match(error)
if match is not None:
del_server_id = match.group("server_id")
# consider all pairs with this server to be removed
removed = [(lb_id, node_id) for lb_id, node_id in attempted_pairs
if node_id == del_server_id]
non_members |= pset(removed)
else:
raise UnknownBulkResponse(body)
if errors:
raise BulkErrors(errors)
elif non_members:
to_retry = pset(attempted_pairs) - non_members
return bulk_delete(to_retry) if to_retry else None
else:
raise UnknownBulkResponse(body)
| 5,349,086 |
def get_frontend_ui_base_url(config: "CFG") -> str:
"""
Return ui base url
"""
return as_url_folder(urljoin(get_root_frontend_url(config), FRONTEND_UI_SUBPATH))
| 5,349,087 |
def read_image(name):
"""
Reads image into a training example. Might be good to threshold it.
"""
im = Image.open(name)
pix = im.load()
example = []
for x in range(16):
for y in range(16):
example.append(pix[x, y])
return example
| 5,349,088 |
def insert_info_data(xml_root, result_annotation):
"""Fill available information of annotation
Args:
xml_root: root for xml parser
result_annotation: output annotation in COCO representation
"""
log.info('Reading information data...')
version = ''
date = ''
description = ''
year = ''
for child in xml_root:
if child.tag == 'version':
version = child.text
if child.tag == 'meta':
for task in child:
for entry in task:
if entry.tag == 'name':
description = entry.text
if entry.tag == 'created':
date = entry.text
date = date.split(' ')[0]
year = date.split('-')[0]
result_annotation['info'] = {
'contributor': '',
'date_created': date,
'description': description,
'url': '',
'version': version,
'year': year
}
log.info('Found the next information data: {}'.format(result_annotation['info']))
| 5,349,089 |
def l3tc_underlay_lag_config_unconfig(config, dut1, dut2, po_name, members_dut1, members_dut2):
"""
:param config:
:param dut1:
:param dut2:
:param po_name:
:param members_dut1:
:param members_dut2:
:return:
"""
st.banner("{}Configuring LAG between Spine and Leaf node.".format('Un' if config != 'yes' else ''))
result = True
if config == 'yes':
# configure po and add members
[out, exceptions] = \
utils.exec_all(fast_start, [[poapi.config_portchannel, dut1, dut2, po_name,
members_dut1, members_dut2, "add"]])
st.log([out, exceptions])
else:
# del po and delete members
[out, exceptions] = \
utils.exec_all(fast_start, [[poapi.config_portchannel, dut1, dut2, po_name,
members_dut1, members_dut2, "del"]])
st.log([out, exceptions])
return result
| 5,349,090 |
def _xrdcp_copyjob(wb, copy_job: CopyFile, xrd_cp_args: XrdCpArgs, printout: str = '') -> int:
"""xrdcp based task that process a copyfile and it's arguments"""
if not copy_job: return
overwrite = xrd_cp_args.overwrite
batch = xrd_cp_args.batch
sources = xrd_cp_args.sources
chunks = xrd_cp_args.chunks
chunksize = xrd_cp_args.chunksize
makedir = xrd_cp_args.makedir
tpc = xrd_cp_args.tpc
posc = xrd_cp_args.posc
# hashtype = xrd_cp_args.hashtype
streams = xrd_cp_args.streams
cksum = xrd_cp_args.cksum
timeout = xrd_cp_args.timeout
rate = xrd_cp_args.rate
cmdline = f'{copy_job.src} {copy_job.dst}'
return retf_print(_xrdcp_sysproc(cmdline, timeout))
| 5,349,091 |
def merge_preclusters_ld(preclusters):
"""
Bundle together preclusters that share one LD snp
* [ Cluster ]
Returntype: [ Cluster ]
"""
clusters = list(preclusters)
for cluster in clusters:
chrom = cluster.gwas_snps[0].snp.chrom
start = min(gwas_snp.snp.pos for gwas_snp in cluster.gwas_snps)
end = max(gwas_snp.snp.pos for gwas_snp in cluster.gwas_snps)
# A dictionary that maps from snp to merged clusters
snp_owner = dict()
for cluster in preclusters:
for ld_snp in cluster.ld_snps:
# If this SNP has been seen in a different cluster
if ld_snp in snp_owner and snp_owner[ld_snp] is not cluster:
# Set other_cluster to that different cluster
other_cluster = snp_owner[ld_snp]
merged_cluster = merge_clusters(cluster, other_cluster)
# Remove the two previous clusters and replace them with
# the merged cluster
clusters.remove(cluster)
clusters.remove(other_cluster)
clusters.append(merged_cluster)
# Set the new cluster as the owner of these SNPs.
for snp in merged_cluster.ld_snps:
snp_owner[snp] = merged_cluster
for snp in cluster.ld_snps:
snp_owner[snp] = merged_cluster
# Skip the rest of this cluster.
break
else:
snp_owner[ld_snp] = cluster
for cluster in clusters:
chrom = cluster.gwas_snps[0].snp.chrom
start = min(gwas_snp.snp.pos for gwas_snp in cluster.gwas_snps)
end = max(gwas_snp.snp.pos for gwas_snp in cluster.gwas_snps)
logging.info("\tFound %i clusters from the GWAS peaks" % (len(clusters)))
return clusters
| 5,349,092 |
def cross_recurrence_matrix( xps, yps ):
"""Cross reccurence matrix.
Args:
xps (numpy.array):
yps (numpy.array):
Returns:
numpy.array : A 2D numpy array.
"""
return recurrence_matrix( xps, yps )
| 5,349,093 |
def define_panels(x, y, N=40):
"""
    Discretizes the geometry into panels using the 'cosine' method.
Parameters
----------
x: 1D array of floats
x-coordinate of the points defining the geometry.
y: 1D array of floats
y-coordinate of the points defining the geometry.
N: integer, optional
Number of panels;
default: 40.
Returns
-------
panels: 1D Numpy array of Panel objects.
The list of panels.
"""
R = (x.max()-x.min())/2.0 # circle radius
x_center = (x.max()+x.min())/2.0 # x-coordinate of circle center
theta = numpy.linspace(0.0, 2.0*numpy.pi, N+1) # array of angles
x_circle = x_center + R*numpy.cos(theta) # x-coordinates of circle
x_ends = numpy.copy(x_circle) # x-coordinate of panels end-points
y_ends = numpy.empty_like(x_ends) # y-coordinate of panels end-points
# extend coordinates to consider closed surface
x, y = numpy.append(x, x[0]), numpy.append(y, y[0])
# compute y-coordinate of end-points by projection
I = 0
for i in range(N):
while I < len(x)-1:
if (x[I] <= x_ends[i] <= x[I+1]) or (x[I+1] <= x_ends[i] <= x[I]):
break
else:
I += 1
a = (y[I+1]-y[I])/(x[I+1]-x[I])
b = y[I+1] - a*x[I+1]
y_ends[i] = a*x_ends[i] + b
y_ends[N] = y_ends[0]
# create panels
panels = numpy.empty(N, dtype=object)
for i in range(N):
panels[i] = Panel(x_ends[i], y_ends[i], x_ends[i+1], y_ends[i+1])
return panels
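# Usage sketch, assuming the Panel class used above is available and numpy is
# imported at module level. Discretize a finely sampled unit circle into 40
# panels.
theta_geom = numpy.linspace(0.0, 2.0 * numpy.pi, 201)
x_geom, y_geom = numpy.cos(theta_geom), numpy.sin(theta_geom)
panels = define_panels(x_geom, y_geom, N=40)
print(len(panels))  # 40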
| 5,349,094 |
def compile_for_llvm(function_name, def_string, optimization_level=-1,
globals_dict=None):
"""Compiles function_name, defined in def_string to be run through LLVM.
Compiles and runs def_string in a temporary namespace, pulls the
function named 'function_name' out of that namespace, optimizes it
at level 'optimization_level', -1 for the default optimization,
and marks it to be JITted and run through LLVM.
"""
namespace = {}
if globals_dict is None:
globals_dict = globals()
exec def_string in globals_dict, namespace
func = namespace[function_name]
if optimization_level is not None:
if optimization_level >= DEFAULT_OPT_LEVEL:
func.__code__.co_optimization = optimization_level
func.__code__.co_use_jit = True
return func
| 5,349,095 |
def setup():
"""
This just moves the initial setup-commands to the top for better readability
:return: -
"""
plugin.add_config("api_base", is_required=True)
plugin.add_config("api_key", is_required=True)
plugin.add_config("room_id", is_required=True)
plugin.add_config("series_tracking", is_required=False, default_value=True)
plugin.add_command(
"series",
print_series,
"Get a list of currently tracked series",
room_id=[plugin.read_config("room_id")],
)
plugin.add_command(
"episodes",
current_episodes,
"Post a new message that is tracking episodes",
room_id=[plugin.read_config("room_id")],
)
# initially post current episodes at the start of the week
plugin.add_timer(current_episodes, frequency="weekly")
# check for updates to the episodes' status and update the message accordingly
plugin.add_timer(update_current_episodes, frequency=datetime.timedelta(minutes=5))
# check for changes to currently tracked series
plugin.add_timer(check_series_changes, frequency="daily")
| 5,349,096 |
def setup_svm_classifier(training_data, y_training, testing_data, features, method="count", ngrams=(1,1)):
"""
Setup SVM classifier model using own implementation
Parameters
----------
training_data: Pandas dataframe
The dataframe containing the training data for the classifier
testing_data: Pandas dataframe
The dataframe containing the testing data for the classifier
y_training: Pandas dataframe
The dataframe containing the y training data for the classifier
features: String or list of strings if using multiple features
        Names of columns of df that are used for training the classifier
method: String
Can be either "count" or "tfidf" for specifying method of feature weighting
ngrams: tuple (min_n, max_n), with min_n, max_n integer values
range for ngrams used for vectorization
Returns
-------
model: SVM Classifier (scratch implementation)
Trained SVM Classifier from own implementation
vec: sklearn CountVectorizer or TfidfVectorizer
CountVectorizer or TfidfVectorizer fit and transformed for training data
x_testing: Pandas dataframe
The dataframe containing the test data for the SVM classifier
"""
# generate x and y training data
if method == "count":
vec, x_training, x_testing = define_features_vectorizer(features, training_data, testing_data,ngramrange=ngrams)
elif method == "tfidf":
vec, x_training, x_testing = define_features_tfidf(features, training_data, testing_data,ngramrange=ngrams)
else:
print("Method has to be either count or tfidf")
return 1
# train classifier
model = SVMClassifier_scratch()
model.fit(x_training, y_training)
return model, vec, x_testing
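# Usage sketch with a tiny hypothetical dataset, assuming the project helpers
# referenced above (define_features_vectorizer, define_features_tfidf,
# SVMClassifier_scratch) are importable and that the scratch classifier exposes
# a predict() method.
import pandas as pd

train_df = pd.DataFrame({"text": ["good game", "great match", "bad call", "awful ref"],
                         "label": [1, 1, 0, 0]})
test_df = pd.DataFrame({"text": ["pretty good call"]})
model, vec, x_test = setup_svm_classifier(train_df, train_df["label"], test_df,
                                          features="text", method="tfidf")
predictions = model.predict(x_test)  # assumed API of the scratch classifier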
| 5,349,097 |
def buildHeaderString(keys):
"""
    Use authentication keys to build the literal headers dictionary that will be
    passed to the API with every call.
"""
headers = {
# Request headers
'participant-key': keys["participantKey"],
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': keys["subscriptionKey"]
}
return headers
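# Usage sketch with placeholder keys; the returned dict is passed as the headers
# argument of whatever HTTP client issues the API calls.
keys = {"participantKey": "YOUR-PARTICIPANT-KEY",
        "subscriptionKey": "YOUR-SUBSCRIPTION-KEY"}
headers = buildHeaderString(keys)
print(headers["Content-Type"])  # application/json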
| 5,349,098 |
def parametrize_environment_file(metafunc):
"""
This param runs tests for every environment file in the template dir
"""
env_files = get_filenames_list(metafunc, [".env"])
metafunc.parametrize("env_file", env_files)
| 5,349,099 |