content (stringlengths 22–815k) | id (int64 0–4.91M)
---|---|
def resolve_grant_endpoint(doi_grant_code):
"""Resolve the OpenAIRE grant."""
# jsonresolver will evaluate current_app on import if outside of function.
from flask import current_app
pid_value = '10.13039/{0}'.format(doi_grant_code)
try:
_, record = Resolver(pid_type='grant', object_type='rec',
getter=Record.get_record).resolve(pid_value)
return record
except Exception:
current_app.logger.error(
'Grant {0} does not exist.'.format(pid_value), exc_info=True)
raise
| 5,349,300 |
def from_tiff(path: Union[Path, str]) -> OME:
"""Generate OME metadata object from OME-TIFF path.
This will use the first ImageDescription tag found in the TIFF header.
Parameters
----------
path : Union[Path, str]
Path to OME TIFF.
Returns
-------
ome: ome_types.model.ome.OME
ome_types.OME metadata object
Raises
------
ValueError
If the TIFF file has no OME metadata.
"""
with Path(path).open(mode="rb") as fh:
try:
offsetsize, offsetformat, tagnosize, tagnoformat, tagsize, codeformat = {
b"II*\0": (4, "<I", 2, "<H", 12, "<H"),
b"MM\0*": (4, ">I", 2, ">H", 12, ">H"),
b"II+\0": (8, "<Q", 8, "<Q", 20, "<H"),
b"MM\0+": (8, ">Q", 8, ">Q", 20, ">H"),
}[fh.read(4)]
except KeyError:
raise ValueError(f"{path!r} does not have a recognized TIFF header")
fh.read(4 if offsetsize == 8 else 0)
fh.seek(unpack(offsetformat, fh.read(offsetsize))[0])
for _ in range(unpack(tagnoformat, fh.read(tagnosize))[0]):
tagstruct = fh.read(tagsize)
if unpack(codeformat, tagstruct[:2])[0] == 270:
size = unpack(offsetformat, tagstruct[4 : 4 + offsetsize])[0]
if size <= offsetsize:
desc = tagstruct[4 + offsetsize : 4 + offsetsize + size]
break
fh.seek(unpack(offsetformat, tagstruct[-offsetsize:])[0])
desc = fh.read(size)
break
else:
raise ValueError(f"No OME metadata found in file: {path}")
if desc[-1] == 0:
desc = desc[:-1]
return from_xml(desc.decode("utf-8"))
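# Illustrative usage sketch (the file name is hypothetical; requires a real
# OME-TIFF on disk plus the `Path`, `unpack`, `from_xml` and `OME` imports
# assumed by the function above):
#
#     ome = from_tiff("example.ome.tiff")
#     print(len(ome.images))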
| 5,349,301 |
def increase_structure_depth(previous_architecture, added_block, problem_type):
"""Returns new structure given the old one and the added block.
Increases the depth of the neural network by adding `added_block`.
For the case of cnns, if the block is convolutional, it will add it before
the flattening operation. Otherwise, if it is a dense block, then it will
be added at the end.
For the dnn and rnn case, the added_block is always added at the end.
Args:
previous_architecture: the input architecture. An np.array holding
`blocks.BlockType` (i.e., holding integers).
added_block: a `blocks.BlockType` to add to previous_architecture.
problem_type: a `PhoenixSpec.ProblemType` enum.
Returns:
np.array of `blocks.BlockType` (integers).
"""
if added_block == blocks.BlockType.EMPTY_BLOCK:
return previous_architecture.copy()
output = previous_architecture.copy()
# No problems for DNN or RNN
if problem_type != phoenix_spec_pb2.PhoenixSpec.CNN:
return np.append(output, added_block)
# TODO(b/172564129): Change this class (blocks) to a singleton
builder = blocks.Blocks()
# CNN case - convolution before fully connected.
if not builder[added_block].is_input_order_important:
return np.append(output, added_block)
# First block index in which order is not important
index_for_new_block = next(
index for index, block in enumerate(previous_architecture)
if not builder[block].is_input_order_important)
return np.insert(output, index_for_new_block, added_block)
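# Minimal sketch of the numpy calls used above (the integers are illustrative
# block codes, not real `blocks.BlockType` members):
import numpy as np

_arch = np.array([3, 3, 7])       # two "order-important" blocks, then a dense-like block
print(np.append(_arch, 9))        # [3 3 7 9] -> new block appended at the end
print(np.insert(_arch, 2, 9))     # [3 3 9 7] -> new block inserted before index 2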
| 5,349,302 |
def elina_linexpr0_set_cst_scalar_double(linexpr, num):
"""
Set the constant of an ElinaLinexpr0 by using a c_double.
Parameters
----------
linexpr : ElinaLinexpr0Ptr
Destination.
num : c_double
Source.
Returns
-------
None
"""
try:
elina_linexpr0_set_cst_scalar_double_c = elina_auxiliary_api.elina_linexpr0_set_cst_scalar_double
elina_linexpr0_set_cst_scalar_double_c.restype = None
elina_linexpr0_set_cst_scalar_double_c.argtypes = [ElinaLinexpr0Ptr, c_double]
elina_linexpr0_set_cst_scalar_double_c(linexpr, num)
except:
print('Problem with loading/calling "elina_linexpr0_set_cst_scalar_double" from "libelinaux.so"')
print('Make sure you are passing ElinaLinexpr0Ptr, c_double to the function')
| 5,349,303 |
def create_table_string(data, highlight=(True, False, False, False), table_class='wikitable', style=''):
"""
Takes a list and returns a wikitable.
@param data: The list that is converted to a wikitable.
@type data: List (Nested)
@param highlight: Tuple of rows and columns that should be highlighted.
(first row, last row, left column, right column)
@type highlight: Tuple
@param table_class: A string containing the class description.
See wikitable help.
@type table_class: String
@param style: A string containing the style description.
See wikitable help.
@type style: String
"""
last_row = len(data) - 1
last_cell = len(data[0]) - 1
table = '{{| class="{}" style="{}"\n'.format(table_class, style)
for key, row in enumerate(data):
if key == 0 and highlight[0] or key == last_row and highlight[1]:
row_string = '|-\n! ' + '\n! '.join(cell for cell in row)
else:
row_string = '|-'
cells = ''
for ckey, cell in enumerate(row):
if ckey == 0 and highlight[2]:
cells += '\n! ' + cell
elif ckey == last_cell and highlight[3]:
cells += '\n! ' + cell
else:
cells += '\n| ' + cell
row_string += cells
table += row_string + '\n'
table += '|}'
return table
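# Illustrative usage with a small nested list (default highlighting makes the
# first row a header row):
example_table = create_table_string([["Name", "Score"], ["Alice", "42"]])
# example_table starts with '{| class="wikitable" style=""', renders the first
# row as header cells ('! Name', '! Score'), the second row as ordinary cells
# ('| Alice', '| 42'), and ends with '|}'.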
| 5,349,304 |
def group_tokens(tokens):
"""
Join and separate tokens to be more suitable for diffs.
Transformations:
- Empty tokens are removed
- Text containing newlines is split to have the newline be one token
- Other sequential whitespace tokens are joined
- Token types which contain freeform text (i.e. comments, strings) are split into words
"""
for token_type, group in itertools.groupby(tokens, get_token_type):
if any(token_type in type_set for type_set in JOIN_TOKENS):
text = ''.join(get_token_text(token) for token in group)
group = [(token_type, text)]
if any(token_type in type_set for type_set in WORD_TOKENS):
group = (
(token_type, word)
for token in group
for word in split_words(get_token_text(token))
)
# Split by newlines
for token in group:
text_parts = re.split(r'(\n)', get_token_text(token))
for text_part in text_parts:
# Empty tokens are discarded, to avoid confusing
# difflib or highlighting empty regions
if text_part:
yield (token_type, text_part)
| 5,349,305 |
def path_to_dnd(path: Path) -> str:
"""Converts a `Path` into an acceptable value for `tkinterdnd2.`"""
# tkinterdnd2 will only accept fs paths with forward slashes, even on Windows.
wants_sep = '/'
if os.path.sep == wants_sep:
return str(path)
else:
return wants_sep.join(str(path).split(os.path.sep))
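# Illustrative usage (the result depends on the host OS separator):
#
#     path_to_dnd(Path(r"C:\Users\me\photo.png"))  # 'C:/Users/me/photo.png' on Windows
#     path_to_dnd(Path("/home/me/photo.png"))      # unchanged on POSIX systems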
| 5,349,306 |
def modify_reflection(args, json_reflection, rid):
"""
modify a reflection
"""
base_url, token = get_base_url_token(args)
x = _modify_reflection(token, base_url, rid, json_reflection, ssl_verify=args.get("ssl_verify", True))
click.echo(json.dumps(x))
| 5,349,307 |
async def current_page_error(call: CallbackQuery):
"""The function handles clicking on the current page"""
await call.answer(cache_time=60)
| 5,349,308 |
def registry_dispatcher_document(self, code, collection):
"""
This task receives a code and collection that should be queued for DOI registry
"""
return _registry_dispatcher_document(code, collection, skip_deposited=False)
| 5,349,309 |
def get_reddit_oauth_scopes(
scopes: Collection[str] | None = None,
) -> dict[str, dict[str, str]]:
"""Get metadata on the OAUTH scopes offered by the Reddit API."""
# Set up the request for scopes
scopes_endpoint = "/api/v1/scopes"
scopes_endpoint_url = REDDIT_BASE_URL + scopes_endpoint
headers = {"User-Agent": USER_AGENT}
query_params = {}
if scopes:
query_params["scopes"] = scopes
# Make and process the request
response = requests.get(
scopes_endpoint_url,
params=query_params,
headers=headers,
timeout=REQUEST_TIMEOUT_S,
)
response.raise_for_status()
response_json: dict[str, dict[str, str]] = response.json()
return response_json
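# Illustrative usage sketch (performs a live HTTP request; REDDIT_BASE_URL,
# USER_AGENT and REQUEST_TIMEOUT_S are assumed to be defined in this module):
#
#     scope_info = get_reddit_oauth_scopes(["identity", "read"])
#     for scope, meta in scope_info.items():
#         print(scope, "-", meta.get("description", ""))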
| 5,349,310 |
def loop_filtering(preprocessed_file: str = None,
unprocessed_text_file: str = None,
output_file: str = r'./preprocessed_filtered.txt',
unprocessed_filtered_file: str = r'./unprocessed_filtered.txt',
saved_file: str = None,
threshold: float = 0.99):
"""
Filter texts using double for loop and save the unique texts to new file.
:param preprocessed_file:
:param unprocessed_text_file:
:param output_file:
:param unprocessed_filtered_file:
:param saved_file:
:param threshold:
:return:
"""
texts, unprocessed_texts = [], []
# validate the input file paths
if verify_path(preprocessed_file):
texts = open(preprocessed_file).readlines()
if unprocessed_text_file and verify_path(unprocessed_text_file):
unprocessed_texts = open(unprocessed_text_file).readlines()
temp_texts = {} # containing indices for looking up the actual texts, these contain the texts that are similar
# use the deleted list containing indices to delete the texts from the non preprocessed file
# add the indices of the ones to delete
delete_texts = set()
# write filtered texts and duplicate texts in separate files
start_i = 0
if saved_file:
if verify_path(saved_file):
line = open(saved_file, 'r').readlines()
dict_obj = eval(line[0])
start_i = dict_obj['position']
texts = dict_obj['text']
if unprocessed_texts: unprocessed_texts = dict_obj['text_orig']
for i in tqdm(range(start_i, len(texts) - 1)):
# print("\nIteration: ", i, "\n")
# every 100 iterations, prompt user to see if they want to stop
if (i + 1) % 100 == 0:
USER_INPUT = input("Would you like to STOP? If yes, please enter 'y'. If no, please enter 'n': ")
if USER_INPUT == 'y':
new_dict = {"position": i + 1, "text": texts, "text_orig": unprocessed_texts}
# save the status file to the current directory
# This status file will be later used to resume the filtering process
with open(r'./saved_file.txt', 'w') as new_file:
new_file.write(str(new_dict))
exit()
text1 = texts[i]
# If the first text is '@@@', then skip the current iteration
if text1 == '@@@':
continue
for j in range(i + 1, len(texts)):
text2 = texts[j]
# If the second text is '@@@', then skip the current iteration
if text2 == '@@@':
continue
# calculate cosine similarity score
cosine = get_cosine_similarity(text1, text2)
# if cosine similarity score is greater than given threshold
# add the text to the temp_texts dictionary with index as its key
if cosine > threshold:
temp_texts[j] = text2
else:
continue
# if there are texts in temp_texts, then print the similar texts to the screen
if temp_texts:
print('\n', '-' * 45)
print('TEXT 1: ', text1)
for k, v in temp_texts.items():
print(k, ': ', v, '\n')
else:
continue
# ensure that the inputs match the ones in the dict temp_texts
delete_lst = ''
# Prompt for user input of indices to delete
while not delete_lst:
delete_lst = input(
'Enter the indices of the texts to delete and separated with '
'spaces. Otherwise to KEEP ALL TEXTS, ENTER -1. To DELETE ALL TEXTS, ENTER -2: ')
# Ask for user input until right indices are entered
while True:
if delete_lst == '-1':
break
if delete_lst == '-2':
break
if [x for x in delete_lst.split() if int(x) not in temp_texts.keys()]:
print(
"ERROR: you have entered an index that is not in temp_texts. Please enter an index that is in temp_texts")
delete_lst = input(
'Enter the indices of the texts to delete and separate with spaces. '
'Otherwise to keep ALL TEXTS, ENTER -1: ')
continue
else:
break
print()
# Keep all texts
if delete_lst == '-1':
temp_texts.clear()
delete_texts.clear()
continue
# Delete all texts
if delete_lst == '-2':
for x in temp_texts.keys():
delete_texts.add(int(x))
for idx in delete_texts:
# Replace the texts to be deleted with '@@@'
texts[idx] = '@@@'
unprocessed_texts[idx] = '@@@'
temp_texts.clear()
delete_texts.clear()
continue
# Delete select texts
lst = delete_lst.split(' ') # return a list containing the indices
for x in lst:
delete_texts.add(int(x))
for idx in delete_texts:
# Replace the texts to be deleted with '@@@'
texts[idx] = '@@@'
unprocessed_texts[idx] = '@@@'
print('DELETED TEXTS: ', delete_texts)
temp_texts.clear()
delete_texts.clear()
# after everything is finished, write to file
with open(output_file, 'w') as f:
for text in texts:
if text == '@@@':
continue
f.write(text)
# after everything is finished, write to file
with open(unprocessed_filtered_file, 'w') as orig_file:
for text in unprocessed_texts:
if text == '@@@':
continue
orig_file.write(text)
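# `get_cosine_similarity` is assumed to be defined elsewhere in this module; a
# minimal scikit-learn based sketch (an assumption, not the original helper):
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity

def get_cosine_similarity(text1: str, text2: str) -> float:
    """Return the cosine similarity of two texts based on token counts."""
    vectors = CountVectorizer().fit_transform([text1, text2])
    return float(cosine_similarity(vectors[0], vectors[1])[0][0])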
| 5,349,311 |
def beta_reader(direc):
"""
Function to read in beta values for each tag
"""
path = direc
H_beta = np.loadtxt('%s/Beta Values/h_beta_final2.txt' % path)
Si_beta = np.loadtxt('%s/Beta Values/si_beta_final2.txt' % path)
He_emi_beta = np.loadtxt('%s/Beta Values/he_emi_beta_final2.txt' % path)
He_cyg_beta = np.loadtxt('%s/Beta Values/he_cyg_beta_final2.txt' % path)
He_abs_beta = np.loadtxt('%s/Beta Values/he_abs_beta_final2.txt' % path)
H_alp_beta = np.loadtxt('%s/Beta Values/h_alp_beta_final2.txt' % path)
Ca_beta = np.loadtxt('%s/Beta Values/ca_beta_final2.txt' % path)
iib_dp_beta = np.loadtxt('%s/Beta Values/iibdp_beta_final2.txt' % path)
Fe_beta = np.loadtxt('%s/Beta Values/fe_beta_final2.txt' % path)
S_beta = np.loadtxt('%s/Beta Values/s_beta_final2.txt' % path)
return H_beta,Si_beta,He_emi_beta,He_cyg_beta,He_abs_beta,H_alp_beta,Ca_beta,iib_dp_beta,Fe_beta,S_beta
| 5,349,312 |
def import_people(
ctx: DataFunctionContext,
user_key: str,
use_sample: bool = False,
):
"""
Params:
user_key: User API key from crunchbase.
use_sample: Whether to use the sample bulk CSV endpoint (default False)
"""
base_import(
data_source="people",
user_key=user_key,
ctx=ctx,
use_sample=use_sample,
)
| 5,349,313 |
def boto3_s3_upload(s3, dst, file):
"""Upload Item to s3.
:param s3: -- boto3 S3 service resource.
:param dst: -- str. Local directory that contains the file.
:param file: -- str. Name of the file to upload.
Return Type: str (the name of the uploaded file)
"""
s3.Object(settings.config_type['AWS_BUCKET'], file).put(Body=open(os.path.join(dst, file), 'rb'))
return file
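# Illustrative usage sketch (bucket comes from `settings` above; the directory
# and file name below are hypothetical and valid AWS credentials are required):
#
#     import boto3
#     s3 = boto3.resource("s3")
#     boto3_s3_upload(s3, "/tmp/exports", "report.csv")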
| 5,349,314 |
def get_logging_format():
"""return the format string for the logger"""
formt = "[%(asctime)s] %(levelname)s:%(message)s"
return formt
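# Illustrative usage: feed the format string to the standard logging setup.
import logging

logging.basicConfig(format=get_logging_format(), level=logging.INFO)
logging.info("logger configured")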
| 5,349,315 |
def plot_hydrogen_balance(results):
""" Plot the hydrogen balance over time """
n_axes = results["times"].shape[0]
fig = plt.figure(figsize=(6.0, 5.5))
fig.suptitle('Hydrogen production and utilization over the year', fontsize=fontsize+1, fontweight='normal', color='k')
axes = fig.subplots(n_axes)
for index, ax in enumerate(axes):
x1, y1 = results["times"][index, :] / 24, +results["H2_produced"][index, :]
x2, y2 = results["times"][index, :] / 24, -results["H2_utilized"][index, :]
for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
ax.plot([0.0], [0.0], linestyle="", marker="", label="Period " + str(index + 1))
ax.plot(x1, y1, linewidth=0.75, linestyle='-', color='k', label="Produced")
ax.plot(x2, y2, linewidth=0.75, linestyle='-', color='r', label="Utilized")
ax.set_ylabel('Mass flow (kg/s)', fontsize=fontsize, color='k', labelpad=fontsize)
if index + 1 == n_axes:
ax.set_xlabel('Time (days)', fontsize=fontsize, color='k', labelpad=fontsize)
ax.legend(ncol=1, loc='lower right', fontsize=fontsize-1, edgecolor='k', framealpha=1.0)
dy = max(np.max(y1)-np.min(y2), 0.02)
ax.set_ylim([np.min(y2)-dy/5, np.max(y1)+dy/5])
fig.tight_layout()
return fig, axes
| 5,349,316 |
def ref_dw(fc, fmod):
"""Give the reference value for roughness by linear interpolation from the data
given in "Psychoacoustical roughness: implementation of an optimized model"
by Daniel and Weber in 1997
Parameters
----------
fc: integer
carrier frequency
fmod: integer
modulation frequency
Output
------
roughness reference values from the article by Daniel and Weber
"""
if fc == 125:
fm = np.array(
[
1.0355988,
10.355987,
11.132686,
13.851132,
18.511328,
20.064724,
24.724918,
31.32686,
41.423946,
49.967636,
57.34628,
64.33657,
72.10356,
90.74434,
79.4822,
86.084145,
91.909386,
100.45307,
]
)
R = np.array(
[
0.0,
0.04359673,
0.09468665,
0.16416894,
0.19482289,
0.27656674,
0.3113079,
0.34196186,
0.32356948,
0.26226157,
0.20299728,
0.15803815,
0.11512262,
0.0619891,
0.09264305,
0.07016349,
0.05177112,
0.03950954,
]
)
if fc == 250:
fm = np.array(
[
0.7373272,
3.9324117,
9.585254,
14.2549925,
16.71275,
19.907835,
22.611366,
23.594471,
29.493088,
30.47619,
37.112137,
41.29032,
47.926266,
50.13825,
51.121353,
53.08756,
54.07066,
56.774193,
58.248848,
62.427036,
61.68971,
69.308754,
68.57143,
71.27496,
73.73272,
73.97849,
75.207375,
79.139786,
79.139786,
84.792625,
90.19969,
97.81874,
104.70046,
112.31951,
120.92166,
129.76959,
]
)
R = np.array(
[
0.00432277,
0.00576369,
0.06340057,
0.16138329,
0.17435159,
0.26945245,
0.32132566,
0.3443804,
0.42651296,
0.44668588,
0.47694525,
0.4668588,
0.42651296,
0.46253604,
0.41210374,
0.4020173,
0.43948126,
0.37463978,
0.39193085,
0.3631124,
0.3429395,
0.3040346,
0.28242075,
0.27521613,
0.259366,
0.24207492,
0.24351585,
0.2204611,
0.20461094,
0.17146975,
0.14697406,
0.11815562,
0.09942363,
0.07636888,
0.05619597,
0.04322766,
]
)
if fc == 500:
fm = np.array(
[
7.6375403,
15.79288,
20.841423,
26.666666,
30.93851,
34.43366,
40.2589,
44.919094,
49.190937,
51.521034,
57.34628,
64.33657,
69.77346,
74.04531,
81.42395,
87.63754,
94.23948,
102.78317,
116.763756,
129.57928,
140.84143,
149.77347,
160.2589,
]
)
R = np.array(
[
0.04972752,
0.1253406,
0.23569483,
0.35013625,
0.46457765,
0.5258856,
0.619891,
0.67302454,
0.69346046,
0.69550407,
0.6873297,
0.67098093,
0.6321526,
0.57901907,
0.5074932,
0.4400545,
0.38487738,
0.3153951,
0.22752044,
0.16621253,
0.11920981,
0.08651226,
0.06811989,
]
)
if fc == 1000:
fm = np.array(
[
0.0,
3.884415,
9.7237625,
17.147604,
29.302307,
37.933605,
48.504757,
55.145306,
55.948395,
57.480103,
60.618927,
63.314735,
65.28852,
67.201035,
69.55657,
76.14433,
77.2943,
82.847725,
83.352325,
88.26008,
89.019806,
93.92756,
94.4309,
97.78904,
99.06719,
104.23258,
103.963005,
106.03293,
109.89504,
111.18953,
115.05101,
117.38172,
119.95311,
125.630646,
132.60141,
137.24963,
144.47617,
151.19432,
159.97737,
]
)
R = np.array(
[
0.0,
0.00211198,
0.03450088,
0.1382977,
0.40437,
0.60555416,
0.80238307,
0.89103884,
0.9516347,
0.90182984,
0.9753813,
0.92339617,
0.9969634,
0.92983717,
0.9882475,
0.9556905,
0.92104256,
0.89138556,
0.86107534,
0.83503467,
0.7960629,
0.7700222,
0.736826,
0.71946436,
0.6819286,
0.6529984,
0.6284707,
0.62555665,
0.5764418,
0.5764243,
0.52586645,
0.52727795,
0.48683867,
0.44491437,
0.40008652,
0.3726063,
0.3205599,
0.29016566,
0.24531329,
]
)
if fc == 2000:
fm = np.array(
[
0.0,
4.4051557,
7.5956764,
10.048887,
12.017292,
15.69636,
17.911657,
20.366364,
20.619616,
25.28251,
27.987852,
30.20053,
31.18548,
34.37525,
34.38161,
39.782192,
39.298134,
42.23989,
42.981316,
45.18539,
44.95683,
46.663754,
48.13538,
50.358532,
53.04068,
55.264206,
56.971127,
58.68778,
60.890354,
62.367218,
62.84529,
65.06246,
67.00842,
68.48715,
71.90736,
73.62214,
76.79096,
79.24305,
81.67831,
85.10337,
91.45038,
93.655945,
96.586105,
96.33435,
98.04801,
106.5901,
107.57281,
115.62524,
118.07209,
120.26419,
121.97673,
129.54285,
131.255,
134.91576,
135.15628,
136.87106,
144.92911,
159.83092,
]
)
R = np.array(
[
0.00271003,
0.00538277,
0.04194128,
0.06631085,
0.10694477,
0.1407891,
0.18955104,
0.21934068,
0.250504,
0.30331025,
0.35477808,
0.39405492,
0.41708192,
0.4509304,
0.47396567,
0.54031587,
0.55929023,
0.5809457,
0.60803974,
0.6161512,
0.674419,
0.65407926,
0.66761696,
0.74483424,
0.71229106,
0.7908634,
0.7705236,
0.7854143,
0.78810567,
0.8206137,
0.779959,
0.83549607,
0.79482895,
0.83411205,
0.8164678,
0.8245834,
0.78255093,
0.8028555,
0.76218426,
0.76215523,
0.7119658,
0.7254973,
0.7051472,
0.67940396,
0.6834545,
0.6088561,
0.62375295,
0.5478037,
0.549138,
0.5138889,
0.5138744,
0.4487694,
0.44739988,
0.41484842,
0.39994115,
0.40805677,
0.3524327,
0.27371538,
]
)
if fc == 4000:
fm = np.array(
[
3.1950846,
16.221199,
23.840246,
29.984638,
30.230415,
37.112137,
37.603687,
45.714287,
51.85868,
57.265743,
63.90169,
68.57143,
74.47005,
78.156685,
82.33487,
88.97082,
98.064514,
108.14132,
115.02304,
123.870964,
128.78648,
133.21045,
143.04147,
151.39784,
155.08449,
157.29646,
160.24577,
]
)
R = np.array(
[
0.00432277,
0.11383285,
0.23054755,
0.29538906,
0.31123918,
0.39337176,
0.41066283,
0.50864553,
0.5907781,
0.62680113,
0.6426513,
0.65273774,
0.64841497,
0.6440922,
0.6152738,
0.5720461,
0.5158501,
0.45677233,
0.41210374,
0.3631124,
0.34149855,
0.3184438,
0.2795389,
0.24495678,
0.24783862,
0.23919308,
0.24063401,
]
)
if fc == 8000:
fm = np.array(
[
4.6498036,
7.1022663,
8.569778,
16.16957,
23.037289,
24.018497,
25.735521,
27.451048,
30.885843,
33.578465,
34.319515,
38.48526,
40.206398,
42.654747,
45.355972,
50.995964,
52.953144,
55.896774,
56.631092,
60.54957,
61.772808,
63.238823,
66.18058,
68.86871,
70.58611,
72.78196,
74.744,
78.409225,
80.61181,
82.31723,
86.23272,
87.20532,
90.384995,
91.11295,
96.73499,
100.39909,
106.50631,
117.26071,
127.28154,
137.0596,
145.37276,
154.66376,
159.55597,
]
)
R = np.array(
[
0.0053807,
0.02704024,
0.0256728,
0.08251926,
0.14614701,
0.15562384,
0.17186953,
0.18269515,
0.21789658,
0.22329386,
0.24903294,
0.27338803,
0.30453888,
0.31129324,
0.3478559,
0.3952338,
0.39521724,
0.42364773,
0.42499653,
0.43986857,
0.4398582,
0.4330707,
0.4547261,
0.44386315,
0.46146387,
0.43976498,
0.4573636,
0.44107231,
0.4437637,
0.4180039,
0.42203578,
0.40034726,
0.39761028,
0.3759238,
0.35826093,
0.3379046,
0.30533242,
0.2686558,
0.23334044,
0.20480223,
0.18711658,
0.1667126,
0.16396113,
]
)
return np.interp(fmod, fm, R)
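# Illustrative usage: reference roughness for a 1 kHz carrier modulated at 70 Hz,
# interpolated from the digitized Daniel & Weber curves above.
example_roughness = ref_dw(1000, 70)
print(example_roughness)  # roughly 0.99 asper for this carrier/modulation pair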
| 5,349,317 |
def points_2d_inside_image(
width: int,
height: int,
camera_model: str,
points_2d: np.ndarray,
points_3d: Optional[np.ndarray] = None,
) -> np.ndarray:
"""Returns the indices for an array of 2D image points that are inside the image canvas.
Args:
width: Pixel width of the image canvas.
height: Pixel height of the image canvas.
camera_model: One of `opencv_pinhole`, `opencv_fisheye`, `pd_fisheye`.
More details in :obj:`~.model.sensor.CameraModel`.
points_2d: A matrix with dimensions (nx2) containing the points that should be tested
if inside the image canvas. Points must be in image coordinate system (x,y).
points_3d: Optional array of size (nx3) which provides the 3D camera coordinates for each point. Required for
camera models `opencv_pinhole` and `opencv_fisheye`.
Returns:
An array with dimensions (n,).
"""
if camera_model in (CAMERA_MODEL_OPENCV_PINHOLE, CAMERA_MODEL_OPENCV_FISHEYE) and points_3d is None:
raise ValueError(f"`points_3d` must be provided for camera model {camera_model}")
if points_3d is not None and len(points_2d) != len(points_3d):
raise ValueError(
f"Mismatch in length between `points_2d` and `points_3d` with {len(points_2d)} vs. {len(points_3d)}"
)
return np.where(
(points_2d[:, 0] >= 0)
& (points_2d[:, 0] < width)
& (points_2d[:, 1] >= 0)
& (points_2d[:, 1] < height)
& (points_3d[:, 2] > 0 if camera_model in (CAMERA_MODEL_OPENCV_PINHOLE, CAMERA_MODEL_OPENCV_FISHEYE) else True)
)
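# Illustrative usage with small synthetic arrays (the camera-model string
# follows the names listed in the docstring):
import numpy as np

pts_2d = np.array([[10.0, 20.0], [-5.0, 8.0], [639.0, 479.0]])
pts_3d = np.array([[0.1, 0.2, 5.0], [0.0, 0.0, 2.0], [1.0, 1.0, -1.0]])
inside = points_2d_inside_image(640, 480, "opencv_pinhole", pts_2d, pts_3d)
# only index 0 is kept: it lies inside the 640x480 canvas and has positive depth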
| 5,349,318 |
def list2tensors(some_list):
"""
Description:
Convert each element of a list into a torch tensor.
Args:
some_list (list): elements accepted by ``torch.tensor``
(numbers, nested lists, numpy arrays, ...).
Shape:
- Input: list
- Output: list of tensors
"""
t_list=[]
for i in some_list:
t_list.append(torch.tensor(i))
return t_list
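# Illustrative usage:
import torch

print(list2tensors([1, 2.5, [3, 4]]))  # [tensor(1), tensor(2.5000), tensor([3, 4])]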
| 5,349,319 |
def print_sys_info(verbosity=None):
"""Print package information as a formatted table."""
global _COL_MAX
pkgs = get_info_fluiddyn()
pkgs_third_party = get_info_third_party()
widths = None
width_pkg_name = 0
for _pkgs in (pkgs, pkgs_third_party):
for pkg_name, pkg_details in _pkgs.items():
width_pkg_name = max(width_pkg_name, len(pkg_name) + 1)
widths = get_col_widths(pkg_details, widths)
widths.insert(0, width_pkg_name)
pkgs_keys = list(pkgs)
heading = ["Package"]
heading.extend(pkgs[pkgs_keys[0]])
widths = reset_col_width(widths)
_print_heading(heading, widths=widths)
def print_pkg(about_pkg):
for v, w in zip(about_pkg.values(), widths[1:]):
v = str(v)
if len(v) > w:
v = v[:10] + "..." + v[10 + 4 - w :]
_print_item(str(v), width=w)
print()
for pkg_name, about_pkg in pkgs.items():
print(pkg_name.ljust(widths[0]), end="")
print_pkg(about_pkg)
for pkg_name, about_pkg in pkgs_third_party.items():
print(pkg_name.ljust(widths[0]), end="")
print_pkg(about_pkg)
info_sw = get_info_software()
_print_dict(info_sw, "Software")
info_hw = get_info_hardware()
_print_dict(info_hw, "Hardware")
info_py = get_info_python()
_print_dict(info_py, "Python")
if verbosity is not None:
_print_heading("\nNumPy", case=None)
get_info_numpy(True, verbosity)
info_h5py = get_info_h5py()
_print_dict(info_h5py, "h5py", case=None)
print()
| 5,349,320 |
def interpret_input(inputs):
""" convert input entries to usable dictionaries """
for key, value in inputs.items(): # interpret each line's worth of entries
if key in ['v0', 'y0', 'angle']: # for variables, interpret distributions
converted = interpret_distribution(key, value) # use a separate method to keep things clean
elif key == 'metric': # metrics are easy, they're just a list
converted = list(x.strip().lower() for x in value.split(','))
for c in converted: # check the metrics are valid entries
if c not in ['mean', 'std', 'percentile']:
raise IOError('Unrecognized metric:', c)
else:
raise IOError('Unrecognized keyword entry: {} = {}'.format(key, value))
inputs[key] = converted # replace the key with the converted values
return inputs
| 5,349,321 |
def time(prompt=None, output_hour_clock=24, milli_seconds=False, fill_0s=True, allow_na=False):
"""
Repeatedly ask the user to input hours, minutes and seconds until they input valid values and return this in a defined format
:param prompt: Message to display to the user before asking them for inputs. Default: None
:param output_hour_clock: Whether to output in 24 hour clock or in 12 hour clock with AM/PM. Default: 24
:param milli_seconds: Whether or not to allow more accuracy in seconds. Default: False
:param fill_0s: Whether or not to fill numerical times with leading 0s. Default: True
:param allow_na: Whether or not to allow empty inputs too. Default: False
"""
extras = [""] if allow_na else None
output_hour_clock = assert_valid(output_hour_clock, SpecNumList([12, 24], None, True), "param output_hour_clock")
if prompt is not None:
print(prompt, "\n")
input_hour_clock = validate_input(SpecNumList([12, 24], None, True), "Input hour clock (12/24): ")
if input_hour_clock == 12:
hours = validate_input(SpecNumRange(1, 12, None, True, extras), "Hours (12 hour clock): ")
period = validate_input(SpecStr(["am", "pm"], extra_values=extras), "AM or PM? ")
if hours == 12:
hours = 0
if period == "pm":
hours += 12
else:
hours = validate_input(SpecNumRange(0, 23, None, True, extras), "Hours (24 hour clock): ")
minutes = validate_input(SpecNumRange(0, 59, None, True, extras), "Minutes: ")
if milli_seconds:
seconds = validate_input(SpecNumRange(0, 59.999999, 6, False, extras), "Seconds including decimal: ")
else:
seconds = validate_input(SpecNumRange(0, 59, 0, True, extras), "Seconds: ")
if hours is not None and output_hour_clock == 12:
if hours < 12:
period = "AM"
else:
period = "PM"
hours %= 12
if hours == 0:
hours = 12
if fill_0s:
if hours is not None and hours < 10:
hours = "0" + str(hours)
if minutes is not None and minutes < 10:
minutes = "0" + str(minutes)
if seconds is not None and seconds < 10:
seconds = "0" + str(seconds)
to_return = "{}:{}:{}".format(hours, minutes, seconds)
if output_hour_clock == 12:
to_return += " {}".format(period)
return to_return
| 5,349,322 |
def main():
"""
Simple tests for the L3OrderBookManager class
"""
os.system("cls")
print("------------ORDERBOOK TEST------------")
order_book = L3OrderBookManager()
# Testing insertion
order1 = create_order(1, 100, 10, 1, 2)
order2 = create_order(2, 100, 20, 1, 2)
order3 = create_order(3, 100, 30, 1, 2)
order4 = create_order(4, 100, 40, 1, 2)
order5 = create_order(5, 100, 50, 1, 2)
orders = [order1, order2, order3, order4, order5]
for order in orders:
order_book.handle_event(order)
order_book.dump()
# Testing deletion
order = create_order(3, 0, 0, 1, 3)
order_book.handle_event(order)
order_book.dump()
# Testing update
order = create_order(1, 100, 40, 1, 4)
order_book.handle_event(order)
order_book.dump()
# Testing empty level
order = create_order(6, 200, 40, 1, 2)
order_book.handle_event(order)
order_book.dump()
order = create_order(6, 50, 0, 1, 3)
order_book.handle_event(order)
order_book.dump()
print("----------END ORDERBOOK TEST----------")
| 5,349,323 |
def update_notes(text1, text2, index):
"""
Displays notes with the given index in given text widgets.
If index is lower than 0, displays notes for index 0.
If index is higher than the highest index for which there are
notes, the text widgets are left empty.
text2 is always given the notes at index (index + 1) if existing
"""
max_index = (len(runtime_info["notes"]) - 1)
if index < 0:
index = 0
text1.config(state=tkinter.NORMAL)
text2.config(state=tkinter.NORMAL)
text1.delete("1.0", tkinter.END)
text2.delete("1.0", tkinter.END)
if index <= max_index:
text1.insert(tkinter.END, runtime_info["notes"][index])
# notes for index + 1 can only be displayed when index < max_index
if index < max_index:
text2.insert(tkinter.END, runtime_info["notes"][index + 1])
text1.config(state=tkinter.DISABLED)
text2.config(state=tkinter.DISABLED)
| 5,349,324 |
def PySys_GetFile(space, name, def_):
"""Return the FILE* associated with the object name in the
sys module, or def if name is not in the module or is not associated
with a FILE*."""
raise NotImplementedError
| 5,349,325 |
def byol_a_url(ckpt, refresh=False, *args, **kwargs):
"""
The model from URL
ckpt (str): URL
"""
return byol_a_local(_urls_to_filepaths(ckpt, refresh=refresh), *args, **kwargs)
| 5,349,326 |
def parse_csv_row(obj: dict[str, Any], rowtype: str, content: str) -> None:
"""Parses a row in the CSV."""
if rowtype == 'row':
if 'dataset_labels' not in obj:
obj['dataset_labels'] = defaultdict(list)
assert '|' in content
ds, ds_label = content.split('|')
obj['dataset_labels'][ds].append(ds_label)
elif rowtype == 'datasettaxon':
if 'taxa' not in obj:
obj['taxa'] = []
assert '|' in content
ds, taxon_level, taxon_name = content.split('|')
obj['taxa'].append({
'level': taxon_level,
'name': taxon_name,
'datasets': [ds]
})
elif rowtype == 'max_count':
obj['max_count'] = int(content)
elif rowtype == 'prioritize':
obj['prioritize'] = eval(content) # pylint: disable=eval-used
else:
if 'taxa' not in obj:
obj['taxa'] = []
taxon_level = rowtype
taxon_name = content
obj['taxa'].append({
'level': taxon_level,
'name': taxon_name
})
| 5,349,327 |
def bellman_ford(g, start):
"""
Given a directed graph with possibly negative edge weights and with n vertices and m edges as well
as its vertex s, compute the length of shortest paths from s to all other vertices of the graph.
Returns dictionary with vertex as key.
- If vertex not present in the dictionary, then it is not reachable from s
- If distance to vertex is None, then this vertex is reachable from a negative cycle
- Otherwise, value of a dictionary is the length of a path from s to a vertex
"""
dist = {}
prev = {}
dist[start] = 0
def __construct_path(t):
path = []
path.append(t)
u = prev[t]
while u in prev and u != t:
path.append(u)
u = prev[u]
path.reverse()
return path
c = Graph()
for _ in g.get_vertices():
relaxed = False
for e in g.get_edges():
u = e.start
v = e.end
w = e.weight
if u not in dist:
continue
if v not in dist or dist[u] + w < dist[v]:
dist[v] = dist[u] + w
prev[v] = u
relaxed = True
c.add_edge(u, v, w)
if not relaxed:
return dist
ncv = set()
for e in g.get_edges():
u = e.start
v = e.end
w = e.weight
if u not in dist:
continue
if v in dist and dist[u] + w < dist[v]:
for x in __construct_path(u):
ncv.add(x)
dist[v] = dist[u] + w
prev[v] = u
for v in ncv:
if v not in dist:
continue
if dist[v] is None:
continue
visited = set()
q = deque()
q.append(v)
while q:
x = q.popleft()
dist[x] = None
visited.add(x)
for e in c.get_edges(x):
if e.end in visited:
continue
q.append(e.end)
return dist
| 5,349,328 |
def _read_date(settings_file):
"""Get the data from the settings.xml file
Parameters
----------
settings_file : Path
path to settings.xml inside open-ephys folder
Returns
-------
datetime
start time of the recordings
Notes
-----
The start time is present in the header of each file. This might be useful
if 'settings.xml' is not present.
"""
locale.setlocale(locale.LC_TIME, 'en_US.utf-8')
root = ElementTree.parse(settings_file).getroot()
for e0 in root:
if e0.tag == 'INFO':
for e1 in e0:
if e1.tag == 'DATE':
break
return datetime.strptime(e1.text, '%d %b %Y %H:%M:%S')
| 5,349,329 |
def conv_res_step(x, hparams, padding, mask):
"""One step of convolutions and mid-residual."""
k = (hparams.kernel_height, hparams.kernel_width)
k2 = (hparams.large_kernel_size, 1)
dilations_and_kernels1 = [((1, 1), k), ((1, 1), k)]
dilations_and_kernels2 = [((1, 1), k2), ((4, 4), k2)]
with tf.variable_scope("conv_res_step"):
y = common_layers.subseparable_conv_block(
x, hparams.filter_size, dilations_and_kernels1,
padding=padding, mask=mask, separabilities=0, name="residual1")
y = tf.nn.dropout(y, 1.0 - hparams.dropout)
return common_layers.subseparable_conv_block(
y, hparams.hidden_size, dilations_and_kernels2,
padding=padding, mask=mask, separabilities=0, name="residual2")
| 5,349,330 |
def _get_kind_name(item):
"""Returns the kind name in CollectionDef.
Args:
item: A data item.
Returns:
The string representation of the kind in CollectionDef.
"""
if isinstance(item, (six.string_types, six.binary_type)):
kind = "bytes_list"
elif isinstance(item, six.integer_types):
kind = "int64_list"
elif isinstance(item, float):
kind = "float_list"
elif isinstance(item, Any):
kind = "any_list"
else:
kind = "node_list"
return kind
| 5,349,331 |
def set_default_config(app):
"""
:param app:
"""
app.config.setdefault('RB_DEFAULT_ACCEPTABLE_MIMETYPES', {
v.mimetype for _, v in DEFAULT_BUILDERS.items()
})
app.config.setdefault('RB_DEFAULT_RESPONSE_FORMAT', DEFAULT_BUILDERS['json'].mimetype)
app.config.setdefault('RB_FORMAT_KEY', 'format')
app.config.setdefault('RB_DEFAULT_ENCODE', 'utf-8')
app.config.setdefault('RB_DEFAULT_DUMP_INDENT', None)
app.config.setdefault('RB_BASE64_ALTCHARS', None)
app.config.setdefault('RB_HTML_DEFAULT_TEMPLATE', None)
app.config.setdefault('RB_HTML_AS_TABLE', False)
app.config.setdefault('RB_YAML_ALLOW_UNICODE', True)
app.config.setdefault('RB_CSV_DEFAULT_NAME', 'filename')
app.config.setdefault('RB_CSV_DELIMITER', ';')
app.config.setdefault('RB_CSV_QUOTING_CHAR', '"')
app.config.setdefault('RB_CSV_DIALECT', 'excel-tab')
app.config.setdefault('RB_XML_CDATA', False)
app.config.setdefault('RB_XML_ROOT', 'ROOT')
app.config.setdefault('RB_FLATTEN_PREFIX', '')
app.config.setdefault('RB_FLATTEN_SEPARATOR', '_')
app.config.setdefault('RB_JSONP_PARAM', 'callback')
| 5,349,332 |
def Froude_number(v, h, g=9.80665):
"""
Calculate the Froude Number of a river, channel or duct flow,
to check the subcritical flow assumption (Fr < 1).
Parameters
------------
v : int/float
Average velocity [m/s].
h : int/float
Mean hydraulic depth [m].
g : int/float
Gravitational acceleration [m/s2].
Returns
---------
Fr : float
Froude Number of the river [unitless].
"""
assert isinstance(v, (int,float)), 'v must be of type int or float'
assert isinstance(h, (int,float)), 'h must be of type int or float'
assert isinstance(g, (int,float)), 'g must be of type int or float'
Fr = v / np.sqrt( g * h )
return Fr
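# Illustrative usage: a 2 m/s flow over a 1.5 m mean hydraulic depth.
Fr_example = Froude_number(2.0, 1.5)
print(Fr_example)  # ~0.52, i.e. subcritical flow (Fr < 1)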
| 5,349,333 |
def handle_error(
func: Callable[[Command | list[Command]], Any]
) -> Callable[[str], Any]:
"""Handle tradfri api call error."""
@wraps(func)
async def wrapper(command: Command | list[Command]) -> None:
"""Decorate api call."""
try:
await func(command)
except RequestError as err:
_LOGGER.error("Unable to execute command %s: %s", command, err)
return wrapper
| 5,349,334 |
def write(ser, command, log):
"""Write command to serial port, append what you write to log."""
ser.write(command)
summary = " I wrote: " + repr(command)
log += summary + "\n"
print(summary)
return log
| 5,349,335 |
def update(probabilities, one_gene, two_genes, have_trait, p):
"""
Add to `probabilities` a new joint probability `p`.
Each person should have their "gene" and "trait" distributions updated.
Which value for each distribution is updated depends on whether
the person is in `have_gene` and `have_trait`, respectively.
"""
for key, value in probabilities.items():
# update the probability that the person has x copies of gene by adding the joint probability p
if key in one_gene:
probabilities[key]["gene"][1] = probabilities[key]["gene"][1] + p
elif key in two_genes:
probabilities[key]["gene"][2] = probabilities[key]["gene"][2] + p
else:
probabilities[key]["gene"][0] = probabilities[key]["gene"][0] + p
# update the probability that the person exhibits a trait by adding the joint probability p
if key in have_trait:
probabilities[key]["trait"][True] = probabilities[key]["trait"][True] + p
else:
probabilities[key]["trait"][False] = probabilities[key]["trait"][False] + p
| 5,349,336 |
def anscombe(x):
"""Compute Anscombe transform."""
return 2 * np.sqrt(x + 3 / 8)
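# Illustrative usage (the transform approximately stabilizes Poisson variance):
import numpy as np

print(anscombe(np.array([0.0, 1.0, 4.0])))  # approx. [1.2247 2.3452 4.1833]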
| 5,349,337 |
def max_accuracy(c1, c2):
"""
Relabel the predicted labels *in order* to
achieve the best accuracy, and return that
score and the best labelling
Parameters
----------
c1 : np.array
numpy array with label of predicted cluster
c2 : np.array
numpy array with label of true cluster
"""
c1 = c1.astype(str)
c2 = c2.astype(str)
match_satimage = pd.DataFrame({"Guess": c1, "True": c2})
match_satimage['match'] = match_satimage['Guess'] + '_t' + match_satimage['True']
comparison = pd.DataFrame(match_satimage['match'])
A = comparison.value_counts()
sum = 0
clusters = []
c1new = np.copy(c1).astype(int)
j = 0
for i in range(len(A)):
C_str = A[[i]].index.values[0][0]
#print(C_str)
CTL = C_str.split('_')
if CTL[0] in clusters or CTL[1] in clusters or CTL[0] == '-1':
pass
else:
c1new[c1 == CTL[0]] = CTL[1][1:]
clusters.append(CTL[0])
clusters.append(CTL[1])
sum = sum + int(A[[i]])
#print(clusters)
#print(sum)
j = j + 1
accuracy = sum/len(c1)
return accuracy, c1new.astype(int)
| 5,349,338 |
def create_sphere():
"""Create and return a single sphere of radius 5."""
sphere = rt.sphere()
sphere.radius = 5
return sphere
| 5,349,339 |
def calc_entropy(data):
"""
Calculate the entropy of a dataset.
Input:
- data: any dataset where the last column holds the labels.
Returns the entropy of the dataset.
"""
entropy = 0.0
###########################################################################
# TODO: Implement the function. #
###########################################################################
labels = np.unique(data[:, -1])
for label in labels:
entropy -= (np.count_nonzero(data[:, -1] == label) / data.shape[0]) * np.log2(np.count_nonzero(data[:, -1] == label) / data.shape[0])
pass
###########################################################################
# END OF YOUR CODE #
###########################################################################
return entropy
| 5,349,340 |
def get_input(prompt=None):
"""Sets the prompt and waits for input.
:type prompt: None | list[Text] | str
"""
if not isinstance(prompt, type(None)):
if type(prompt) == str:
text_list = [Text(prompt, color=prompt_color,
new_line=True)]
elif type(prompt) == list:
text_list = prompt
else:
raise Exception("Must be None, str, or list[Text]")
update_textbox("events", text_list)
_user_input = check_input()
while isinstance(_user_input, type(None)):
time.sleep(.1)
if not is_running():
return None
_user_input = check_input()
return _user_input
| 5,349,341 |
def alpha_liq(Nu, lyambda_feed, d_inner):
"""
Calculates the coefficient of heat transfer (alpha) from the liquid to the pipe wall.
Parameters
----------
Nu : float
The Nusselt criterion, [dimensionless]
lyambda_feed : float
The thermal conductivity of the feed, [W / (m * degree Celsius)]
d_inner : float
The diameter of the inner pipe, [m]
Returns
-------
alpha_liq : float
The coefficient of heat transfer (alpha), [W / (m**2 * degree Celsius)]
References
----------
Romankov, formula 4.11, p. 150
"""
return Nu * lyambda_feed / d_inner
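# Illustrative usage with hypothetical values: Nu = 120, feed conductivity
# 0.6 W/(m*C) and an inner pipe diameter of 0.05 m.
print(alpha_liq(120, 0.6, 0.05))  # 1440.0 W/(m**2 * degree Celsius)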
| 5,349,342 |
def get_dataset(opts):
""" Dataset And Augmentation
"""
if opts.dataset == 'camvids':
mean, std = camvids.get_norm()
train_transform = train_et.ExtCompose([
# et.ExtResize(size=opts.crop_size),
train_et.ExtRandomScale((0.5, 2.0)),
train_et.ExtRandomHorizontalFlip(),
train_et.New_ExtRandomCrop(
size=(481, 481), pad_if_needed=True),
train_et.ExtToTensor(),
train_et.ExtNormalize(mean=mean, std=std),
])
if opts.crop_val:
val_transform = et.ExtCompose([
et.ExtResize(opts.crop_size),
et.ExtCenterCrop(opts.crop_size),
et.ExtToTensor(),
et.ExtNormalize(mean=mean, std=std),
])
else:
val_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=mean, std=std),
])
train_dst = camvids.CamvidSegmentation(opts.data_root, image_set='trainval', transform=train_transform, num_copys=opts.num_copys)
val_dst = camvids.CamvidSegmentation(opts.data_root, image_set='test', transform=val_transform)
if opts.dataset == 'voc':
# train_transform = et.ExtCompose([
# #et.ExtResize(size=opts.crop_size),
# et.ExtRandomScale((0.5, 2.0)),
# et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size), pad_if_needed=True),
# et.ExtRandomHorizontalFlip(),
# et.ExtToTensor(),
# et.ExtNormalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225]),
# ])
train_transform = train_et.ExtCompose([
# et.ExtResize(size=opts.crop_size),
train_et.ExtRandomScale((0.5, 2.0)),
train_et.ExtRandomHorizontalFlip(),
train_et.New_ExtRandomCrop(
size=(opts.crop_size, opts.crop_size), pad_if_needed=True),
train_et.ExtToTensor(),
train_et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
if opts.crop_val:
val_transform = et.ExtCompose([
et.ExtResize(opts.crop_size),
et.ExtCenterCrop(opts.crop_size),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
else:
val_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
train_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='train', download=opts.download, transform=train_transform, num_copys=opts.num_copys)
val_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='val', download=False, transform=val_transform)
if opts.dataset == 'cityscapes':
train_transform = et.ExtCompose([
# et.ExtResize( 512 ),
et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size)),
et.ExtColorJitter(brightness=0.5, contrast=0.5, saturation=0.5),
et.ExtRandomHorizontalFlip(),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
val_transform = et.ExtCompose([
# et.ExtResize( 512 ),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
train_dst = Cityscapes(root=opts.data_root, split='train', transform=train_transform, num_copys=opts.num_copys)
print("------------------------now copy: {:}----------------------------------".format(opts.num_copys))
val_dst = Cityscapes(root=opts.data_root,
split='val', transform=val_transform)
return train_dst, val_dst
| 5,349,343 |
def gen_sankey_diagram_distribute_query(query_statement, params, final_entites_name):
"""
Query the data distribution for the Sankey diagram.
:param query_statement:
:param params:
:param final_entites_name:
:return:
"""
query_statement = dgraph_get_project_count(query_statement)
# First-level tag nodes
first_level_sql = """select a.code as tag_code, a.alias as tag_alias, a.id as tag_id, b.code as parent_code
from tag as a, tag as b where a.parent_id in (select id from tag where code in ('techops', 'bissness'))
and a.parent_id = b.id and a.kpath = 1;"""
first_level_list = tagaction.query_direct_sql_to_map_list(connections['bkdata_basic_slave'], first_level_sql)
# Second-level tag nodes
first_level_id_tuple = tuple([each_tag['tag_id'] for each_tag in first_level_list])
second_level_sql = """select a.code as tag_code, a.alias as tag_alias, a.id as tag_id, b.code as parent_code
from tag as a, tag as b where a.parent_id in {} and a.parent_id = b.id and a.kpath = 1;""".format(
first_level_id_tuple
)
second_level_list = tagaction.query_direct_sql_to_map_list(connections['bkdata_basic_slave'], second_level_sql)
# graphQL
for each_tag in first_level_list:
query_statement += get_single_tag_query(
each_tag.get('tag_code'), '$final_filter_uids_name', need_me_count=False
)
# Mapping between the remaining nodes and processing_type
query_statement += get_other_tag_query(first_level_list, '$final_filter_uids_name')
for each_tag in second_level_list:
query_statement += get_single_tag_query(
each_tag.get('tag_code'), '$final_filter_uids_name', need_me_count=False, need_processing_type=True
)
query_statement = query_statement.replace('$final_filter_uids_name', final_entites_name)
query_statement += '\n}'
dgraph_result = meta_dgraph_complex_search(query_statement, return_original=True)
return {
'first_level_tag_list': first_level_list,
'second_level_tag_list': second_level_list,
'dgraph_result': dgraph_result,
}
| 5,349,344 |
def draw_rotation_button(button_data: ButtonData) -> None:
"""Draw the rotation button according to the button data given. This
essentially consists into filling the button surface and drawing a little
arrow onto it.
"""
if button_data['drawable']:
button_surface = button_data['surface']
button_surface.fill(BUTTON_BG_COLOR)
does_rotate_up = button_data['does_rotate_up']
left = button_surface.get_rect().left
right = button_surface.get_rect().right
top = button_surface.get_rect().top
bottom = button_surface.get_rect().bottom
w = (right - left) / 100 # One percent width
h = (bottom - top) / 100 # One percent height
point_list = [[
left + (20 * w), ((bottom - 20 * h)
if does_rotate_up else (top + 20 * h))
], [
right / 2, ((top + 20 * h)
if does_rotate_up else (bottom - 20 * h))
], [
right - (20 * w), ((bottom - 20 * h)
if does_rotate_up else (top + (20 * h)))
]]
pygame.draw.aalines(button_surface, BUTTON_FG_COLOR, False, point_list)
| 5,349,345 |
def declare(objective:str, displayname:str=None, criteria:str="dummy"):
"""
objective:str -> The id/name given to a scoreboard
displayname:str -> The name that will be displayed on screen
criteria:str -> The criteria of the scoreboard
"""
global SCOREBOARDS
SCOREBOARDS.append(objective)
if displayname is None:
return f"scoreboard objectives add {objective} {criteria}\n"
else:
return f"scoreboard objectives add {objective} {criteria} \"{displayname}\"\n"
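# Illustrative usage (assumes the module-level SCOREBOARDS list is initialised):
#
#     declare("coins", "Coins")
#     # -> 'scoreboard objectives add coins dummy "Coins"\n'
#     declare("deaths", criteria="deathCount")
#     # -> 'scoreboard objectives add deaths deathCount\n'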
| 5,349,346 |
def complement_angle(angle):
""" 90 minus angle, in degrees"""
return 90 - angle
| 5,349,347 |
def parse_custom_commands(command, separator=";"):
"""Parse run custom command string into the commands list
:param str command: run custom [config] command(s)
:param str separator: commands separator in the string
:rtype: list[str]
"""
if not command:
return []
return command.strip(separator).split(separator)
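# Illustrative usage:
print(parse_custom_commands("mkdir /tmp/x;touch /tmp/x/a;"))  # ['mkdir /tmp/x', 'touch /tmp/x/a']
print(parse_custom_commands(None))                            # []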
| 5,349,348 |
def lower_volatility_band(c, dev_target, band_target, center_target):
"""
| Calculates the lower volatility band
| Name: lower\_volatility\_band\_\ **c**\ \_times\_\ **band_target.name**\ &\ **dev_target.name**\ \_over\_\ **center_target.name**
:param c: Multiplier constant
:type c: float
:param dev_target: Used for band displacement. Can be a constant or a function
:type dev_target: function or float
:param band_target: Used for band displacement. Can be a constant or a function
:type band_target: function or float
:param center_target: Data column for the band center
:type center_target: str
"""
def return_function(data):
if hasattr(band_target, "name") & hasattr(dev_target, "name"):
column_name = f"lower_volatility_band_{c}_times_{band_target.name}&{dev_target.name}_under_{center_target.name}"
elif hasattr(band_target, "name"):
column_name = f"lower_volatility_band_{c}_times_{band_target.name}&{dev_target}_under_{center_target.name}"
else:
column_name = f"lower_volatility_band_{c}_times_{band_target}&{dev_target}_under_{center_target.name}"
if column_name not in data.columns:
data[column_name] = center_target - c * dev_target * band_target
return data[column_name].copy()
return return_function
| 5,349,349 |
def find_by_attr(node, value, name="name", maxlevel=None):
"""Identical to :any:`search.find_by_attr` but cached."""
return search.find_by_attr(node, value, name=name, maxlevel=maxlevel)
| 5,349,350 |
def training(request):
""" Function: training
* training top
"""
def _get_selected_object():
project_name = request.session.get('training_view_selected_project', None)
selected_project = Project.objects.get(name=project_name)
model_name = request.session.get('training_view_selected_model', None)
selected_model = MlModel.objects.get(name=model_name, project=selected_project)
return selected_project, selected_model
def _training_run():
selected_project, selected_model = _get_selected_object()
if (selected_model):
logging.debug(selected_model)
# --- Load config ---
config_path = os.path.join(selected_model.model_dir, 'config.json')
with open(config_path, 'r') as f:
config_data = json.load(f)
# --- Training Model ---
main_path = os.path.abspath('./app/machine_learning/main.py')
logging.debug(f'main_path: {main_path}')
logging.debug(f'current working directory: {os.getcwd()}')
subproc_training = subprocess.Popen(['python', main_path, '--mode', 'train', '--config', config_path])
logging.info(f'subproc: Training worker PID: {subproc_training.pid}')
# --- Update status and Register PID to MlModel database ---
selected_model.status = selected_model.STAT_TRAINING
selected_model.training_pid = subproc_training.pid
selected_model.save()
return
def _stop_trainer():
selected_project, selected_model = _get_selected_object()
if (selected_model):
logging.debug(selected_model)
# --- Load config ---
config_path = os.path.join(selected_model.model_dir, 'config.json')
with open(config_path, 'r') as f:
config_data = json.load(f)
# --- Get FIFO path ---
fifo = config_data['env']['trainer_ctrl_fifo']['value']
# --- Send stop command ---
with open(fifo, 'w') as f:
f.write('stop\n')
return
def _reset_trainer(request):
logging.debug('reset_trainer: ')
logging.debug(request.POST.keys())
'''
if ('reset_trainer' in request.POST.keys()):
trainer.release_memory()
'''
return
def _launch_tensorboard(model):
config_path = os.path.join(model.model_dir, 'config.json')
with open(config_path, 'r') as f:
config_data = json.load(f)
if (not model.tensorboard_pid in psutil.pids()):
subproc_tensorboard = subprocess.Popen(['tensorboard', \
'--logdir', model.model_dir, \
'--port', f'{config_data["env"]["tensorboard_port"]["value"]}'])
logging.info(f'subproc: Tensorboard worker PID: {subproc_tensorboard.pid}')
model.tensorboard_pid = subproc_tensorboard.pid
model.save()
# logging.info('-------------------------------------')
# logging.info(request.method)
# logging.info(request.POST)
# logging.info('-------------------------------------')
if (request.method == 'POST'):
if ('training_view_project_dropdown' in request.POST):
request.session['training_view_selected_project'] = request.POST.getlist('training_view_project_dropdown')[0]
elif ('training_view_model_dropdown' in request.POST):
curr_project = Project.objects.get(name=request.session['training_view_selected_project'])
if 'training_view_selected_model' in request.session.keys():
prev_model = MlModel.objects.get(name=request.session['training_view_selected_model'], project=curr_project)
else:
prev_model = None
request.session['training_view_selected_model'] = request.POST.getlist('training_view_model_dropdown')[0]
curr_model = MlModel.objects.get(name=request.session['training_view_selected_model'], project=curr_project)
# --- Close previous Tensorboard ---
# * https://psutil.readthedocs.io/en/latest/#kill-process-tree
if ((prev_model is not None) and (prev_model.tensorboard_pid is not None) and (prev_model.tensorboard_pid in psutil.pids())):
p = psutil.Process(prev_model.tensorboard_pid)
c = p.children(recursive=True)
c.append(p)
for p in c:
try:
p.send_signal(signal.SIGTERM)
except psutil.NoSuchProcess:
pass
gone, alive = psutil.wait_procs(c, timeout=3)
prev_model.tensorboard_pid = None
prev_model.save()
# --- Launch new Tensorboard ---
_launch_tensorboard(curr_model)
elif ('training_run' in request.POST):
_training_run()
elif ('stop_trainer' in request.POST):
_stop_trainer()
else:
logging.warning('Unknown POST command:')
logging.warning(request.POST)
return redirect('training')
else:
get_all_fifo_command()
sidebar_status = SidebarActiveStatus()
sidebar_status.training = 'active'
text = get_version()
project = Project.objects.all().order_by('-id').reverse()
project_name = request.session.get('training_view_selected_project', None)
if (project_name is not None):
project_dropdown_selected = Project.objects.get(name=project_name)
else:
project_dropdown_selected = None
if (project_dropdown_selected):
model = MlModel.objects.filter(project=project_dropdown_selected).order_by('-id').reverse()
model_name = request.session.get('training_view_selected_model', None)
if (model_name is not None):
model_dropdown_selected = MlModel.objects.get(name=model_name, project=project_dropdown_selected)
_launch_tensorboard(model_dropdown_selected)
else:
model_dropdown_selected = None
else:
model = MlModel.objects.all().order_by('-id').reverse()
model_dropdown_selected = None
# --- Get Tensorboard PORT ---
if (model_dropdown_selected is not None):
config_path = os.path.join(model_dropdown_selected.model_dir, 'config.json')
with open(config_path, 'r') as f:
config_data = json.load(f)
tensorboard_port = config_data["env"]["tensorboard_port"]["value"]
else:
tensorboard_port = None
context = {
'project': project,
'model': model,
'tensorboard_port': tensorboard_port,
'sidebar_status': sidebar_status,
'text': text,
'project_dropdown_selected': project_dropdown_selected,
'model_dropdown_selected': model_dropdown_selected
}
return render(request, 'training.html', context)
| 5,349,351 |
def remind(phenny, input):
"""Set a reminder"""
m = r_command.match(input.bytes)
if not m:
return phenny.reply("Sorry, didn't understand the input.")
length, scale, message = m.groups()
length = float(length)
factor = scaling.get(scale, 60)
duration = length * factor
if duration % 1:
duration = int(duration) + 1
else: duration = int(duration)
t = int(time.time()) + duration
reminder = (input.sender, input.nick, message)
try: phenny.remind_data[t].append(reminder)
except KeyError: phenny.remind_data[t] = [reminder]
dump_database(phenny)
if duration >= 60:
w = ''
if duration >= 3600 * 12:
w += time.strftime(' on %d %b %Y', time.gmtime(t))
w += time.strftime(' at %H:%MZ', time.gmtime(t))
phenny.reply('Okay, will remind%s' % w)
else: phenny.reply('Okay, will remind in %s secs' % duration)
| 5,349,352 |
def compile_shader(path_to_glslc, shader_path, stage, out_name, md5_name):
"""
-fauto-map-locations SPIR-V and later GLSL versions require inputs and outputs to be bound to an attribute location, this just assigns them automagically
-fauto-bind-uniforms SPIR-V and later GLSL versions require uniforms to have explicit binding, this just assigns them automagically
-O optimises performance over size
-o is the output file
:param path_to_glslc:
:param shader_path:
:param out_name:
:return:
"""
cmd_line = '{0} --target-spv=spv1.0 -fauto-map-locations -fauto-bind-uniforms -O -o {1} -fshader-stage={2} {3}'\
.format(path_to_glslc,
out_name,
stage,
shader_path)
result = subprocess.call(cmd_line,
stdin=None,
stdout=stdout,
stderr=stderr)
# Write MD5 of source
md5_file = open(md5_name, 'wb')
hash_md5 = hashlib.md5()
with open(shader_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
md5_file.write(hash_md5.digest())
md5_file.close()
if result == 0:
print('Compiled {0}, MD5: {1}'.format(shader_path, hash_md5.hexdigest()))
else:
print(result)
| 5,349,353 |
def set_random_seed(seed, device=None):
""" Set random seed before running training.
Refs:
https://qiita.com/TokyoMickey/items/cc8cd43545f2656b1cbd
https://github.com/chainer/chainer/issues/4550
"""
print(
"==> Set manual random seed to {} in process PID={} PPID={} on device={}".format(
seed, os.getpid(), os.getppid(), device
)
)
# set Python random seed
random.seed(seed)
# set NumPy random seed
np.random.seed(seed)
# NOTE: use device before setting up the random generator
# https://github.com/chainer/chainer/issues/4487
if device is not None:
chainer.backends.cuda.get_device_from_id(int(device)).use()
# set Chainer(CuPy) random seed
cp.random.seed(seed)
# force cuDNN to be deterministic
chainer.global_config.cudnn_deterministic = True
| 5,349,354 |
def get_timestamps_from_sensor_folder(sensor_folder_wildcard: str) -> NDArrayInt:
"""Timestamp always lies at end of filename.
Args:
sensor_folder_wildcard: string to glob to find all filepaths for a particular
sensor's files within a single log run
Returns:
Numpy array of integers, representing timestamps
"""
path_generator = glob.glob(sensor_folder_wildcard)
path_generator.sort()
timestamps: NDArrayInt = np.array([int(Path(jpg_fpath).stem.split("_")[-1]) for jpg_fpath in path_generator])
return timestamps
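# Illustrative usage sketch (hypothetical log layout where each image file name
# ends with `_<timestamp>.jpg`):
#
#     timestamps = get_timestamps_from_sensor_folder("log_01/ring_front_center/*.jpg")
#     # -> numpy array of integer timestamps parsed from the file names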
| 5,349,355 |
def plot(topography, subplot_location=111):
"""
Plot an image of the topography using matplotlib.
    Parameters
    ----------
    topography : :obj:`SurfaceTopography`
        Height information
    subplot_location : int, optional
        Matplotlib three-digit subplot specifier. (Default: 111)

    Returns
    -------
    ax : :obj:`matplotlib.axes.Axes`
        The axes the topography was plotted into.
    """
# We import here because we don't want a global dependence on matplotlib
import matplotlib.pyplot as plt
try:
sx, sy = topography.physical_sizes
except TypeError:
sx, sy = topography.nb_grid_pts
nx, ny = topography.nb_grid_pts
ax = plt.subplot(subplot_location, aspect=sx / sy)
Y, X = np.meshgrid(np.arange(ny + 1) * sy / ny,
np.arange(nx + 1) * sx / nx)
Z = topography[...]
mesh = ax.pcolormesh(X, Y, Z)
plt.colorbar(mesh, ax=ax)
ax.set_xlim(0, sx)
ax.set_ylim(0, sy)
if topography.unit is not None:
unit = topography.unit
else:
unit = 'a.u.'
ax.set_xlabel('Position $x$ ({})'.format(unit))
ax.set_ylabel('Position $y$ ({})'.format(unit))
return ax
| 5,349,356 |
def vehicles_missing(request):
"""
Displays to users their theft reports
"""
reports = TheftReport.objects.all()
return render(request, "vms/theft_reports.html", {
'reports': reports,
})
| 5,349,357 |
def isfile(path):
"""
    Check that the input file exists and return its absolute path; log an error and exit otherwise.
"""
if not os.path.isfile(path):
log("Input file not found: %s" % path)
sys.exit(1)
else:
return os.path.abspath(path)
| 5,349,358 |
def create_distribution_p_dest(tfs):
""" Calculates probability distributions of choosing a destination.
Note that city zones correspond to positions in datatensor and not
    the original IDs from the city_zone_coordinates.index array.
"""
p_dest = np.zeros(
(
tfs.number_zones,
tfs.number_zones,
tfs.T
)
)
min_x = np.zeros(
(
tfs.number_zones,
tfs.number_zones
)
)
max_x = np.zeros(
(
tfs.number_zones,
tfs.number_zones
)
)
normalization_factor = np.zeros(
(
tfs.number_zones,
tfs.T
)
)
for source in range(tfs.number_zones):
for dest in range(tfs.number_zones):
value_array = tfs.datatensor_mean[source, dest, :]
max_x[source, dest] = np.amax(
value_array
)
min_x[source, dest] = np.amin(
value_array
)
for source in range(tfs.number_zones):
for dest in range(tfs.number_zones):
for time in range(tfs.T):
if max_x[source, dest] > 0:
mean = tfs.datatensor_mean[source, dest, time]
p_dest[source, dest, time] = (
(
(mean - min_x[source, dest]) / (
max_x[source, dest] - min_x[source, dest]
)
)**tfs.e_dest
)
for source in range(tfs.number_zones):
for time in range(tfs.T):
value_array = p_dest[source, :, time]
normalization_factor[source, time] = np.sum(
value_array
)
for source in range(tfs.number_zones):
for time in range(tfs.T):
if normalization_factor[source, time] > 0:
for dest in range(tfs.number_zones):
p_dest[source, dest, time] = (
p_dest[source, dest, time] / (
normalization_factor[source, time]
)
)
tfs.p_dest = p_dest
| 5,349,359 |
def get_limits(data):
""" Get the x, y ranges of the ST data.
"""
y_min = 1e6
y_max = -1e6
x_min = 1e6
x_max = -1e6
for doc in data:
x = doc["x"]
y = doc["y"]
y_min = y if y < y_min else y_min
y_max = y if y > y_max else y_max
x_min = x if x < x_min else x_min
x_max = x if x > x_max else x_max
return x_min, x_max, y_min, y_max
| 5,349,360 |
def test_upload_list(staff_client):
"""Listview does not support GET"""
response = staff_client.get(api_path)
assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
| 5,349,361 |
def get_bool(prompt: str | None = None, default: bool = False) -> bool:
"""Gets a boolean response from the command line.
:param prompt: Input prompt.
:param default: Default value used if no characters are typed.
:return: Input boolean.
"""
input_str = input(_prompt_from_message(prompt, default='y' if default else 'n'))
return input_str.lower().startswith('y')
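# Usage sketch: prompt interactively and branch on the answer (requires a terminal).
if get_bool('Overwrite the existing file?', default=False):
    print('Overwriting...')
else:
    print('Keeping the existing file.')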
| 5,349,362 |
def create_config_file(access_key='', secret_key=''):
"""
Creates a config file for secret stuff. Option to provide keys.
Parameters:
access_key: Unsplash access key.
secret_key: Unsplash secret key.
"""
config = configparser.ConfigParser()
if not os.path.exists('config.ini'):
config['UNSPLASH'] = dict(access_key=access_key, secret_key=secret_key)
with open('config.ini', 'w+') as configfile:
config.write(configfile)
print('A new file is created. Please fill your access_key.')
else:
config.read('config.ini')
client_id = config.get('UNSPLASH', 'access_key', fallback='no_key')
if client_id in (None, '', 'no_key'):
print('No key is provided. Please fill your key.')
else:
print('Config file setup properly.')
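# Usage sketch: write the config skeleton, then read the key back. The key values
# are placeholders, and `configparser`/`os` are assumed to be imported at module
# level, as the function above already requires.
create_config_file(access_key='YOUR_ACCESS_KEY', secret_key='YOUR_SECRET_KEY')
config = configparser.ConfigParser()
config.read('config.ini')
print(config.get('UNSPLASH', 'access_key', fallback='no_key'))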
| 5,349,363 |
def proj_helsinki(x, y):
"""Project Helsinki coordinates into ETRS-GK25 (EPSG:3879).
https://www.hel.fi/helsinki/fi/kartat-ja-liikenne/kartat-ja-paikkatieto/paikkatiedot+ja+-aineistot/koordinaatistot_ja+_korkeudet/koordinaatti_ja_korkeusjarjestelmat # pylint: disable=line-too-long
"""
# pylint: disable=invalid-name
output_epsg = "EPSG:3879"
a = 6654650.14636
b = 25447166.49457
c = 0.99998725362
d = -0.00120230340
e = 0.00120230340
f = 0.99998725362
x, y = a + c * x + d * y, b + e * x + f * y
return x, y, output_epsg
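# Usage sketch: the inputs below are placeholder local coordinates, not surveyed values.
x_gk25, y_gk25, epsg = proj_helsinki(2776.0, 8438.0)
print(epsg)  # "EPSG:3879"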
| 5,349,364 |
def _check_start_stop(raw, start, stop):
"""Aux function."""
out = list()
for st in (start, stop):
if st is None:
out.append(st)
else:
try:
out.append(_ensure_int(st))
except TypeError: # not int-like
out.append(raw.time_as_index(st)[0])
return out
| 5,349,365 |
def main(function, js):
"""Console script for es_reindex."""
args = json.loads(js)
config = args['config']
# e.g. --json='{"config": "./es_index_tool/data/example_config.json"}'
tool = ESIndexTool(config_path=config)
if 'id' not in args:
tool.reindex()
else:
# e.g., --json='{"id": "2kS98AsytSXb8prbH"}'
id_ = args['id']
tool.index_document_by_id(id_)
return 0
| 5,349,366 |
def read(
datapath,
qt_app=None,
dataplus_format=True,
gui=False,
start=0,
stop=None,
step=1,
convert_to_gray=True,
series_number=None,
use_economic_dtype=True,
dicom_expected=None,
orientation_axcodes="original",
**kwargs
):
"""Returns 3D data and its metadata.
# NOTE(:param qt_app:) If it is set to None (as default) all dialogs for series selection are performed in
terminal. If qt_app is set to QtGui.QApplication() dialogs are in Qt.
:param datapath: directory with input data, if url is give, the file is downloaded into `~/data/downloads/`
:param qt_app: Dialog destination. If None (default) -> terminal, if 'QtGui.QApplication()' -> Qt
:param dataplus_format: New data format. Metadata and data are returned in one structure.
:param gui: True if 'QtGui.QApplication()' instead of terminal should be used
:param int start: used for DicomReader, defines where 3D data reading should start
:param int stop: used for DicomReader, defines where 3D data reading should stop
:param int step: used for DicomReader, defines step for 3D data reading
:param bool convert_to_gray: if True -> RGB is converted to gray
:param int series_number: used in DicomReader, essential in metadata
:param use_economic_dtype: if True, casts 3D data array to less space consuming dtype
:param dicom_expected: set true if it is known that data is in dicom format. Set False to suppress
dicom warnings.
    :param orientation_axcodes: axis orientation codes, e.g. 'SPL' means inferior to Superior, anterior to
        Posterior, right to Left. The standard for NIfTI is RAS.
:return: tuple (data3d, metadata)
"""
# Simple read function. Internally calls DataReader.Get3DData()
dr = DataReader()
return dr.Get3DData(
datapath=datapath,
qt_app=qt_app,
dataplus_format=dataplus_format,
gui=gui,
start=start,
stop=stop,
step=step,
convert_to_gray=convert_to_gray,
series_number=series_number,
use_economic_dtype=use_economic_dtype,
dicom_expected=dicom_expected,
orientation_axcodes=orientation_axcodes,
**kwargs
)
| 5,349,367 |
def simulate(rmg, diffusion_limited=True):
"""
Simulate the RMG job and run the sensitivity analysis if it is on, generating
output csv files
    If diffusion_limited=True and the reactor is a liquid reactor, diffusion limitations are enforced;
    otherwise they are not.
"""
util.make_output_subdirectory(rmg.output_directory, 'solver')
for index, reaction_system in enumerate(rmg.reaction_systems):
if reaction_system.sensitive_species:
logging.info('Conducting simulation and sensitivity analysis of reaction system %s...' % (index + 1))
if reaction_system.sensitive_species == ['all']:
reaction_system.sensitive_species = rmg.reaction_model.core.species
else:
logging.info('Conducting simulation of reaction system %s...' % (index + 1))
reaction_system.attach(SimulationProfileWriter(
rmg.output_directory, index, rmg.reaction_model.core.species))
reaction_system.attach(SimulationProfilePlotter(
rmg.output_directory, index, rmg.reaction_model.core.species))
sens_worksheet = []
for spec in reaction_system.sensitive_species:
csvfile_path = os.path.join(rmg.output_directory, 'solver',
'sensitivity_{0}_SPC_{1}.csv'.format(index + 1, spec.index))
sens_worksheet.append(csvfile_path)
pdep_networks = []
for source, networks in rmg.reaction_model.network_dict.items():
pdep_networks.extend(networks)
model_settings = ModelSettings(tol_keep_in_edge=0, tol_move_to_core=1, tol_interrupt_simulation=1)
simulator_settings = rmg.simulator_settings_list[-1]
if isinstance(reaction_system, LiquidReactor):
if diffusion_limited:
rmg.load_database()
solvent_data = rmg.database.solvation.get_solvent_data(rmg.solvent)
diffusion_limiter.enable(solvent_data, rmg.database.solvation)
elif rmg.uncertainty is not None:
rmg.verbose_comments = True
rmg.load_database()
# Store constant species indices
if reaction_system.const_spc_names is not None:
reaction_system.get_const_spc_indices(rmg.reaction_model.core.species)
reaction_system.simulate(
core_species=rmg.reaction_model.core.species,
core_reactions=rmg.reaction_model.core.reactions,
edge_species=rmg.reaction_model.edge.species,
edge_reactions=rmg.reaction_model.edge.reactions,
surface_species=[],
surface_reactions=[],
pdep_networks=pdep_networks,
sensitivity=True if reaction_system.sensitive_species else False,
sens_worksheet=sens_worksheet,
model_settings=model_settings,
simulator_settings=simulator_settings,
)
if reaction_system.sensitive_species:
plot_sensitivity(rmg.output_directory, index, reaction_system.sensitive_species)
rmg.run_uncertainty_analysis()
| 5,349,368 |
def test_sombrero_start():
"""
    Checks that the second x and y values are correct.
"""
xbegin = -0.899999995500383
ybegin = 8.99884295089148e-06
test = so.stingray(0.18, -0.9, 0, 50)
assert math.isclose(test[0][1], xbegin)
assert math.isclose(test[1][1], ybegin)
| 5,349,369 |
def sum_squares2(n):
"""
Returns: sum of squares from 1 to n-1
Example: sum_squares(5) is 1+4+9+16 = 30
Parameter n: The number of steps
Precondition: n is an int > 0
"""
# Accumulator
total = 0
print('Before while')
x = 0
while x < n:
print('Start loop '+str(x))
total = total + x*x
x = x+1
print('End loop ')
print('After while')
return total
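# Usage sketch: the prints trace each loop iteration, and the result is
# 0 + 1 + 4 + 9 + 16 = 30.
assert sum_squares2(5) == 30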
| 5,349,370 |
def make_bgm(
music_path_list: list,
output_dir: str,
output_tmp_filename: str,
music_total_time: int = 0,
fade_time: list = [1000, 1000],
) -> str:
"""
    Adjust the BGM length to fit the video.
    Args:
        music_path_list (list): Paths of the BGM source files.
        output_dir (str): Output directory for the converted BGM file.
        output_tmp_filename (str): Filename of the converted BGM file.
        music_total_time (int, optional): Required BGM playback time in ms. Defaults to 0.
        fade_time (list, optional): BGM fade-in/fade-out durations in ms. Defaults to [1000, 1000].
    Returns:
        str: Path of the converted BGM file.
"""
    # INFO: concatenate multiple BGM tracks until the target duration is reached
before_music_time = 0
diff_time = 0
all_music = AudioSegment.empty()
flag = 0
while True:
if flag == 1:
break
for music_path in music_path_list:
_music = AudioSegment.from_file(music_path)
after_music_time = before_music_time + _music.duration_seconds * 1000
if after_music_time >= music_total_time:
                console.log("Currently adding track: {}".format(
                    str(music_path).split("/")[-1]))
                console.log("Current duration: {}ms".format(before_music_time))
diff_time = music_total_time - before_music_time
                console.log("About to trim: {}ms".format(diff_time))
all_music = all_music + _music[len(_music) - diff_time:]
                console.log("Trimming done, current duration: {}ms".format(
                    before_music_time + diff_time))
flag = 1
break
else:
                console.log("Currently adding track: {}".format(
                    str(music_path).split("/")[-1]))
                console.log("Current duration: {}ms".format(after_music_time))
before_music_time = after_music_time
all_music = all_music + _music
    # INFO: handle the transition details when two clips are joined together
bgm = all_music
fade_in_time, fade_out_time = fade_time
    # AudioSegment operations return new objects, so the faded result must be reassigned
    bgm = bgm.fade_in(fade_in_time).fade_out(fade_out_time)
    console.log("Exporting the audio file")
output_file_path = os.path.join(output_dir, output_tmp_filename)
bgm.export(output_file_path, format="mp3")
    console.log("Temporary audio file exported")
return output_file_path
| 5,349,371 |
def read_image(src):
"""Read and resize individual images"""
im = cv2.imread(src, cv2.IMREAD_COLOR)
im = cv2.resize(im, (COLS, ROWS), interpolation=cv2.INTER_CUBIC)
return im
| 5,349,372 |
def add_db_entry():
"""
Store received data to database if validation is okay
:return: validation information and error code
"""
data = request.get_json()
app.logger.debug("Received Data (POST)")
app.logger.debug(pformat(data))
# Make sure the data is valid
validation, err_code = DataValidator.validate_request_sanity(data)
if validation == "OK":
app.logger.info("<<<<VALIDATION OK>>>>")
validation, err_code = store_to_db(data)
else:
app.logger.error("<<<<VALIDATION NOT OK>>>>")
app.logger.error(pformat({"data": validation, "error_code": err_code}))
info_json = jsonify({"data": validation, "error_code": err_code})
return info_json, err_code
| 5,349,373 |
def pattern_classifier(data, pattern_threshold):
"""Return an array mask passing our selection."""
return data["key_pattern"] > pattern_threshold
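# Usage sketch: builds a boolean mask from a dict-like batch of arrays
# (the field name "key_pattern" comes from the function above).
import numpy as np

data = {"key_pattern": np.array([0.1, 0.7, 0.4])}
mask = pattern_classifier(data, pattern_threshold=0.5)  # array([False, True, False])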
| 5,349,374 |
def is_scalar(a) -> bool:
"""
Tests if a python object is a scalar (instead of an array)
Parameters
----------
a : object
Any object to be checked
Returns
-------
bool
Whether the input object is a scalar
"""
if isinstance(a, (list, tuple)):
return False
if hasattr(a, "__array__") and hasattr(a, "__len__"): # np.array(1) is scalar
return False
return True
| 5,349,375 |
def median(ts: TimeSeries, /, window_length: int = 3) -> TimeSeries:
"""
Calculate a moving median.
On n-dimensional data, filtering occurs on the first axis (time).
Parameters
----------
ts
Input TimeSeries
window_length
Optional. Kernel size, must be odd. The default is 3.
Example
-------
>>> ts = ktk.TimeSeries(time=np.arange(0, 0.5, 0.1))
>>> ts.data['data1'] = np.array([10., 11., 11., 20., 14., 15.])
>>> ts = ktk.filters.median(ts)
>>> ts.data['data1']
array([10., 11., 11., 14., 15., 15.])
"""
out_ts = ts.copy()
for key in ts.data:
window_shape = [1 for i in range(len(ts.data[key].shape))]
window_shape[0] = window_length
out_ts.data[key] = ndi.median_filter(
ts.data[key], size=window_shape)
return out_ts
| 5,349,376 |
def execute_read_query(connection, query):
"""Execute a read query on the postgres database.
Args:
connection (Psycopg2 Connection): The connection to the postgres database.
query (string): The SQL query to be run.
Returns:
        list(tuple): The results of the SQL query, or None if the query fails.
"""
logging.debug(f"Executing Read Query: {query}")
cursor = connection.cursor()
result = None
try:
cursor.execute(query)
result = cursor.fetchall()
logging.debug("Query was successful.")
return result
except OperationalError as e:
logging.error(f"The error '{e}' occurred")
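# Usage sketch (hypothetical connection parameters; assumes psycopg2 supplies the
# connection, matching the OperationalError handled above):
import psycopg2

connection = psycopg2.connect(dbname="mydb", user="me", password="secret", host="localhost")
rows = execute_read_query(connection, "SELECT id, name FROM users LIMIT 10;")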
| 5,349,377 |
def extract_strings_from_file(filename):
"""
extracts strings from a provided filename
    Returns a list of the extracted strings found in the provided filename.
Entries are stripped when processing and lines leading with a comment are
ignored.
Args:
filename: the filename
Returns:
the list of strings
"""
filelist = []
if os.path.isfile(filename):
with open(filename) as f:
for raw_line in f:
line = raw_line.strip()
if not line or line.startswith('#'):
continue
filelist.append(line)
return filelist
| 5,349,378 |
def event_income():
"""
>>> SELECT event.name, SUM((((registration.ticket * (100 - registration.discount)) * event.ticket_price) / 100)) AS price
FROM registration INNER JOIN event ON (registration.event_id = event.id) GROUP BY event.name
"""
tickets_price = F('ticket') * (100 - F('discount')) * F('event__ticket_price') / 100
registration_list = Registration.objects.values('event__name')
# ==> group by event
events_income = registration_list.annotate(income=Sum(tickets_price))
for e in events_income:
print("{event__name} reaches {income}$ as an income".format(**e))
| 5,349,379 |
def convert_polynomial_coefficients(A_in, B_in, C_in, D_in, oss=False, inverse=False,
parent_aperture=None):
"""Emulate some transformation made in nircam_get_polynomial_both.
Written by Johannes Sahlmann 2018-02-18, structure largely based on nircamtrans.py code
by Colin Cox.
Parameters
----------
A_in : numpy array
polynomial coefficients
B_in : numpy array
polynomial coefficients
C_in : numpy array
polynomial coefficients
D_in : numpy array
polynomial coefficients
oss : bool
Whether this is an OSS aperture or not
inverse : bool
Whether this is forward or backward/inverse transformation
parent_aperture : str
Name of parent aperture
Returns
-------
AR, BR, CR, DR, V3SciXAngle, V3SciYAngle, V2Ref, V3Ref : tuple of arrays and floats
Converted polynomial coefficients
"""
if inverse is False:
# forward direction
V2Ref = A_in[0]
V3Ref = B_in[0]
A_in[0] = 0.0
B_in[0] = 0.0
V3SciXAngle = np.rad2deg(np.arctan2(A_in[1], B_in[1])) # V3SciXAngle
V3SciYAngle = np.rad2deg(np.arctan2(A_in[2], B_in[2]))
V3Angle = V3SciYAngle # V3SciYAngle
if abs(V3Angle) > 90.0:
V3Angle = V3Angle - math.copysign(180.0, V3Angle)
# AR, BR = rotate_coefficients(A_in, B_in, V3Angle)
AR, BR = add_rotation(A_in, B_in, -1*V3Angle)
CS = shift_coefficients(C_in, V2Ref, V3Ref)
DS = shift_coefficients(D_in, V2Ref, V3Ref)
CR = prepend_rotation_to_polynomial(CS, V3Angle)
DR = prepend_rotation_to_polynomial(DS, V3Angle)
if oss:
# OSS apertures
V3Angle = copy.deepcopy(V3SciYAngle)
else:
# non-OSS apertures
if abs(V3SciYAngle) > 90.0: # e.g. NRCA2_FULL
# print 'Reverse Y axis direction'
AR = -flip_y(AR)
BR = flip_y(BR)
CR = flip_x(CR)
DR = -flip_x(DR)
else: # e.g NRCA1_FULL
# print 'Reverse X axis direction'
AR = -flip_x(AR)
BR = flip_x(BR)
CR = -flip_x(CR)
DR = flip_x(DR)
V3SciXAngle = V3SciXAngle - math.copysign(180.0, V3SciXAngle)
# V3Angle = betaY # Cox: Changed 4/29 - might affect rotated polynomials
V3SciYAngle = V3Angle
return AR, BR, CR, DR, V3SciXAngle, V3SciYAngle, V2Ref, V3Ref
else:
siaf_detector_layout = read.read_siaf_detector_layout()
master_aperture_names = siaf_detector_layout['AperName'].data
if parent_aperture.AperName not in master_aperture_names:
raise RuntimeError
polynomial_degree = parent_aperture.Sci2IdlDeg
V3SciYAngle = copy.deepcopy(parent_aperture.V3SciYAngle) # betaY
V3SciXAngle = parent_aperture.V3SciXAngle # betaX
betaY = V3SciYAngle + parent_aperture.DetSciYAngle
# master aperture is never OSS
if abs(betaY) > 90.0: # e.g. NRCA2_FULL
# print 'Reverse Y axis direction'
AR = -flip_y(A_in)
BR = flip_y(B_in)
CR = flip_x(C_in)
DR = -flip_x(D_in)
else: # e.g NRCA1_FULL
# print 'Reverse X axis direction'
AR = -flip_x(A_in)
BR = flip_x(B_in)
CR = -flip_x(C_in)
DR = flip_x(D_in)
V3SciXAngle = revert_correct_V3SciXAngle(V3SciXAngle)
# rotate the other way
# A, B = rotate_coefficients(AR, BR, -V3SciYAngle)
A, B = add_rotation(AR, BR, +1*V3SciYAngle)
A[0] = parent_aperture.V2Ref
B[0] = parent_aperture.V3Ref
# now invert the last part of nircam_get_polynomial_forward
AFS = A
BFS = B
# shift by parent aperture reference point
AF = shift_coefficients(AFS, -parent_aperture.XDetRef, -parent_aperture.YDetRef)
BF = shift_coefficients(BFS, -parent_aperture.XDetRef, -parent_aperture.YDetRef)
CS = prepend_rotation_to_polynomial(CR, -V3SciYAngle)
DS = prepend_rotation_to_polynomial(DR, -V3SciYAngle)
C = shift_coefficients(CS, -parent_aperture.V2Ref, -parent_aperture.V3Ref)
D = shift_coefficients(DS, -parent_aperture.V2Ref, -parent_aperture.V3Ref)
C[0] += parent_aperture.XDetRef
D[0] += parent_aperture.YDetRef
return AF, BF, C, D
| 5,349,380 |
def spinner_runner_factory(spec, t_compile, extra_commands):
"""Optimized spinner runner, which receives the spec of an animation, and controls
the flow of cycles and frames already compiled to a certain screen length and with
wide chars fixed, thus avoiding any overhead in runtime within complex spinners,
while allowing their factories to be garbage collected.
Args:
spec (SimpleNamespace): the spec of an animation
t_compile (about_time.Handler): the compile time information
extra_commands (tuple[tuple[cmd, list[Any], dict[Any]]]): requested extra commands
Returns:
a spinner runner
"""
def spinner_runner():
"""Wow, you are really deep! This is the runner of a compiled spinner.
Every time you call this function, a different generator will kick in,
which yields the frames of the current animation cycle. Enjoy!"""
yield from next(cycle_gen) # I love generators!
def runner_check(*args, **kwargs): # pragma: no cover
return check(spec, *args, **kwargs)
spinner_runner.__dict__.update(spec.__dict__, check=fix_signature(runner_check, check, 1))
spec.__dict__.update(t_compile=t_compile, runner=spinner_runner) # set after the update above.
sequential(spec)
apply_extra_commands(spec, extra_commands)
cycle_gen = spec.strategy(spec.data)
return spinner_runner
| 5,349,381 |
def _project_im_rois(im_rois, im_scale_factor, im_crop):
"""Project image RoIs into the rescaled training image."""
im_rois[:, 0] = np.minimum(
np.maximum(im_rois[:, 0], im_crop[0]), im_crop[2])
im_rois[:, 1] = np.minimum(
np.maximum(im_rois[:, 1], im_crop[1]), im_crop[3])
im_rois[:, 2] = np.maximum(
np.minimum(im_rois[:, 2], im_crop[2]), im_crop[0])
im_rois[:, 3] = np.maximum(
np.minimum(im_rois[:, 3], im_crop[3]), im_crop[1])
crop = np.tile(im_crop[:2], [im_rois.shape[0], 2])
rois = (im_rois - crop) * im_scale_factor
# For YAROIPooling Layer
# rois = (im_rois - crop)
# width = im_crop[2] - im_crop[0]
# height = im_crop[3] - im_crop[1]
# rois[:, 0] = rois[:, 0] / width
# rois[:, 1] = rois[:, 1] / height
# rois[:, 2] = rois[:, 2] / width
# rois[:, 3] = rois[:, 3] / height
return rois
| 5,349,382 |
def evaluate_error_absolute(poses_to_test: List[Tuple[str, kapture.PoseTransform]],
poses_ground_truth: List[Tuple[str, kapture.PoseTransform]]
) -> List[Tuple[str, float, float]]:
"""
Evaluate the absolute error for poses to a ground truth.
:param poses_to_test: poses to test
:param poses_ground_truth: reference poses
:return: list of error evaluation
"""
poses_ground_truth_as_dict = {name: pose for name, pose in poses_ground_truth}
result = [(name,) + world_pose_transform_distance(pose, poses_ground_truth_as_dict[name])
for (name, pose) in poses_to_test]
return result
| 5,349,383 |
def restricted_offset(parent_dimensions, size, offset):
""" Get offset restricted by various factors
"""
limit_x = (parent_dimensions[0] - size[0]) / 2
limit_y = (parent_dimensions[1] - size[1]) / 2
x = clamp(offset[0], -limit_x, limit_x)
y = clamp(offset[1], -limit_y, limit_y)
return x, y
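# `clamp` is not defined in this snippet; a minimal helper consistent with the call
# above could look like this (an assumption, not the original implementation):
def clamp(value, minimum, maximum):
    """Restrict value to the closed interval [minimum, maximum]."""
    return max(minimum, min(value, maximum))

# Example: keep a (50, 50) child centred within a (200, 100) parent;
# the x offset gets clamped to the 75-unit limit -> (75.0, -10)
print(restricted_offset((200, 100), (50, 50), (120, -10)))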
| 5,349,384 |
def plot_object_var(ax, arr, top=10, color=DEFAULT_COLOR, label=None, alpha=1.):
"""Plots a bar plot into an matplotlib axe.
Parameters
----------
ax: plt.axes.Axes
axe where to add the plot
arr: array like
Array of object values
color: str (default DEFAULT_COLOR)
color of the plot
label: str (default None)
label of the plot
alpha: float (default 1.)
opacity
Raises
------
TypeError:
arr is not an array like
TypeError:
        arr is not an object array
"""
if not utils.is_array_like(arr):
raise TypeError('arr is not an array like')
if utils.find_dtype(arr) != 'object':
raise TypeError('arr is not an object array')
if type(arr) in [list, np.ndarray]:
arr = pd.Series(arr)
v_c = arr.value_counts().sort_values(ascending=False)
v_c = v_c if len(v_c) <= top else v_c[:top]
x, y = v_c.index, v_c.values
bar = ax.bar(x, y, color=color, label=label, alpha=alpha)
| 5,349,385 |
def wt_sgrna(target='none'):
"""
Return the wildtype sgRNA sequence.
The construct is composed of 3 domains: stem, nexus, and hairpins. The
stem domain encompasses the lower stem, the bulge, and the upper stem.
Attachments are allowed pretty much anywhere, although it would be prudent
to restrict this based on the structural biology of Cas9 if you're planning
to make random attachments.
"""
sgrna = Construct('wt')
sgrna += spacer(target)
sgrna += Domain('stem', 'GUUUUAGAGCUAGAAAUAGCAAGUUAAAAU')
sgrna += Domain('nexus', 'AAGGCUAGUCCGU')
sgrna += Domain('hairpins', 'UAUCAACUUGAAAAAGUGGCACCGAGUCGGUGC')
sgrna += Domain('tail', 'UUUUUU')
sgrna['stem'].expected_fold = '((((((..((((....))))....))))))'
sgrna['hairpins'].expected_fold = '.....((((....)))).((((((...))))))'
sgrna['stem'].style = 'green'
sgrna['nexus'].style = 'red'
sgrna['hairpins'].style = 'blue'
sgrna['stem'].attachment_sites = 'anywhere'
sgrna['nexus'].attachment_sites = 'anywhere'
sgrna['hairpins'].attachment_sites = 'anywhere'
return sgrna
| 5,349,386 |
def export_nodes(nodes, csvfilepath):
"""
Writes the standard nodes data in `nodes` to the CSV file at `csvfilepath`.
"""
with open(csvfilepath, "w") as csv_file:
csvwriter = csv.DictWriter(csv_file, STANDARD_NODE_HEADER_V0)
csvwriter.writeheader()
for node in nodes:
noderow = node_to_rowdict(node)
csvwriter.writerow(noderow)
return csvfilepath
| 5,349,387 |
def _var_network(graph,
add_noise=True,
inno_cov=None,
invert_inno=False,
T=100,
initial_values=None):
"""Returns a vector-autoregressive process with correlated innovations.
Useful for testing.
Example:
graph=numpy.array([[[0.2,0.,0.],[0.5,0.,0.]],
[[0.,0.1,0. ],[0.3,0.,0.]]])
represents a process
X_1(t) = 0.2 X_1(t-1) + 0.5 X_2(t-1) + eps_1(t)
X_2(t) = 0.3 X_2(t-1) + 0.1 X_1(t-2) + eps_2(t)
with inv_inno_cov being the negative (except for diagonal) inverse
covariance matrix of (eps_1(t), eps_2(t)) OR inno_cov being
the covariance. Initial values can also be provided.
Parameters
----------
graph : array
Lagged connectivity matrices. Shape is (n_nodes, n_nodes, max_delay+1)
add_noise : bool, optional (default: True)
Flag to add random noise or not
inno_cov : array, optional (default: None)
Covariance matrix of innovations.
    invert_inno : bool, optional (default: False)
Flag to negate off-diagonal elements of inno_cov and invert it before
using it as the covariance matrix of innovations
T : int, optional (default: 100)
Sample size.
    initial_values : array, optional (default: None)
Initial values for each node. Shape is (n_nodes, max_delay+1), i.e. must
be of shape (graph.shape[1], graph.shape[2]).
Returns
-------
X : array
Array of realization.
"""
n_nodes, _, period = graph.shape
time = T
# Test stability
_check_stability(graph)
# Generate the returned data
data = np.random.randn(n_nodes, time)
# Load the initial values
if initial_values is not None:
# Check the shape of the initial values
_check_initial_values(initial_values, data[:, :period].shape)
# Input the initial values
data[:, :period] = initial_values
# Check if we are adding noise
noise = None
if add_noise:
# Use inno_cov if it was provided
if inno_cov is not None:
noise = _generate_noise(inno_cov,
time=time,
use_inverse=invert_inno)
# Otherwise just use uncorrelated random noise
else:
noise = np.random.randn(time, n_nodes)
for a_time in range(period, time):
data_past = np.repeat(
data[:, a_time-period:a_time][:, ::-1].reshape(1, n_nodes, period),
n_nodes, axis=0)
data[:, a_time] = (data_past*graph).sum(axis=2).sum(axis=1)
if add_noise:
data[:, a_time] += noise[a_time]
return data.transpose()
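# Quick sketch: simulate the two-variable VAR process from the docstring above.
import numpy as np

graph = np.array([[[0.2, 0.0, 0.0], [0.5, 0.0, 0.0]],
                  [[0.0, 0.1, 0.0], [0.3, 0.0, 0.0]]])
data = _var_network(graph, add_noise=True, T=1000)
print(data.shape)  # (1000, 2)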
| 5,349,388 |
def build_features(component, borders, initial_group):
"""
Integrate peaks within similarity components and build features
:param component: a groupedROI object
:param borders: dict - key is a sample name, value is a (n_borders x 2) matrix;
predicted, corrected and transformed to normal values borders
:param initial_group: a number of mzrt group
:return: None (in-place correction)
"""
rtdiff = (component.rois[0].rt[1] - component.rois[0].rt[0])
scandiff = (component.rois[0].scan[1] - component.rois[0].scan[0])
frequency = scandiff / rtdiff
features = []
labels = np.unique(component.grouping)
for label in labels:
# compute number of peaks
peak_number = None
for i, sample in enumerate(component.samples):
# to do: it would be better to have mapping from group to samples and numbers
if component.grouping[i] == label:
peak_number = len(borders[sample])
for p in range(peak_number):
# build feature
intensities = []
samples = []
rois = []
feature_borders = []
shifts = []
rtmin, rtmax, mz = None, None, None
for i, sample in enumerate(component.samples):
# to do: it would be better to have mapping from group to samples and numbers
if component.grouping[i] == label:
assert len(borders[sample]) == peak_number
begin, end = borders[sample][p]
intensity = np.sum(component.rois[i].i[begin:end])
intensities.append(intensity)
samples.append(sample)
rois.append(component.rois[i])
feature_borders.append(borders[sample][p])
shifts.append(component.shifts[i])
if mz is None:
mz = component.rois[i].mzmean
rtmin = component.rois[i].rt[0] + begin / frequency
rtmax = component.rois[i].rt[0] + end / frequency
else:
mz = (mz * i + component.rois[i].mzmean) / (i + 1)
rtmin = min((rtmin, component.rois[i].rt[0] + begin / frequency))
rtmax = max((rtmax, component.rois[i].rt[0] + end / frequency))
features.append(Feature(samples, rois, feature_borders, shifts,
intensities, mz, rtmin, rtmax,
initial_group, label))
# to do: there are a case, when borders are empty
# assert len(features) != 0
return features
| 5,349,389 |
def test_rl():
"""Test the RL algorithm using an openai gym environment"""
ENVS = ('Pendulum-v0', 'MountainCarContinuous-v0', 'BipedalWalker-v3', 'LunarLanderContinuous-v2',
'BipedalWalkerHardcore-v3')
ENV = ENVS[0]
model_dir = os.path.join(os.getcwd(), 'models')
os.makedirs(os.path.join(model_dir, str(datetime.date.today()) + '-' + ENV), exist_ok=True)
save_dir = os.path.join(model_dir, str(datetime.date.today()) + '-' + ENV)
env = gym.make(ENV)
iter_per_episode = 200
n_state = env.observation_space.shape
n_action = env.action_space.shape[0]
action_bound = 1
env.seed(1234)
np.random.seed(1234)
num_episodes = 1001
PER = False
batch_size = 128
#Pendulum
layer_1_nodes, layer_2_nodes = 128, 128
tau = 0.001
actor_lr, critic_lr = 0.001, 0.0001
GAMMA = 0.99
ep = 0.001
actor_noise = OrnsteinUhlenbeck(np.zeros(n_action))
agent = DDPG(n_state, n_action, action_bound, layer_1_nodes, layer_2_nodes, actor_lr, critic_lr, PER, GAMMA,
tau, batch_size, save_dir)
load_models = False
save = True
# If loading model, a gradient update must be called once before loading weights
if load_models:
load_model(agent)
for i in range(num_episodes):
s = env.reset()
sum_reward = 0
agent.sum_q = 0
agent.actor_loss = 0
agent.critic_loss = 0
while True:
env.render()
a = agent.action(s)
a_clip = a + actor_noise()
s1, r, done, _ = env.step(a_clip)
# Store in replay memory
if PER:
error = 1 # D_i = max D
agent.memory.add(error, (
(np.reshape(s, (n_state[0],)), np.reshape(a_clip, (n_action,)), r, np.reshape(s1, (n_state[0],)), done)))
else:
agent.memory.add(
(np.reshape(s, (n_state[0],)), np.reshape(a_clip, (n_action,)), r, np.reshape(s1, (n_state[0],)), done))
agent.train()
sum_reward += r
s = s1
if done:
print(f'Episode: {i}, reward: {int(sum_reward)}')
# rewards.append(sum_reward)
print('===========')
if save:
agent.save_model()
break
| 5,349,390 |
def get_cred_fh(library: str) -> str:
"""
Determines correct SimplyE credential file
"""
if library == "BPL":
return ".simplyE/bpl_simply_e.yaml"
elif library == "NYPL":
return ".simplyE/nyp_simply_e.yaml"
else:
        raise ValueError("Invalid library code passed")
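# Usage sketch: resolve the SimplyE credential file for a library code.
print(get_cred_fh("BPL"))   # ".simplyE/bpl_simply_e.yaml"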
| 5,349,391 |
def create_app(enviornment):
"""Construct the core application."""
app = Flask(__name__, static_url_path = "")
app.config.from_object(Config)
if enviornment == 'test':
app.config['TESTING'] = True
return app
db.init_app(app)
with app.app_context():
# Imports
from app import routes
# Create tables for our models
# db.create_all()
return app
| 5,349,392 |
async def on_member_update(before: Member, after: Member):
""" """
# todo
pass
| 5,349,393 |
def list_characters(caller, character_list, roster_type="Active Characters", roster=None,
titles=False, hidden_chars=None, display_afk=False, use_keys=True):
"""
Formats lists of characters. If we're given a list of 'hidden_chars', we compare
the list of names in character_list to that, and if any match, we use the data
in there for the character for things such as idle timer. Otherwise, we use
the data from the roster object for the name match to propagate our fields.
If display_afk is true, we list the idle timer for each character.
"""
# format
message = format_header(roster_type)
if not character_list or not roster:
message += "\nNo characters found."
else:
if display_afk:
table = prettytable.PrettyTable(["{wName #",
"{wSex",
"{wAge",
"{wFealty{n",
"{wConcept{n",
"{wSR{n",
"{wIdle{n"])
else:
table = prettytable.PrettyTable(["{wName #",
"{wSex",
"{wAge",
"{wFealty{n",
"{wConcept{n",
"{wSR{n"])
for char in character_list:
try:
if use_keys:
name = char.key
else:
name = char.name
charob = char
char = str(char)
except AttributeError:
# this was not an object, but just a name
name = char
charob = None
sex = "-"
age = "-"
house = "-"
concept = "-"
srank = "-"
afk = "-"
# check if the name matches anything in the hidden characters list
hide = False
if charob and not use_keys and hasattr(charob, 'is_disguised') and charob.is_disguised:
hide = True
if not charob and hidden_chars:
# convert both to lower case for case-insensitive matching
match_list = [ob for ob in hidden_chars if ob.name.lower() == char.lower()]
if match_list:
charob = match_list[0]
hide = True
if charob:
if not use_keys and charob.name and name != charob.name and caller.check_permstring("Builders"):
name += "{w(%s){n" % charob.name
if titles:
title = charob.db.longname
if title and not hide:
name = '{n' + title.replace(char, '{c' + char + '{n')
# yes, yes, I know they're not the same thing.
# sex is only 3 characters and gender is 5.
sex = charob.db.gender
if not sex or hide:
sex = "-"
sex = sex[0].capitalize()
age = charob.db.age
if not age or hide:
age = "-"
house = charob.db.fealty
if not house or hide:
house = "-"
concept = charob.db.concept
if not concept or hide:
concept = "-"
srank = charob.db.social_rank
if not srank or hide:
srank = "-"
if not titles or hide:
name = "{c" + name + "{n"
if display_afk:
afk = utils.time_format(charob.idle_time or 0)
if display_afk:
table.add_row([name, sex, age, house, concept[:25], srank, afk])
else:
table.add_row([name, sex, age, house, concept[:30], srank])
message += "\n%s" % table
message += "\n"
arx_more.msg(caller, message, justify_kwargs=False)
| 5,349,394 |
def initLogging(logFilename):
"""Init for logging
"""
logging.basicConfig(
level = logging.DEBUG,
#format = '%(name)-12s.%(lineno)-4d %(levelname)-8s %(message)s',
format = 'LINE %(lineno)-4d %(levelname)-8s %(message)s',
# ('%(name)-12s %(asctime)s %(levelname)-8s %(message)s', '%a, %d %b %Y %H:%M:%S',)
datefmt = '%m-%d %H:%M')#,
#filename = logFilename,
#filemode = 'w');
    # define a Handler which writes INFO messages or higher to sys.stderr
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    # set a format which is simpler for console use
    formatter = logging.Formatter('LINE %(lineno)-4d : %(levelname)-8s %(message)s')
    # tell the handler to use this format
    console.setFormatter(formatter)
    # attach the handler to the root logger so the console output is actually emitted
    logging.getLogger('').addHandler(console)
| 5,349,395 |
def test_list_g_day_max_length_nistxml_sv_iv_list_g_day_max_length_1_2(mode, save_output, output_format):
"""
Type list/gDay is restricted by facet maxLength with value 5.
"""
assert_bindings(
schema="nistData/list/gDay/Schema+Instance/NISTSchema-SV-IV-list-gDay-maxLength-1.xsd",
instance="nistData/list/gDay/Schema+Instance/NISTXML-SV-IV-list-gDay-maxLength-1-2.xml",
class_name="NistschemaSvIvListGDayMaxLength1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 5,349,396 |
def destroyDomain():
"""
Delete the domain. Warning, deletes all items as well!
"""
global sdbConnection
global sdbDomain
sdbConnect()
try:
sdbConnection.delete_domain(config.AWS_SDB_DOMAIN_NAME)
sdbDomain = None
return
except Exception as e:
debug(e)
raise
| 5,349,397 |
def weighted_percentiles(a, percentiles, weights=None):
"""Compute weighted percentiles by using interpolation of the weighted ECDF.
Parameters
----------
a : np.ndarray
Vector of data for computing quantiles
percentiles : np.ndarray
Vector of percentiles in [0, 100]
weights : np.ndarray
Vector of non-negative weights. Not required to sum to one.
Returns
-------
percentiles : np.ndarray"""
a = np.array(a)
percentiles = np.array(percentiles)
quantiles = percentiles / 100.
if weights is None:
weights = np.ones(len(a))
else:
weights = np.array(weights)
assert np.all(weights > 0), 'Weights must be > 0'
assert np.all(quantiles >= 0) and np.all(quantiles <= 1), 'Percentiles must be in [0, 100]'
sorti = np.argsort(a)
a = a[sorti]
weights = weights[sorti]
"""Two definitions for the weighted eCDF. See _plotSolutions() below for a comparison.
Note that there are also several options in R for computing a weighted quantile,
but I did not fully understand the motivation for each. The chosen option here was intuitive to me
and agreed well with the empirical solution below.
https://github.com/harrelfe/Hmisc/R/wtd.stats.s"""
# ecdf = np.cumsum(weights) / weights.sum()
ecdf = (np.cumsum(weights) - 0.5 * weights) / np.sum(weights)
return np.interp(quantiles, ecdf, a)
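# Quick sanity sketch: with equal weights the result should track the unweighted
# empirical quantiles (interpolation details may differ slightly from np.percentile).
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=1000)
print(weighted_percentiles(x, [25, 50, 75]))
print(np.percentile(x, [25, 50, 75]))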
| 5,349,398 |
def mirror_image(images: List[str], registry: Registry):
"""Synchronize all source images to target registry, only pushing changed layers."""
sync_config = SyncConfig(
version=1,
creds=[registry.creds],
sync=[sync_asset(image, registry) for image in images],
)
with NamedTemporaryFile(mode="w") as tmpfile:
yaml.safe_dump(sync_config, tmpfile)
proc = subprocess.Popen(
["./regsync", "once", "-c", tmpfile.name, "-v", "debug"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding="utf-8",
)
while proc.returncode is None:
for line in proc.stdout:
print(line.strip())
proc.poll()
| 5,349,399 |