content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M)
---|---|
def genmove(proc, colour, pluck_random=True):
""" Send either a `genmove` command to the client, or generate a random
move until it is accepted by the client """
if pluck_random and random() < 0.05:
for _count in range(100):
proc.stdin.write('1000 play %s %s\n' % (colour, random_vertex(),))
proc.stdin.flush()
for line in proc.stdout:
line = (str(line) or '').strip()
print(line)
if line.startswith('=1000'):
vertex = line.split(' ', maxsplit=2)[-1].strip()
return vertex
elif line.startswith('?1000'):
break
return 'pass'
else:
proc.stdin.write('2000 genmove %s\n' % (colour,))
proc.stdin.flush()
for line in proc.stdout:
line = (str(line) or '').strip()
print(line)
if line.startswith('=2000'):
vertex = line.split(' ', maxsplit=2)[-1].strip()
return vertex
return None
| 5,346,600 |
def geojson_to_meta_str(txt):
""" txt is assumed to be small
"""
vlayer = QgsVectorLayer(txt, "tmp", "ogr")
crs_str = vlayer.sourceCrs().toWkt()
wkb_type = vlayer.wkbType()
geom_str = QgsWkbTypes.displayString(wkb_type)
feat_cnt = vlayer.featureCount()
return geom_str, crs_str, feat_cnt
| 5,346,601 |
def tdf_UppestID(*args):
"""
* Returns ID 'ffffffff-ffff-ffff-ffff-ffffffffffff'.
:rtype: Standard_GUID
"""
return _TDF.tdf_UppestID(*args)
| 5,346,602 |
def blend_multiply(cb: float, cs: float) -> float:
"""Blend mode 'multiply'."""
return cb * cs
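# Usage sketch (not part of the original snippet): 'multiply' darkens, so blending
# a mid-grey backdrop (0.5) with a mid-grey source (0.5) gives 0.25, while a white
# backdrop (1.0) leaves the source unchanged.
assert blend_multiply(0.5, 0.5) == 0.25
assert blend_multiply(1.0, 0.3) == 0.3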
| 5,346,603 |
def print_data_distribution(y_classes, class_names):
"""
:param y_classes: class of each instance, for example, if there are 3 classes, and y[i] is [1,0,0], then instance[i] belongs to class[0]
:param class_names: name of each class
    :return: (pro, num) -- the class names and the instance count for each class
"""
count = np.zeros(len(class_names))
pro = []
num = []
for y in y_classes:
class_index = np.argmax(y)
count[class_index] = count[class_index] + 1
for i, class_name in enumerate(class_names):
print(class_name, count[i])
pro.append(class_name)
num.append(count[i])
return pro, num
| 5,346,604 |
def reorder_conj_pols(pols):
"""
Reorders a list of pols, swapping pols that are conjugates of one another.
For example ('xx', 'xy', 'yx', 'yy') -> ('xx', 'yx', 'xy', 'yy')
This is useful for the _key2inds function in the case where an antenna
pair is specified but the conjugate pair exists in the data. The conjugated
data should be returned in the order of the polarization axis, so after conjugating
the data, the pols need to be reordered.
For example, if a file contains antpair (0, 1) and pols 'xy' and 'yx', but
the user requests antpair (1, 0), they should get:
[(1x, 0y), (1y, 0x)] = [conj(0y, 1x), conj(0x, 1y)]
Args:
pols: Polarization array (strings or ints)
Returns:
conj_order: Indices to reorder polarization axis
"""
    if not isinstance(pols, collections.abc.Iterable):
raise ValueError('reorder_conj_pols must be given an array of polarizations.')
cpols = np.array([conj_pol(p) for p in pols]) # Array needed for np.where
conj_order = [np.where(cpols == p)[0][0] if p in cpols else -1 for p in pols]
if -1 in conj_order:
raise ValueError('Not all conjugate pols exist in the polarization array provided.')
return conj_order
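# Usage sketch (assumes conj_pol() from the same module, which swaps e.g. 'xy' and 'yx'):
# for pols ('xx', 'xy', 'yx', 'yy') the conjugates are ('xx', 'yx', 'xy', 'yy'),
# so the returned index order is [0, 2, 1, 3].
#   order = reorder_conj_pols(['xx', 'xy', 'yx', 'yy'])
#   assert list(order) == [0, 2, 1, 3]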
| 5,346,605 |
def dx_login(project_name):
"""
    Check whether the user has logged in using the dx toolkit, then select the given project
    :return: None; raises AssetBuilderException if the dx command is missing or fails
"""
cmd = ["dx", "whoami"]
try:
cmd_out = subprocess.check_output(cmd)
except subprocess.CalledProcessError as cp:
raise AssetBuilderException("Failed to run the dx command: " + str(cmd))
except OSError:
raise AssetBuilderException("dx command is not found. Make sure that the dx toolkit is installed on this system.")
try:
cmd_out = subprocess.check_output(["dx", "select", project_name])
except subprocess.CalledProcessError as cp:
raise AssetBuilderException("Failed to select the project: " + project_name)
except OSError:
raise AssetBuilderException("dx command is not found. Make sure that the dx toolkit is installed on this system.")
| 5,346,606 |
def get_machine_from_uuid(uuid):
"""Helper function that returns a Machine instance of this uuid."""
machine = Machine()
machine.get_from_uuid(uuid)
return machine
| 5,346,607 |
def select_loop_directional(edge, directional = True, direction = 0):
"""
    *Bugs: Improve selection of edges that don't have two faces
Selects more than intended
FEATURES:
*Select border if the selection is in a border
"""
counter = 0
iterations = 0
selection = [edge]
selected = selection
new_selection = selection
iterate = True
directionality_loop = True
mesh = get_bmesh()
update_indexes(mesh, edges = True)
while directionality_loop and counter < 2:
while iterations < ITERATION_LIMIT and iterate:
print("")
print("----------------------------")
print(iterations)
print("Current Edge")
print(selection)
if direction == 0:
new_selection = [selection[0].link_loops[0].link_loop_next.link_loop_radial_next.link_loop_next.edge]
else:
new_selection = [selection[0].link_loops[0].link_loop_prev.link_loop_radial_next.link_loop_prev.edge]
if new_selection[0].select:
print("CHANGE DIRECTION")
if direction == 0:
new_selection = [selection[0].link_loops[0].link_loop_prev.link_loop_radial_next.link_loop_prev.edge]
else:
new_selection = [selection[0].link_loops[0].link_loop_next.link_loop_radial_next.link_loop_next.edge]
#Check if new selection is still selected after correcting direction
if new_selection[0].select:
print("COMPLETE LAP")
iterate = False
if len(list_intersection(list(new_selection[0].verts),list(selection[0].verts)))< 1:
#Correct selection for cases where theres holes close by
print("HOLES CLOSEBY, CORRECTING")
new_selection = [selection[0].link_loops[0].link_loop_radial_next.link_loop_prev.link_loop_radial_next.link_loop_prev.edge]
if len([face for face in new_selection[0].link_faces if (selection[0] in face.edges)]) > 0:
#Make sure you cant accidentally select a loop on top or below of it
print("LOOP WILL JUMP ROW")
new_selection = selection
iterate = False
            if len([face for face in new_selection[0].link_faces if len(list(face.verts)) != 4]) != 0:
#End selection on ngons or triangles
print("END LOOP")
iterate = False
selection = new_selection
new_selection[0].select = True
iterations += 1
#If not directional reset and start the other way
if not directional:
iterate = True
iterations = 0
direction = 1
selection = [edge]
new_selection = selection
else:
directionality_loop = False
counter += 1
| 5,346,608 |
def convert_numbers(text):
"""Convert numbers to number words"""
tokens = []
for token in text.split(" "):
try:
word = w2n.num_to_word(token)
tokens.append(word)
except:
tokens.append(token)
return " ".join(tokens)
| 5,346,609 |
def init_emulator(rom: bytes):
""" For use in interactive mode """
emulator = NitroEmulator()
emulator.load_nds_rom(rom, True)
return emulator
| 5,346,610 |
def sub_module_name_of_named_params(named_params: kParamDictType, module_name_sub_dict: Dict[str, str]) \
-> Union[Dict[str, nn.Parameter], Dict[str, torch.Tensor]]:
"""Sub named_parameters key's module name part with module_name_sub_dict.
Args:
named_params: Key-value pair of param name and param value.
module_name_sub_dict: Module names' sub dict.
Returns:
named parameters whose module name part of it's param name is subbed by module_name_sub_dict.
"""
sub_named_params = dict()
for module_param_name, value in named_params.items():
param_name, module_name = map(lambda inverse_name: inverse_name[::-1],
module_param_name[::-1].split('.', maxsplit=1))
if module_name not in module_name_sub_dict:
sub_named_params[module_param_name] = value
else:
sub_named_params[module_name_sub_dict[module_name] + '.' + param_name] = value
return sub_named_params
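# Usage sketch (hypothetical parameter names): only params whose module part matches
# a key in the sub dict exactly are renamed; everything else is passed through.
#   params = {'encoder.layer1.weight': w, 'head.bias': b}
#   sub_module_name_of_named_params(params, {'encoder.layer1': 'backbone.layer1'})
#   # -> {'backbone.layer1.weight': w, 'head.bias': b}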
| 5,346,611 |
def add_mongodb_document(
m_client_db=get_mongodb_database(),
collection=None,
index_name=None,
doc_type=None,
doc_uuid=None,
doc_body=None
):
"""
Funtion to add a MongoDB document by providing index_name,
document type, document contents as doc and document id.
"""
status = { 'status_code' : 200 }
log_.debug( "function : %s", inspect.stack()[0][3] )
log_.debug( "locals() : \n%s", pformat(locals()))
db = m_client_db[ collection ]
### TO DO
try :
res = db.insert_one(
doc_body,
)
res_add = {
'item_id' : str(res.inserted_id),
'operation' : "item added"
}
except :
res = {}
res_add = {
'item_id' : None,
'operation' : 'not added...'
}
status = {
'status_code' : 500,
'error' : "",
'info' : ""
}
# log_.debug( "res : \n%s", pformat(res.__dict__))
log_.debug( "res_add : \n%s", pformat(res_add))
print()
return res_add, status
| 5,346,612 |
def my_func_1(x, y):
"""
    Returns x raised to the power y.
    Named parameters:
    x -- the base number
    y -- the exponent
(number, number) -> number
>>> my_func_1(2, 2)
4
"""
return x ** y
| 5,346,613 |
def test_run_model(fake_regression_data) -> None:
"""Smoke test.
"""
res = run_model.run(fake_regression_data,
y='y',
X=['x1', 'x2', 'x3', 'x4'])
assert isinstance(res, RegressionResultsWrapper)
| 5,346,614 |
def indexer_testapp(es_app):
""" Indexer testapp, meant for manually triggering indexing runs by posting to /index.
Always uses the ES app (obviously, but not so obvious previously) """
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': 'INDEXER',
}
return webtest.TestApp(es_app, environ)
| 5,346,615 |
def BNN_like(NN,cls=tfp.layers.DenseReparameterization,copy_weight=False,**kwargs):
"""
Create Bayesian Neural Network like input Neural Network shape
Parameters
----------
NN : tf.keras.Model
Neural Network for imitating shape
cls : tfp.layers
Bayes layers class
copy_weight : bool, optional
Copy weight from NN when `True`. The default is `False`
Returns
-------
model : tf.keras.Model
Bayes Neural Network
"""
inputs = tf.keras.Input(shape=(tf.shape(NN.layers[0].kernel)[0],))
x = inputs
for i, L in enumerate(NN.layers):
layer_kwargs = { **kwargs }
if copy_weight:
layer_kwargs["kernel_prior_fn": multivariate_normal_fn(L.kernel)]
layer_kwargs["bias_prior_fn": multivariate_normal_fn(L.bias)]
x = cls(L.units,activation=L.activation,**layer_kwargs)(x)
return tf.keras.Model(inputs=inputs,outputs=x)
| 5,346,616 |
def CreateRepository(repoDir):
""" Creates a Subversion repository """
try:
TimestampDir(repoDir) # renames previous repository if it already exists
Execute(r'%s create %s --fs-type fsfs' % (SVN_ADMIN_EXE, repoDir))
print 'Finished -- CreateRepository'
except Exception, e:
print 'Failed -- CreateRepository', e
raise
| 5,346,617 |
def flattenImageALFOSC(fn,outfn):
""" Flatten all ALFOSC image extentions """
import numpy
import pyfits
f = pyfits.open(fn)
h1 = f[1].header
d1 = f[1].data
d2 = f[2].data
f.close()
d = numpy.hstack((d1,d2))
hdu = pyfits.PrimaryHDU(d)
n = pyfits.HDUList([hdu])
n[0].header.update('AMPLMODE','A')
n[0].header.update('CTYPE1', h1['CTYPE1'])
n[0].header.update('CTYPE2', h1['CTYPE2'])
n[0].header.update('CRVAL1', h1['CRVAL1'])
n[0].header.update('CRVAL2', h1['CRVAL2'])
n[0].header.update('CUNIT1', h1['CUNIT1'])
n[0].header.update('CUNIT2', h1['CUNIT2'])
n[0].header.update('CRPIX1', h1['CRPIX1'])
n[0].header.update('CRPIX2', h1['CRPIX2'])
n[0].header.update('CD1_1', h1['CD1_1'])
n[0].header.update('CD1_2', h1['CD1_2'])
n[0].header.update('CD2_1', h1['CD2_1'])
    n[0].header.update('CD2_2', h1['CD2_2'])
n.writeto(outfn)
| 5,346,618 |
def matrix_mult(a, b):
"""
Function that multiplies two matrices a and b
Parameters
----------
a,b : matrices
Returns
-------
new_array : matrix
The matrix product of the inputs
"""
new_array = []
for i in range(len(a)):
new_array.append([0 for i in range(len(b[0]))])
for j in range(len(b[0])):
for k in range(len(a[0])):
new_array[i][j] += a[i][k] * b[k][j]
return new_array
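# Usage sketch (not part of the original snippet): a 2x2 product checked by hand,
# e.g. the top-left entry is 1*5 + 2*7 = 19.
assert matrix_mult([[1, 2], [3, 4]], [[5, 6], [7, 8]]) == [[19, 22], [43, 50]]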
| 5,346,619 |
def build_type(tp) -> Tuple[str, List[Type]]:
"""
Build typescript type from python type.
"""
tokens = tokenize_python_type(tp)
dependencies = [
token
for token in tokens
if token not in TYPE_MAPPING_WITH_GENERIC_FALLBACK
and not type(token) in TRIVIAL_TYPE_MAPPING
and not isinstance(token, _Final)
]
return _build_type(tokens), dependencies
| 5,346,620 |
def laplacian_radial_kernel(distance, bandwidth=1.0):
"""Laplacian radial kernel.
Parameters
----------
distance : array-like
Array of non-negative real values.
bandwidth : float, optional (default=1.0)
Positive scale parameter of the kernel.
Returns
-------
weight : array-like
Array of non-negative real values of the same shape than
parameter 'distance'.
    References
    ----------
http://crsouza.com/2010/03/17/
kernel-functions-for-machine-learning-applications/
https://data-flair.training/blogs/svm-kernel-functions/
"""
distance = _check_distance(distance)
bandwidth = _check_bandwidth(bandwidth)
scaled_distance = distance / bandwidth
weight = gs.exp(- scaled_distance)
return weight
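# Worked example (assumes the geomstats backend `gs` behaves like numpy and the
# _check_* helpers only validate their inputs): with bandwidth=1.0 and
# distance=[0., 1., 2.] the weights are exp(-0), exp(-1), exp(-2) ~ [1.0, 0.368, 0.135].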
| 5,346,621 |
def build_eslog_config_param(
group_id,
task_name,
rt_id,
tasks,
topic,
table_name,
hosts,
http_port,
transport,
es_cluster_name,
es_version,
enable_auth,
user,
password,
):
"""
    Build the Elasticsearch (ES) sink connector parameters.
    :param group_id: consumer group / cluster name
    :param task_name: task name
    :param rt_id: rt_id
    :param tasks: maximum number of tasks
    :param topic: source topic
    :param table_name: table name
    :param hosts: ES hosts
    :param http_port: ES HTTP port
    :param transport: ES transport port
    :param es_cluster_name: ES cluster name
    :param es_version: ES cluster version
    :param enable_auth: whether authentication is enabled
    :param user: username
    :param password: password (already encrypted)
    :return: the connector parameter dict
"""
return {
"group.id": group_id,
"rt.id": rt_id,
"topics": topic,
"type.name": table_name,
"tasks.max": "%s" % tasks,
"es.index.prefix": table_name.lower(),
"es.cluster.name": es_cluster_name,
"es.cluster.version": es_version,
"es.hosts": hosts,
"es.transport.port": transport,
"es.host": hosts,
"es.http.port": http_port,
"connector.class": "com.tencent.bk.base.datahub.databus.connect.sink.es.EsSinkConnector",
"flush.timeout.ms": "10000",
"batch.size": "10000",
"max.in.flight.requests": "5",
"retry.backoff.ms": "5000",
"max.retry": "5",
"es.cluster.enable.auth": enable_auth,
"es.cluster.enable.PlaintextPwd": False, # 当前都是加密后的密码
"es.cluster.username": user,
"es.cluster.password": password,
}
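# Usage sketch with made-up values (every argument below is hypothetical):
#   conf = build_eslog_config_param(
#       group_id="default", task_name="demo", rt_id="101_demo", tasks=1,
#       topic="table_101_demo", table_name="Demo_Table", hosts="es01",
#       http_port=9200, transport=9300, es_cluster_name="es-demo",
#       es_version="7.10.1", enable_auth=True, user="bkdata", password="******")
#   assert conf["es.index.prefix"] == "demo_table"   # table name is lower-cased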
| 5,346,622 |
def greet_user(username):
# Docstring - used for documentation of functions
"""Display a simple greeting."""
print("Hello, " + username.title() + "!")
| 5,346,623 |
def test_crud__EditForm__2(address_book, browser):
"""Editing the addressbook can be canceled."""
address_book.title = u'ftest-ab'
browser.login('mgr')
browser.open(browser.ADDRESS_BOOK_DEFAULT_URL)
browser.getLink('Master data').click()
browser.getLink('Address book').click()
assert browser.ADDRESS_BOOK_EDIT_URL == browser.url
assert 'ftest-ab' == browser.getControl('title').value
browser.getControl('title').value = 'fancy book'
browser.getControl('Cancel').click()
assert 'No changes were applied.' == browser.message
assert 'ftest-ab' == browser.getControl('title').value
| 5,346,624 |
def disclosure(input_df, cur_period):
"""
Reading in a csv, converting to a data frame and converting some cols to int.
:param input_df: The csv file that is converted into a data frame.
:param cur_period: The current period for the results process.
    :return: region_agg_lorm -- the data grouped by region and land_or_marine.
"""
input_df = pd.read_csv(input_df, dtype={"Q601_asphalting_sand": int,
'Q602_building_soft_sand': int,
'Q603_concreting_sand': int,
'Q604_bituminous_gravel': int,
'Q605_concreting_gravel': int,
'Q606_other_gravel': int,
'Q607_constructional_fill': int,
'Q608_total': int,
'enterprise_ref': int, 'period': int,
'region': int})
input_df["disclosive"] = None
input_df["publish"] = None
input_df["reason"] = None
def run_disclosure(row):
if row['Q608_total'] == 0:
row['disclosive'] = 'N'
row['publish'] = 'Publish'
row['reason'] = ' Total is zero'
else:
row['disclosive'] = 'Y'
row['publish'] = 'N/A'
return row
disaggregated_data = input_df[input_df.period == cur_period]
region_agg = disaggregated_data.groupby('region')
region_agg = region_agg.agg({'Q608_total': 'sum', 'Q607_constructional_fill': 'sum',
'Q606_other_gravel': 'sum', 'Q605_concreting_gravel': 'sum',
'Q604_bituminous_gravel': 'sum', 'Q603_concreting_sand': 'sum',
'Q602_building_soft_sand': 'sum', 'Q601_asphalting_sand': 'sum',
'enterprise_ref': 'nunique'})
region_agg = region_agg.apply(run_disclosure, axis=1)
# regionlorm = disaggregated_data.groupby(['region'])
region_agg_lorm = disaggregated_data.groupby(['region', 'land_or_marine'])
return region_agg_lorm
| 5,346,625 |
def assert_c0_L0_mod(s: Section, attr: Any) -> None:
"""Assert after child0, leaf0 have a modification."""
assert s(attr) == [10, '2', '3']
assert s.children(attr) == [10, '2', '3']
assert s.leaves(attr) == [0, '1', '2', '3']
assert s[0](attr) == 10
assert s[0].leaves(attr) == [0, '1']
assert s[0][0](attr) == 0
| 5,346,626 |
def get_all_file_paths(directory):
"""
Gets all the files in the specified input directory
"""
file_paths = []
for root, _, files in os.walk(directory):
for filename in files:
filepath = os.path.join(root, filename)
file_paths.append(filepath)
return file_paths
| 5,346,627 |
def save_result(save_path, bbox_res, catid2name, threshold):
"""
save result as txt
"""
with open(save_path, 'w') as f:
for dt in bbox_res:
catid, bbox, score = dt['category_id'], dt['bbox'], dt['score']
if score < threshold:
continue
# each bbox result as a line
# for rbox: classname score x1 y1 x2 y2 x3 y3 x4 y4
# for bbox: classname score x1 y1 w h
bbox_pred = '{} {} '.format(catid2name[catid], score) + ' '.join(
[str(e) for e in bbox])
f.write(bbox_pred + '\n')
| 5,346,628 |
def kitchen_sink():
"""Combines all of the test data."""
return word_frequencies.load(_KITCHEN_SINK_DATA)
| 5,346,629 |
def sim_matrix(a, b, eps=1e-8):
"""
added eps for numerical stability
"""
a = normalize_embeddings(a, eps)
b = normalize_embeddings(b, eps)
sim_mt = torch.mm(a, b.transpose(0, 1))
return sim_mt
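# Usage sketch (assumes normalize_embeddings() L2-normalises rows, making this a
# cosine-similarity matrix): for a of shape (N, D) and b of shape (M, D) the result
# is (N, M), and comparing a set of embeddings with itself gives ~1.0 on the diagonal.
#   a = torch.randn(4, 16)
#   sim = sim_matrix(a, a)   # shape (4, 4)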
| 5,346,630 |
def delete_model(name, force=False):
"""Permanently deletes the given model from local and remote storage.
CAUTION: this cannot be undone!
Args:
name: the name of the model, which can have "@<ver>" appended to refer
to a specific version of the model. If no version is specified, the
latest version of the model is assumed
force: whether to force delete the remote model (True) or display a
confirmation message that the user must approve before deleting
the remote model (False). The default is False
Raises:
ModelError: if the model could not be found
"""
# Flush model locally
flush_model(name)
model, models_dir, manifest = _find_model(name)
if force or etau.query_yes_no(
"Are you sure you want to permanently delete this model from "
"remote storage? This cannot be undone!",
default="no",
):
# Flush model remotely
logger.info("Deleting model '%s' from remote storage", name)
model.manager.delete_model()
# Delete model from manifest
manifest_path = manifest.make_manifest_path(models_dir)
logger.info(
"Removing model '%s' from manifest '%s'", name, manifest_path
)
manifest.remove_model(model.name)
manifest.write_json(manifest_path)
else:
logger.info("Remote deletion of model '%s' aborted", name)
| 5,346,631 |
def read_addon_xml(path):
"""Parse the addon.xml and return an info dictionary"""
info = dict(
path='./', # '/storage/.kodi/addons/plugin.video.vrt.nu',
profile='special://userdata', # 'special://profile/addon_data/plugin.video.vrt.nu/',
type='xbmc.python.pluginsource',
)
tree = ET.parse(path)
root = tree.getroot()
info.update(root.attrib) # Add 'id', 'name' and 'version'
info['author'] = info.pop('provider-name')
for child in root:
if child.attrib.get('point') != 'xbmc.addon.metadata':
continue
for grandchild in child:
# Handle assets differently
if grandchild.tag == 'assets':
for asset in grandchild:
info[asset.tag] = asset.text
continue
# Not in English ? Drop it
if grandchild.attrib.get('lang', 'en_GB') != 'en_GB':
continue
# Add metadata
info[grandchild.tag] = grandchild.text
return {info['name']: info}
| 5,346,632 |
def get_submission_list(start_timestamp, end_timestamp, args=None):
"""
    Scrapes a subreddit for submissions between two given dates. Due to limitations
    of the underlying service, it may not return all the possible submissions, so
    it will be necessary to call this method again. The method requests the results
    in descending order, so in subsequent calls, you should only update end_timestamp.
:param start_timestamp: request results after this date/time.
:param end_timestamp: request results before this date/time.
:param args: the args to pass to the endpoint
:return: the JSON object returned by the service.
"""
# Generic parameters: for each submission we want its ID and timestamp,
# 500 is the maximum limit, sorted temporally by the most recent
params = "fields=id,created_utc,subreddit&limit=500&sort=desc&sort_type=created_utc"
if args:
for key, value in args.items():
params += "&{0}={1}".format(key, value)
url = "{0}?before={1}&after={2}&{3}".format(
PUSHSHIFT_ENDPOINT, end_timestamp, start_timestamp, params
)
resp = requests.get(url)
return resp.json()
| 5,346,633 |
def test_invalid_hopping_matrix():
"""
Check that an error is raised when the passed size does not match the
shape of hopping matrices.
"""
with pytest.raises(ValueError):
tbmodels.Model(size=2, hop={(0, 0, 0): np.eye(4)})
| 5,346,634 |
def loadDataSet():
"""
load data from data set
Args:
Returns:
dataSet: train input of x
labelSet: train input of y
"""
# initialize x-trainInput,y-trainInput
dataSet = []
labelSet = []
# open file reader
fr = open('testSet.txt')
for line in fr.readlines():
        # strip() -- remove whitespace from both ends
        # split() -- split on whitespace
lineArr = line.strip().split()
# padding data in list
# x0 = 1.0 , x1 = column1 , x2 = column2
dataSet.append([1.0, float(lineArr[0]), float(lineArr[1])])
# label = column3
labelSet.append(float(lineArr[2]))
return dataSet,labelSet
| 5,346,635 |
def xds_read_xparm_new_style(xparm_file):
"""Parse the XPARM file to a dictionary."""
    data = list(map(float, " ".join(open(xparm_file, "r").readlines()[1:]).split()))
starting_frame = int(data[0])
phi_start, phi_width = data[1:3]
axis = data[3:6]
wavelength = data[6]
beam = data[7:10]
spacegroup = int(data[10])
cell = data[11:17]
a, b, c = data[17:20], data[20:23], data[23:26]
assert int(data[26]) == 1
nx, ny = map(int, data[27:29])
px, py = data[29:31]
ox, oy = data[31:33]
distance = data[33]
x, y = data[34:37], data[37:40]
normal = data[40:43]
results = {
"starting_frame": starting_frame,
"phi_start": phi_start,
"phi_width": phi_width,
"axis": axis,
"wavelength": wavelength,
"beam": beam,
"nx": nx,
"ny": ny,
"px": px,
"py": py,
"distance": distance,
"ox": ox,
"oy": oy,
"x": x,
"y": y,
"normal": normal,
"spacegroup": spacegroup,
"cell": cell,
"a": a,
"b": b,
"c": c,
}
return results
| 5,346,636 |
def clear_bit(val, offs):
"""Clear bit at offset 'offs' in value."""
return val & ~(1 << offs)
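# Usage sketch (not part of the original snippet): clearing bit 2 of 0b1111 gives
# 0b1011, and clearing an already-clear bit is a no-op.
assert clear_bit(0b1111, 2) == 0b1011
assert clear_bit(0b1011, 2) == 0b1011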
| 5,346,637 |
def get_org_details(orgs):
"""Get node and site details, store in Org object"""
org_details = []
for org in orgs:
org_id = org['id']
org_name = org['name']
org_longname = org['longname']
Org = namedtuple('Org', ['org_id', 'org_name', 'org_longname'])
org_details.extend([Org(org_id, org_name, org_longname)])
return org_details
| 5,346,638 |
def decrypt(ctx, input_file, output_file):
"""Decrypt CSE configuration file."""
SERVER_CLI_LOGGER.debug(f"Executing command: {ctx.command_path}")
console_message_printer = utils.ConsoleMessagePrinter()
utils.check_python_version(console_message_printer)
try:
try:
password = os.getenv('CSE_CONFIG_PASSWORD') or utils.prompt_text(
PASSWORD_FOR_CONFIG_DECRYPTION_MSG,
hide_input=True,
color='green')
decrypt_file(input_file, password, output_file)
msg = "Decryption successful."
console_message_printer.general(msg)
SERVER_CLI_LOGGER.debug(msg)
except cryptography.fernet.InvalidToken:
raise Exception("Decryption failed: Invalid password")
except Exception as err:
console_message_printer.error(str(err))
SERVER_CLI_LOGGER.error(str(err))
sys.exit(1)
| 5,346,639 |
def flipFast(index, s, T, rand, neighs, bonds):
"""
Much more efficient version of _oneSweep MC
input:
index: spin to attempt flipping
s: spin configuration
T: temperature
rand: a uniform random float
neighs: list of neighboring sites to index
bonds: list of bonds connections index to its neighbors
"""
dE = 0.
for nn, jj in zip(neighs, bonds):
dE += s[nn]*jj
dE *= 2*s[index]
if dE < 0 or rand <= exp(-dE/T): s[index] *= -1
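# Worked example of the Metropolis rule used above (Ising-style model): a flip with
# dE < 0 is always accepted; with T = 1.0 and dE = 2.0 it is still accepted with
# probability exp(-dE/T) ~ 0.135, which is what the `rand <= exp(-dE/T)` test implements.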
| 5,346,640 |
def slot_size_selection():
"""
Plots the latency and number of slots needed to encode repeater protocols for
a few different levels of fidelity for both cases when two nodes are connected
or separated by two hops
:return: None
"""
link_length = 5
topology = gen_line_topology(num_end_node_comm_q=1, num_end_node_storage_q=3, link_length=link_length)
protocols = []
source = '0'
destinations = ['1', '2']
fidelities = [0.6 + 0.1 * i for i in range(4)]
for destination in destinations:
for fidelity in fidelities:
demand = (source, destination, fidelity, 1)
protocol = get_protocol_without_rate_constraint(topology, demand)
if protocol:
print("Found protocol between {} and {} with fidelity {} and rate {}".format(source, destination,
protocol.F, protocol.R))
protocols.append((demand, protocol))
# Increments of 4ms
slot_sizes = sorted(list(set([0.0001 * i for i in range(1, 200)])))
latency_data = {}
slot_count_data = {}
for demand, protocol in protocols:
pdata_lat = []
pdata_slt = []
for slot_size in slot_sizes:
task = convert_protocol_to_task(demand, protocol, slot_size)
task, dec, corr = schedule_dag_for_resources(task, topology)
asap_d, alap_d, shift_d = dec
if not corr:
import pdb
pdb.set_trace()
elif asap_d < shift_d or alap_d < shift_d:
import pdb
pdb.set_trace()
num_slots = task.c
task_latency = num_slots * slot_size
pdata_lat.append((slot_size, task_latency))
pdata_slt.append((slot_size, num_slots))
s, d, f, r = demand
print("Hops: {}, Fidelity: {}, Slot Size: {}, Latency: {}".format(d, f, slot_size, task_latency))
latency_data[demand] = pdata_lat
slot_count_data[demand] = pdata_slt
fig, axes = plt.subplots(nrows=1, ncols=4)
for i, destination in enumerate(destinations):
fmts = {
0.6: ("-.", "0.8"),
0.7: ("--", "0.6"),
0.8: ("-", "0.4"),
0.9: ("-", "0.2")
}
for demand in latency_data.keys():
if demand[1] != destination:
continue
pdata = latency_data[demand]
spdata = sorted(pdata)
xdata = [d[0] for d in spdata]
ydata = [d[1] for d in spdata]
fidelity = round(demand[2], 2)
label = "F={}".format(fidelity)
fmt, c = fmts[fidelity]
axes[i].plot(xdata, ydata, linestyle=fmt, color=c, label=label)
axes[i].set(xlabel="Slot Size(s)", ylabel="Latency(s)")
for demand in slot_count_data.keys():
if demand[1] != destination:
continue
pdata = slot_count_data[demand]
spdata = sorted(pdata)
xdata = [d[0] for d in spdata]
ydata = [d[1] for d in spdata]
fidelity = round(demand[2], 2)
label = "F={}".format(fidelity)
fmt, c = fmts[fidelity]
axes[i+2].plot(xdata, ydata, linestyle=fmt, color=c, label=label)
axes[i+2].set(xlabel="Slot Size(s)", ylabel="Num Slots")
axes[0].set_title("Link")
axes[1].set_title("Two Hop")
axes[2].set_title("Link")
axes[3].set_title("Two Hop")
handles, labels = axes[-1].get_legend_handles_labels()
fig.legend(handles, labels, bbox_to_anchor=(0.96, 0.35), loc='lower right', fontsize=14)
def on_resize(event):
fig.tight_layout()
fig.canvas.draw()
fig.canvas.mpl_connect('resize_event', on_resize)
plt.autoscale()
plt.show()
| 5,346,641 |
def flush_after(handler, delay):
"""Add 'handler' to the queue so that it is flushed after 'delay' seconds by the flush thread.
Return the scheduled event which may be used for later cancellation (see cancel()).
"""
if not isinstance(handler, logging.Handler):
raise TypeError("handler must be a logging.Handler instance")
return _FLUSH_THREAD.submit(handler.flush, delay)
| 5,346,642 |
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip()
| 5,346,643 |
def test_graph_get_node(full_graph):
"""Passing get_node an invalid node raises a KeyError."""
assert full_graph.get_node('top1') == {'mid1': 78, 'mid2': 113}
assert full_graph.get_node('mid2') == {'third4': 78, 'third5': 91}
| 5,346,644 |
def model_location(model):
"""
Set ODAHUFLOW_MODEL_LOCATION_ENV_VAR to `model` if `model` is not None
Add ODAHUFLOW_MODEL_LOCATION_ENV_VAR to sys.path
Clean state in context manager exit
:param model:
:return:
"""
model_location_for_use = original_model_location = os.environ.get(ODAHUFLOW_MODEL_LOCATION_ENV_VAR, "")
if original_model_location:
_logger.debug(f'{ODAHUFLOW_MODEL_LOCATION_ENV_VAR} env var is set in a system '
f'({ODAHUFLOW_MODEL_LOCATION_ENV_VAR}={original_model_location})')
else:
_logger.debug(f'{ODAHUFLOW_MODEL_LOCATION_ENV_VAR} env is not set in a system')
if model:
_logger.debug(f'--model option is passed. {ODAHUFLOW_MODEL_LOCATION_ENV_VAR} will be replaced '
f'with {model}')
model_location_for_use = os.environ[ODAHUFLOW_MODEL_LOCATION_ENV_VAR] = model
if not model_location_for_use:
raise RuntimeError(f'Either {ODAHUFLOW_MODEL_LOCATION_ENV_VAR} env var or --model option MUST be specified')
sys.path.append(model_location_for_use)
_logger.debug(f'{model_location_for_use} is added to sys.path')
try:
yield
finally:
if model:
os.environ[ODAHUFLOW_MODEL_LOCATION_ENV_VAR] = original_model_location
_logger.debug(f'{ODAHUFLOW_MODEL_LOCATION_ENV_VAR} is set to original value={original_model_location}')
if model_location_for_use:
sys.path.remove(model_location_for_use)
_logger.debug(f'{model_location_for_use} is removed from sys.path')
| 5,346,645 |
def getprotobyname(*args,**kw):
"""getprotobyname(name) -> integer
Return the protocol number for the named protocol. (Rarely used.)"""
pass
| 5,346,646 |
async def test_bad_coin_name(get_auto_switching_and_profits_statistics_response):
"""Tests an API call with a non-existent coin name"""
session = aiohttp.ClientSession()
miningpoolhubapi = MiningPoolHubAPI(session=session)
assert miningpoolhubapi.api_key_set() is True
with aioresponses() as m:
m.get(
"https://doggy_coin.miningpoolhub.com/index.php?action=getuserbalance&api_key=test&page=api",
exception=aiohttp.ClientConnectionError(),
)
m.get(
GET_AUTO_SWITCHING_URL,
status=200,
body=json.dumps(get_auto_switching_and_profits_statistics_response),
headers=CONTENT_HEADERS,
)
with pytest.raises(InvalidCoinError):
await miningpoolhubapi.async_get_user_balance(coin_name="doggy_coin")
await session.close()
| 5,346,647 |
def logGamma(x):
"""The natural logarithm of the gamma function.
Based on public domain NETLIB (Fortran) code by W. J. Cody and L. Stoltz<BR>
Applied Mathematics Division<BR>
Argonne National Laboratory<BR>
Argonne, IL 60439<BR>
<P>
References:
<OL>
<LI>W. J. Cody and K. E. Hillstrom, 'Chebyshev Approximations for the Natural Logarithm of the Gamma Function,' Math. Comp. 21, 1967, pp. 198-203.
<LI>K. E. Hillstrom, ANL/AMD Program ANLC366S, DGAMMA/DLGAMA, May, 1969.
<LI>Hart, Et. Al., Computer Approximations, Wiley and sons, New York, 1968.
</OL></P><P>
From the original documentation:
</P><P>
This routine calculates the LOG(GAMMA) function for a positive real argument X.
Computation is based on an algorithm outlined in references 1 and 2.
The program uses rational functions that theoretically approximate LOG(GAMMA)
to at least 18 significant decimal digits. The approximation for X > 12 is from reference 3,
while approximations for X < 12.0 are similar to those in reference 1, but are unpublished.
The accuracy achieved depends on the arithmetic system, the compiler, the intrinsic functions,
and proper selection of the machine-dependent constants.
</P><P>
Error returns:<BR>
The program returns the value XINF for X .LE. 0.0 or when overflow would occur.
The computation is believed to be free of underflow and overflow."""
y = x
if y < 0.0 or y > LOG_GAMMA_X_MAX_VALUE:
# Bad arguments
return float("inf")
if y <= EPS:
return -math.log(y)
if y <= 1.5:
if (y < pnt68):
corr = -math.log(y)
xm1 = y
else:
corr = 0.0;
xm1 = y - 1.0;
if y <= 0.5 or y >= pnt68:
xden = 1.0;
xnum = 0.0;
for i in xrange(8):
xnum = xnum * xm1 + lg_p1[i];
xden = xden * xm1 + lg_q1[i];
return corr + xm1 * (lg_d1 + xm1 * (xnum / xden));
else:
xm2 = y - 1.0;
xden = 1.0;
xnum = 0.0;
for i in xrange(8):
xnum = xnum * xm2 + lg_p2[i];
xden = xden * xm2 + lg_q2[i];
return corr + xm2 * (lg_d2 + xm2 * (xnum / xden));
if (y <= 4.0):
xm2 = y - 2.0;
xden = 1.0;
xnum = 0.0;
for i in xrange(8):
xnum = xnum * xm2 + lg_p2[i];
xden = xden * xm2 + lg_q2[i];
return xm2 * (lg_d2 + xm2 * (xnum / xden));
if y <= 12.0:
xm4 = y - 4.0;
xden = -1.0;
xnum = 0.0;
for i in xrange(8):
xnum = xnum * xm4 + lg_p4[i];
xden = xden * xm4 + lg_q4[i];
return lg_d4 + xm4 * (xnum / xden);
assert y <= lg_frtbig
res = lg_c[6];
ysq = y * y;
for i in xrange(6):
res = res / ysq + lg_c[i];
res /= y;
corr = math.log(y);
res = res + LOGSQRT2PI - 0.5 * corr;
res += y * (corr - 1.0);
return res
| 5,346,648 |
def regional_validity(query_point, regional_inclusion, regional_exclusions):
""" regional_validity
Returns whether a coordinate point is inside a polygon and outside of excluded regions.
Input: A Point object, a Polygon Object of the inclusion region; a list of Polygon Objects of excluded regions.
    Output: True if the query point is both inside the regional polygon and outside all exclusions; False otherwise.
"""
if query_point.within(regional_inclusion):
# Check if the point co-occurs with city areas...
for city in regional_exclusions:
if query_point.within(city):
return False
return True
return False
| 5,346,649 |
def get_word_vector_list(doc, w2v):
"""Get all the vectors for a text"""
vectors = []
for word in doc:
try:
vectors.append(w2v.wv[word])
except KeyError:
continue
return vectors
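# Usage sketch (assumes a trained gensim Word2Vec model bound to `w2v`): tokens
# missing from the vocabulary are silently skipped by the KeyError handler above.
#   vectors = get_word_vector_list("the quick brown fox".split(), w2v)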
| 5,346,650 |
def solve(lines, n):
"""Apply the rules specified in the input lines to the starting
pattern for n iterations.
The number of lit pixels in the final pattern is returned.
"""
rules = load_rulebook(lines)
pattern = START
for _ in range(n):
pattern = enhance(pattern, rules)
return sum([row.count('#') for row in pattern])
| 5,346,651 |
def _to_plotly_color(scl, transparence=None):
"""
converts a rgb color in format (0-1,0-1,0-1) to a plotly color 'rgb(0-255,0-255,0-255)'
"""
plotly_col = [255 * _c for _c in mplc.to_rgba(scl)] if len(scl) == 3 else [255 * _c for _c in mplc.to_rgb(scl)]
if transparence is not None:
assert 0. <= transparence <= 1.0
plotly_col[3] = transparence
return "rgba({:.0f}, {:.0f}, {:.0f}, {:.4f})".format(*plotly_col)
else:
return "rgb({:.0f}, {:.0f}, {:.0f})".format(*plotly_col[:3])
| 5,346,652 |
def dense_attention_block(seqs_repr, is_training, num_layers,
decay_variable, decay_constant,
units, dropout, query_dropout,
l2_scale, name=''):
"""
"""
    layer_reprs = []
    for i in range(num_layers):
with tf.variable_scope('dense_attention{}'.format(i), reuse=tf.AUTO_REUSE):
#seqs_repr = tf.Print(seqs_repr, [tf.shape(seqs_repr)], "{}".format(i))
seqs_repr = attention_block(seqs_repr,
is_training,
decay_variable,
decay_constant,
dropout,
query_dropout,
l2_scale)
layer_reprs.append(seqs_repr)
return seqs_repr
| 5,346,653 |
def reset_get_unique_name():
"""Reset the heaps that store previously-assigned names."""
global name_heap, prefix_counts
name_heap = set([None])
prefix_counts = collections.Counter()
| 5,346,654 |
def _pp(data, title=None, prefix=''):
"""
pretty printer
    :param data: single entry or list of key-value tuples
    :param title: optional title
    :param prefix: optional prefix prepended to each key label
    (values are printed without labels when the click context has CTX_QUIET set)
"""
ctx = click.get_current_context()
if title is not None:
print(title)
if not isinstance(data, list):
data = [data]
for kv in data:
value = kv[1] if kv[1] is not None else 'N/A'
if isinstance(value, list):
value = ', '.join(value)
if ctx.obj.get(CTX_QUIET, False):
print(value)
else:
label = f"{prefix}{kv[0]}"
print(f"{label.ljust(30, '_')} {value}")
| 5,346,655 |
def generate_schema_type(app_name: str, model: object) -> DjangoObjectType:
"""
Take a Django model and generate a Graphene Type class definition.
Args:
app_name (str): name of the application or plugin the Model is part of.
model (object): Django Model
Example:
For a model with a name of "Device", the following class definition is generated:
Class DeviceType(DjangoObjectType):
Meta:
model = Device
fields = ["__all__"]
if a FilterSet exist for this model at '<app_name>.filters.<ModelName>FilterSet'
The filterset will be store in filterset_class as follow
Class DeviceType(DjangoObjectType):
Meta:
model = Device
fields = ["__all__"]
filterset_class = DeviceFilterSet
"""
main_attrs = {}
meta_attrs = {"model": model, "fields": "__all__"}
# We'll attempt to find a FilterSet corresponding to the model
# Not all models have a FilterSet defined so the function return none if it can't find a filterset
meta_attrs["filterset_class"] = get_filterset_for_model(model)
main_attrs["Meta"] = type("Meta", (object,), meta_attrs)
schema_type = type(f"{model.__name__}Type", (DjangoObjectType,), main_attrs)
return schema_type
| 5,346,656 |
def do_authorize():
"""
Send a token request to the OP.
"""
oauth2.client_do_authorize()
try:
redirect = flask.session.pop("redirect")
return flask.redirect(redirect)
except KeyError:
return flask.jsonify({"success": "connected with fence"})
| 5,346,657 |
def run(number, gens, neat_stats, hyperneat_stats, es_hyperneat_small_stats,
es_hyperneat_medium_stats, es_hyperneat_large_stats):
"""
Run the experiments.
"""
print(f"This is run #{str(number)}")
neat_stats.append(neat_xor.run(gens)[1])
hyperneat_stats.append(hyperneat_xor.run(gens)[1])
es_hyperneat_small_stats.append(es_hyperneat_xor.run(gens, "S")[1])
es_hyperneat_medium_stats.append(es_hyperneat_xor.run(gens, "M")[1])
es_hyperneat_large_stats.append(es_hyperneat_xor.run(gens, "L")[1])
| 5,346,658 |
def get_windows():
"""
Return all windows found by WM with CPU, fullscreen, process name, and class information.
"""
# Basic window information
result = check_output('nice -n 19 wmctrl -l -p', shell=True)
lines = [a for a in result.decode('utf8').split('\n') if a != '']
windows = [re.split(r'\s+', a, maxsplit=4) for a in lines]
# Window properties
window_index = {}
for window in windows:
window_id = window[0]
r = check_output('nice -n 19 xprop -id {}'.format(window_id), shell=True)
wm_classes = []
r_class = re.search(br'WM_CLASS\(STRING\) = (.*)\n', r)
if r_class:
wm_classes = re.findall('\"(.*?)\"', r_class.group(1).decode('ascii'))
fullscreen = b'WM_STATE_FULLSCREEN' in r
window_index[window_id] = (fullscreen, wm_classes)
# Basic process information
usable_lines = []
result = check_output('nice -n 19 top -b -n 2', shell=True)
lines = [a for a in result.decode('utf8').split('\n') if a != '']
first_found = False
for i, line in enumerate(lines):
r = re.search(r'PID\s+USER\s+PR\s+NI', line)
if r:
if first_found:
usable_lines = lines[i + 1:]
break
else:
first_found = True
processes = [re.split(r'\s+', a.strip()) for a in usable_lines]
process_index = {a[0]: (a[8], a[11]) for a in processes}
result = []
for window in windows:
cpu, name = process_index.get(window[2], (None, None))
fullscreen, wm_classes = window_index.get(window[0], None)
result.append(Window(*window, cpu=cpu, fullscreen=fullscreen, name=name,
wm_classes=wm_classes))
return result
| 5,346,659 |
def get_google_open_id_connect_token(service_account_credentials):
"""Get an OpenID Connect token issued by Google for the service account.
This function:
1. Generates a JWT signed with the service account's private key
containing a special "target_audience" claim.
2. Sends it to the OAUTH_TOKEN_URI endpoint. Because the JWT in #1
has a target_audience claim, that endpoint will respond with
an OpenID Connect token for the service account -- in other words,
a JWT signed by *Google*. The aud claim in this JWT will be
set to the value from the target_audience claim in #1.
For more information, see
https://developers.google.com/identity/protocols/OAuth2ServiceAccount .
The HTTP/REST example on that page describes the JWT structure and
demonstrates how to call the token endpoint. (The example on that page
shows how to get an OAuth2 access token; this code is using a
modified version of it to get an OpenID Connect token.)
"""
service_account_jwt = (
service_account_credentials._make_authorization_grant_assertion())
request = google.auth.transport.requests.Request()
body = {
'assertion': service_account_jwt,
'grant_type': google.oauth2._client._JWT_GRANT_TYPE,
}
token_response = google.oauth2._client._token_endpoint_request(
request, OAUTH_TOKEN_URI, body)
return token_response['id_token']
| 5,346,660 |
def test_date_expression(
dask_client, # pylint: disable=redefined-outer-name,unused-argument
):
"""Test of expressions handling dates.."""
ds = make_dataset(5 * 24)
partitioning = Date(("dates", ), "D")
for partition, _ in partitioning.split_dataset(ds, "num_lines"):
variables = dict(partitioning.parse("/".join(partition)))
expr = Expression("year==2000")
assert expr(variables)
expr = Expression("year==2000 and month==1")
assert expr(variables)
expr = Expression("year==2000 and month==1 and day in range(1, 12)")
assert expr(variables)
| 5,346,661 |
def main():
"""
Handles the execution of the code
"""
parser = argparse.ArgumentParser()
parser.add_argument("--snpEff_jar", default=None, required=False,
help="path of local snpEff jar")
parser.add_argument("--vcf", type=str, required=True,
help="path of vcf to annotate")
parser.add_argument("--outpath", type=str,
help="path to write out the vcf")
parser.add_argument('--ver', default='GRCh37.75', required=False,
help='genome assembly')
parser.add_argument("--all", required=False, action='store_true',
help='run all the annotations')
parser.add_argument("--var_eff", required=False, action='store_true',
help='parse variant effect')
parser.add_argument("--total_depth", required=False, action='store_true',
help='compute total depth for variants')
parser.add_argument("--alt_depth", required=False, action='store_true',
help='compute alt depth for variants')
parser.add_argument("--vaf", required=False, action='store_true',
help='compute vaf')
parser.add_argument("--allele_freq", required=False, action='store_true',
help='ExaAC allele_frequency')
parser.add_argument("--var_type", required=False, action='store_true',
help='annotate variant type')
parser.add_argument("--verbose", required=False, action='store_true',
help='enable verbose output')
parser.add_argument("--simple_output", required=False, default=None,
help='outpath for simplified report of data')
args = parser.parse_args()
if args.verbose:
def verboseprint(message):
print(message)
else:
verboseprint = lambda *a: None
# parse the header
header_str, header_file = extract_vcf_header(args.vcf, args.outpath)
verboseprint(f"{','.join(header_str)} extracted from {args.vcf}")
# load the annotated vcf
vcf_pd = pd.read_csv(args.vcf, sep='\t', comment='#',
names=header_str)
verboseprint("Loaded vcf...")
# extract the var effect
if args.allele_freq or args.var_eff or args.all:
verboseprint("Running ExAC annontation...")
decoded, track_idx, key = restExAC(vcf_pd)
if args.var_eff or args.all:
addExACcscq(decoded, track_idx, key, vcf_pd)
verboseprint("ExAC variant consequences added...")
if args.allele_freq or args.all:
addExACFreq(decoded, track_idx, key, vcf_pd)
verboseprint("ExAC variant frequencies added...")
if args.total_depth or args.all or args.alt_depth or args.vaf:
verboseprint("Computing depth metrics...")
add_format = True
for sample in vcf_pd.columns[9:]:
sample_pd = parseSampleField(vcf_pd, sample)
read_count_pd = parseDepthData(sample_pd)
if args.total_depth or args.all:
computeTotalDepth(read_count_pd, sample, vcf_pd, add_format)
verboseprint(f"Total depth added to {sample}...")
if args.alt_depth or args.all:
parseAltSeqDepth(read_count_pd, sample, vcf_pd, add_format)
verboseprint(f"Alt depth added to {sample}...")
if args.vaf or args.all:
computeVAF(read_count_pd, sample, vcf_pd, add_format)
verboseprint(f"VAF added to {sample}...")
add_format = False
if args.outpath:
vcf_pd.to_csv(header_file, sep='\t', index=False)
verboseprint(f"annotated vcf was saved to {args.outpath}")
else:
verboseprint("File was annotated, but not written out as \
--outpath was missing")
header_file.close()
# annotate the vcf with snpEff
if args.snpEff_jar:
verboseprint("Running snpEff...")
runSnpEff(jar_path=args.snpEff_jar, input_vcf=args.outpath,
output_vcf=args.outpath, version=args.ver)
if type(args.simple_output) == str:
verboseprint(f"Generating simplified report {args.simple_output}")
simple_df = vcf_pd[['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER']]
if args.var_eff or args.all:
simple_df['ExAC_CSQS'] = vcf_pd.INFO.str.extract('ExAC_CSQS=(.*?);')
verboseprint(f"added variant effect to {args.simple_output}")
if args.allele_freq or args.all:
simple_df['ExAC_AF'] = vcf_pd.INFO.str.extract('(?:ExAC_AF=)(.*)')
verboseprint(f"added variant frequency to {args.simple_output}")
if args.total_depth or args.all:
loc = vcf_pd.FORMAT[0].split(':').index('TD')
for sample in vcf_pd.columns[9:]:
simple_df[f'TD_{sample}'] = vcf_pd[sample].str.split(':', expand=True)[loc]
verboseprint(f"added variant depth to {args.simple_output}")
if args.alt_depth or args.all:
loc = vcf_pd.FORMAT[0].split(':').index('AS')
for sample in vcf_pd.columns[9:]:
simple_df[f'AS_{sample}'] = vcf_pd[sample].str.split(':', expand=True)[loc]
verboseprint(f"added alternative depth to {args.simple_output}")
if args.vaf or args.all:
loc = vcf_pd.FORMAT[0].split(':').index('VAF')
for sample in vcf_pd.columns[9:]:
simple_df[f'VAF{sample}'] = vcf_pd[sample].str.split(':', expand=True)[loc]
verboseprint(f"added vaf to {args.simple_output}")
if args.var_type or args.all:
simple_df['TYPE'] = vcf_pd.INFO.str.extract('TYPE=(.*?);')
verboseprint(f"added variant type to {args.simple_output}")
simple_df.to_csv(args.simple_output, index=False)
| 5,346,662 |
def term_to_atoms(terms):
"""Visitor to list atoms in term."""
if not isinstance(terms, list):
terms = [terms]
new_terms = []
for term in terms:
if isinstance(term, And):
new_terms += term_to_atoms(term.to_list())
elif isinstance(term, Or):
new_terms += term_to_atoms(term.to_list())
elif isinstance(term, Not):
new_terms.append(term.child)
else:
new_terms.append(term)
return new_terms
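# Usage sketch (assumes the And/Or/Not term classes from the same module, where
# .to_list() returns the operands and Not exposes its operand as .child):
#   term_to_atoms(And(a, Not(b)))   # -> [a, b]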
| 5,346,663 |
def expr_max(argv):
"""
Max aggregator function for :class:`Expression` objects
Returns
-------
exp : :class:`Expression`
Max of given arguments
Examples
--------
>>> x = so.VariableGroup(10, name='x')
>>> y = so.expr_max(2*x[i] for i in range(10))
"""
return expr_nested(argv, 'max')
| 5,346,664 |
def test_get_url(test_dict: FullTestDict, page_number: int):
"""
- GIVEN a list of words and a page number
- WHEN the url is generated
- THEN test it is returns the expected url
"""
word_list = convert_list_of_str_to_kaki(test_dict['input'])
expected_url = test_dict['ojad']['url'] % page_number
assert ojad.get_url(word_list, page_number) == expected_url
| 5,346,665 |
def handle_size(bytes_in=False, bytes_out=False):
"""
a function that converts bytes to human readable form. returns a
string like: 42.31 TB. example:
your_variable_name = make_readable(value_in_bytes)
"""
tib = 1024 ** 4
gib = 1024 ** 3
mib = 1024 ** 2
kib = 1024
if bytes_in:
data = float(bytes_in)
if data >= tib:
symbol = 'TB'
new_data = data / tib
elif data >= gib:
symbol = 'GB'
new_data = data / gib
elif data >= mib:
symbol = 'MB'
new_data = data / mib
elif data >= kib:
symbol = 'KB'
new_data = data / kib
elif data >= 0:
symbol = ' B'
new_data = data
formated_data = "{0:.2f}".format(new_data)
converted_data = str(formated_data) + symbol
return converted_data
elif bytes_out:
symbol = bytes_out[-1].lower()
data = bytes_out[0:-1]
try:
bytes = int(data)
except Exception as e:
print("couldnt convert " + data + " to int!")
print(e)
exit()
if symbol == 't':
converted_data = bytes * tib
elif symbol == 'g':
converted_data = bytes * gib
elif symbol == 'm':
converted_data = bytes * mib
elif symbol == 'k':
converted_data = bytes * kib
else:
print("unsupported size type! expected t, g, m, or k!")
exit()
return converted_data
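# Usage sketch (not part of the original snippet): the helper converts in both directions.
#   handle_size(bytes_in=1536)     # -> '1.50KB'
#   handle_size(bytes_out='2g')    # -> 2147483648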
| 5,346,666 |
def return_elapsed(gs):
"""Returns a description of the elapsed time of recent operations.
Args:
gs: global state.
Returns:
A dictionary containing the count, minimum elapsed time,
maximum elapsed time, average elapsed time, and list of elapsed time
records.
"""
assert isinstance(gs, global_state.GlobalState)
elapsed_list = []
elapsed_sum = 0.0
elapsed_min = None
elapsed_max = None
for elapsed_record in gs.get_elapsed():
duration = elapsed_record.elapsed_seconds
elapsed_list.append(
{'start_time': utilities.seconds_to_timestamp(
elapsed_record.start_time),
'what': elapsed_record.what,
'threadIdentifier': elapsed_record.thread_identifier,
'elapsed_seconds': duration})
elapsed_sum += duration
if (elapsed_min is None) or (elapsed_max is None):
elapsed_min = duration
elapsed_max = duration
else:
elapsed_min = min(elapsed_min, duration)
elapsed_max = max(elapsed_max, duration)
return {'count': len(elapsed_list),
'min': elapsed_min,
'max': elapsed_max,
'average': elapsed_sum / len(elapsed_list) if elapsed_list else None,
'items': elapsed_list}
| 5,346,667 |
def mirror():
"""Runs dump, sync_media, sync_dump and sqlimport."""
dump()
sync_dump()
local('python manage.py sqlimport')
sync_media()
| 5,346,668 |
def calculate_per_class_lwlrap(truth, scores):
"""Calculate label-weighted label-ranking average precision.
Arguments:
truth: np.array of (num_samples, num_classes) giving boolean ground-truth
of presence of that class in that sample.
scores: np.array of (num_samples, num_classes) giving the classifier-under-
test's real-valued score for each class for each sample.
Returns:
per_class_lwlrap: np.array of (num_classes,) giving the lwlrap for each
class.
weight_per_class: np.array of (num_classes,) giving the prior of each
class within the truth labels. Then the overall unbalanced lwlrap is
simply np.sum(per_class_lwlrap * weight_per_class)
"""
assert truth.shape == scores.shape
num_samples, num_classes = scores.shape
# Space to store a distinct precision value for each class on each sample.
# Only the classes that are true for each sample will be filled in.
precisions_for_samples_by_classes = np.zeros((num_samples, num_classes))
for sample_num in range(num_samples):
pos_class_indices, precision_at_hits = (
_one_sample_positive_class_precisions(scores[sample_num, :],
truth[sample_num, :]))
precisions_for_samples_by_classes[sample_num, pos_class_indices] = (
precision_at_hits)
labels_per_class = np.sum(truth > 0, axis=0)
weight_per_class = labels_per_class / float(np.sum(labels_per_class))
# Form average of each column, i.e. all the precisions assigned to labels in
# a particular class.
per_class_lwlrap = (np.sum(precisions_for_samples_by_classes, axis=0) /
np.maximum(1, labels_per_class))
# overall_lwlrap = simple average of all the actual per-class, per-sample precisions
# = np.sum(precisions_for_samples_by_classes) / np.sum(precisions_for_samples_by_classes > 0)
# also = weighted mean of per-class lwlraps, weighted by class label prior across samples
# = np.sum(per_class_lwlrap * weight_per_class)
return per_class_lwlrap, weight_per_class
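# Usage sketch (assumes _one_sample_positive_class_precisions() from the same module):
# the overall label-weighted lwlrap is the weight-averaged per-class value, as noted
# in the docstring above.
#   per_class_lwlrap, weight_per_class = calculate_per_class_lwlrap(truth, scores)
#   overall_lwlrap = np.sum(per_class_lwlrap * weight_per_class)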
| 5,346,669 |
def test_uci_getLineParts():
"""
Test the UCI getLineParts utility function
"""
from paradrop.lib.utils import uci
line = "config interface wan"
result = uci.getLineParts(line)
assert result == line.split()
# It should eat the apostrophes and give same result.
line2 = "config 'interface' 'wan'"
    result2 = uci.getLineParts(line2)
assert result2 == result
line = "option key 'my password has spaces'"
result = uci.getLineParts(line)
assert result == ["option", "key", "my password has spaces"]
line = "config interface 'oops"
result = uci.getLineParts(line)
assert result == ["config", "interface", "oops"]
| 5,346,670 |
def chan_faces(n1: int, n2: int, xform,
dim1: Tuple[float, float, float, float],
dim2: Tuple[float, float, float, float]):
"""
^y
| 0--------7
| | |
| | 5-----6
| | |
+--|--|-------> z
| |
| 4-----3
| |
1--------2
<----> e/zsc
<--------> bflange
"""
faces = [
# front face
[0, 5, 6, 7],
[0, 1, 4, 5],
[1, 2, 3, 4],
# back face
[8, 13, 14, 15],
[8, 9, 12, 13],
[9, 10, 11, 12],
# around the C (counter-clockwise)
[0, 8, 9, 1],
[1, 9, 10, 2],
[2, 10, 11, 3],
[3, 11, 12, 4],
[4, 12, 13, 5],
[5, 13, 14, 6],
[6, 14, 15, 7],
[7, 15, 8, 0],
]
points_list = []
for nid, dim in [(n1, dim1), (n2, dim2)]:
bflange, hall, tweb, tflange = dim
# distance from shear center to neutral axis
#zsc_na = 3 * bflange ** 2 / (6 * bflange + h) # per msc 2018 refman
zsc = 0. ## TODO: consider the shear center
#if 0: # pragma: no cover
# msc 2018 refman; p.670
#h = hall - tflange
#tw = tweb
#tf = tflange
#b = bflange - tw / 2.
#bf = bflange - tw
#hw = hall - 2. * tf
#A = 2 * tf * bf + (h + tf) * tw
#zc = bf * tf * (bf + tw) / A
#zsc = b**2 * tf / (2*b*tw + 1/3. * h * tf)
#E = zs - tw / 2.
#zna = zc + zsc
points = np.array([
[0., hall/2, zsc], # 0
[0., -hall/2, zsc], # 1
[0., -hall/2, zsc + bflange], # 2
[0., -hall/2 + tflange, zsc + bflange], # 3
[0., -hall/2 + tflange, zsc + tweb], # 4
[0., hall/2 - tflange, zsc + tweb], # 5
[0., hall/2 - tflange, zsc + bflange], # 6
[0., hall/2, zsc + bflange], # 7
]) # 16 x 3
pointsi = points @ xform + nid
points_list.append(pointsi)
return faces, np.vstack(points_list)
| 5,346,671 |
def generate_manifest(name, p, h=None):
""" generate_manifest(name, p, h) -> mapping
Generates a mapping used as the manifest file.
:param name: a dotted package name, as in setup.py
:param p: the zip file with package content.
:param h: optional hash function to use.
    :returns: the manifest mapping.
"""
if h is None:
h = hashlib.sha256
m = {}
fh = m["files"] = {}
order = []
with zipfile.ZipFile(p) as zf:
for fi in zf.filelist:
order.append(fi.filename)
hash_all = h()
for fn in sorted(order):
contents = zf.read(fn)
hash_all.update(contents)
fh[fn] = h(contents).hexdigest()
m["name"] = name
m["sum"] = hash_all.hexdigest()
m["date"] = datetime.datetime.now().isoformat()
return m
| 5,346,672 |
def benchrun(methods,
model,
case_args,
filename,
cpus=1,):
"""
Parameters
----------
methods : list of str
Voter systems to be assessed by the election model.
model : func
Election model running function as
>>> e = func(**kwargs)
Where
- `e` is an Election object
- `kwargs` is a dict of arguments generated by `case_args`
case_args : generator
Generator that creates the parametric arguments to input into the model.
Must accept argument `methods` --
>>> generator = case_args(methods)
>>> args = next(generator)
filename : str
Naming prefix for output files
cpus : int
Number of processes or CPU's to use
Returns
-------
df : Dataframe
Results for every election iteration assessed
"""
b = _BenchRunner(model=model, case_args=case_args, filename=filename)
if cpus > 1:
return b.runmult(methods, cpus=cpus)
else:
return b.run(methods)
| 5,346,673 |
def retry( exceptions,times=3,sleep_second=0):
"""
Retry Decorator
Retries the wrapped function/method `times` times if the exceptions listed
in ``exceptions`` are thrown
:param times: The number of times to repeat the wrapped function/method
:type times: Int
    :param exceptions: exception type, or list of exception types, that trigger a retry attempt
    :type exceptions: Exception or iterable of Exceptions
    :param sleep_second: seconds to sleep between attempts (0 disables sleeping)
    :type sleep_second: Int
"""
if not py.iterable(exceptions):exceptions=[exceptions]
def decorator(func):
def newfn(*args, **kwargs):
attempt = 0
while attempt < times:
try:
return func(*args, **kwargs)
except Exception as e:
for i in exceptions:
if isinstance(e,i):
log(
'Exception thrown when attempting to run %s, attempt '
'%d of %d' % (func, attempt, times),
exc_info=True
)
attempt += 1
if sleep_second:sleep(sleep_second)
break
else:#when no break
raise e
return func(*args, **kwargs)
return newfn
return decorator
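# Usage sketch (hypothetical fetch()): retry up to 3 times, sleeping 1s between
# attempts, but only when a ConnectionError is raised; other exceptions propagate.
#   @retry(ConnectionError, times=3, sleep_second=1)
#   def fetch():
#       ...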
| 5,346,674 |
def positiveId(obj):
"""Return id(obj) as a non-negative integer."""
result = id(obj)
if result < 0:
result += _address_mask
assert result > 0
return result
| 5,346,675 |
def get_vroitems_from_package(package):
"""Get all the items from the vRO Package.
Args:
package (str): Path to a package file.
Returns:
VROElementMetadata[]: a list of VROElementMetadata.
"""
vro_items_id, vro_items = [], []
with zipfile.ZipFile(package, 'r') as zip_ref:
for x in zip_ref.namelist():
if x.startswith("elements"):
item_id = os.path.basename(os.path.split(x)[0])
if item_id not in vro_items_id:
with zip_ref.open('elements/' + item_id + '/info', 'r') as xml_info_file:
xml_info = xml_info_file.read()
with zip_ref.open('elements/' + item_id + '/data', 'r') as data_file:
data = data_file.read()
vro_item = VROElementMetadata(item_id, xml_info, data)
vro_items.append(vro_item)
vro_items_id.append(item_id)
logger.info("New item %s" % vro_item)
return vro_items
| 5,346,676 |
def compute_annualized_total_return_over_months(df, column_price, months):
"""
Computed the annualized total return over the specified number of months.
This is equivalent to Compound Annual Growth Rate (CAGR).
Note: If the period is less than one year, it is best not to use annualized total return as it could result in a
very large (positive or negative) number that is not meaningful.
:param df: dataframe (sorted in ascending time order)
:param column_price: name of source column in dataframe with price values (adjusted for splits and dividends) to
compute annualized total return
:param months: time period in months (e.g. 1 = 1 month, 2 = 2 months, 2.5 = 1 month and ~15 days, etc.)
:return: annualized total return over months
"""
# calculate cumulative total return
total_return = compute_cumulative_total_return(df, column_price)
# calculate annualized total returns over months
annualized_total_return = ((1 + total_return)**(12/months)) - 1
return annualized_total_return
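# Worked example: a 10% cumulative return earned over 6 months annualizes to
# (1 + 0.10) ** (12 / 6) - 1 = 0.21, i.e. a 21% annualized total return.
# ('adj_close' below is a hypothetical price column name.)
# annualized = compute_annualized_total_return_over_months(df, 'adj_close', 6)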
| 5,346,677 |
def build_sparse_ts_from_distributions(start_date, end_date, seasonalities, time_interval, dist_dict, **kwargs):
"""constructs a time series with given distributions and seasonalities in a given frequency time_interval"""
ts_list = []
for (name, dist), seasonality in zip(dist_dict.items(), seasonalities):
ts_list.append(build_sparse_ts_by_seasonality(dist, start_date, end_date, seasonality, time_interval,
**kwargs.get(name, {})))
ts = reduce(lambda x, y: add_ts_with_different_dates(x, y), ts_list) # add time series together
return ts
| 5,346,678 |
def preprocess_label(labels, scored_classes, equivalent_classes):
""" convert string labels to binary labels """
y = np.zeros((len(scored_classes)), np.float32)
for label in labels:
if label in equivalent_classes:
label = equivalent_classes[label]
if label in scored_classes:
y[scored_classes.index(label)] = 1
return y
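# Illustrative sketch with made-up class names:
# scored = ['AF', 'LBBB', 'RBBB']; equivalents = {'CRBBB': 'RBBB'}
# preprocess_label(['CRBBB', 'AF'], scored, equivalents) -> [1., 0., 1.]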
| 5,346,679 |
def compile_on_disk(source_file: str,
parser_name: str = '',
compiler_suite: str = "",
extension: str = ".xml") -> Iterable[Error]:
"""
    Compiles a source file with a given compiler and writes the
    result to a file.
If no ``compiler_suite`` is given it is assumed that the source
file is an EBNF grammar. In this case the result will be a Python
script containing a parser for that grammar as well as the
skeletons for a preprocessor, AST transformation table, and compiler.
    If the Python script already exists, only the parser name in the
    script will be updated. (For this to work, the different names
    need to be delimited by section marker blocks.) `compile_on_disk()`
returns a list of error messages or an empty list if no errors
occurred.
:param source_file: The file name of the source text to be compiled.
:param parser_name: The name of the generated parser. If the empty
string is passed, the default name "...Parser.py" will be used.
:param compiler_suite: The file name of the parser/compiler-suite
(usually ending with 'Parser.py'), with which the source file
shall be compiled. If this is left empty, the source file is
assumed to be an EBNF-Grammar that will be compiled with the
internal EBNF-Compiler.
:param extension: The result of the compilation (if successful)
is written to a file with the same name but a different extension
than the source file. This parameter sets the extension.
:returns: A (potentially empty) list of error or warning messages.
"""
filepath = os.path.normpath(source_file)
rootname = os.path.splitext(filepath)[0]
if not parser_name: parser_name = rootname + 'Parser.py'
f = None # Optional[TextIO]
with open(source_file, encoding="utf-8") as f:
source = f.read()
# dhpath = relative_path(os.path.dirname(rootname), DHPARSER_PARENTDIR)
compiler_name = as_identifier(os.path.basename(rootname))
if compiler_suite:
sfactory, pfactory, tfactory, cfactory = load_compiler_suite(compiler_suite)
compiler1 = cfactory()
else:
sfactory = get_ebnf_preprocessor # PreprocessorFactoryFunc
pfactory = get_ebnf_grammar # ParserFactoryFunc
tfactory = get_ebnf_transformer # TransformerFactoryFunc
cfactory = get_ebnf_compiler # CompilerFactoryFunc
compiler1 = cfactory() # Compiler
is_ebnf_compiler = False # type: bool
if isinstance(compiler1, EBNFCompiler):
is_ebnf_compiler = True
compiler1.set_grammar_name(compiler_name, source_file)
result, messages, _ = compile_source(source, sfactory(), pfactory(), tfactory(), compiler1)
if has_errors(messages):
return messages
elif is_ebnf_compiler:
# trans == get_ebnf_transformer or trans == EBNFTransformer:
# either an EBNF- or no compiler suite given
ebnf_compiler = cast(EBNFCompiler, compiler1) # type: EBNFCompiler
global SECTION_MARKER, RX_SECTION_MARKER, PREPROCESSOR_SECTION, PARSER_SECTION, \
AST_SECTION, COMPILER_SECTION, END_SECTIONS_MARKER, RX_WHITESPACE
f = None
try:
f = open(parser_name, 'r', encoding="utf-8")
source = f.read()
sections = split_source(parser_name, source)
intro, imports, preprocessor, _, ast, compiler, outro = sections
ast_trans_python_src = imports + ast
ast_trans_table = dict() # type: TransformationDict
try:
ast_trans_table = compile_python_object(ast_trans_python_src,
r'(?:\w+_)?AST_transformation_table$')
except Exception as e:
if isinstance(e, NameError):
err_str = 'NameError "{}" while compiling AST-Transformation. ' \
'Possibly due to a forgotten import at the beginning ' \
'of the AST-Block (!)'.format(str(e))
else:
err_str = 'Exception {} while compiling AST-Transformation: {}' \
.format(str(type(e)), str(e))
messages.append(Error(err_str, 0, CANNOT_VERIFY_TRANSTABLE_WARNING))
if is_logging():
with open(os.path.join(log_dir(), rootname + '_AST_src.py'), 'w',
encoding='utf-8') as f:
f.write(ast_trans_python_src)
messages.extend(ebnf_compiler.verify_transformation_table(ast_trans_table))
# TODO: Verify compiler
except (PermissionError, FileNotFoundError, IOError):
intro, imports, preprocessor, _, ast, compiler, outro = '', '', '', '', '', '', ''
finally:
if f:
f.close()
f = None
if RX_WHITESPACE.fullmatch(intro):
intro = '#!/usr/bin/env python3'
if RX_WHITESPACE.fullmatch(outro):
outro = read_template('DSLParser.pyi').format(NAME=compiler_name)
if RX_WHITESPACE.fullmatch(imports):
imports = DHParser.ebnf.DHPARSER_IMPORTS
if RX_WHITESPACE.fullmatch(preprocessor):
preprocessor = ebnf_compiler.gen_preprocessor_skeleton()
if RX_WHITESPACE.fullmatch(ast):
ast = ebnf_compiler.gen_transformer_skeleton()
if RX_WHITESPACE.fullmatch(compiler):
compiler = ebnf_compiler.gen_compiler_skeleton()
try:
f = open(parser_name, 'w', encoding="utf-8")
f.write(intro)
f.write(SECTION_MARKER.format(marker=SYMBOLS_SECTION))
f.write(imports)
f.write(SECTION_MARKER.format(marker=PREPROCESSOR_SECTION))
f.write(preprocessor)
f.write(SECTION_MARKER.format(marker=PARSER_SECTION))
f.write(cast(str, result))
f.write(SECTION_MARKER.format(marker=AST_SECTION))
f.write(ast)
f.write(SECTION_MARKER.format(marker=COMPILER_SECTION))
f.write(compiler)
f.write(SECTION_MARKER.format(marker=END_SECTIONS_MARKER))
f.write(outro)
except (PermissionError, FileNotFoundError, IOError) as error:
            print(f'# Could not write file "{parser_name}" because of: '
                  + "\n# ".join(str(error).split('\n')))
print(result)
finally:
if f:
f.close()
if platform.system() != "Windows":
# set file permissions so that the parser_name can be executed
st = os.stat(parser_name)
os.chmod(parser_name, st.st_mode | stat.S_IEXEC)
else:
f = None
try:
f = open(rootname + extension, 'w', encoding="utf-8")
if isinstance(result, Node):
if extension.lower() == '.xml':
f.write(result.as_xml())
else:
f.write(result.as_sxpr())
elif isinstance(result, str):
f.write(result)
else:
raise AssertionError('Illegal result type: ' + str(type(result)))
except (PermissionError, FileNotFoundError, IOError) as error:
            print('# Could not write file "' + rootname + extension + '" because of: '
                  + "\n# ".join(str(error).split('\n')))
print(result)
finally:
if f:
f.close()
return messages
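# Illustrative usage sketch ("arithmetic.ebnf" is a hypothetical grammar file):
# compiling an EBNF grammar writes "arithmeticParser.py" next to it and
# returns a (possibly empty) list of error/warning messages.
# messages = compile_on_disk('arithmetic.ebnf')
# for message in messages:
#     print(message)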
| 5,346,680 |
def load_settings(filename='settings.yaml'):
"""Read settings from a file.
Keyword arguments:
filename -- the source file (default settings.yaml)
"""
with open(filename, 'r') as settings_yaml:
logging.debug("Reading settings from file: %s", filename)
        return yaml.safe_load(settings_yaml)
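# Illustrative usage sketch (the 'log_level' key is hypothetical):
# settings = load_settings('settings.yaml')
# log_level = settings.get('log_level', 'INFO')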
| 5,346,681 |
def enforce_types(data):
"""Convert lists to sets.
"""
# We properly set 'title', 'text', 'pretext' and 'alias' as unicode
for _, node in data.iter_all_nodes():
# This avoid warnings about mix of types in 'rank' later
if node.data['alias'] is not None:
node.data['alias'] = unicode(node.data['alias'])
for graph_data in node.data['graphs']:
graph_data['title'] = unicode(graph_data['title'])
graph_data['text'] = unicode(graph_data['text'])
graph_data['pretext'] = unicode(graph_data['pretext'])
# We properly set 'index' as a set
for _, node in data.iter_all_nodes():
for graph_data in node.data['graphs']:
graph_data['index'] = set(sanitize(graph_data['index'], apply_=handle_index))
# We properly set 'labels' as a set of dict
for _, node in data.iter_all_nodes():
node.data['labels'] = set(sanitize(node.data['labels'], apply_=handle_label))
for graph_data in node.data['graphs']:
graph_data['labels'] = set(sanitize(graph_data['labels'], apply_=handle_label))
| 5,346,682 |
def pytest_configure(config):
"""
pytest hook, used here to register custom marks to get rid of spurious
warnings
"""
config.addinivalue_line(
"markers", "mock_device_proxy: the test requires tango.DeviceProxy to be mocked"
)
| 5,346,683 |
def main():
""" Hook for command line interface """
npix = (1800, 900)
skydir_gc = coordinates.SkyCoord(0., 0., frame=coordinates.Galactic, unit="deg")
wcs_gc = create_wcs(skydir_gc, coordsys='GAL', projection="AIT",
cdelt=0.2, crpix=(900.5, 450.5))
t_all = table.Table.read("new_srcs_filtered.fits")
glat_all = t_all['GLAT']
glon_all = t_all['GLON']
pix_crds_all = wcs_gc.wcs_world2pix(glon_all, glat_all, 1)
    with open("clustered_idx_dict.yaml") as fin:
        clusters = yaml.safe_load(fin)
fig = plt.figure()
axout = fig.add_subplot(111)
axout.set_xlim(0, npix[0])
axout.set_ylim(0, npix[1])
for key, val in list(clusters.items()):
if len(val) < 10:
continue
clust = [key] + val
print(clust)
axout.plot(pix_crds_all[0][clust], pix_crds_all[1][clust])
axout.plot(pix_crds_all[0], pix_crds_all[1], 'r,')
| 5,346,684 |
def extract_ocr_region(
datasets_root_path,
img_extracted_path,
labels_out_path
):
"""
Description:
To extract ocr regions from the raw imgs,
and record corresponding label.
params:
path_with_raw_imgs_and_labels,
path to save imgs extracted,
path to save labels by lines
return: None
"""
if os.path.exists(labels_out_path):
os.remove(labels_out_path)
# get paths
label_axis_paths = sorted(
os.listdir(os.path.join(datasets_root_path, 'txt_9000'))
)
for i in range(len(label_axis_paths)):
label_axis_paths[i] = os.path.join(
datasets_root_path, 'txt_9000', label_axis_paths[i]
)
image_paths = sorted(
os.listdir(os.path.join(datasets_root_path, 'image_9000'))
)
for i in range(len(image_paths)):
image_paths[i] = os.path.join(
datasets_root_path, 'image_9000', image_paths[i]
)
img_idx = 0
for image_path_idx in range(len(image_paths))[:100]:
img = cv2.imread(image_paths[image_path_idx])
if img is None:
print("Error img " + image_paths[image_path_idx])
continue
print('image_path:', image_paths[image_path_idx])
lab_ax_path = label_axis_paths[image_path_idx]
with open(lab_ax_path, 'r') as fin:
for line in fin.readlines():
(axis, label) = (line.strip().split(',')[:-1],
line.strip().split(',')[-1])
if label == '###':
continue
point_axis_1 = [round(float(axis[0])), round(float(axis[1]))]
point_axis_2 = [round(float(axis[2])), round(float(axis[3]))]
point_axis_3 = [round(float(axis[4])), round(float(axis[5]))]
point_axis_4 = [round(float(axis[6])), round(float(axis[7]))]
canvas = np.zeros_like(img)
point_axis = np.array([[point_axis_1, point_axis_2,
point_axis_3, point_axis_4]])
mask = cv2.fillPoly(
canvas,
point_axis,
(255, 255, 255)
)
text = cv2.bitwise_and(img, mask)
                # How can we guarantee every crop is white text on a black
                # background (or consistently the reverse)?
                # Correct the rotation angle of the OCR region
# centroid_x, centroid_y = (np.mean(point_axis[0][:, 0]),
# np.mean(point_axis[0][:, 1]))
# Get diagonal point pairs
idx_dis_max = np.argmax(
np.sum((point_axis[0] - point_axis_1)**2, axis=1)
)
point_axis_pair_1 = [point_axis[0][0],
point_axis[0][idx_dis_max]]
pair_2_idx = list(set(range(1, 4)) - {idx_dis_max})
point_axis_pair_2 = [point_axis[0][pair_2_idx[0]],
point_axis[0][pair_2_idx[1]]]
# correcting_angle = (
# np.arctan((point_axis_1[1] - centroid_y) /
# (point_axis_1[0] - centroid_x)) +
# np.arctan((point_axis_2[1] - centroid_y) /
# (point_axis_2[0] - centroid_x)) +
# np.arctan((point_axis_3[1] - centroid_y) /
# (point_axis_3[0] - centroid_x)) +
# np.arctan((point_axis_4[1] - centroid_y) /
# (point_axis_4[0] - centroid_x))
# ) / 4
if not ((point_axis_pair_1[1][0] - point_axis_pair_1[0][0]) and
(point_axis_pair_2[1][0] - point_axis_pair_2[0][0])):
correcting_angle = 0
else:
correcting_angle = ((point_axis_pair_1[1][1] -
point_axis_pair_1[0][1]) /
(point_axis_pair_1[1][0] -
point_axis_pair_1[0][0]) +
(point_axis_pair_2[1][1] -
point_axis_pair_2[0][1]) /
(point_axis_pair_2[1][0] -
point_axis_pair_2[0][0])) / 2
correcting_angle = np.rad2deg(correcting_angle)
print("correcting_angle:", correcting_angle)
text_rot = get_img_rot_broa.get_img_rot_broa(text,
correcting_angle)
# text_rot = corr_ang_by_radon.corr_ang_by_radon(text)[0]
text_cropped = elim_pure_borders.elim_pure_borders(text_rot)
cv2.imwrite(os.path.join(img_extracted_path,
str(img_idx)+'.jpg'),
text_cropped)
# import matplotlib.pyplot as plt
# fig, (ax0, ax1, ax2) = plt.subplots(1, 3, figsize=(10, 4))
# ax0.imshow(cv2.cvtColor(text, cv2.COLOR_BGR2RGB))
# ax0.set_title('text')
# ax1.imshow(cv2.cvtColor(text_rot, cv2.COLOR_BGR2RGB))
# ax1.set_title('text_rot')
# ax2.imshow(cv2.cvtColor(text_cropped, cv2.COLOR_BGR2RGB))
# ax2.set_title('text_cropped')
# plt.show()
# thresholding
# blur = cv2.cvtColor(cv2.GaussianBlur(text, (5, 5), 0),
# cv2.COLOR_BGR2GRAY)
# thr = cv2.threshold(blur, 127, 255,
# cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
# plt.figure(1)
# plt.imshow(blur, cmap='gray')
# plt.figure(2)
# plt.imshow(thr, cmap='gray')
# plt.title('thr')
# plt.show()
img_idx += 1
# Test by tesseract
# import pytesseract
# from PIL import Image
# cv2.imwrite('./t.png', thr)
# res = pytesseract.image_to_string(
# Image.open('./t.png'),
# lang='chi_sim+eng'
# )
# print('Prediction by tesseract is:', res)
# Use NN to replace tesseract
# single labels
                with open(labels_out_path, 'a+') as fout:
fout.write(label + '\n')
| 5,346,685 |
def vec_abs(field: SampledField):
""" See `phi.math.vec_abs()` """
if isinstance(field, StaggeredGrid):
field = field.at_centers()
return field.with_values(math.vec_abs(field.values))
| 5,346,686 |
def print_filters():
"""This method will print list of filters fields used by ``buildtest build --filter``. This method is invoked by
running ``buildtest build --helpfilter``.
"""
table = Table("[blue]Field", "[blue]Description", title="Buildtest Filters")
table.add_row("[green]tags", "[red]Filter tests by 'tag' field")
table.add_row("[green]type", "[red]Filter test by 'type' field")
table.add_row("[green]maintainers", "[red]Filter test by 'maintainers' field")
console.print(table)
| 5,346,687 |
def spoofRequest(app):
"""
Make REQUEST variable to be available on the Zope application server.
This allows acquisition to work properly
"""
_policy=PermissiveSecurityPolicy()
_oldpolicy=setSecurityPolicy(_policy)
newSecurityManager(None, OmnipotentUser().__of__(app.acl_users))
info = {'SERVER_NAME': 'isaw4.atlantides.org',
'SERVER_PORT': '8083',
'REQUEST_METHOD': 'GET'}
return makerequest(app, environ=info)
| 5,346,688 |
def plot_precision_recall(AP, precisions, recalls):
"""Draw the precision-recall curve.
AP: Average precision at IoU >= 0.5
precisions: list of precision values
recalls: list of recall values
"""
# Plot the Precision-Recall curve
_, ax = plt.subplots(1)
ax.set_title("Precision-Recall Curve. AP@50 = {:.3f}".format(AP))
ax.set_ylim(0, 1.1)
ax.set_xlim(0, 1.1)
_ = ax.plot(recalls, precisions)
| 5,346,689 |
def data_to_percentage(data_list: pd.DataFrame) -> pd.DataFrame:
"""
Takes a dataframe with one or more columns filled with digits and returns a
dataframe with the percentages corresponding to the number of times the
numbers 1-9 appear in each column.
    Args:
        data_list: a dataframe of integers representing all of the leading
            digits from a dataset (in this case, the number of vote counts).
            Each column is a category and is a Series with digits.
    Returns:
        a dataframe of Series with the percentage of each column made up by
        each unique digit in that column. Any numbers outside of [1, 9] are
        not included, and any column with fewer unique digits than another
        column is dropped.
    """
def per_column_percentage(column: pd.Series) -> pd.Series:
number_of_occurrences = column.value_counts()
number_of_occurrences = number_of_occurrences[
(number_of_occurrences.index > 0)
& (number_of_occurrences.index < 10)
]
return number_of_occurrences.multiply(
100 / sum(number_of_occurrences)
).sort_index()
return data_list.apply(per_column_percentage).dropna(axis=1)
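# Illustrative sketch: for one column of leading digits the returned frame
# holds the share (in percent) of each digit 1-9.
# data_to_percentage(pd.DataFrame({'votes': [1, 1, 2, 3, 9]}))
# # -> 'votes' column with 40.0 for digit 1 and 20.0 each for digits 2, 3, 9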
| 5,346,690 |
def is_not_none_or_whitespace(param_name: str, value_to_check: str) -> None:
"""
Raises ValueError if the value is None or a whitespace/empty string
:param param_name: Name of the parameter to validate
:param value_to_check: Value to of the parameter being validated
"""
if value_to_check is None or value_to_check.strip() == '':
raise ValueError(f'Parameter {param_name} contains a None, empty, or whitespace string')
| 5,346,691 |
def create_classifier_from_encoder(data:DataBunch, encoder_path:str=None, path=None,
dropout1=0.5, device: torch.device = torch.device('cuda', 0), **kwargs):
"""Factory function to create classifier from encoder to allow transfer learning."""
from .models.models import EncoderClassifier
path = data.path if path is None else path
if encoder_path is None:
logger.info("WARNING: `encoder_path` is None, not using pretrained feature extractor")
encoder = None
else:
encoder = torch.load(encoder_path, map_location='cpu')
model = EncoderClassifier(data.train_ds.shape, encoder, len(data.classes),dropout1=dropout1)
learn = Learner(data, model, path, model_type="classifier", device=device, **kwargs)
learn.freeze_encoder()
return learn
| 5,346,692 |
def gz_compression(filepath, delete_source=True):
"""
Compress the given file using gzip.
Delete the source file if asked.
:param filepath: path pointing to the source file
:param delete_source: boolean indicating if the source file should be deleted
"""
# compress the file using gzip
with open(filepath, 'rb') as f_in:
with gzip.open(filepath + ".gz", 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
# delete source file if asked
if delete_source:
os.remove(filepath)
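# Illustrative usage sketch ('results.csv' is a hypothetical file): writes
# 'results.csv.gz' next to the source; pass delete_source=False to keep it.
# gz_compression('results.csv', delete_source=False)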
| 5,346,693 |
def clone(job):
"""
Action that creates a clone of a machine.
"""
service = job.service
vdc = service.parent
if 'g8client' not in vdc.producers:
raise j.exceptions.RuntimeError("No producer g8client found. Cannot continue clone of %s" % service)
g8client = vdc.producers["g8client"][0]
config_instance = "{}_{}".format(g8client.aysrepo.name, g8client.model.data.instance)
cl = j.clients.openvcloud.get(instance=config_instance, create=False, die=True, sshkey_path="/root/.ssh/ays_repos_key")
acc = cl.account_get(vdc.model.data.account)
space = acc.space_get(vdc.model.dbobj.name, vdc.model.data.location)
if service.name not in space.machines:
raise j.exceptions.RuntimeError("Machine with name %s doesn't exist in the cloud space" % service.name)
machine = space.machines[service.name]
clone_name = service.model.data.cloneName
machine.clone(clone_name)
machine.start()
| 5,346,694 |
def test_force_overwrite_two_pages(overwrite_pages, tmp_path, setup_page):
"""Checks for multiple pages force overwrite flag is set <=> page is updated if author is changed"""
pages_obj = {1: {}, 2: {}}
config_file, (
pages_obj[1]["page_id"],
pages_obj[1]["page_title"],
pages_obj[2]["page_id"],
pages_obj[2]["page_title"],
) = setup_page(2)
new_config = replace_new_author(config_file, tmp_path)
for page_no in range(2):
if overwrite_pages[page_no]:
new_config = _set_page_to_overwrite(
config_file=new_config, page_no=page_no + 1, tmp_path=tmp_path
)
rewrite_page_file(Config(new_config).pages[page_no].page_file)
result: Result = run_with_config(new_config)
assert result.exit_code == 0
for page_no in range(2):
assert (
f"Updating page #{pages_obj[page_no+1]['page_id']}" in result.stdout
) == overwrite_pages[page_no]
| 5,346,695 |
def filter_string(
df: pd.DataFrame,
column_name: Hashable,
search_string: str,
complement: bool = False,
case: bool = True,
flags: int = 0,
na=None,
regex: bool = True,
) -> pd.DataFrame:
"""Filter a string-based column according to whether it contains a substring.
This is super sugary syntax that builds on top of `pandas.Series.str.contains`.
It is meant to be the method-chaining equivalent of the following:
```python
df = df[df[column_name].str.contains(search_string)]]
```
This method does not mutate the original DataFrame.
Example: Retain rows whose column values contain a particular substring.
>>> import pandas as pd
>>> import janitor
>>> df = pd.DataFrame({"a": range(3, 6), "b": ["bear", "peeL", "sail"]})
>>> df
a b
0 3 bear
1 4 peeL
2 5 sail
>>> df.filter_string(column_name="b", search_string="ee")
a b
1 4 peeL
>>> df.filter_string(column_name="b", search_string="L", case=False)
a b
1 4 peeL
2 5 sail
Example: Filter names does not contain `'.'` (disable regex mode).
>>> import pandas as pd
>>> import janitor
>>> df = pd.Series(["JoseChen", "Brian.Salvi"], name="Name").to_frame()
>>> df
Name
0 JoseChen
1 Brian.Salvi
>>> df.filter_string(column_name="Name", search_string=".", regex=False, complement=True)
Name
0 JoseChen
:param df: A pandas DataFrame.
:param column_name: The column to filter. The column should contain strings.
:param search_string: A regex pattern or a (sub-)string to search.
:param complement: Whether to return the complement of the filter or not. If
set to True, then the rows for which the string search fails are retained
instead.
:param case: If True, case sensitive.
:param flags: Flags to pass through to the re module, e.g. re.IGNORECASE.
:param na: Fill value for missing values. The default depends on dtype of
the array. For object-dtype, `numpy.nan` is used. For `StringDtype`,
`pandas.NA` is used.
:param regex: If True, assumes `search_string` is a regular expression. If False,
treats the `search_string` as a literal string.
:returns: A filtered pandas DataFrame.
""" # noqa: E501
criteria = df[column_name].str.contains(
pat=search_string,
case=case,
flags=flags,
na=na,
regex=regex,
)
if complement:
return df[~criteria]
return df[criteria]
| 5,346,696 |
def on_demand_feature_view(
features: List[Feature], inputs: Dict[str, Union[FeatureView, RequestDataSource]]
):
"""
Declare an on-demand feature view
:param features: Output schema with feature names
:param inputs: The inputs passed into the transform.
:return: An On Demand Feature View.
"""
def decorator(user_function):
on_demand_feature_view_obj = OnDemandFeatureView(
name=user_function.__name__,
inputs=inputs,
features=features,
udf=user_function,
)
functools.update_wrapper(
wrapper=on_demand_feature_view_obj, wrapped=user_function
)
return on_demand_feature_view_obj
return decorator
| 5,346,697 |
def get_error_string(ftdi):
"""
get_error_string(context ftdi) -> char *
Get string representation for last error code
Parameters:
-----------
ftdi: pointer to ftdi_context
Returns:
--------
Pointer: to error string
"""
errstr = ftdi_get_error_string(ftdi)
return cast(errstr, c_char_p).value.decode('ascii')
| 5,346,698 |
def DenseNet52k12(growth_rate = 12,
reduction = 0.5):
"""
Parameters:
----------
Returns
-------
"""
return DenseNet(reduction = reduction,
growth_rate = growth_rate,
layers=52)
| 5,346,699 |