from pathlib import Path
from typing import List, Tuple
import numpy as np
from relaxations.interval import Interval
from relaxations.linear_bounds import LinearBounds
def load_spec(spec_dir: Path, counter: int) -> List[Tuple[List[Interval], Interval, LinearBounds]]:
parameters = list()
interval_bounds = list()
lower_biases = list()
upper_biases = list()
lower_weights = list()
upper_weights = list()
with (spec_dir / f'{counter}.csv').open('r') as f:
split_parameters = list()
split_interval_bounds = list()
split_lower_biases = list()
split_upper_biases = list()
split_lower_weights = list()
split_upper_weights = list()
for line in f.readlines():
if '|' in line:
lower, upper = line.strip().split(' | ')
lower = [float(v) for v in lower.split(' ')]
upper = [float(v) for v in upper.split(' ')]
split_lower_biases.append(lower[0])
split_upper_biases.append(upper[0])
split_lower_weights.append(lower[1:])
split_upper_weights.append(upper[1:])
elif 'SPEC_FINISHED' in line:
parameters.append(np.asarray(split_parameters))
interval_bounds.append(np.asarray(split_interval_bounds))
lower_biases.append(np.asarray(split_lower_biases))
upper_biases.append(np.asarray(split_upper_biases))
lower_weights.append(np.asarray(split_lower_weights))
upper_weights.append(np.asarray(split_upper_weights))
split_parameters = list()
split_interval_bounds = list()
split_lower_biases = list()
split_upper_biases = list()
split_lower_weights = list()
split_upper_weights = list()
elif line.startswith('('):
split_interval_bounds.extend(eval(line))
else:
split_parameters.append([float(v) for v in line.strip().split(' ')])
parameters = np.array(parameters)
interval_bounds = np.asarray(interval_bounds)
lower_biases = np.asarray(lower_biases)
upper_biases = np.asarray(upper_biases)
lower_weights = np.asarray(lower_weights)
upper_weights = np.asarray(upper_weights)
result = list()
for i in range(len(parameters)):
params = [Interval(param[0], param[1]) for param in parameters[i]]
bounds = Interval(
lower_bound=interval_bounds[i][:, 0],
upper_bound=interval_bounds[i][:, 1]
)
constraints = LinearBounds(
upper_slope=upper_weights[i],
upper_offset=upper_biases[i],
lower_slope=lower_weights[i],
lower_offset=lower_biases[i]
)
result.append((params, bounds, constraints))
return result
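# Usage sketch (the directory and counter are hypothetical):
#   specs = load_spec(Path('specs/'), counter=0)
#   for params, bounds, constraints in specs:
#       ...  # params: List[Interval], bounds: Interval, constraints: LinearBounds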
| python |
import cv2
import numpy as np
def parse_mapping(ground_truth):
    # Map each label to a unique integer id and return the mapping
    mapping = {}
    with open(ground_truth, "r") as f:
        for line in f.readlines():
            label = line.strip().split(",")[1]
            if label not in mapping:
                mapping[label] = len(mapping)
    return mapping
def parse_data(ground_truth):
images = {}
label_dict = {}
f = open(ground_truth, "r")
for line in f.readlines():
line_split = line.strip().split(",")
(fname, label, x1, y1, x2, y2) = line_split
# create dictionary of labels
if label in label_dict:
label_dict[label] += 1
else:
label_dict[label] = 1
fname = "MIO-TCD-Localization/train/" + fname + ".jpg"
if fname not in images:
images[fname] = {}
img = cv2.imread(fname)
h, w, _ = img.shape
# for every new image
images[fname]["filepath"] = fname
images[fname]["height"] = h
images[fname]["width"] = w
images[fname]["bboxes"] = []
images[fname]['bboxes'].append(
{
"x1" : int(float(x1)),
"y1" : int(float(y1)),
"x2" : int(float(x2)),
"y2" : int(float(y2)),
"class" : label
})
list1 = []
for image in images:
list1.append(images[image])
return list1, label_dict
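# Usage sketch (the CSV path is hypothetical; each row is "filename,label,x1,y1,x2,y2"):
#   all_imgs, label_counts = parse_data("MIO-TCD-Localization/gt_train.csv")
#   label_mapping = parse_mapping("MIO-TCD-Localization/gt_train.csv")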
| python |
# Compute cosine similarity between the input phrase and every phrase in the dataset
# Save the similarity results as JSON and pickle files
from sentence_transformers import SentenceTransformer
import numpy as np
import torch
from torch import nn
import pickle
import json
# Input phrase for this run
key_phrase = 'pulls the trigger'
# Load the dataset
with open('combined_word2id_dict.pkl', 'rb') as f:
phrase_dict = pickle.load(f)
# Load the Phrase-BERT model
model = SentenceTransformer('whaleloops/phrase-bert')
# Get the vector representation of the input (fixed length, len(p1) = 256)
p1 = model.encode(key_phrase)
cos_sim = nn.CosineSimilarity(dim=0)
result = {}
# Iterate over each phrase in the dataset
for phrase, id in phrase_dict.items():
print('phrase is:', phrase)
print('id is:', id)
    # Get the vector representation of the phrase
emb = model.encode(phrase)
    # Compute the cosine similarity between the input and the phrase
similarity = cos_sim(torch.tensor(p1), torch.tensor(emb))
print('similarity is:', similarity)
# print('similarty.item()', similarity.item())
result[phrase] = similarity.item()
# Save the results
with open('results_dict.json', 'w') as f:
json.dump(result, f, indent=4)
with open('results_dict.pkl', 'wb') as f:
pickle.dump(result, f)
# print(f'The cosine similarity between phrase 1 and 2 is: {cos_sim( torch.tensor(p1), torch.tensor(p2))}')
# print(f'The cosine similarity between phrase 1 and 3 is: {cos_sim( torch.tensor(p1), torch.tensor(p3))}')
# print(f'The cosine similarity between phrase 2 and 3 is: {cos_sim( torch.tensor(p2), torch.tensor(p3))}')
# print(f'The cosine similarity between phrase 4 and 1 is: {cos_sim( torch.tensor(p4), torch.tensor(p1))}')
# print(f'The cosine similarity between phrase 4 and 5 is: {cos_sim( torch.tensor(p4), torch.tensor(p5))}')
| python |
s = 0
cont = 0
for c in range(1, 501, 2):
if c % 3 == 0 and c % 2 == 1:
s += c
cont += 1
print(f'The sum of the {cont} multiples of 3 between 1 and 501 is {s}')
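# A minimal cross-check of the loop above with generator expressions
# (the c % 2 == 1 test is already implied by stepping through odd numbers only):
assert s == sum(c for c in range(1, 501, 2) if c % 3 == 0)
assert cont == sum(1 for c in range(1, 501, 2) if c % 3 == 0)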
| python |
import math
import numpy as np
import os
from scipy import ndimage
from scipy.interpolate import RegularGridInterpolator as rgi
import common
import argparse
import ntpath
# Import shipped libraries.
import librender
import libmcubes
from multiprocessing import Pool
use_gpu = True
if use_gpu:
import libfusiongpu as libfusion
from libfusiongpu import tsdf_gpu as compute_tsdf
else:
import libfusioncpu as libfusion
from libfusioncpu import tsdf_cpu as compute_tsdf
class Fusion:
"""
Performs TSDF fusion.
"""
def __init__(self):
"""
Constructor.
"""
parser = self.get_parser()
self.options = parser.parse_args()
self.render_intrinsics = np.array([
self.options.focal_length_x,
self.options.focal_length_y,
self.options.principal_point_x,
self.options.principal_point_y,
], dtype=float)
# Essentially the same as above, just a slightly different format.
self.fusion_intrisics = np.array([
[self.options.focal_length_x, 0, self.options.principal_point_x],
[0, self.options.focal_length_y, self.options.principal_point_y],
[0, 0, 1]
])
self.image_size = np.array([
self.options.image_height,
self.options.image_width,
], dtype=np.int32)
# Mesh will be centered at (0, 0, 1)!
self.znf = np.array([
1 - 0.75,
1 + 0.75
], dtype=float)
# Derive voxel size from resolution.
self.voxel_size = 1./self.options.resolution
self.truncation = self.options.truncation_factor*self.voxel_size
def get_parser(self):
"""
Get parser of tool.
:return: parser
"""
parser = argparse.ArgumentParser(description='Scale a set of meshes stored as OFF files.')
parser.add_argument('--mode', type=str, default='render',
help='Operation mode: render, fuse or sample.')
input_group = parser.add_mutually_exclusive_group(required=True)
input_group.add_argument('--in_dir', type=str,
help='Path to input directory.')
input_group.add_argument('--in_file', type=str,
                        help='Path to input file.')
parser.add_argument('--out_dir', type=str,
help='Path to output directory; files within are overwritten!')
parser.add_argument('--t_dir', type=str,
help='Path to transformation directory.')
parser.add_argument('--n_proc', type=int, default=0,
help='Number of processes to run in parallel'
'(0 means sequential execution).')
parser.add_argument('--overwrite', action='store_true',
help='Overwrites existing files if true.')
parser.add_argument('--n_points', type=int, default=100000,
help='Number of points to sample per model.')
parser.add_argument('--n_views', type=int, default=100,
help='Number of views per model.')
parser.add_argument('--image_height', type=int, default=640,
help='Depth image height.')
parser.add_argument('--image_width', type=int, default=640,
help='Depth image width.')
parser.add_argument('--focal_length_x', type=float, default=640,
help='Focal length in x direction.')
parser.add_argument('--focal_length_y', type=float, default=640,
help='Focal length in y direction.')
parser.add_argument('--principal_point_x', type=float, default=320,
help='Principal point location in x direction.')
parser.add_argument('--principal_point_y', type=float, default=320,
help='Principal point location in y direction.')
parser.add_argument('--sample_weighted', action='store_true',
help='Whether to use weighted sampling.')
parser.add_argument('--sample_scale', type=float, default=0.2,
help='Scale for weighted sampling.')
parser.add_argument(
'--depth_offset_factor', type=float, default=1.5,
help='The depth maps are offsetted using depth_offset_factor*voxel_size.')
parser.add_argument('--resolution', type=float, default=256,
help='Resolution for fusion.')
parser.add_argument(
'--truncation_factor', type=float, default=10,
help='Truncation for fusion is derived as truncation_factor*voxel_size.')
return parser
def read_directory(self, directory):
"""
Read directory.
:param directory: path to directory
:return: list of files
"""
files = []
for filename in os.listdir(directory):
files.append(os.path.normpath(os.path.join(directory, filename)))
return files
def get_in_files(self):
if self.options.in_dir is not None:
assert os.path.exists(self.options.in_dir)
common.makedir(self.options.out_dir)
files = self.read_directory(self.options.in_dir)
else:
files = [self.options.in_file]
if not self.options.overwrite:
def file_filter(filepath):
outpath = self.get_outpath(filepath)
return not os.path.exists(outpath)
files = list(filter(file_filter, files))
return files
def get_outpath(self, filepath):
filename = os.path.basename(filepath)
if self.options.mode == 'render':
outpath = os.path.join(self.options.out_dir, filename + '.h5')
elif self.options.mode == 'fuse':
modelname = os.path.splitext(os.path.splitext(filename)[0])[0]
outpath = os.path.join(self.options.out_dir, modelname + '.off')
elif self.options.mode == 'sample':
modelname = os.path.splitext(os.path.splitext(filename)[0])[0]
outpath = os.path.join(self.options.out_dir, modelname + '.npz')
return outpath
def get_points(self):
"""
See https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere.
:param n_points: number of points
:type n_points: int
:return: list of points
:rtype: numpy.ndarray
"""
rnd = 1.
points = []
offset = 2. / self.options.n_views
increment = math.pi * (3. - math.sqrt(5.))
for i in range(self.options.n_views):
y = ((i * offset) - 1) + (offset / 2)
r = math.sqrt(1 - pow(y, 2))
phi = ((i + rnd) % self.options.n_views) * increment
x = math.cos(phi) * r
z = math.sin(phi) * r
points.append([x, y, z])
# visualization.plot_point_cloud(np.array(points))
return np.array(points)
def get_views(self):
"""
Generate a set of views to generate depth maps from.
:param n_views: number of views per axis
:type n_views: int
:return: rotation matrices
:rtype: [numpy.ndarray]
"""
Rs = []
points = self.get_points()
for i in range(points.shape[0]):
# https://math.stackexchange.com/questions/1465611/given-a-point-on-a-sphere-how-do-i-find-the-angles-needed-to-point-at-its-ce
longitude = - math.atan2(points[i, 0], points[i, 1])
latitude = math.atan2(points[i, 2], math.sqrt(points[i, 0] ** 2 + points[i, 1] ** 2))
R_x = np.array([[1, 0, 0],
[0, math.cos(latitude), -math.sin(latitude)],
[0, math.sin(latitude), math.cos(latitude)]])
R_y = np.array([[math.cos(longitude), 0, math.sin(longitude)],
[0, 1, 0],
[-math.sin(longitude), 0, math.cos(longitude)]])
R = R_y.dot(R_x)
Rs.append(R)
return Rs
def render(self, mesh, Rs):
"""
Render the given mesh using the generated views.
:param base_mesh: mesh to render
:type base_mesh: mesh.Mesh
:param Rs: rotation matrices
:type Rs: [numpy.ndarray]
:return: depth maps
:rtype: numpy.ndarray
"""
depthmaps = []
for i in range(len(Rs)):
np_vertices = Rs[i].dot(mesh.vertices.astype(np.float64).T)
np_vertices[2, :] += 1
np_faces = mesh.faces.astype(np.float64)
np_faces += 1
depthmap, mask, img = \
librender.render(np_vertices.copy(), np_faces.T.copy(),
self.render_intrinsics, self.znf, self.image_size)
# This is mainly result of experimenting.
# The core idea is that the volume of the object is enlarged slightly
# (by subtracting a constant from the depth map).
# Dilation additionally enlarges thin structures (e.g. for chairs).
depthmap -= self.options.depth_offset_factor * self.voxel_size
depthmap = ndimage.morphology.grey_erosion(depthmap, size=(3, 3))
depthmaps.append(depthmap)
return depthmaps
def fusion(self, depthmaps, Rs):
"""
Fuse the rendered depth maps.
:param depthmaps: depth maps
:type depthmaps: numpy.ndarray
:param Rs: rotation matrices corresponding to views
:type Rs: [numpy.ndarray]
:return: (T)SDF
:rtype: numpy.ndarray
"""
Ks = self.fusion_intrisics.reshape((1, 3, 3))
Ks = np.repeat(Ks, len(depthmaps), axis=0).astype(np.float32)
Ts = []
for i in range(len(Rs)):
Rs[i] = Rs[i]
Ts.append(np.array([0, 0, 1]))
Ts = np.array(Ts).astype(np.float32)
Rs = np.array(Rs).astype(np.float32)
depthmaps = np.array(depthmaps).astype(np.float32)
views = libfusion.PyViews(depthmaps, Ks, Rs, Ts)
# Note that this is an alias defined as libfusiongpu.tsdf_gpu or libfusioncpu.tsdf_cpu!
tsdf = compute_tsdf(views,
self.options.resolution, self.options.resolution,
self.options.resolution, self.voxel_size, self.truncation, False)
tsdf = np.transpose(tsdf[0], [2, 1, 0])
return tsdf
def run(self):
"""
Run the tool.
"""
common.makedir(self.options.out_dir)
files = self.get_in_files()
if self.options.mode == 'render':
method = self.run_render
elif self.options.mode == 'fuse':
method = self.run_fuse
elif self.options.mode == 'sample':
method = self.run_sample
else:
            print('Invalid mode, choose render, fuse or sample.')
exit()
if self.options.n_proc == 0:
for filepath in files:
method(filepath)
else:
with Pool(self.options.n_proc) as p:
p.map(method, files)
def run_render(self, filepath):
"""
Run rendering.
"""
timer = common.Timer()
Rs = self.get_views()
timer.reset()
print('Rendering {}'.format(filepath))
mesh = common.Mesh.from_off(filepath)
depths = self.render(mesh, Rs)
depth_file = self.get_outpath(filepath)
common.write_hdf5(depth_file, np.array(depths))
print('[Data] wrote %s (%f seconds)' % (depth_file, timer.elapsed()))
def run_fuse(self, filepath):
"""
Run fusion.
"""
timer = common.Timer()
Rs = self.get_views()
# As rendering might be slower, we wait for rendering to finish.
# This allows to run rendering and fusing in parallel (more or less).
print('Fusing {}'.format(filepath))
depths = common.read_hdf5(filepath)
timer.reset()
tsdf = self.fusion(depths, Rs)
# To ensure that the final mesh is indeed watertight
tsdf = np.pad(tsdf, 1, 'constant', constant_values=1e6)
vertices, triangles = libmcubes.marching_cubes(-tsdf, 0)
# Remove padding offset
vertices -= 1
# Normalize to [-0.5, 0.5]^3 cube
vertices /= self.options.resolution
vertices -= 0.5
modelname = os.path.splitext(os.path.splitext(os.path.basename(filepath))[0])[0]
t_loc, t_scale = self.get_transform(modelname)
vertices = t_loc + t_scale * vertices
off_file = self.get_outpath(filepath)
libmcubes.export_off(vertices, triangles, off_file)
print('[Data] wrote %s (%f seconds)' % (off_file, timer.elapsed()))
def run_sample(self, filepath):
"""
Run sampling.
"""
timer = common.Timer()
Rs = self.get_views()
# As rendering might be slower, we wait for rendering to finish.
# This allows to run rendering and fusing in parallel (more or less).
depths = common.read_hdf5(filepath)
timer.reset()
tsdf = self.fusion(depths, Rs)
xs = np.linspace(-0.5, 0.5, tsdf.shape[0])
ys = np.linspace(-0.5, 0.5, tsdf.shape[1])
zs = np.linspace(-0.5, 0.5, tsdf.shape[2])
tsdf_func = rgi((xs, ys, zs), tsdf)
modelname = os.path.splitext(os.path.splitext(os.path.basename(filepath))[0])[0]
points = self.get_random_points(tsdf)
values = tsdf_func(points)
t_loc, t_scale = self.get_transform(modelname)
occupancy = (values <= 0.)
out_file = self.get_outpath(filepath)
np.savez(out_file, points=points, occupancy=occupancy, loc=t_loc, scale=t_scale)
print('[Data] wrote %s (%f seconds)' % (out_file, timer.elapsed()))
def get_transform(self, modelname):
if self.options.t_dir is not None:
t_filename = os.path.join(self.options.t_dir, modelname + '.npz')
t_dict = np.load(t_filename)
t_loc = t_dict['loc']
t_scale = t_dict['scale']
else:
t_loc = np.zeros(3)
t_scale = np.ones(3)
return t_loc, t_scale
def get_random_points(self, tsdf):
N1, N2, N3 = tsdf.shape
npoints = self.options.n_points
if not self.options.sample_weighted:
points = np.random.rand(npoints, 3)
else:
df = np.abs(tsdf)
scale = self.options.sample_scale * df.max()
indices = np.arange(N1*N2*N3)
prob = np.exp(-df.flatten() / scale)
prob = prob / prob.sum()
indices_rnd = np.random.choice(indices, size=npoints, p=prob)
idx1, idx2, idx3 = np.unravel_index(indices_rnd, [N1, N2, N3])
idx1 = idx1 + np.random.rand(npoints)
idx2 = idx2 + np.random.rand(npoints)
idx3 = idx3 + np.random.rand(npoints)
points = np.stack([idx1 / N1, idx2 / N2, idx3 / N3], axis=1)
points -= 0.5
return points
if __name__ == '__main__':
app = Fusion()
app.run()
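# Example invocations (a sketch; the script name and paths are hypothetical):
#   python fusion.py --mode render --in_dir scaled_meshes/ --out_dir depth/ --n_views 100
#   python fusion.py --mode fuse --in_dir depth/ --out_dir watertight/ --t_dir transforms/
#   python fusion.py --mode sample --in_dir depth/ --out_dir points/ --n_points 100000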
| python |
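# Reads two space-separated integer lists from stdin and prints the sum of
# element-wise absolute differences (an L1 / Manhattan distance between the lists).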
arr=list(map(int,input().rstrip().split()))
fff=list(map(int,input().rstrip().split()))
a=0
for i in range(len(arr)):
a=a+abs(arr[i]-fff[i])
print(a)
| python |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-12-04 12:13
from __future__ import absolute_import, unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('icds_reports', '0080_merge_20181130_1818'),
]
operations = [
migrations.RenameField(
model_name='aggls',
old_name='unique_awc_vists',
new_name='awc_visits'
),
migrations.RenameField(
model_name='aggregatelsawcvisitform',
old_name='unique_awc_vists',
new_name='awc_visits'
)
]
| python |
import storage
import nomadlist
import wikivoyage
def build(cities=None):
index_guides(cities or index_cities())
return True
def index_cities():
cities = nomadlist.list_cities()
storage.upsert_cities(cities)
return cities
def index_guides(cities):
city_docs = map(build_guide, cities)
storage.upsert_cities(city_docs)
return True
def build_guide(city):
guide = wikivoyage.find_city(city['name'])
    # Merge the city record with its guide (wrapping in list() keeps this working on Python 3)
    return dict(list(city.items()) + list(guide.items()))
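# Usage sketch (assumes the storage, nomadlist and wikivoyage modules are configured):
#   build()                             # index every city from nomadlist, then attach guides
#   build(cities=[{'name': 'Lisbon'}])  # hypothetical pre-fetched city list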
| python |
# -*- coding: utf-8 -*-
"""
@Created on: 2019/5/23 16:23
@Author: heyao
@Description:
"""
import os
import warnings
from knowledge_graph.local_config import DevelopmentConfig
try:
from knowledge_graph.production_config import ProductionConfig
except ImportError:
warnings.warn("you dont have production config")
ProductionConfig = {}
config = dict(
default=DevelopmentConfig,
development=DevelopmentConfig,
production=ProductionConfig
)
env_name = os.environ.get("KG_CONFIG_NAME", "default")
print("you are on {env_name} server".format(env_name=env_name))
config = config[env_name]
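# Usage sketch: the active settings class is selected by the KG_CONFIG_NAME environment
# variable read above, e.g. `KG_CONFIG_NAME=production python your_app.py` (script name is hypothetical).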
| python |
"""Dataset setting and data loader for MNIST."""
import torch
from torchvision import datasets, transforms
import params
def get_svhn(train):
print("SVHN Data Loading ...")
train_dataset = datasets.SVHN(root='/home/hhjung/hhjung/SVHN/', split='train',
transform=transforms.Compose([transforms.Scale(28), transforms.ToTensor()
, transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))]),
download=True)
# test_dataset = datasets.SVHN(root='/home/hhjung/hhjung/SVHN/', split='test',
# transform=transforms.Compose([transforms.Scale(28), transforms.ToTensor()
# , transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))]),
# download=True)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=params.batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=params.batch_size, shuffle=False)
if train:
return train_loader
else:
        return test_loader
| python |
from collections import namedtuple
ConnectArgsType = namedtuple('ConnectArgsType', [
'verify', 'verify_expiration', 'key', 'audience', 'issuer', 'algorithm', 'auth_header_prefix', 'decode_options'
])
CONNECT_ARGS = ConnectArgsType(
verify=None,
verify_expiration=None,
key=None,
audience=None,
issuer=None,
algorithm=None,
auth_header_prefix=None,
decode_options=None,
)
def configure(
key,
audience,
issuer,
algorithm,
verify=True,
verify_expiration=True,
auth_header_prefix='Bearer',
decode_options=None,
):
global CONNECT_ARGS
CONNECT_ARGS = ConnectArgsType(
verify=verify,
verify_expiration=verify_expiration,
key=key,
audience=audience,
issuer=issuer,
algorithm=algorithm,
auth_header_prefix=auth_header_prefix,
decode_options=decode_options or {},
)
__all__ = ["configure", "CONNECT_ARGS"]
| python |
#!/usr/bin/python3
"""
This module contains the function is_same_class
"""
def is_same_class(obj, a_class):
"""return true if obj is the exact class a_class, otherwise false"""
return (type(obj) == a_class)
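# Examples of the exact-type check above:
#   is_same_class(1, int)    -> True
#   is_same_class(True, int) -> False  (bool is a subclass of int, not exactly int)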
| python |
from libtad.base_service import BaseService
from libtad.datatypes.places import Place
from libtad.common import XmlUtils
import libtad.constants as Constants
import xml.etree.ElementTree as ET
from urllib.parse import ParseResult, urlunparse, urlencode
from urllib.request import urlopen, Request
from ssl import SSLContext
from typing import List, Dict
class PlacesService(BaseService):
"""
The places service can be used to retrieve the list of supported places.
...
Attributes
----------
include_coordinates : bool
Return coordinates for the Geography object.
Methods
-------
get_places()
Gets list of supported places.
"""
def __init__(self, access_key: str, secret_key: str):
"""
Parameters
----------
access_key : str
Access key.
secret_key : str
Secret key.
"""
super().__init__(access_key, secret_key, "places")
self.include_coordinates: bool = True
def get_places(self) -> List[Place]:
"""
Gets list of supported places.
Returns
-------
places : list of Place
List of all currently known places, their identifiers and their
geographical location (if requested).
"""
args = self.__get_arguments()
url: str = Constants.ENTRYPOINT + "/" + self._service_name + "?" + urlencode(args)
req = Request(
url,
headers = { "User-Agent": "libtad-py"}
)
with urlopen(req, context=SSLContext()) as f:
result: str = f.read().decode("utf-8")
return self.__from_xml(result)
def __get_arguments(self) -> Dict[str, object]:
args: Dict[str, object] = self._authentication_options.copy()
args["lang"] = ",".join(self.language)
args["geo"] = int(self.include_coordinates)
args["version"] = str(self._version)
args["out"] = Constants.DEFAULTRETURNFORMAT
args["verbosetime"] = str(Constants.DEFAULTVERBOSETIMEVALUE)
return args
def __from_xml(self, result: str) -> List[Place]:
XmlUtils.check_for_errors(result)
xml: ET.Element = ET.fromstring(result)
places = xml.find("places")
return [Place(place_node) for place_node in places.findall("place")]
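# Usage sketch (keys are placeholders):
#   service = PlacesService("access_key", "secret_key")
#   service.include_coordinates = True
#   places = service.get_places()  # -> List[Place]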
| python |
#!/usr/bin/env python3
# zeroex00.com
# rfc1413
import argparse
import socket
import sys
import threading
master_results = []
master_banners = {}
master_errors = []
def main(args):
if not args.query_port and not args.all_ports:
print("[!] you must specify at least one port or -a")
exit(2)
hostname = clean_host(args.host)
ip_addr = resolve_host(hostname)
# if not check_ident_port(args.host, args.port, ip_addr):
# print("[!] Exiting...")
# exit(1)
if args.all_ports:
query_ports = list(map(str, range(1, 65536)))
q_string = "1-65535"
else:
query_ports = args.query_port
q_string = " ".join(query_ports)
print(
"[+] starting scan on {0} ({1}) {2} for connections to {3}".format(
hostname, ip_addr, args.port, q_string
)
)
try:
do_threaded_work(args.host, args.port, query_ports, verbose=args.verbose)
except KeyboardInterrupt:
print("Interrupted! Printing results:")
print_results(suppress=True, verbose=args.verbose)
print("[!] Errors suppressed on interrupt!")
exit(1)
if args.all_ports:
print_results(suppress=True, verbose=args.verbose)
print("[!] Errors suppressed on full scan!")
else:
print_results(verbose=args.verbose)
exit(0)
def parse_args(argv):
parser = argparse.ArgumentParser()
parser.add_argument("host", help="host to scan")
parser.add_argument(
"-q",
"--query-port",
nargs="+",
help="port(s) which the scan will query(ex: 22 or 21 22 23)",
)
parser.add_argument(
"-p",
"--port",
default="113",
type=int,
help="port IDENT service is listening on (default: 113)",
)
parser.add_argument(
"-a", "--all-ports", action="store_true", help="queries ALL ports!"
)
parser.add_argument(
"-v",
"--verbose",
action="count",
default=0,
help="increase verbosity - v: shows full success responses; vv: shows all open port responses",
)
return parser.parse_args(argv)
def clean_host(host):
if host.startswith("http://"):
tmp_host = host[7:]
elif host.startswith("https://"):
tmp_host = host[8:]
else:
tmp_host = host
return tmp_host
def resolve_host(host):
try:
ip = socket.gethostbyname(host)
except socket.error:
return "?.?.?.?"
return ip
def check_ident_port(host, port, ip):
print("[+] Checking if {0} ({1}) is listening on port: {2}".format(host, ip, port))
try:
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.settimeout(5)
client.connect((host, port))
except socket.error:
print("[!] {0} ({1}) is not listening on port: {2}!".format(host, ip, port))
return False
except OverflowError:
print("[!] Invalid port!: {0}".format(port))
return False
client.close()
return True
def enum_port(host, port, query_port, verbose=0):
try:
client1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client1.connect((host, query_port))
local_port = client1.getsockname()[1]
except socket.error:
master_errors.append("{0:>5}: connection refused".format(query_port))
return
except OverflowError:
master_errors.append("{0:>5}: invalid port".format(query_port))
return
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((host, port))
try:
        # Python 3: sockets take bytes, so encode the IDENT query and decode the replies
        client.send("{0},{1}\x0d\x0a".format(query_port, local_port).encode())
        results = client.recv(4096).decode(errors="replace")
        client1.settimeout(1)
        client1.send(b"\x0d\x0a")
        try:
            banner = client1.recv(4096).decode(errors="replace").strip()
        except socket.error:
            banner = ""
except Exception:
master_errors.append("{0:>5}: e".format(query_port))
client1.close()
client.close()
return
if verbose > 1:
master_results.append(results.strip())
master_banners[str(query_port)] = str(banner)
elif ": USERID :" in results:
master_results.append(results.strip())
master_banners[str(query_port)] = str(banner)
client1.close()
client.close()
def tqdm(iterable):
def report(i):
print(f"{i+1:>{formatter}}/{total}", file=sys.stderr, end="\r")
total = len(iterable)
formatter = len(str(total))
for i, el in enumerate(iterable):
yield el
report(i)
def do_threaded_work(host, port, q_ports, verbose=0):
threads = []
for i in tqdm(q_ports):
thread = threading.Thread(target=enum_port, args=(host, port, int(i), verbose))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
def print_results(suppress=False, verbose=0):
print("[*] Results:")
if verbose > 0:
print("\t(VERBOSE: Raw responses || Banners)")
elif verbose == 0:
print("\t{0:>5} {1:<20} {2}".format("Port", "Username", "Banner"))
print("\t{0:>5} {1:<20} {2}".format("----", "--------", "------"))
for each_result in master_results:
tmp_result = each_result.split(":") # ports, USERID, UNIX, username
result_port = str(tmp_result[0].split(",")[0]).strip()
result_username = tmp_result[3]
result_banner = master_banners.get(result_port, "")
if verbose > 0:
print("\t{0} || {1}".format(each_result, result_banner))
else:
print(
"\t{0:>5}: {1:<20} {2}".format(
result_port, result_username, result_banner
)
)
if suppress:
return
print("[!] Errors:")
for each_result in master_errors:
print("\t{0}".format(each_result))
if len(master_results) == 0 and len(master_errors) == 0:
print(
(
"[+] A lack of results AND errors could mean that the specified IDENT port is not actually running the "
"IDENT service"
)
)
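# Example invocations (a sketch; the script name and host are hypothetical):
#   ./ident_scan.py target.example.com -q 22 80 443
#   ./ident_scan.py target.example.com -a -v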
if __name__ == "__main__":
main(parse_args(sys.argv[1:]))
| python |
import json
import sys
from twisted.internet import reactor, defer
from twisted.web.client import getPage, HTTPClientFactory
try:
from twisted.internet import ssl
except ImportError:
ssl = None
class Jar:
def __init__(self, name_long, name_short, url):
self.name_long = list(name_long)
self.name_short = list(name_short)
self.url = str(url)
for i, l in enumerate(self.name_long):
l = l.replace(' ', '-').lower()
            if i >= len(self.name_short):
self.name_short.append(l)
elif self.name_short[i] is None:
self.name_short[i] = l
def __repr__(self):
return '-'.join(self.name_short)
class JarProvider:
major = None
def __init__(self, deferred):
self.deferred = deferred
self.response = []
self.work()
def get(self, url, callback):
d = getPage(str(url))
d.addCallback(callback)
d.addErrback(self.error)
return d
def add(self, *a, **k):
self.response.append(Jar(*a, **k))
def commit(self, d=None):
self.deferred.callback(self.response)
def error(self, d=None):
self.deferred.errback(d)
def work(self):
raise NotImplementedError
class JenkinsJarProvider(JarProvider):
base = None
project = None
name = None
def work(self):
self.get('{}job/{}/lastSuccessfulBuild/api/json'.format(self.base, self.project), self.handle_data)
def handle_data(self, data):
data = json.loads(data)
url = '{}job/{}/lastSuccessfulBuild/artifact/{}'.format(self.base, self.project, data['artifacts'][0]['relativePath'])
self.add((self.name, 'Latest'), (None, None), url)
self.commit()
modules = []
for m in ['vanilla']:
try:
name = "mk2.servers.{}".format(m)
__import__(name)
modules.append(sys.modules[name])
except ImportError:
pass
def get_raw():
d_results = defer.Deferred()
dd = [defer.succeed([])]
for mod in modules:
d = defer.Deferred()
mod.ref(d)
dd.append(d)
dd = defer.DeferredList(dd, consumeErrors=True)
def callback2(raw):
results = []
for ok, data in raw:
if ok:
results.extend(data)
else:
print("error: {}".format(data.value))
d_results.callback(results)
dd.addCallback(callback2)
return d_results
def jar_list():
d_result = defer.Deferred()
def got_results(results):
listing = ""
o = []
m = 0
for r in results:
left = '-'.join(r.name_short)
right = ' '.join(r.name_long)
m = max(m, len(left))
o.append((left, right))
for left, right in sorted(o):
listing += " %s | %s\n" % (left.ljust(m), right)
d_result.callback(listing.rstrip())
d = get_raw()
d.addCallbacks(got_results, d_result.errback)
return d_result
def jar_get(name):
d_result = defer.Deferred()
def got_data(factory, data):
filename = factory.path.split('/')[-1]
#parse the Content-Disposition header
dis = factory.response_headers.get('content-disposition', None)
if dis:
dis = dis[0].split(';')
if dis[0] == 'attachment':
for param in dis[1:]:
key, value = param.strip().split('=')
if key == 'filename':
filename = value.replace("\"", "")
d_result.callback((filename, data))
def got_results(results):
for r in results:
if name == '-'.join(r.name_short):
factory = HTTPClientFactory(r.url)
if factory.scheme == 'https':
if ssl:
reactor.connectSSL(factory.host, factory.port, factory, ssl.ClientContextFactory())
else:
d_result.errback(Exception("{} is not available because this installation does not have SSL support!".format(name)))
else:
reactor.connectTCP(factory.host, factory.port, factory)
factory.deferred.addCallback(lambda d: got_data(factory, d))
factory.deferred.addErrback(d_result.errback)
return
d_result.errback(Exception("{} is not available!".format(name)))
d = get_raw()
d.addCallbacks(got_results, d_result.errback)
return d_result
| python |
import base64
import configparser
import click
import requests
from logbook import *
# from requests.cookies import RequestsCookieJar
import controller as ctrl
from config.base_settings import CAPTCHA_MODEL_NAME, TIMEOUT, USE_PROXY
from controller.url_config import url_captcha, url_login
# from service.log import init_log
from service.proxy import update_proxy, notify_ip_address, update_cookies
from service.sipoknn import get_captcha_result
logger = Logger(__name__)
account_notify_times = 0
description = (
'''
    User account configuration module.
    Because the patent website was redesigned, logging in with an account and password is now required for advanced queries.
    Please register an account on the patent website yourself and update the USERNAME and PASSWORD values.
    Link: http://www.pss-system.gov.cn/sipopublicsearch/portal/uiregister-showRegisterPage.shtml
'''
)
class Account:
"""
    Holds the account credentials.
"""
def __init__(self):
        # Username; private by convention, access it via self.username
self._username = 'romaforever99'
        # Password; private by convention, access it via self.password
self._password = 'derossi16'
@property
def username(self):
return self._username
@username.setter
def username(self, username: str):
if username is None:
raise Exception('username invalid')
username = username.replace(' ', '')
if username == '':
raise Exception('username invalid')
self._username = username
@property
def password(self):
return self._password
@password.setter
def password(self, password: str):
if password is None or password == '':
raise Exception('password invalid')
self._password = password
def check_username(self, cfg: configparser.ConfigParser):
"""
        Validate and set the username.
:param cfg:
:return:
"""
try:
username = cfg.get('account', 'username')
self.username = username
except:
click.echo(description)
while True:
try:
                    username = click.prompt('Invalid username, please enter it')
self.username = username
break
except:
pass
def check_password(self, cfg: configparser.ConfigParser):
"""
        Validate and set the password.
:param cfg:
:return:
"""
try:
password = cfg.get('account', 'password')
self.password = password
except:
while True:
try:
                    password = click.prompt('Invalid password, please enter it')
self.password = password
break
except:
pass
# Singleton account instance
account = Account()
def change_to_base64(source):
"""
    Base64-encode the given argument (base64 is an encoding, not encryption)
:param source:
:return:
"""
return str(base64.b64encode(bytes(source, encoding='utf-8')), 'utf-8')
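# Worked example: change_to_base64('user') returns 'dXNlcg==' (base64 of the UTF-8 bytes).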
def get_captcha():
"""
    Fetch the captcha image and return the recognized text
:return:
"""
resp = requests.get(url=url_captcha.get('url'), cookies=ctrl.COOKIES, proxies=ctrl.PROXIES)
with open('captcha.png', 'wb') as f:
f.write(resp.content)
result = get_captcha_result(CAPTCHA_MODEL_NAME, 'captcha.png')
return result
def check_login_status():
if USE_PROXY:
try:
if ctrl.PROXIES is not None:
notify_ip_address()
                logger.info('Already logged in')
return True
except:
pass
return False
def login(username=None, password=None):
"""
    Login API
    :return: True: login succeeded; False: login failed
"""
if username is None or password is None:
username = account.username
password = account.password
ctrl.BEING_LOG = True
if check_login_status():
ctrl.BEING_LOG = False
return True
error_times = 0
while True:
try:
# logger.debug("before proxy")
update_proxy()
# logger.debug("before cookie")
update_cookies()
# logger.debug("after cookie")
busername = change_to_base64(username)
bpassword = change_to_base64(password)
captcha = get_captcha()
            logger.info('Captcha recognition result: %s' % captcha)
form_data = url_login.get('form_data')
form_data.__setitem__('j_validation_code', captcha)
form_data.__setitem__('j_username', busername)
form_data.__setitem__('j_password', bpassword)
resp = requests.post(url=url_login.get('url'), headers=url_login.get('headers'), data=form_data,
cookies=ctrl.COOKIES, proxies=ctrl.PROXIES, timeout=TIMEOUT)
            if resp.text.find(username + ',欢迎访问') != -1:  # matches the site's Chinese "welcome" banner verbatim
                # The website changed its logic; the lines below are no longer needed
# print(resp.cookies)
# ctrl.COOKIES.__delitem__('IS_LOGIN')
# ctrl.COOKIES.set('IS_LOGIN', 'true', domain='www.pss-system.gov.cn/sipopublicsearch/patentsearch')
jsession = ctrl.COOKIES.get('JSESSIONID')
resp.cookies.__delitem__('JSESSIONID')
resp.cookies.set('JSESSIONID', jsession, domain='www.pss-system.gov.cn')
update_cookies(resp.cookies)
requests.post(
'http://www.pss-system.gov.cn/sipopublicsearch/patentsearch/showViewList-jumpToView.shtml',
cookies=ctrl.COOKIES, proxies=ctrl.PROXIES)
ctrl.BEING_LOG = False
                logger.info('Login successful')
return True
else:
if error_times > 5:
break
                logger.error('Login failed')
error_times += 1
except Exception as e:
logger.error(e)
ctrl.BEING_LOG = False
return False
if __name__ == '__main__':
pass
#init_log()
#login('', '')
#print(notify_ip_address())
#resp = requests.post('http://www.pss-system.gov.cn/sipopublicsearch/patentsearch/showViewList-jumpToView.shtml', cookies=ctrl.COOKIES)
#print(resp.text)
#form_data = url_detail.get('form_data')
# # '''
# # 'nrdAn': '',
# # 'cid': '',
# # 'sid': '',
# # 'wee.bizlog.modulelevel': '0201101'
# # '''
#form_data.__setitem__('nrdAn', 'CN201520137687')
#form_data.__setitem__('cid', 'CN201520137687.320150916XX')
#form_data.__setitem__('sid', 'CN201520137687.320150916XX')
#print(ctrl.COOKIES)
#resp = requests.post(url_detail.get('url'), headers=url_detail.get('headers'), cookies=ctrl.COOKIES, data=form_data)
#print(resp.text)
| python |
from scipy import constants  # assumed import so this standalone lookup runs

constants.physical_constants["neutron to shielded proton mag. mom. ratio"]
| python |
""" Test for building manifests for COMBINE archives
:Author: Jonathan Karr <[email protected]>
:Date: 2021-07-19
:Copyright: 2021, Center for Reproducible Biomedical Modeling
:License: MIT
"""
from biomodels_qc.utils import EXTENSION_COMBINE_FORMAT_MAP
import os
import unittest
class CombineArchiveCreationTestCase(unittest.TestCase):
def test_support_for_all_file_extensions(self):
base_dir = os.path.join(os.path.dirname(__file__), '..', 'final')
exts = set()
for root, dirs, files in os.walk(base_dir):
for name in files:
_, ext = os.path.splitext(name)
assert ext and ext[0] == '.', \
"`{}` does not have an extension".format(os.path.relpath(os.path.join(root, name), base_dir))
exts.add(ext)
unsupported_exts = exts.difference(set(EXTENSION_COMBINE_FORMAT_MAP.keys()))
if unsupported_exts:
msg = (
'biomodels_qc.utils.EXTENSION_COMBINE_FORMAT_MAP '
'must be extended to support these additional extensions:\n {}'
).format('\n '.join(sorted(unsupported_exts)))
raise NotImplementedError(msg)
| python |
from . import CostFunctions
from . import ActivationFunctions
from . import PyNet
#from . import tfNet #got rid of tensorflow
from . import Autoencoder
from .NeuralNetwork import NeuralNetwork ,NeuralNetworkArray
from .EvolutionaryNeuralNetwork import EvolutionaryNeuralNetwork,PyEvolutionaryNeuralNetwork
from .Tests import ReadCancerData,GetCancerNN
from ._CppInterface import _CreateNetwork,_DestroyNetwork,_LoadNetwork,_SaveNetwork,_InputTrainingData,_InputCrossValidationData,_TrainGradientDescent,_TrainGradientDescentSingle,_GetnSteps,_GetTrainingProgress,_GetCrossValidationProgress,_ClassifyData,_GetL,_Gets,_CreateEvolutionaryNetwork,_DestroyEvolutionaryNetwork,_EvolutionaryNetworkInputData,_EvolutionaryNetworkEvolve
from .MNIST import ReadMNISTLabels,ReadMNISTImages,GetMNISTData,ReadDigit,ReadDigits,_GetFrameRenderer,AnimateMNISTAutoencoder,MNISTAutoEncoder,MNISTClassifier
#---Custom---#
from . import Globals
#---EndCustom---#
| python |
{
"targets": [
{
"target_name": "node_ovhook",
"cflags!": ["-fno-exceptions"],
"cflags_cc!": ["-fno-exceptions"],
"include_dirs": [
"<!@(node -p \"require('node-addon-api').include\")",
"./src",
],
'defines': ['NAPI_DISABLE_CPP_EXCEPTIONS', 'UNICODE'],
"sources": [
"./src/utils.hpp",
"./src/main.cc"
],
"libraries": ["user32.lib", "Psapi.lib"]
}
]
}
| python |
import ephem
from datetime import datetime
import pandas as pd
import numpy as np
import requests
from flask import Flask, render_template, session, redirect, request
import folium
import geocoder
app = Flask(__name__)
def get_latlng():
#Get user lat long via IP address
myloc = geocoder.ip('me')
return myloc.latlng
#https://stackoverflow.com/questions/19513212/can-i-get-the-altitude-with-geopy-in-python-with-longitude-latitude
#Credit: Iain D (https://stackoverflow.com/users/4486474/iain-d)
#Date: March 28, 2021
#This takes around 20ish seconds to run, if elevation not found, just returns 0
def get_elevation(lat, long):
query = ('https://api.open-elevation.com/api/v1/lookup'f'?locations={lat},{long}')
r = requests.get(query).json() # json object, various ways you can extract value
# extract elevation
elevation = pd.json_normalize(r, 'results')['elevation'].values[0]
return elevation
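# Usage sketch (coordinates are hypothetical): get_elevation(45.42, -75.69) returns the
# elevation in metres reported by the open-elevation API for that point.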
def make_observer(lat, long, elev):
obs = ephem.Observer()
obs.lat = lat
obs.lon = long
obs.elevation = elev
obs.date = datetime.now().strftime("%Y/%m/%d %H:%M:%S")
return obs
def calculate_visible(obs, map):
df = pd.read_csv('active.txt', delimiter = "\n", header= None)
#Reshape dataframe into three column dataframe
#Is there a better way to do this? Instead of reading in as a dataframe then reshaping, can we read it in a 3 column data frame?
#https://stackoverflow.com/questions/39761366/transpose-the-data-in-a-column-every-nth-rows-in-pandas
#Credit: jezrael (https://stackoverflow.com/users/2901002/jezrael)
new_df = pd.DataFrame(np.reshape(df.values,(int(df.shape[0] / 3),3)),columns=['Name','Line 1','Line 2'])
#Parse TLE data
for index, row in new_df.iterrows():
tle_rec = ephem.readtle(row['Name'], row['Line 1'], row['Line 2'])
#Perform TLE computations given some observer object
tle_rec.compute(obs)
#if altitude over local horizon > 0
if tle_rec.alt > 0:
coords = [tle_rec.sublat / ephem.degree, tle_rec.sublong / ephem.degree]
folium.Marker(coords, popup = tle_rec.name).add_to(map)
def generate_map(latlng):
#Get user lat long via IP address
myloc = geocoder.ip('me')
map = folium.Map(location = latlng, zoom_start = 13)
return map
@app.route('/')
def index():
return render_template('index.html')
@app.route('/map', methods=['GET', 'POST'])
def show_map():
#https://pythonise.com/series/learning-flask/flask-working-with-forms
#Author: Julian Nash
#Date: 2021-03-21
if request.method == 'POST':
req = request.form
auto_latlng = get_latlng()
#If blank, use values from geoIP
if req.get("latitude") == '':
latitude = auto_latlng[0]
else:
try:
#try to turn input value into float
latitude = float(req.get("latitude"))
#valid values for latitude are between -90 and 90
if latitude > 90.0 or latitude < -90.0:
return render_template('index.html')
except:
#return to main page if invalid input
return render_template('index.html')
#If blank, use values from geoIP
if req.get("longitude") == '':
longitude = auto_latlng[1]
else:
try:
#try to turn input value into float
longitude = float(req.get("longitude"))
#valid values for longitude are between -180 and 180
if longitude > 180.0 or longitude < -180.0:
return render_template('index.html')
except:
#return to main page if invalid input
return render_template('index.html')
#If blank, use values from geoIP
if req.get("elevation") == '':
elevation = get_elevation(latitude, longitude)
else:
try:
#try to turn input value into float
#allow any numeric values
elevation = float(req.get("elevation"))
except:
#return to main page if invalid input
return render_template('index.html')
latlng = [latitude, longitude]
map = generate_map(latlng)
obs = make_observer(latitude, longitude, elevation)
#TLE CALCULATION HERE
calculate_visible(obs, map)
return map._repr_html_()
return render_template('index.html')
if __name__ == '__main__':
app.run()
| python |
from bcc import BPF
# Hello BPF Program
bpf_text = """
#include <net/inet_sock.h>
#include <bcc/proto.h>
// 1. Attach kprobe to "inet_listen"
int kprobe__inet_listen(struct pt_regs *ctx, struct socket *sock, int backlog)
{
bpf_trace_printk("Hello World!\\n");
return 0;
};
int kprobe__ip_rcv(struct pt_regs *ctx, struct sk_buff *skb)
{
bpf_trace_printk("ip_rcv!\\n");
return 0;
};
"""
# 2. Build and Inject program
b = BPF(text=bpf_text)
# 3. Print debug output
while True:
    print(b.trace_readline())
"""
The first argument to int kprobe__<fn_name>(struct pt_regs *ctx, ...)
is always struct pt_regs *ctx
after that it is the list of arguments the <fn> takes and its optional to have.
"""
| python |
from tensornetwork.block_sparse import index
from tensornetwork.block_sparse import charge
from tensornetwork.block_sparse import blocksparsetensor
from tensornetwork.block_sparse import linalg
from tensornetwork.block_sparse.blocksparsetensor import (BlockSparseTensor,
ChargeArray,
tensordot,
outerproduct,
compare_shapes)
from tensornetwork.block_sparse.linalg import (svd, qr, diag, sqrt, trace, inv,#pylint: disable=redefined-builtin
pinv, eye, eigh, eig, conj,
reshape, transpose, norm, abs,
sign)
from tensornetwork.block_sparse.initialization import (zeros, ones, randn,
random, empty_like,
ones_like, zeros_like,
randn_like, random_like)
from tensornetwork.block_sparse.index import Index
from tensornetwork.block_sparse.caching import (get_cacher, enable_caching,
disable_caching, clear_cache,
get_caching_status,
set_caching_status)
from tensornetwork.block_sparse.charge import (U1Charge, BaseCharge, Z2Charge,
ZNCharge)
| python |
from SuperImpose import SuperImpose
class SuperImposeFields(SuperImpose):
'''
SuperImpose subclass implementing specific functionality to
super-impose all field images on background image
'''
@staticmethod
def run_super_impose_on_all_fields(back_rgba_img, fields_img_loc_dict):
'''
Inputs:
back_rgba_img: PIL object - background image in RGBA format
fields_img_loc_dict: Dict - dictionary i.e.:
{field_name: {img: PIL object, loc: (x,y) }}
Return:
superimposed_img: PIL object - superimposed image in RGBA format
'''
superimposed_img = back_rgba_img.copy()
for field_name, img_loc in fields_img_loc_dict.items():
field_img = img_loc['img']
field_loc = img_loc['loc']
superimposed_img = SuperImpose.super_impose(superimposed_img, field_img, field_loc[0], field_loc[1])
return superimposed_img
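# Usage sketch (field names, images and locations are hypothetical):
#   fields = {'title': {'img': title_img, 'loc': (10, 20)},
#             'date':  {'img': date_img,  'loc': (10, 60)}}
#   result = SuperImposeFields.run_super_impose_on_all_fields(background_rgba, fields)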
| python |
from django.urls import path
from . import views
app_name = 'articles'
urlpatterns = [
path('', views.index, name ='index'),
path('<int:article_id>/', views.detail, name ='detail'),
path('<int:article_id>/leave_comment/', views.leave_comment, name ='leave_comment'),
]
| python |
# JEWELS AND STONES LEETCODE SOLUTION:
# creating a class.
class Solution(object):
# creating a function to solve the problem.
def numJewelsInStones(self, jewels, stones):
# creating a variable to track the count.
count = 0
# creating a for-loop to iterate for the elements in the stones.
for i in stones:
# creating a nested if-statement to check if the elements in the stones are jewels.
if i in jewels:
# code to increment the count if the condition is met.
count += 1
# returning the value of the count.
return count
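# Worked example: Solution().numJewelsInStones("aA", "aAAbbbb") returns 3
# ('a' appears once and 'A' twice among the stones).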
| python |
[
{
'inputs': ['formula'],
'output': 'Property Band gap'
}, {
'inputs': ['formula', 'Temperature (Property Band gap)'],
'output': 'Property Band gap'
}, {
'inputs': ['formula'],
'output': 'Property Color'
},{
'inputs': ['formula', 'Property Band gap'],
'output': 'Property Color'
},
]
| python |
#-*- encoding:utf-8 -*-
import json
import unittest
import responses
try:
from unittest import mock
except:
import mock
from nta import (
NaverTalkApi
)
from nta.models import(
CompositeContent, Composite, ElementData, ElementList,
ButtonText, ButtonLink, ButtonCalendar, QuickReply
)
class TestNaverTalkAPI(unittest.TestCase):
def setUp(self):
self.tested = NaverTalkApi('test_naver_talk_access_token')
@responses.activate
def test_send_composite(self):
responses.add(
responses.POST,
NaverTalkApi.DEFAULT_API_ENDPOINT,
json={
"success": True,
"resultCode": "00"
},
status=200
)
counter = mock.MagicMock()
def test_callback(res, payload):
self.assertEqual(res.result_code, "00")
self.assertEqual(res.success, True)
self.assertEqual(
payload.as_json_dict(),
{
'event': 'send',
'user': 'test_user_id',
'compositeContent': {
'compositeList': [
{
'title': 'test_title',
'description': 'test_descript',
'image': {
'imageUrl': 'test_image'
},
'elementList':{
'type': 'LIST',
'data': [
{
'title': 'test_ed_title',
'description': 'test_ed_descript',
'subDescription': 'test_ed_subdescript',
'image': {
'imageUrl': 'test_ed_image'
},
'button':{
'type': 'TEXT',
'data': {
'title': 'test'
}
}
}
]
},
'buttonList': None
}
]
},
'options': {
'notification': False
}
}
)
counter()
self.tested.send(
'test_user_id',
message=CompositeContent(
composite_list=[
Composite(
title='test_title',
description='test_descript',
image='test_image',
element_list=ElementList([
ElementData(
title='test_ed_title',
description='test_ed_descript',
sub_description='test_ed_subdescript',
image='test_ed_image',
button=ButtonText('test')
)
])
)
]
),
callback=test_callback
)
self.assertEqual(counter.call_count, 1)
@responses.activate
def test_send_composite_with_quick_reply(self):
responses.add(
responses.POST,
NaverTalkApi.DEFAULT_API_ENDPOINT,
json={
"success": True,
"resultCode": "00"
},
status=200
)
counter = mock.MagicMock()
def test_callback(res, payload):
self.assertEqual(res.result_code, "00")
self.assertEqual(res.success, True)
self.assertEqual(
payload.as_json_dict(),
{
'event': 'send',
'user': 'test_user_id',
'compositeContent': {
'compositeList': [
{
'title': 'test_title',
'description': None,
'elementList': None,
'buttonList': None
}
],
'quickReply': {
'buttonList': [{
'data': {
'code': 'PAYLOAD',
'title': 'text'},
'type': 'TEXT'},
{
'data': {
'mobileUrl': None,
'title': 'text',
'url': 'PAYLOAD'},
'type': 'LINK'}]}
},
'options': {
'notification': False
}
}
)
counter()
self.tested.send(
'test_user_id',
message=CompositeContent(
composite_list=[
Composite(
title='test_title'
)
]
),
quick_reply=QuickReply(
[
{'type': 'TEXT', 'title': 'text', 'value': 'PAYLOAD'},
{'type': 'LINK', 'title': 'text', 'value': 'PAYLOAD'}
]
),
callback=test_callback
)
self.assertEqual(counter.call_count, 1)
self.tested.send(
'test_user_id',
message=CompositeContent(
composite_list=[
Composite(
title='test_title'
)
],
quick_reply=[
ButtonText('text', 'PAYLOAD'),
ButtonLink('text', 'PAYLOAD')
]
),
callback=test_callback
)
self.assertEqual(counter.call_count, 2)
@responses.activate
def test_composite_with_calendar(self):
responses.add(
responses.POST,
NaverTalkApi.DEFAULT_API_ENDPOINT,
json={
"success": True,
"resultCode": "00"
},
status=200
)
counter = mock.MagicMock()
def test_callback(res, payload):
target = {
"event": "send",
"user": "test_user_id",
"compositeContent": {
"compositeList": [
{
"title": "톡톡 레스토랑",
"description": "파스타가 맛있는집",
'elementList': None,
"buttonList": [
{
"type": "CALENDAR",
"data": {
"title": "방문 날짜 선택하기",
"code": "code_for_your_bot",
"options": {
"calendar": {
"placeholder": "방문 날짜를 선택해주세요.",
"start": "20180301",
"end": "20180430",
"disables": "1,20180309,20180315-20180316"
}
}
}
}
]
}
]
},
'options': {
'notification': False
}
}
self.assertEqual(target, payload.as_json_dict())
counter()
self.tested.send(
"test_user_id",
message=CompositeContent(
composite_list=[
Composite(
title= "톡톡 레스토랑",
description="파스타가 맛있는집",
button_list=[
ButtonCalendar(
title="방문 날짜 선택하기",
code="code_for_your_bot",
placeholder="방문 날짜를 선택해주세요.",
start="20180301",
end="20180430",
disables="1,20180309,20180315-20180316"
)
]
)
]
),
callback=test_callback
)
        self.assertEqual(counter.call_count, 1)
| python |
from flask import Flask, request, Response
import requests, json
app = Flask(__name__)
@app.route('/webhook', methods=["POST"])
def webhook():
print("Request received!")
    print(request.json)
return relay(request.json)
def relay(data):
print("Relaying Request with data :" + json.dumps(data))
response = requests.post('REPLACEWITHDISCORDWEBHOOKURL_KEEPITSECRET', json=data)
print(response.status_code)
print(response.text)
return Response(status=response.status_code)
| python |
import unittest.mock as mock
from mtsync.action import ActionKind
from mtsync.connection import Connection
from mtsync.imagined import Imagined
from mtsync.settings import Settings
from mtsync.synchronizer import Synchronizer
from rich.console import Console
from testslide import StrictMock
from testslide.dsl import context
@context
def SynchronizerTest(context):
@context.before
async def prepare(self):
self.console = Console()
self.settings = Settings()
self.connection = StrictMock(template=Connection)
self.synchronizer = Synchronizer(
console=self.console,
connection=self.connection,
)
@context.sub_context
def score(context):
@context.sub_context
def test_equality(context):
@context.example
async def simple(self):
self.assertTrue(
Synchronizer._test_equality(
a={
".id": "1",
"field-a": "a",
"field-b": "b",
},
b={
".id": "1",
"field-a": "a",
"field-b": "b",
},
)
)
self.assertFalse(
Synchronizer._test_equality(
a={
".id": "1",
"field-a": "a",
"field-b": "b",
},
b={
".id": "1",
"field-a": "a",
"field-b": "bbb",
},
)
)
@context.example
async def without_id(self):
self.assertTrue(
Synchronizer._test_equality(
a={
".id": "1",
"field-a": "a",
"field-b": "b",
},
b={
"field-a": "a",
"field-b": "b",
},
)
)
@context.sub_context
def analyze(context):
@context.sub_context
def triage(context):
@context.before
async def prepare(self):
self.m_analyze_list = mock.patch.object(
self.synchronizer, "_analyze_list"
).__enter__()
self.m_analyze_dict = mock.patch.object(
self.synchronizer, "_analyze_dict"
).__enter__()
@context.sub_context
def empty(context):
@context.example
async def dict(self):
self.assertEqual(
await self.synchronizer._analyze(current_path="", tree={}),
[],
)
self.m_analyze_list.assert_not_called()
self.m_analyze_dict.assert_not_called()
@context.example
async def list(self):
with self.assertRaises(Exception):
await self.synchronizer._analyze(current_path="", tree=[])
self.m_analyze_list.assert_not_called()
self.m_analyze_dict.assert_not_called()
@context.example
async def none(self):
self.assertEqual(
await self.synchronizer._analyze(current_path="", tree=None),
[],
)
self.m_analyze_list.assert_not_called()
self.m_analyze_dict.assert_not_called()
@context.sub_context
def list(context):
@context.example
async def simple(self):
inner_list = [
{
"interface": "bridge",
"address": "2010::7/64",
"disabled": "false",
},
{
"interface": "bridge",
"address": "2010::1/64",
"disabled": "false",
"comment": "Hello worldd!",
},
]
await self.synchronizer._analyze(
current_path="",
tree={
"ipv6": {
"address": inner_list,
}
},
)
self.m_analyze_list.assert_called_with(
current_path="/ipv6/address",
analyzed_list=inner_list,
)
@context.sub_context
def dict(context):
@context.example
async def simple(self):
inner_dict = {"rp-filter": "no"}
await self.synchronizer._analyze(
current_path="",
tree={
"ip": {
"settings": inner_dict,
}
},
)
self.m_analyze_dict.assert_called_with(
current_path="/ip/settings",
analyzed_dict=inner_dict,
)
@context.sub_context
def dict(context):
@context.example
async def simple(self):
desired_dict = {
"rp-filter": "no",
"other-setting": "no",
}
self.mock_async_callable(self.connection, "get").to_return_value(
{
"rp-filter": "yes",
"other-setting": "no",
}
).and_assert_called_once()
response = await self.synchronizer._analyze_dict(
current_path="/ip/settings",
analyzed_dict=desired_dict,
)
self.assertEqual(len(response), 1)
self.assertEqual(response[0].set_dict["rp-filter"], "no")
self.assertEqual(response[0].set_dict["other-setting"], "no")
@context.example
async def no_op(self):
desired_dict = {
"rp-filter": "no",
"other-setting": "no",
}
self.mock_async_callable(self.connection, "get").to_return_value(
{
"rp-filter": "no",
"other-setting": "no",
}
).and_assert_called_once()
response = await self.synchronizer._analyze_dict(
current_path="/ip/settings",
analyzed_dict=desired_dict,
)
self.assertEqual(len(response), 0)
@context.sub_context
def list(context):
@context.sub_context
def triage(context):
pass # @TODO
@context.sub_context
def add_remove(context):
pass # @TODO
@context.sub_context
def reorder(context):
@context.example
async def simple(self):
actions = await self.synchronizer._analyze_list_reorder(
current_path="/ip/example",
imagined_items=Imagined(
initial_state=[
{"field": "value2", ".id": "1"},
{"field": "value3", ".id": "2"},
{"field": "value1", ".id": "3"},
]
),
desired_items=[
{"field": "value1"},
{"field": "value2"},
{"field": "value3"},
],
)
self.assertEqual(len(actions), 1, f"Got actions: {actions}")
action = actions[0]
self.assertEqual(action.kind, ActionKind.POST)
self.assertEqual(action.path, "/ip/example/move")
self.assertEqual(
action.set_dict,
{
"numbers": "3",
"destination": "1",
},
)
@context.example
async def same(self):
actions = await self.synchronizer._analyze_list_reorder(
current_path="/ip/example",
imagined_items=Imagined(
initial_state=[
{"field": "value", ".id": "1"},
{"field": "value", ".id": "2"},
]
),
desired_items=[
{"field": "value"},
{"field": "value"},
],
)
self.assertEqual(len(actions), 0, f"Got actions: {actions}")
@context.example
async def long(self):
actions = await self.synchronizer._analyze_list_reorder(
current_path="/ip/example",
imagined_items=Imagined(
initial_state=[
{"field": "value2", ".id": "1"},
{"field": "value3", ".id": "2"},
{"field": "value1", ".id": "3"},
{"field": "value5", ".id": "4"},
{"field": "value4", ".id": "5"},
{"field": "value6", ".id": "6"},
]
),
desired_items=[
{"field": "value1"},
{"field": "value2"},
{"field": "value3"},
{"field": "value4"},
{"field": "value5"},
{"field": "value6"},
],
)
self.assertEqual(len(actions), 2)
self.assertEqual(
actions[0].set_dict,
{"numbers": "3", "destination": "1"},
)
self.assertEqual(
actions[1].set_dict,
{"numbers": "5", "destination": "4"},
)
| python |
# Generated by Django 2.0.2 on 2018-08-15 16:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0040_page_draft_title'),
('pymba', '0006_auto_20180316_1857'),
]
operations = [
migrations.CreateModel(
name='PymbaIndexPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('introduction', models.TextField(blank=True, help_text='Text to describe the page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.AlterField(
model_name='pymbafinishingpage',
name='color',
field=models.CharField(blank=True, help_text='Accepts hex (#ffffff) or HTML color', max_length=250, null=True),
),
migrations.AlterField(
model_name='pymbafinishingpage',
name='image',
field=models.ForeignKey(blank=True, help_text='Sets the finishing general appearance', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'),
),
migrations.AlterField(
model_name='pymbafinishingpage',
name='intro',
field=models.CharField(blank=True, help_text='Finishing description', max_length=250, null=True),
),
migrations.AlterField(
model_name='pymbafinishingpage',
name='pattern',
field=models.BooleanField(default=False, help_text='Is it a 1x1 meter pattern?'),
),
migrations.AlterField(
model_name='pymbafinishingpage',
name='skirting_color',
field=models.CharField(default='white', help_text='Accepts hex (#ffffff) or HTML color', max_length=250),
),
migrations.AlterField(
model_name='pymbafinishingpage',
name='skirting_height',
            field=models.CharField(default='0', help_text='Skirting height from floor in cm', max_length=250),
),
migrations.AlterField(
model_name='pymbafinishingpage',
name='skirting_image',
field=models.ForeignKey(blank=True, help_text='Sets the skirting general appearance', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'),
),
migrations.AlterField(
model_name='pymbafinishingpage',
name='skirting_pattern',
field=models.BooleanField(default=False, help_text='Is it a 1x1 meter pattern?'),
),
migrations.AlterField(
model_name='pymbafinishingpage',
name='tiling_color',
field=models.CharField(default='white', help_text='Accepts hex (#ffffff) or HTML color', max_length=250),
),
migrations.AlterField(
model_name='pymbafinishingpage',
name='tiling_height',
field=models.CharField(default='0', help_text='Tiling height from floor in cm', max_length=250),
),
migrations.AlterField(
model_name='pymbafinishingpage',
name='tiling_image',
field=models.ForeignKey(blank=True, help_text='Sets the tiling general appearance', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'),
),
migrations.AlterField(
model_name='pymbafinishingpage',
name='tiling_pattern',
field=models.BooleanField(default=False, help_text='Is it a 1x1 meter pattern?'),
),
migrations.AlterField(
model_name='pymbapage',
name='double_face',
            field=models.BooleanField(default=False, help_text='Are planes visible on both sides?'),
),
migrations.AlterField(
model_name='pymbapage',
name='dxf_file',
field=models.ForeignKey(help_text='CAD file of your project', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtaildocs.Document'),
),
migrations.AlterField(
model_name='pymbapage',
name='equirectangular_image',
field=models.ForeignKey(blank=True, help_text='Landscape surrounding your project', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'),
),
migrations.AlterField(
model_name='pymbapage',
name='fly_camera',
field=models.BooleanField(default=False, help_text='Vertical movement of camera?'),
),
migrations.AlterField(
model_name='pymbapage',
name='intro',
field=models.CharField(blank=True, help_text='Project description', max_length=250, null=True),
),
migrations.AlterField(
model_name='pymbapage',
name='shadows',
field=models.BooleanField(default=False, help_text='Want to cast shadows?'),
),
migrations.AlterField(
model_name='pymbapagematerialimage',
name='color',
field=models.CharField(default='white', help_text='Accepts hex (#ffffff) or HTML color', max_length=250),
),
migrations.AlterField(
model_name='pymbapagematerialimage',
name='image',
field=models.ForeignKey(blank=True, help_text='Sets general appearance of material', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'),
),
migrations.AlterField(
model_name='pymbapagematerialimage',
name='layer',
field=models.CharField(default='0', help_text='Layer name in CAD file', max_length=250),
),
migrations.AlterField(
model_name='pymbapagematerialimage',
name='pattern',
field=models.BooleanField(default=False, help_text='Is it a 1x1 meter pattern?'),
),
migrations.AlterField(
model_name='pymbapartitionpage',
name='color',
field=models.CharField(blank=True, help_text='Accepts hex (#ffffff) or HTML color', max_length=250, null=True),
),
migrations.AlterField(
model_name='pymbapartitionpage',
name='image',
field=models.ForeignKey(blank=True, help_text='Sets the partition general appearance', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'),
),
migrations.AlterField(
model_name='pymbapartitionpage',
name='intro',
field=models.CharField(blank=True, help_text='Partition description', max_length=250, null=True),
),
migrations.AlterField(
model_name='pymbapartitionpage',
name='pattern',
field=models.BooleanField(default=False, help_text='Is it a 1x1 meter pattern?'),
),
migrations.AlterField(
model_name='pymbapartitionpagelayers',
name='material',
field=models.CharField(default='brick', help_text='Material description', max_length=250),
),
migrations.AlterField(
model_name='pymbapartitionpagelayers',
name='thickness',
field=models.CharField(default='0', help_text='In centimeters', max_length=250),
),
migrations.AlterField(
model_name='pymbapartitionpagelayers',
name='weight',
field=models.CharField(default='0', help_text='In kilos per cubic meter', max_length=250),
),
]
| python |
'''
This script provides helper functions used by the network to evaluate images.
If this script is run directly, it reports the object_dc-score of each segmented object in the prediction with respect to the groundtruth.
'''
import os
import skimage
import scipy
import numpy as np
import matplotlib.pyplot as plt
#####################################
# Plotting functions #
#####################################
def plot_img_and_hist(image, axes, bins=256):
"""Plot an image along with its histogram and cumulative histogram.
Source: https://scikit-image.org/docs/stable/auto_examples/color_exposure/plot_equalize.html#sphx-glr-auto-examples-color-exposure-plot-equalize-py
"""
image = skimage.img_as_float(image)
ax_img, ax_hist = axes
ax_cdf = ax_hist.twinx()
# Display image
ax_img.imshow(image, cmap=plt.cm.gray)
ax_img.set_axis_off()
# Display histogram
ax_hist.hist(image.ravel(), bins=bins, histtype='step', color='black')
ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
ax_hist.set_xlabel('Pixel intensity')
ax_hist.set_xlim(0, 1)
ax_hist.set_yticks([])
# Display cumulative distribution
img_cdf, bins = skimage.exposure.cumulative_distribution(image, bins)
ax_cdf.plot(bins, img_cdf, 'r')
ax_cdf.set_yticks([])
return ax_img, ax_hist, ax_cdf
def plot_img_and_segmentations(imgs_dict, names_list, color_list):
fig, axs = plt.subplots(1, len(names_list), figsize=(5 * len(names_list),5))
#plt.title('Visualization of data and prediction')
for ax, img_name, colormap in zip(axs, names_list, color_list):
pic = imgs_dict[img_name]
ax.imshow(pic, cmap=colormap)
ax.axis('off')
ax.set_title(img_name.capitalize())
plt.show()
return
def plot_img_and_segm_overlayed(img, msks_dict, msk_names_list, color_list, change_bg_color_list):
fig, axs = plt.subplots(len(msk_names_list), 1, figsize=(15, 15 * len(msk_names_list)))
for ax, msk_name, colormap, change_bg in zip(axs, msk_names_list, color_list, change_bg_color_list):
ax.imshow(img)
if change_bg:
overlay_mask = msks_dict[msk_name]
else:
overlay_mask = np.ma.masked_array(msks_dict[msk_name], msks_dict[msk_name] == 0)
ax.imshow(overlay_mask, colormap, alpha=0.5)
ax.axis('off')
ax.set_title(msk_name.capitalize())
plt.show()
def plot_segmentations_dice(imgs_dict, names_list, label_list):
fig, axs = plt.subplots(1, len(names_list), figsize=(len(names_list) * 10, 10))
handles = label_list
# plt.title('Visualization of data and prediction')
for ax, msk_name, in zip(axs, names_list):
pic = imgs_dict[msk_name]
ax.imshow(pic * 255)
ax.axis('off')
subtitle = msk_name + " comparison"
ax.set_title(subtitle.capitalize())
ax.legend(handles=handles)
plt.show()
return
####################################
# Metric, Micron extraction #
####################################
def dice_coeff_numpy(y_true, y_pred):
intersection = np.sum(y_true * y_pred)
score = (2 * intersection + 1.) / (y_true.sum() + y_pred.sum() + 1.)
return score
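# Worked example (illustrative): for two binary masks
#   a = np.array([[1, 1], [0, 0]]); b = np.array([[1, 0], [0, 0]])
# the intersection is 1, so dice_coeff_numpy(a, b) = (2*1 + 1) / (2 + 1 + 1) = 0.75.
# The +1 smoothing terms make two empty masks score 1.0 instead of dividing by zero.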
def get_micron_info(pathtofile, filename):
"""
Returns the pixel per micron ratio for x and y.
Works with .tif images from ImageJ
Parameters:
-----------
pathtofile: string
path of the folder where the file is in
filename: string
name of the file
Returns:
--------
(pix mic x, pix mic y) tuple
Tuple with the pixel per micron ratio for x and y
"""
# Load microns unit
with skimage.external.tifffile.TiffFile(os.path.join(pathtofile, filename)) as tif:
metadata = tif.info()
# Find info about pixels per micron
x_pos = metadata.find("* 282 x_resolution")
y_pos = metadata.find("* 283 y_resolution")
pixel_per_micron_x = float(metadata[x_pos + 25: x_pos + 32]) * 0.000001
pixel_per_micron_y = float(metadata[y_pos + 25: y_pos + 32]) * 0.000001
if pixel_per_micron_x != pixel_per_micron_y:
print("Error. The resolution in micron in x and y are different. ",
"Please check the image. If there is no error in the image, this has to be implemented!",
"get_micron_info will return nothing.")
return
return (pixel_per_micron_x, pixel_per_micron_y)
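# Illustrative usage (a sketch; the path, file name and pixel-count variable are hypothetical):
#   ppm_x, ppm_y = get_micron_info("/data/images", "glomerulus.tif")
#   area_um2 = region_pixel_count / (ppm_x * ppm_y)   # pixel count -> square microns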
####################################
# Area analyis of images #
####################################
def get_zero_area_in_img(image, area_threshold=0.1):
"""
Finds the sliced away area in an image
Parameters:
-----------
image: array
with shape e.g. (1024, 1024, 3)
values in [0,1]
area_threshold: float
values in [0,1]
percentage of zero_area size necessary to define it as cropped_img_area
Returns:
--------
cropped_img_area: array
with same shape as image
values: True or False
"""
# Reduce image to grayscale image
grayscale_image = skimage.color.rgb2gray(image)
# Set all values which are 0 to 1 in a new array
cropped_img_area = np.zeros(grayscale_image.shape)
cropped_img_area[grayscale_image == 0] = 1
# Find connected components
labelled_image, count_image = scipy.ndimage.label(cropped_img_area)
refined_cropped_img_area = cropped_img_area.copy()
# Filter out all connected components with size smaller or equal area_threshold
for label in range(1, count_image + 1):
if len(refined_cropped_img_area[labelled_image == label]) <= area_threshold * cropped_img_area.size:
refined_cropped_img_area[labelled_image == label] = 0
# count_refined_mask -= 1
# Return a boolean array
final_cropped_img_area = np.array(refined_cropped_img_area > 0)
# Debug:
if np.max(final_cropped_img_area) > 0:
print("zero area in image detected")
print("Percentage of cropped area:", np.sum(final_cropped_img_area) / final_cropped_img_area.size)
return final_cropped_img_area
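# Illustrative behaviour (a sketch): in a 1024x1024 image whose top 200 rows are exactly
# zero (~19.5 % of all pixels), that region exceeds the default area_threshold=0.1 and is
# reported as cropped, while an isolated 10x10 patch of zeros (<0.01 %) is filtered out.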
def get_count_and_area(mask, filter_th, keep_only_largest_label=False, verbose=False):
labelled_mask, count_mask = scipy.ndimage.label(mask)
# Keep only the biggest connected component
if keep_only_largest_label:
refined_mask = mask.copy()
len_largest_label = 0
id_largest_label = 0
for label in range(1, count_mask + 1):
if len(refined_mask[labelled_mask == label]) > len_largest_label:
len_largest_label = len(refined_mask[labelled_mask == label])
id_largest_label = label
refined_mask[:] = 0
refined_mask[labelled_mask == id_largest_label] = 1
count_mask = 1
if verbose:
print(refined_mask.shape, refined_mask.min(), refined_mask.max())
print("Kept only the largest region and set count_mask to 1.")
else:
# count_refined_mask = count_mask
refined_mask = mask.copy()
# Filter out all connected components with size smaller or equal filter_th
for label in range(1, count_mask + 1):
if len(refined_mask[labelled_mask == label]) <= filter_th:
refined_mask[labelled_mask == label] = 0
# count_refined_mask -= 1
# refined_mask has to be relabeled now.
relabelled_mask, recounted_mask = scipy.ndimage.label(refined_mask)
if recounted_mask < count_mask and verbose:
print("Removed ", count_mask - recounted_mask, " regions because they are smaller or equal ", filter_th,
" pixels.")
filtered_mask = np.array(relabelled_mask > 0)
return relabelled_mask, recounted_mask, filtered_mask
def get_count_and_area_rmv_podo_outside(cfg, mask, filter_mask, filter_th, verbose=False):
# Outputs the labelled_mask, the mask_count and the filtered_mask
# The mask is labeled, then cropped by the filter_mask
# Afterwards, all labels which are contained in the mask are not removed in the labelled_mask
labelled_mask, count_mask = scipy.ndimage.label(mask)
if cfg.GLOM_POSTPROCESSING_KEEP_ONLY_LARGEST is True:
labeled_filter_mask, dataset_filter_mask_count, filtered_filter_mask = get_count_and_area\
(filter_mask, cfg.FILTER_CLASSES[0], keep_only_largest_label=True, verbose=verbose)
else:
labeled_filter_mask, dataset_filter_mask_count, filtered_filter_mask = get_count_and_area\
(filter_mask, cfg.FILTER_CLASSES[0], verbose=verbose)
labelled_mask_copy = labelled_mask.copy()
labelled_mask_copy2 = labelled_mask.copy()
labelled_mask_copy[filtered_filter_mask == 0] = 0
if verbose:
print(labelled_mask_copy.max(), labelled_mask_copy.min())
labels_not_cropped = np.unique(labelled_mask_copy)
labels_not_cropped = np.trim_zeros(labels_not_cropped)
if verbose:
print(labels_not_cropped)
final_mask = np.isin(labelled_mask_copy2, labels_not_cropped)
if verbose:
print(final_mask.max(), final_mask.min())
return get_count_and_area(final_mask, filter_th, verbose=verbose)
def image_to_label_image(img):
label, count = scipy.ndimage.label(img)
return label, count
def coregistrate_and_get_object_dc_score(label_pred, count_pred, label_mask, count_mask, verbose=0):
def dice_coeff_with_intersect_matrix(matrix, tensor):
intersection_matrices = matrix * tensor
intersection_sum_array = np.sum(intersection_matrices, axis=(1,2))
score_array = (2 * intersection_sum_array + 1.) / (np.sum(matrix) + np.sum(tensor, axis=(1,2)) + 1.)
return score_array, intersection_sum_array
def get_true_positives_and_false_negatives_all_cells():
true_positives = []
false_negatives = []
array_dim = label_pred.shape
prediction_array = np.empty((count_pred, array_dim[0], array_dim[1]))
score_arrays = np.zeros((count_mask, count_pred))
for i in range(count_pred):
prediction_array[i,:,:] = np.array([label_pred == i+1])
if verbose:
print(prediction_array.shape)
print(np.max(prediction_array))
print(np.min(prediction_array))
for k in range(1, count_mask + 1):
score_arr, intersection_sum_arr = dice_coeff_with_intersect_matrix(np.array([label_mask == k]),
prediction_array)
if verbose:
print("Intersection array: ")
print(intersection_sum_arr)
print("Score array: ")
print(score_arr)
if np.max(intersection_sum_arr) == 0:
if verbose:
print("cell ", k, " in the groundtruth colocalizes with no cell in the prediction")
false_negatives.append((k, 0))
            elif np.max(intersection_sum_arr) > 0:
score_arrays[k-1, :] = score_arr
cells_to_process = min(count_mask - len(false_negatives), count_pred)
while cells_to_process:
i, j = np.unravel_index(score_arrays.argmax(), score_arrays.shape)
cell_mask = i + 1
cell_pred = j + 1
if verbose:
print("Cells to process: ", cells_to_process)
print("cell ", cell_mask, " in groundtruth colocalizes the BEST with cell ", cell_pred,
" in the prediction")
true_positives.append((cell_mask, cell_pred, np.max(score_arrays)))
score_arrays[i, :] = 0
score_arrays[:, j] = 0
cells_to_process -= 1
true_positives.sort()
list_tp= [x[0] for x in true_positives]
list_mask = list(range(1, count_mask + 1))
for element in false_negatives:
list_mask.remove(element[0])
additional_false_negs = list(set(list_mask) - set(list_tp))
additional_false_negs = [(x, 0) for x in additional_false_negs]
additional_false_negs.sort()
if verbose:
print("The cells ", additional_false_negs, " in the groundtruth colocalize with prediction cells that "
"match better to other cells. Thus this cells will be counted "
"as false negative.")
false_negatives = false_negatives + additional_false_negs
return true_positives, false_negatives
def get_false_positives(tp):
list_tp = [x[1] for x in tp]
list_pred = list(range(1, count_pred + 1))
false_positives = list(set(list_pred) - set(list_tp))
false_positives = [(0, x) for x in false_positives]
false_positives.sort()
return false_positives
if np.max(label_pred) > 0:
# True positives, false negatives
tp, fn = get_true_positives_and_false_negatives_all_cells()
# False positives
fp = get_false_positives(tp)
else:
print("Warning. label_pred is a zero array. Thus TP = 0, FP = 0.")
tp, fp = [], []
fn = [(k, 0) for k in range(1,count_mask+1)]
# object_dc-score
if len(tp) > 0:
object_dc_score = (2 * len(tp)) / (len(fp) + len(fn) + 2 * len(tp))
else:
object_dc_score = 0
return object_dc_score, tp, fp, fn
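# Worked example (illustrative): with 3 true-positive, 1 false-positive and 2 false-negative
# objects, the object-level Dice score is (2*3) / (1 + 2 + 2*3) = 6/9, roughly 0.67.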
def run_script():
import yaml
with open("config/parameters_train.yml", 'r') as ymlfile:
        cfg = yaml.safe_load(ymlfile)
path = "/source/"
mask = skimage.io.imread(path + 'groundtruth/podocytes/A_mask_podo.tif')
pred = skimage.io.imread(path + 'imagej/podocytes/A_mask_podo.tif')
label_pred, count_pred = image_to_label_image(pred)
label_mask, count_mask = image_to_label_image(mask)
print("The pred image has ", count_pred, " cells.")
print("The mask image has ", count_mask, " cells.")
object_dc, tp, fp, fn = coregistrate_and_get_object_dc_score(label_pred, count_pred, label_mask, count_mask, verbose=1)
print("The object_dc-score is: ", object_dc)
print("There are ", len(tp), " TP cells: ", tp)
print("There are ", len(fp), " FP cells: ", fp)
print("There are ", len(fn), " FN cells: ", fn)
return
if __name__ == '__main__':
from config import Config
# Uncomment to test object_dv, tp, fp, fn
#run_script()
# Uncomment to do no testing of Remove podocytes outside glom
#"""
cfg = Config()
# Create a dict containing the masks
msks_dict = {}
mask_list = cfg.NAMES_CLASSES
# Load img and masks
path = '/data/test_postprocessing'
img = skimage.io.imread(os.path.join(path, 'images', 'A.tif'))
mask_glom_name = 'A_mask_glom.tif'
mask_podo_name = 'A_mask_podo.tif'
mask_glom = skimage.io.imread(os.path.join(path, 'masks', mask_glom_name))
mask_podo = skimage.io.imread(os.path.join(path, 'masks', mask_podo_name))
# Display img and masks
msks_dict[mask_list[0]] = mask_glom
msks_dict[mask_list[1]] = mask_podo
plot_img_and_segm_overlayed(img[:, :, (1,2,0)], msks_dict, mask_list, ['Set1', 'hot'], [False, True])
# Remove podocytes outside glom
filter_th = 0
relabelled_mask, recounted_mask, filtered_mask = get_count_and_area_rmv_podo_outside(
cfg, mask_podo, mask_glom, filter_th, verbose=False)
# Plot img and processed masks
msks_dict[mask_list[0]] = mask_glom
msks_dict[mask_list[1]] = filtered_mask
plot_img_and_segm_overlayed(img[:, :, (1, 2, 0)], msks_dict, mask_list, ['Set1', 'hot'], [False, True])
| python |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 13 06:41:54 2016
@author: piotr at nicecircuits.com
"""
from libraryManager.library import libraryClass
from libraryManager.part import part
from footprints.footprintSmdQuad import footprintQfp
from footprints.footprintSmdDualRow import footprintTssop
from libraryManager.footprintPrimitive import *
from libraryManager.defaults import *
from symbols.symbolsIC import symbolIC
from libraryManager.symbolPrimitive import *
from parts.icGenerator import icGenerator
import os.path
from libraryManager.generateLibraries import generateLibraries
class librarySTM32(libraryClass):
"""
"""
def __init__(self):
super().__init__("niceSTM32")
# ============== STM32 LQFP64 ==============
pinNames=[
[None,"14","15","16","17","20","21","22","23","41","42","43","44","45","46","49","50",None,"54"],
[None,"26","27","28","55","56","57","58","59","61","62","29","30","33","34","35","36"],
["7","60",None,"1",None,"13",None,"32","64","48","19",None,None,"12",None,"31","63","47","18",],
["8","9","10","11","24","25","37","38","39","40","51","52","53","2","3","4",None,"5","6"]
]
footprints = [footprintQfp(64, 0.5, density=density) for density in ["N", "L", "M"]]
path=os.path.join(os.path.dirname(__file__),"STM32_LQFP64.ods")
#generate symbols configured by pinNames
self.parts.extend(icGenerator.generate(path,pinNames,footprints,size=3000))
#generate quad pin-by-pin symbols
self.parts.extend(icGenerator.generate(path,pinNames=None,\
footprints=footprints,symbolType="quad",namePosfix="_q",size=3100))
# ============== STM32 TSSOP20 ==============
pinNames=[
["4","1",None,"16","5",None,None,None,None,"2","3",None,"15"],
["6","7","8","9","10","11","12","13","17","18","19","20","14"]
]
footprints = [footprintTssop(20, density=density) for density in ["N", "L", "M"]]
path=os.path.join(os.path.dirname(__file__),"STM32_TSSOP20.ods")
#generate symbols configured by pinNames
self.parts.extend(icGenerator.generate(path,pinNames,footprints,size=2000))
# ============== STM32 LQFP48 ==============
path=os.path.join(os.path.dirname(__file__),"STM32_LQFP48.ods")
self.parts.extend(icGenerator.generate_advanced(path))
# ============== STM32 LQFP32 ==============
path=os.path.join(os.path.dirname(__file__),"STM32_LQFP32.ods")
self.parts.extend(icGenerator.generate_advanced(path))
if __name__ == "__main__":
generateLibraries([librarySTM32()]) | python |
'''
Pattern:
Enter the number of rows: 5
A
A B A
A B C A B
A B C D A B C
A B C D E A B C D
'''
print('Alphabet Pattern: ')
number_rows=int(input('Enter number of rows: '))
for row in range(1,number_rows+1):
print(' '*(number_rows-row),end=' ')
for column in range(1,row+1):
print(chr(column+64),end=' ')
for column in range(1,row):
print(chr(64+column),end=' ')
print() | python |
disliked_ids = {
"items" : [ {
"track" : {
"album" : {
"name" : "W Hotel (feat. Smokepurpp, Blueface)"
},
"id" : "3Ap32KanuR59wfKcs9j2pb",
"name" : "W Hotel (feat. Smokepurpp, Blueface)"
}
}, {
"track" : {
"album" : {
"name" : "Me Molesta"
},
"id" : "0eVnN0I8WCMGOeID68Dx6n",
"name" : "Me Molesta"
}
}, {
"track" : {
"album" : {
"name" : "Un Trago"
},
"id" : "51fKrxgweK5TqUEuzYXswm",
"name" : "Un Trago"
}
}, {
"track" : {
"album" : {
"name" : "Big Tales"
},
"id" : "2Os3uq6WxoQmhdUJkbToIq",
"name" : "Orange Evening"
}
}, {
"track" : {
"album" : {
"name" : "Good Intentions"
},
"id" : "2clQy4kJpJeypgc365VW4H",
"name" : "She Hurtin"
}
}, {
"track" : {
"album" : {
"name" : "BACK TO ROCKPORT"
},
"id" : "7b9kVs2a1ljoTqZp6TRezW",
"name" : "VAMONOS"
}
}, {
"track" : {
"album" : {
"name" : "Ya Supérame (En Vivo)"
},
"id" : "6HIIuuUIEzH1meVdGbMXyf",
"name" : "Ya Supérame (En Vivo)"
}
}, {
"track" : {
"album" : {
"name" : "DEV, Vol. 2"
},
"id" : "4H3t8C59tXjKf1R2iKtc5M",
"name" : "Ya Acabo"
}
}, {
"track" : {
"album" : {
"name" : "Cruisin' with Junior H"
},
"id" : "3pQYf90V5idOGrXzosm9rt",
"name" : "Se Amerita"
}
}, {
"track" : {
"album" : {
"name" : "Loco"
},
"id" : "6PDlkWmrq2ZKiUuFt2aQsH",
"name" : "Loco"
}
}, {
"track" : {
"album" : {
"name" : "Dueles Tan Bien"
},
"id" : "75ncCwXqalTnnl6t1ruQRq",
"name" : "Dueles Tan Bien"
}
}, {
"track" : {
"album" : {
"name" : "Dr. Feelgood"
},
"id" : "7GonnnalI2s19OCQO1J7Tf",
"name" : "Kickstart My Heart"
}
}, {
"track" : {
"album" : {
"name" : "Everything to Lose"
},
"id" : "1QD631AEGHmUoP6qxF5wb8",
"name" : "Everything to Lose - Single Edit"
}
}, {
"track" : {
"album" : {
"name" : "Reach Out / Olympus"
},
"id" : "5hVKXeJg1R9qGbrGW5eHNl",
"name" : "Reach Out"
}
}, {
"track" : {
"album" : {
"name" : "Terrace Rain/Grid Search"
},
"id" : "3IneYkIxkwFCdb68hICqWA",
"name" : "Terrace Rain"
}
}, {
"track" : {
"album" : {
"name" : "Campfire"
},
"id" : "41ME5dAx2Qe1pfZ0ypuCBu",
"name" : "Campfire"
}
}, {
"track" : {
"album" : {
"name" : "Te Odio"
},
"id" : "7sUA9Z7am1vHV7BGwNB8h8",
"name" : "Te Odio"
}
}, {
"track" : {
"album" : {
"name" : "Índigo"
},
"id" : "4knc1Fp3kbuq8bH2byOvLu",
"name" : "Índigo"
}
}, {
"track" : {
"album" : {
"name" : "Sukutubla"
},
"id" : "0ue1fotUAGcDwl3XWoaxxM",
"name" : "Sukutubla"
}
}, {
"track" : {
"album" : {
"name" : "Mi Canción"
},
"id" : "53OGZ25nljLVQAVYaDw0r5",
"name" : "De Noche"
}
}, {
"track" : {
"album" : {
"name" : "Mi Canción"
},
"id" : "6inmGRvhbtgrBoWYxau3wU",
"name" : "Ser Yo"
}
}, {
"track" : {
"album" : {
"name" : "A Dream I Have"
},
"id" : "04IEe7T9LrB5tnrydpSHFg",
"name" : "A Dream I Have"
}
}, {
"track" : {
"album" : {
"name" : "Quince Mil Días"
},
"id" : "70vhN2B10N0pLcUIe2bARB",
"name" : "Quince Mil Días"
}
}, {
"track" : {
"album" : {
"name" : "Santé"
},
"id" : "3vXnuFnC5RhPGwsFi0ORcI",
"name" : "Santé"
}
}, {
"track" : {
"album" : {
"name" : "Canciones Mamalonas 2"
},
"id" : "1nvygjj05E6AK7qR44AP8i",
"name" : "Siempre Es Lo Mismo"
}
}, {
"track" : {
"album" : {
"name" : "Préndete Un Blunt (feat. Zimple) [Remix]"
},
"id" : "1Fjuba2hK1V3IRFHAqFyX6",
"name" : "Préndete Un Blunt (feat. Zimple) - Remix"
}
}, {
"track" : {
"album" : {
"name" : "Led Zeppelin II (1994 Remaster)"
},
"id" : "0hCB0YR03f6AmQaHbwWDe8",
"name" : "Whole Lotta Love - 1990 Remaster"
}
}, {
"track" : {
"album" : {
"name" : "A Town Called Paradise"
},
"id" : "5L2l7mI8J1USMzhsmdjat9",
"name" : "Red Lights"
}
}, {
"track" : {
"album" : {
"name" : "End Of The World"
},
"id" : "25tekS8txsCQov85px1xm2",
"name" : "End Of The World"
}
}, {
"track" : {
"album" : {
"name" : "Punk"
},
"id" : "0AkI0KKi2cSfIKGyMMu7iZ",
"name" : "Peepin Out The Window (with Future & Bslime)"
}
}, {
"track" : {
"album" : {
"name" : "Se Me Olvidó"
},
"id" : "7xLYLM5K6S1TwiSdfuhZQg",
"name" : "Se Me Olvidó"
}
}, {
"track" : {
"album" : {
"name" : "DIE FOR MY BITCH"
},
"id" : "58k32my5lKofeZRtIvBDg9",
"name" : "HONEST"
}
}, {
"track" : {
"album" : {
"name" : "Se Me Pasó"
},
"id" : "7q6uwjL8IQ4cTJplzwdqu6",
"name" : "Se Me Pasó"
}
}, {
"track" : {
"album" : {
"name" : "Mil Vueltas a Este Pantalón"
},
"id" : "6GhcDZBtpfIrEZb0Yk0dZY",
"name" : "Mil Vueltas a Este Pantalón"
}
}, {
"track" : {
"album" : {
"name" : "La Sinvergüenza"
},
"id" : "1xO7tp4J5Wj0NeKrzIpd1V",
"name" : "La Sinvergüenza"
}
}, {
"track" : {
"album" : {
"name" : "Somebody's Watching Me (Syzz Halloween Remix)"
},
"id" : "4dKgJOFyPs5qMTC925ikc3",
"name" : "Somebody's Watching Me - Syzz Halloween Remix"
}
}, {
"track" : {
"album" : {
"name" : "Oohla Oohla"
},
"id" : "3g36KmRGI8hmnCcTFak4Wn",
"name" : "Oohla Oohla"
}
}, {
"track" : {
"album" : {
"name" : "Control (feat. Ty Dolla $ign)"
},
"id" : "0AUo7RatplZTIoZaRkQWDz",
"name" : "Control (feat. Ty Dolla $ign)"
}
}, {
"track" : {
"album" : {
"name" : "Faces"
},
"id" : "40dlJFdqfm8CayhmmS9UB7",
"name" : "Here We Go"
}
}, {
"track" : {
"album" : {
"name" : "Real One"
},
"id" : "7hwBuXZkPzNUTNhBQPyTxu",
"name" : "Real One"
}
}, {
"track" : {
"album" : {
"name" : "Lumbre"
},
"id" : "59ilCs0OhtM96JNFqWS0yW",
"name" : "Lumbre"
}
}, {
"track" : {
"album" : {
"name" : "Necesitaba Estar Hecho"
},
"id" : "7DJnWboNefoXfb7kySFldt",
"name" : "Por Ti"
}
}, {
"track" : {
"album" : {
"name" : "Balenciaga"
},
"id" : "6Tcb2f0TY9VgVmJ8qoHzn4",
"name" : "Balenciaga"
}
}, {
"track" : {
"album" : {
"name" : "Endgame"
},
"id" : "5sG3G54H21hNfd5etlheoe",
"name" : "Satellite"
}
}, {
"track" : {
"album" : {
"name" : "Vampire Weekend"
},
"id" : "5dKBaysNJtfpyNTRa5lqDb",
"name" : "A-Punk"
}
}, {
"track" : {
"album" : {
"name" : "Say Amen for Silver Linings"
},
"id" : "4qSsjDGXplb6422X2INvFW",
"name" : "Say Amen (Saturday Night)"
}
}, {
"track" : {
"album" : {
"name" : "Fear Inoculum"
},
"id" : "03sEzk1VyrUZSgyhoQR0LZ",
"name" : "Pneuma"
}
}, {
"track" : {
"album" : {
"name" : "A Beautiful Lie"
},
"id" : "0Dx3pLp5cHb5RKvCNHKdlK",
"name" : "From Yesterday"
}
}, {
"track" : {
"album" : {
"name" : "Time To Tango"
},
"id" : "168P6e9mrfugeE9nKhEE8C",
"name" : "Bomba"
}
}, {
"track" : {
"album" : {
"name" : "Love Gun"
},
"id" : "0SPBrxOUEMIKugXR4bFhxs",
"name" : "Love Gun"
}
}, {
"track" : {
"album" : {
"name" : "Number Three"
},
"id" : "6VtcgrVYo2xfygcWAfRpd1",
"name" : "The World Is Ugly"
}
}, {
"track" : {
"album" : {
"name" : "Firepower"
},
"id" : "4CONJphSrdS0vIAGDrThGS",
"name" : "Firepower"
}
}, {
"track" : {
"album" : {
"name" : "Appeal To Reason"
},
"id" : "3asFGFY3uLjMDmML1p0tYm",
"name" : "Savior"
}
}, {
"track" : {
"album" : {
"name" : "Atoma"
},
"id" : "4HlVUapocBDBqkPtET2knz",
"name" : "Atoma"
}
}, {
"track" : {
"album" : {
"name" : "If You Have Ghost"
},
"id" : "0PSWSiRXsxsLAEdEhaJAId",
"name" : "Crucified"
}
}, {
"track" : {
"album" : {
"name" : "Bloody Kisses (Top Shelf Edition)"
},
"id" : "710B9xFjNOisQtKtppZE9p",
"name" : "Black No. 1 (Little Miss Scare -All)"
}
}, {
"track" : {
"album" : {
"name" : "TalkTalk"
},
"id" : "47ih1BN9dECO0Gu0yPeMyD",
"name" : "TalkTalk"
}
}, {
"track" : {
"album" : {
"name" : "White Pony"
},
"id" : "51c94ac31swyDQj9B3Lzs3",
"name" : "Change (In the House of Flies)"
}
}, {
"track" : {
"album" : {
"name" : "October Rust (Special Edition)"
},
"id" : "1Yb9Nq9PTEegiOUGwyGHuP",
"name" : "Wolf Moon (Including Zoanthropic Paranoia)"
}
}, {
"track" : {
"album" : {
"name" : "The Money Store"
},
"id" : "7nCONy10IHp7XD3oYZ0lcx",
"name" : "I've Seen Footage"
}
}, {
"track" : {
"album" : {
"name" : "The Money Store"
},
"id" : "7y8X0Z04gJCKtfrnSAMywJ",
"name" : "Hacker"
}
}, {
"track" : {
"album" : {
"name" : "No Love Deep Web"
},
"id" : "5fDj1YVNR04RtQNP4iYapO",
"name" : "No Love"
}
}, {
"track" : {
"album" : {
"name" : "Infinity Overhead"
},
"id" : "1sFMp92IOMEXvza2liF4DZ",
"name" : "Cold Company"
}
}, {
"track" : {
"album" : {
"name" : "October Rust (Special Edition)"
},
"id" : "58RDwkonFMOkoytBtIQetc",
"name" : "Love You to Death"
}
}, {
"track" : {
"album" : {
"name" : "Viva La Vida or Death and All His Friends"
},
"id" : "1mea3bSkSGXuIRvnydlB5b",
"name" : "Viva La Vida"
}
}, {
"track" : {
"album" : {
"name" : "Dreaming Out Loud"
},
"id" : "1NrJYpdAi7uosDRPmSYrsG",
"name" : "Apologize"
}
}, {
"track" : {
"album" : {
"name" : "A Rush of Blood to the Head"
},
"id" : "75JFxkI2RXiU7L9VXzMkle",
"name" : "The Scientist"
}
}, {
"track" : {
"album" : {
"name" : "Indiana"
},
"id" : "5uNEaSgkkPw6vLCUh3KsdQ",
"name" : "Beautiful Disaster"
}
}, {
"track" : {
"album" : {
"name" : "Ocean Eyes"
},
"id" : "3DamFFqW32WihKkTVlwTYQ",
"name" : "Fireflies"
}
}, {
"track" : {
"album" : {
"name" : "Overexposed"
},
"id" : "1LmN9SSHISbtp9LoaR5ZVJ",
"name" : "Payphone"
}
}, {
"track" : {
"album" : {
"name" : "VHS"
},
"id" : "7lGKEWMXVWWTt3X71Bv44I",
"name" : "Unsteady"
}
}, {
"track" : {
"album" : {
"name" : "All I Ever Wanted"
},
"id" : "4Dm32oO01YpIubCHaAtKkN",
"name" : "My Life Would Suck Without You"
}
}, {
"track" : {
"album" : {
"name" : "Science & Faith"
},
"id" : "49kjlZP49LMD1MrrcvXDET",
"name" : "For the First Time"
}
}, {
"track" : {
"album" : {
"name" : "Götterdämmerung"
},
"id" : "2af26RNEV5okdhwPSet5b5",
"name" : "Götterdämmerung"
}
}, {
"track" : {
"album" : {
"name" : "Barren Cloth Mother"
},
"id" : "1LtFsJIocrUsFXTzdilfNM",
"name" : "Barren Cloth Mother"
}
} ]
} | python |
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Searching on a business tasks.
Provides all the search and retrieval from the business filings datastore.
"""
from datetime import datetime
from http import HTTPStatus
import datedelta
from flask import jsonify
from flask_restplus import Resource, cors
from legal_api.models import Business, Filing
from legal_api.services.filings import validations
from legal_api.utils.util import cors_preflight
from .api_namespace import API
@cors_preflight('GET,')
@API.route('/<string:identifier>/tasks', methods=['GET', 'OPTIONS'])
class TaskListResource(Resource):
"""Business Tasks service - Lists all incomplete filings and to-dos."""
@staticmethod
@cors.crossdomain(origin='*')
def get(identifier):
"""Return a JSON object with meta information about the Service."""
business = Business.find_by_identifier(identifier)
if not business:
return jsonify({'message': f'{identifier} not found'}), HTTPStatus.NOT_FOUND
rv = TaskListResource.construct_task_list(business)
return jsonify(tasks=rv)
@staticmethod
def construct_task_list(business):
"""
Return all current pending tasks to do.
First retrieves filings that are either drafts, or incomplete,
then populate AR filings that have not been started for
years that are due.
Rules for AR filings:
- Co-ops must file one AR per year. The next AR date must be AFTER the most recent
AGM date. The calendar year of the filing is the first contiguous year following
the last AGM date
- Corporations must file one AR per year, on or after the anniversary of the founding date
"""
tasks = []
order = 1
check_agm = validations.annual_report.requires_agm(business)
# If no filings exist in legal API db (set after this line), use the business' next anniversary date
todo_start_date = business.next_anniversary.date()
# Retrieve filings that are either incomplete, or drafts
pending_filings = Filing.get_filings_by_status(business.id, [Filing.Status.DRAFT.value,
Filing.Status.PENDING.value,
Filing.Status.ERROR.value])
# Create a todo item for each pending filing
for filing in pending_filings:
task = {'task': filing.json, 'order': order, 'enabled': True}
tasks.append(task)
order += 1
last_ar_date = business.last_ar_date
if last_ar_date and check_agm:
# If this is a CO-OP, set the start date to the first day of the year, since an AR filing
# is available as of Jan/01
todo_start_date = (datetime(last_ar_date.year + 1, 1, 1)).date()
# Retrieve all previous annual report filings. If there are existing AR filings, determine
# the latest date of filing
annual_report_filings = Filing.get_filings_by_type(business.id, 'annualReport')
if annual_report_filings:
# get last AR date from annualReportDate; if not present in json, try annualGeneralMeetingDate and
# finally filing date
last_ar_date = \
annual_report_filings[0].filing_json['filing']['annualReport'].get('annualReportDate', None)
if not last_ar_date:
last_ar_date = annual_report_filings[0].filing_json['filing']['annualReport']\
.get('annualGeneralMeetingDate', None)
if not last_ar_date:
last_ar_date = annual_report_filings[0].filing_date
last_ar_date = datetime.fromisoformat(last_ar_date)
if check_agm:
todo_start_date = (datetime(last_ar_date.year+1, 1, 1)).date()
else:
todo_start_date = (last_ar_date+datedelta.YEAR).date()
start_year = todo_start_date.year
while todo_start_date <= datetime.now().date():
enabled = not pending_filings and todo_start_date.year == start_year
tasks.append(TaskListResource.create_todo(business, todo_start_date.year, order, enabled))
todo_start_date += datedelta.YEAR
order += 1
return tasks
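    # Illustrative walk-through (a sketch, dates hypothetical): for a co-op whose last
    # annual report covered 2017 (check_agm is True), todo_start_date becomes 2018-01-01.
    # If today falls in 2020 and there are no pending filings, the loop appends "NEW"
    # annualReport todos for 2018, 2019 and 2020, with only the 2018 item enabled.
    # For corporations (check_agm is False) the next date is the last AR date plus one year.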
@staticmethod
def create_todo(business, todo_year, order, enabled):
"""Return a to-do JSON object."""
todo = {
'task': {
'todo': {
'business': business.json(),
'header': {
'name': 'annualReport',
'ARFilingYear': todo_year,
'status': 'NEW'
}
}
},
'order': order,
'enabled': enabled
}
return todo
| python |
#! /usr/bin/python
import sys
import os
import sqlite3
import datetime
import threading
import Queue
class SQLiteThread(threading.Thread):
def __init__(self, config, logger):
threading.Thread.__init__(self)
self.setDaemon(True)
self.filename = config.get_setting("sqlite_filename", "")
self.logger = logger
self.msgport = Queue.Queue()
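    # Every message placed on msgport is a list of the form
    # [bound_method, arg1, ..., result_queue]: run() pops it and calls
    # bound_method(*args), and select/commit/execute_many put their result on the
    # trailing Queue. This keeps all sqlite3 calls on the single thread that owns
    # the connection, which sqlite3 requires by default.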
def run(self):
self.logger.info("SQLite uses filename %r" % (self.filename, ))
self.db = sqlite3.connect(self.filename)
self.create_default_tables()
while True:
msg = self.msgport.get()
msg[0](*msg[1:])
def create_default_tables(self):
# a user is a mailbox on the system. a system can have any number of mailboxes.
self.db.execute("""CREATE TABLE IF NOT EXISTS users (
name TEXT PRIMARY KEY,
password TEXT,
homedir TEXT,
perm TEXT,
msg_login TEXT,
msg_quit TEXT);""")
self.db.commit()
def select(self, stmt, params, result):
result.put([line for line in self.db.execute(stmt, params)])
def commit(self, stmt, params, result):
t = self.db.execute(stmt, params)
self.db.commit()
result.put(t)
def execute_many(self, stmt, params, result):
t = self.db.executemany(stmt, params)
self.db.commit()
result.put(t)
def disconnect(self):
self.db.close()
self.db = None
class SQLite3Database(object):
def __init__(self, config, logger):
self.sqlite_thread = SQLiteThread(config, logger)
self.logger = logger
self.sqlite_thread.start()
def select(self, stmt, params):
result = Queue.Queue()
self.sqlite_thread.msgport.put([self.sqlite_thread.select, stmt, params, result])
return result.get()
def commit(self, stmt, params):
result = Queue.Queue()
self.sqlite_thread.msgport.put([self.sqlite_thread.commit, stmt, params, result])
return result.get()
def execute_many(self, stmt, params):
result = Queue.Queue()
self.sqlite_thread.msgport.put([self.sqlite_thread.execute_many, stmt, params, result])
return result.get()
def add_user(self, username, password, homedir, perm, msg_login, msg_quit):
stmt = "INSERT INTO users (name, password, homedir, perm, msg_login, msg_quit) VALUES (?,?,?,?,?,?);"
args = (username, password, homedir, perm, msg_login, msg_quit, )
return self.commit(stmt, args)
def remove_user(self, username):
stmt = "DELETE FROM users WHERE name=?;"
args = (username, )
return self.commit(stmt, args)
def identify(self, username):
stmt = "SELECT password,homedir,perm,msg_login,msg_quit FROM users WHERE name=?"
args = (username, )
for row in self.select(stmt, args):
return tuple(row)
return None
def has_user(self, username):
stmt = "SELECT password FROM users WHERE name=?"
args = (username, )
for row in self.select(stmt, args):
return True
return False
def list_users(self):
return self.select("SELECT name,password,homedir,perm,msg_login,msg_quit FROM users ORDER BY name", [])
if __name__ == "__main__":
import eftepede_server
eftepede_server.main()
| python |
"""
Unit tests for our validators
"""
from dbas.database.discussion_model import ReviewDelete
from dbas.tests.utils import TestCaseWithConfig, construct_dummy_request
from dbas.validators.core import has_keywords_in_json_path, spec_keyword_in_json_body
from dbas.validators.reviews import valid_not_executed_review
class TestHasKeywords(TestCaseWithConfig):
def test_has_one_keyword(self):
request = construct_dummy_request(json_body={'string': 'foo'})
response = has_keywords_in_json_path(('string', str))(request)
self.assertTrue(response)
self.assertIn('string', request.validated)
def test_has_multiple_keywords(self):
request = construct_dummy_request(json_body={
'string': 'foo',
'bool': True
})
response = has_keywords_in_json_path(('string', str), ('bool', bool))(request)
self.assertTrue(response)
self.assertIn('string', request.validated)
self.assertIn('bool', request.validated)
def test_has_number_keywords(self):
request = construct_dummy_request(json_body={
'int': 4,
'float': 4.0
})
response = has_keywords_in_json_path(('int', int), ('float', float))(request)
self.assertTrue(response)
self.assertIn('int', request.validated)
self.assertIn('float', request.validated)
def test_has_list_keywords(self):
request = construct_dummy_request(json_body={'list': ['<:)']})
response = has_keywords_in_json_path(('list', list))(request)
self.assertTrue(response)
self.assertIn('list', request.validated)
def test_has_keywords_with_wrong_type(self):
request = construct_dummy_request(json_body={'int': 4})
response = has_keywords_in_json_path(('int', float))(request)
self.assertFalse(response)
self.assertNotIn('int', request.validated)
def test_has_keywords_without_keyword(self):
request = construct_dummy_request(json_body={'foo': 42})
response = has_keywords_in_json_path(('bar', int))(request)
self.assertFalse(response)
self.assertNotIn('bar', request.validated)
class TestExecutedReviews(TestCaseWithConfig):
def test_valid_not_executed_review(self):
request = construct_dummy_request(json_body={'ruid': 4})
response = valid_not_executed_review('ruid', ReviewDelete)(request)
self.assertTrue(response)
def test_valid_not_executed_review_error(self):
request = construct_dummy_request(json_body={'ruid': 1})
response = valid_not_executed_review('ruid', ReviewDelete)(request)
self.assertFalse(response)
class TestSpecKeywords(TestCaseWithConfig):
def test_empty_dummy_request_should_fail(self):
request = construct_dummy_request()
fn = spec_keyword_in_json_body((int, 'foo', lambda foo, varType: isinstance(foo, varType)))
response = fn(request)
self.assertIsInstance(response, bool)
self.assertFalse(response)
def test_provided_string_expected_int_should_fail(self):
request = construct_dummy_request(json_body={'foo': 'bar'})
fn = spec_keyword_in_json_body((int, 'foo', lambda foo, varType: isinstance(foo, varType)))
response = fn(request)
self.assertIsInstance(response, bool)
self.assertFalse(response)
def test_provided_int_expected_int_should_succed(self):
request = construct_dummy_request(json_body={'foo': 2})
fn = spec_keyword_in_json_body((int, 'foo', lambda foo, varType: isinstance(foo, varType)))
response = fn(request)
self.assertIsInstance(response, bool)
self.assertTrue(response)
def test_provided_empty_string_should_fail(self):
request = construct_dummy_request(json_body={'foo': ''})
fn = spec_keyword_in_json_body((str, 'foo', lambda foo, varType: isinstance(foo, varType) and foo != ''))
response = fn(request)
self.assertIsInstance(response, bool)
self.assertFalse(response)
def test_provided_string_should_succed(self):
request = construct_dummy_request(json_body={'foo': 'bar'})
fn = spec_keyword_in_json_body((str, 'foo', lambda foo, varType: isinstance(foo, varType) and foo != ''))
response = fn(request)
self.assertIsInstance(response, bool)
self.assertTrue(response)
| python |
#!/usr/bin/python
import pymysql
import config
def add_feedback(email,f_text):
conn,cursor=config.connect_to_database()
sql="insert into feedbacks(email,f_text) values('%s','%s')"%(email,f_text);
try:
cursor.execute(sql)
conn.commit()
return "11"
except:
conn.rollback()
return "0"
def check_same_feedback(email,f_text):
conn,cursor=config.connect_to_database()
sql="select * from feedbacks where email='%s'"%email
try:
cursor.execute(sql)
results=cursor.fetchall()
for row in results:
got_f_text=str(row[2]).lower()
f_text=str(f_text).lower()
if (f_text==got_f_text):
return '1'
return "11"
except:
conn.rollback()
return "0" | python |
import unittest
from point import Point
class PointTests(unittest.TestCase):
"""Tests for Point."""
def test_attributes(self):
point = Point(1, 2, 3)
self.assertEqual((point.x, point.y, point.z), (1, 2, 3))
point.x = 4
self.assertEqual(point.x, 4)
def test_string_representation(self):
point = Point(1, 2, 3)
self.assertEqual(str(point), 'Point(x=1, y=2, z=3)')
self.assertEqual(repr(point), 'Point(x=1, y=2, z=3)')
point.y = 4
self.assertEqual(str(point), 'Point(x=1, y=4, z=3)')
self.assertEqual(repr(point), 'Point(x=1, y=4, z=3)')
def test_equality_and_inequality(self):
p1 = Point(1, 2, 3)
p2 = Point(1, 2, 4)
p3 = Point(1, 2, 3)
self.assertNotEqual(Point(1, 2, 3), Point(1, 2, 4))
self.assertEqual(Point(1, 2, 3), Point(1, 2, 3))
self.assertFalse(Point(1, 2, 3) != Point(1, 2, 3))
self.assertNotEqual(p1, p2)
self.assertEqual(p1, p3)
p3.x, p3.z = p3.z, p3.x
self.assertNotEqual(p1, p3)
self.assertTrue(p1 != p3)
self.assertFalse(p1 == p3)
# To test the Bonus part of this exercise, comment out the following line
# @unittest.expectedFailure
def test_shifting(self):
p1 = Point(1, 2, 3)
p2 = Point(4, 5, 6)
p3 = p2 + p1
p4 = p3 - p1
self.assertEqual((p3.x, p3.y, p3.z), (5, 7, 9))
self.assertEqual((p4.x, p4.y, p4.z), (p2.x, p2.y, p2.z))
# To test the Bonus part of this exercise, comment out the following line
# @unittest.expectedFailure
def test_scale(self):
p1 = Point(1, 2, 3)
p2 = p1 * 2
self.assertEqual((p2.x, p2.y, p2.z), (2, 4, 6))
p3 = 3 * p1
self.assertEqual((p3.x, p3.y, p3.z), (3, 6, 9))
# To test the Bonus part of this exercise, comment out the following line
# @unittest.expectedFailure
def test_iterable_point(self):
point = Point(x=1, y=2, z=3)
x, y, z = point
self.assertEqual((x, y, z), (1, 2, 3))
if __name__ == "__main__":
unittest.main(verbosity=2) | python |
from bigchaindb_driver import BigchainDB
from bigchaindb_driver.crypto import generate_keypair
from time import sleep
from sys import exit
def asset_creation(farmer, tomatos, tomatos_metadata, bdb):
prepare_cr_tx = bdb.transactions.prepare(
operation = 'CREATE',
signers = farmer.public_key,
asset = tomatos,
metadata = tomatos_metadata
)
fulfilled_cr_tx = bdb.transactions.fulfill(
prepare_cr_tx,
private_keys = farmer.private_key
)
sent_cr_tx = bdb.transactions.send_commit(fulfilled_cr_tx)
print("Creation done (status): ",fulfilled_cr_tx == sent_cr_tx)
return sent_cr_tx
def asset_transfer(fulfilled_cr_tx, buyer, farmer, bdb):
transfer_asset = {'id':fulfilled_cr_tx['id']}
output = fulfilled_cr_tx['outputs'][0]
transfer_input = {
'fulfillment': output['condition']['details'],
'fulfills': {
'output_index': 0,
'transaction_id': fulfilled_cr_tx['id']
},
'owners_before': output['public_keys']
}
prepared_transfer_tx = bdb.transactions.prepare(
operation='TRANSFER',
asset=transfer_asset,
inputs=transfer_input,
recipients=buyer.public_key,
)
fulfilled_transfer_tx = bdb.transactions.fulfill(
prepared_transfer_tx,
private_keys=farmer.private_key,
)
sent_transfer_tx = bdb.transactions.send_commit(fulfilled_transfer_tx)
print("Transfer done (status): ", fulfilled_transfer_tx == sent_transfer_tx)
return sent_transfer_tx
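# The two helpers above follow the usual BigchainDB driver flow:
# prepare -> fulfill (sign with the current owner's private key) -> send_commit.
# asset_creation() registers the asset under the farmer's key; asset_transfer()
# spends output 0 of that CREATE transaction and assigns it to the buyer.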
def run(tomatos, tomatos_metadata, farmer, buyer, bdb):
    fulfilled_cr = asset_creation(farmer, tomatos, tomatos_metadata, bdb)
sent_trans = asset_transfer(fulfilled_cr, buyer, farmer, bdb)
#print("Is Buyer the owner?",
# sent_transfer_tx['outputs'][0]['public_keys'][0] == farmer.public_key)
if __name__ == '__main__':
bdb = BigchainDB("https://test.bigchaindb.com")
farmer, trader, buyer = generate_keypair(), generate_keypair(), generate_keypair()
while(input("Press q (quit) to stop ") != 'q'):
tomatos = {
'data': {
input("Product name: ") : {
'price_euros' : float(input("Product price: ")),
'quantity_kilos': float(input("Product quantity: "))
},
},
}
tomatos_metadata = {'plant' : 'farm'}
run(tomatos, tomatos_metadata, farmer, buyer, bdb)
| python |
from __future__ import print_function
from memorytestgame.lib.game import Game
import time
import unittest
class GameTestCase(unittest.TestCase):
LEDS = ()
SWITCHES = ()
COUNTDOWN = 0
GAME_TIME = 0
SCORE_INCREMENT = 1
game = None
def setUp(self):
self.game = Game(self.LEDS, self.SWITCHES, self.COUNTDOWN, self.GAME_TIME, self.SCORE_INCREMENT)
def test__init__(self):
self.assertIsInstance(self.game, Game)
def test_start(self):
self.game.GAME_TIME = 2
self.assertIs(self.game.start(True), None)
def test_get_score(self):
self.assertIs(self.game.get_score(), 0)
def test_print_information(self):
self.game.FINISH_TIME = time.time()
self.assertIs(self.game.print_information(), None)
def test_print_score(self):
self.assertIs(self.game.print_score(), None)
self.assertIs(self.game.print_score(True), None)
def test_flash_buttons(self):
self.assertIs(self.game.flash_buttons(self.game.BUTTON_ACTION_ALL), None)
self.assertIs(self.game.flash_buttons(self.game.BUTTON_ACTION_SNAKE), None)
def test_finish(self):
self.assertIs(self.game.finish(), None)
def test_reset(self):
self.assertIs(self.game.reset(), None)
def test_cleanup(self):
self.assertIs(self.game.cleanup(), None)
def test__exit__(self):
self.assertIs(self.game.__exit__(), None)
if __name__ == '__main__':
unittest.main()
| python |
"""
Otrzymujesz liste liczb oraz liczbe n. Lista reprezentuje ceny sznurka o dlugosci
rownej indeksowi powiekszonemu o 1. Zaleznosc miedzy cenami i dlugoscia sznurka jest
przypadkowa. Przykladowo sznurek o dlugosci rownej 2 jednostkom moze kosztowac tyle
samo co sznurek o dlugosci rownej 3 jednostkom i byc piec razy drozszy niz sznurek o
dlugosci 1 jednostce. Liczba n to dlugosc sznurka jaki mamy szprzedac. Na ile czesci
powinnismy podzielic nasz sznurek, aby zarobic jak najwiecej na sprzedazy.
"""
# Time complexity: O(2^n)
# Space complexity: O(1)
def podziel_sznurek_v1(ceny, n):
    if len(ceny) < n:
        raise ValueError(
            "The rope length to sell cannot exceed the number of entries in the price list."
        )
if n == 0:
return 0
maks = 0
for i in range(n):
cena = ceny[i] + podziel_sznurek_v1(ceny, n - i - 1)
if cena > maks:
maks = cena
return maks
# Time complexity: O(n^2)
# Space complexity: O(n)
def podziel_sznurek_v2(ceny, n):
    if len(ceny) < n:
        raise ValueError(
            "The rope length to sell cannot exceed the number of entries in the price list."
        )
pom = [0] * (n + 1)
for i in range(n):
for j in range(i + 1):
pom[i + 1] = max(pom[i + 1], ceny[j] + pom[i - j])
return pom[n]
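# Worked example (illustrative) for podziel_sznurek_v2 with ceny=[1, 5, 8, 9], n=4:
# pom grows as [0, 1, 5, 8, 10], since pom[i+1] = max over j of ceny[j] + pom[i-j].
# The best revenue for length 4 is therefore 10 (two pieces of length 2, 5 + 5),
# which matches test_1 below.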
# Correctness tests
def test_1():
ceny = [1, 5, 8, 9, 10, 17, 17, 20]
n = 4
wynik = 10
assert podziel_sznurek_v1(ceny, n) == wynik
assert podziel_sznurek_v2(ceny, n) == wynik
def test_2():
ceny = [3, 9, 10, 20]
n = 5
for funkcja in [podziel_sznurek_v1, podziel_sznurek_v2]:
try:
            funkcja(ceny, n)
assert False
except ValueError:
assert True
def test_3():
ceny = [5]
n = 1
wynik = 5
assert podziel_sznurek_v1(ceny, n) == wynik
assert podziel_sznurek_v2(ceny, n) == wynik
def main():
test_1()
test_2()
test_3()
if __name__ == "__main__":
main()
| python |
# PhotoBot 0.8 beta - last updated for NodeBox 1rc4
# Author: Tom De Smedt <[email protected]>
# Manual: http://nodebox.net/code/index.php/PhotoBot
# Copyright (c) 2006 by Tom De Smedt.
# Refer to the "Use" section on http://nodebox.net/code/index.php/Use
from __future__ import print_function
ALL = ['canvas', 'Layers', 'Layer', 'invertimage', 'cropimage',
'aspectRatio', 'normalizeOrientationImage', 'insetRect',
'cropImageToRatioHorizontal', 'scaleLayerToHeight', 'placeImage',
'resizeImage', 'hashFromString', 'makeunicode', 'datestring',
'label' ]
import sys
import os
import random
import math
sqrt = math.sqrt
pow = math.pow
sin = math.sin
cos = math.cos
degrees = math.degrees
radians = math.radians
asin = math.asin
import fractions
Fraction = fractions.Fraction
import datetime
import time
import hashlib
import unicodedata
import colorsys
import io
import PIL
import PIL.ImageFilter as ImageFilter
import PIL.Image as Image
import PIL.ImageChops as ImageChops
import PIL.ImageEnhance as ImageEnhance
import PIL.ImageOps as ImageOps
import PIL.ImageDraw as ImageDraw
import PIL.ImageStat as ImageStat
import PIL.ImageFont as ImageFont
# disable large image warning
old = Image.MAX_IMAGE_PIXELS
Image.MAX_IMAGE_PIXELS = None # 200000000
# print( "MAX_IMAGE_PIXELS: %i" % old)
import pdb
import pprint
pp = pprint.pprint
kwdbg = 0
kwlog = 0
import traceback
# py3 stuff
py3 = False
try:
unicode('')
punicode = unicode
pstr = str
punichr = unichr
except NameError:
punicode = str
pstr = bytes
py3 = True
punichr = chr
long = int
xrange = range
# PIL interpolation modes
NEAREST = Image.NEAREST
BILINEAR = Image.BILINEAR
BICUBIC = Image.BICUBIC
LANCZOS = Image.LANCZOS
INTERPOLATION = Image.BICUBIC
LAYERS = []
# blend modes
NORMAL = "normal"
MULTIPLY = "multiply"
SCREEN = "screen"
OVERLAY = "overlay"
HUE = "hue"
COLOR = "color"
ADD = "add"
SUBTRACT = "subtract"
ADD_MODULO = "add_modulo"
SUBTRACT_MODULO = "subtract_modulo"
DIFFERENCE = "difference"
HORIZONTAL = 1
VERTICAL = 2
SOLID = "solid"
LINEAR = "linear"
RADIAL = "radial"
DIAMOND = "diamond"
SCATTER = "scatter"
COSINE = "cosine"
SINE = "sine"
ROUNDRECT = "roundrect"
RADIALCOSINE = "radialcosine"
QUAD = "quad"
class Canvas:
"""Implements a canvas with layers.
A canvas is an empty Photoshop document,
where layers can be placed and manipulated.
"""
def __init__(self, w, h):
"""Creates a new canvas.
Creates the working area on which to blend layers.
The canvas background is transparent,
but a background color could be set using the fill() function.
"""
self.interpolation = INTERPOLATION
self.layers = Layers()
self.w = w
self.h = h
img = Image.new("RGBA", (w,h), (255,255,255,0))
self.layer(img, name="_bg")
del img
def layer(self, img, x=0, y=0, name=""):
"""Creates a new layer from file, Layer, PIL Image.
If img is an image file or PIL Image object,
Creates a new layer with the given image file.
The image is positioned on the canvas at x, y.
If img is a Layer,
uses that layer's x and y position and name.
"""
if isinstance(img, Image.Image):
img = img.convert("RGBA")
self.layers.append( Layer(self, img, x, y, name) )
return len(self.layers) - 1
if isinstance(img, Layer):
img.canvas = self
self.layers.append(img)
return len(self.layers) - 1
if type(img) in (pstr, punicode):
try:
img = Image.open(img)
img = img.convert("RGBA")
self.layers.append( Layer(self, img, x, y, name) )
del img
return len(self.layers) - 1
except Exception as err:
print( "Canvas.layer( %s ) FAILED." %repr( img ) )
print(err)
print()
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_tb)
print()
return None
def fill(self, rgb, x=0, y=0, w=None, h=None, name=""):
"""Creates a new fill layer.
Creates a new layer filled with the given rgb color.
For example, fill((255,0,0)) creates a red fill.
The layers fills the entire canvas by default.
"""
        if w is None:
            w = self.w - x
        if h is None:
            h = self.h - y
img = Image.new("RGBA", (w,h), rgb)
result = self.layer(img, x, y, name)
del img
return result
def makegradientimage(self, style, w, h):
"""Creates the actual gradient image.
This has been factored out of gradient() so complex gradients like
ROUNDRECT which consist of multiple images can be composed.
"""
if type(w) == float:
w *= self.w
if type(h) == float:
h *= self.h
# prevent some div by 0 errors
if w < 0:
w = -w
if h < 0:
h = -h
w = int( round( max(1,w) ))
h = int( round( max(1,h) ))
w2 = w // 2
h2 = h // 2
if kwlog:
print( (style, self.w,self.h,w,h) )
if style in (RADIALCOSINE,): #, SCATTER):
img = Image.new("L", (w, h), 0)
elif style in (SCATTER, ):
img = Image.new("L", (w, h), 0)
# img = Image.new("RGBA", (w, h), (0,0,0,0))
else:
img = Image.new("L", (w, h), 255)
draw = ImageDraw.Draw(img)
if style == SOLID:
draw.rectangle((0, 0, w, h), fill=255)
if style == LINEAR:
for i in xrange( w ):
k = int( round( 255.0 * i / w ))
draw.rectangle((i, 0, i, h), fill=k)
if style == RADIAL:
r = min(w,h) / 2.0
r0 = int( round( r ))
for i in xrange( r0 ):
k = int( round( 255 - 255.0 * i/r ))
draw.ellipse((w/2-r+i, h/2-r+i,
w/2+r-i, h/2+r-i), fill=k)
if style == RADIALCOSINE:
r = max(w,h) / 2.0
rx = w / 2.0
ry = h / 2.0
r0 = int( round( r ))
deg = 90
base = 90 - deg
deltaxdeg = deg / rx
deltaydeg = deg / ry
deltadeg = deg / r
step = min(deltaxdeg, deltaydeg)
for i in xrange( r0 ):
# k = 255.0 * i/r
k = int( round( 256 * sin( radians( base + i * deltadeg ) ) ))
ix = i * (rx / r)
iy = i * (ry / r)
draw.ellipse((0 + ix, 0 + iy,
w - ix, h - iy), fill=k)
if style == DIAMOND:
maxwidthheight = int( round( max(w,h) ))
widthradius = w * 0.5
heightradius = h * 0.5
for i in xrange( maxwidthheight ):
ratio = i / float( maxwidthheight )
x = int( round( ratio * widthradius ) )
y = int( round( ratio * heightradius ) )
k = int( round( 256.0 * ratio ))
draw.rectangle((x, y, w-x, h-y), outline=k)
if style == SCATTER:
# scatter should be some circles randomly across WxH
# img, draw
maxwidthheight = int( round( max(w,h) ))
minwidthheight = int( round( min(w,h) ))
def rnd( w, offset ):
r = random.random()
o2 = offset / 2.0
result = o2 + r * (w - (offset * 2))
return result
# circles at 12.5%
circleplacemin = int( round( minwidthheight / 9.0 ) )
circleplacemax = int( round( maxwidthheight / 9.0 ) )
c2 = 2 * circleplacemin
for count in xrange( 511 ):
tempimage = Image.new("L", (w, h), (0,) )
draw2 = ImageDraw.Draw( tempimage )
x = int( round( rnd( w, circleplacemin ) ))
y = int( round( rnd( h, circleplacemin ) ))
k = min(255, int( round( 33 + random.random() * 127)) )
r = (circleplacemin / 4.0) + random.random() * (circleplacemin / 4.0)
bottom = int(round(y + r))
right = int(round(x + r))
draw2.ellipse( (x, y, right, bottom), fill=( k ) )
if 0:
print( (x, y, bottom, right) )
# merge
img = ImageChops.add(img, tempimage)
del draw2
img = img.convert("L")
if style in (SINE, COSINE):
# sin/cos 0...180 left to right
action = sin
deg = 180.0
base = 0
if style == COSINE:
action = cos
deg = 90.0
base = 90.0 - deg
deltadeg = deg / w
for i in xrange( w ):
k = int( round( 256.0 * action( radians( base + i * deltadeg ) ) ))
draw.line( (i,0,i, h), fill=k, width=1)
result = img.convert("RGBA")
del img
del draw
return result
def gradient(self, style=LINEAR, w=1.0, h=1.0, name="",
radius=0, radius2=0):
"""Creates a gradient layer.
Creates a gradient layer, that is usually used together
with the mask() function.
All the image functions work on gradients, so they can
easily be flipped, rotated, scaled, inverted, made brighter
or darker, ...
        Styles for gradients are SOLID, LINEAR, RADIAL, RADIALCOSINE,
        DIAMOND, SCATTER, SINE, COSINE, QUAD and ROUNDRECT.
"""
w0 = self.w
h0 = self.h
if type(w) == float:
w = int( round( w * w0 ))
if type(h) == float:
h = int( round( h * h0 ))
img = None
if style in (SOLID, LINEAR, RADIAL, DIAMOND,
SCATTER, SINE, COSINE, RADIALCOSINE):
img = self.makegradientimage(style, w, h)
img = img.convert("RGBA")
return self.layer(img, 0, 0, name=name)
if style == QUAD:
# make a rectangle with softened edges
result = Image.new("L", ( w, h ), 255)
mask = Image.new("L", ( w, h ), 255)
draw = ImageDraw.Draw(mask)
if radius == 0 and radius2 == 0:
radius = w / 4.0
radius2 = w / 10.0
r1 = int(round( radius ))
r2 = int(round( radius2 ))
if r1 == 0:
r1 = 1
if r2 == 0:
r2 = 1
d1 = 2 * r1
d2 = 2 * r2
# create the base rect
baserect = self.makegradientimage(SOLID, w-d1, h-d2)
# create the vertical gradients
verleft = self.makegradientimage(COSINE, r1, h)
verleft = verleft.transpose(Image.FLIP_LEFT_RIGHT)
vertright = verleft.rotate( 180 )
# create the horizontal gradients
# since LINEAR goes from left to right,
horup = self.makegradientimage(COSINE, r2, w)
horup = horup.transpose(Image.FLIP_LEFT_RIGHT)
hordown = horup.rotate( -90, expand=1 )
horup = hordown.rotate( 180 )
# assemble
result.paste( baserect, box=( r1, 0) )
result.paste( verleft, box=( 0, 0) )
result.paste( vertright,box=( w-r1, 0) )
mask.paste( hordown, box=( 0, 0) )
mask.paste( horup, box=( 0, h-r2) )
result = ImageChops.darker(result, mask)
result = result.convert("RGBA")
del mask, horup, hordown
del baserect, verleft, vertright
return self.layer(result, 0, 0, name=name)
if style == ROUNDRECT:
result = Image.new("L", ( w, h ), 255)
r1 = int( round( radius ))
r2 = int( round( radius2 ))
if r1 == 0:
r1 = 1
if r2 == 0:
r2 = 1
d1 = 2 * r1
d2 = 2 * r2
# take 1 radial grad for the 4 corners
corners = self.makegradientimage(RADIALCOSINE, d1, d2)
# top left
b = corners.copy()
tl = b.crop( box=(0,0,r1,r2) )
# top right
b = corners.copy()
tr = b.crop( box=(r1,0,d1,r2) )
# bottom left
b = corners.copy()
bl = b.crop( box=(0,r2,r1,d2) )
# bottom right
b = corners.copy()
br = b.crop( box=(r1,r2,d1,d2) )
# create the base rect
brw = w - d1
brh = h - d2
baserect = self.makegradientimage(SOLID, brw, brh)
# create the vertical gradients
verleft = self.makegradientimage(COSINE, r1, brh)
verleft = verleft.transpose(Image.FLIP_LEFT_RIGHT)
vertright = verleft.rotate( 180 )
# create the horizontal gradients
# since LINEAR goes from left to right,
horup = self.makegradientimage(COSINE, r2, brw)
horup = horup.transpose(Image.FLIP_LEFT_RIGHT)
hordown = horup.rotate( -90, expand=1 )
horup = hordown.rotate( 180 )
# assemble
result.paste( baserect, box=( r1, r2) )
result.paste( hordown, box=( r1, 0) )
result.paste( horup, box=( r1, brh+r2) )
result.paste( verleft, box=( 0, r2) )
result.paste( vertright,box=( brw+r1, r2) )
result.paste( tl, box=( 0, 0) )
result.paste( tr, box=( brw+r1, 0) )
result.paste( bl, box=( 0, brh+r2) )
result.paste( br, box=( brw+r1, brh+r2) )
img = result.convert("RGBA")
del corners, tl, tr, bl, br, b
del horup, hordown
del baserect
del verleft, vertright
return self.layer(img, 0, 0, name=name)
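    # A minimal usage sketch (hedged; "photo.jpg" is an assumed file name):
    # a gradient layer is usually combined with mask() on the Layer class
    # below to fade an image out towards one side.
    #
    #   c = canvas(500, 500)
    #   c.layer("photo.jpg")
    #   c.gradient(LINEAR)   # black-to-white ramp becomes the top layer
    #   c.top.mask()         # commit the gradient to the photo's alpha channel
    #   c.export("faded")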
def merge(self, layers):
"""Flattens the given layers on the canvas.
Merges the given layers with the indices in the list
on the bottom layer in the list.
The other layers are discarded.
"""
layers.sort()
if layers[0] == 0:
del layers[0]
self.flatten(layers)
def flatten(self, layers=[]):
"""Flattens all layers according to their blend modes.
Merges all layers to the canvas, using the
blend mode and opacity defined for each layer.
Once flattened, the stack of layers is emptied except
for the transparent background (bottom layer).
"""
# When the layers argument is omitted,
# flattens all the layers on the canvas.
# When given, merges the indexed layers.
# Layers that fall outside of the canvas are cropped:
# this should be fixed by merging to a transparent background
# large enough to hold all the given layers' data
# (=time consuming).
if kwlog:
start = time.time()
if layers == []:
layers = xrange(1, len(self.layers))
background = self.layers._get_bg()
background.name = "Background"
for i in layers:
layer = self.layers[i]
# Determine which portion of the canvas
# needs to be updated with the overlaying layer.
x = max(0, layer.x)
y = max(0, layer.y)
w = min(background.w, layer.x+layer.w)
h = min(background.h, layer.y+layer.h)
baseimage = background.img.crop( (x, y, w, h) )
# Determine which piece of the layer
# falls within the canvas.
x = max(0, -layer.x)
y = max(0, -layer.y)
w -= layer.x
h -= layer.y
blendimage = layer.img.crop( (x, y, w, h) )
lblend = blendimage.convert("L")
bwblend = lblend.convert("1")
# Buffer layer blend modes:
# the base below is a flattened version
# of all the layers below this one,
# on which to merge this blended layer.
if layer.blend == NORMAL:
buffimage = blendimage
elif layer.blend == MULTIPLY:
buffimage = ImageChops.multiply(baseimage, blendimage)
elif layer.blend == SCREEN:
buffimage = ImageChops.screen(baseimage, blendimage)
elif layer.blend == OVERLAY:
buffimage = Blend().overlay(baseimage, blendimage)
elif layer.blend == HUE:
buffimage = Blend().hue(baseimage, blendimage)
elif layer.blend == COLOR:
buffimage = Blend().color(baseimage, blendimage)
elif layer.blend == ADD:
buffimage = ImageChops.add(baseimage, blendimage)
elif layer.blend == SUBTRACT:
img1 = baseimage.convert("RGB")
img2 = blendimage.convert("RGB")
buffimage = ImageChops.subtract(img1, img2)
buffimage = buffimage.convert("RGBA")
del img1, img2
# buffimage = ImageChops.subtract(baseimage, blendimage)
# buffimage = Blend().subtract(baseimage, blendimage)
elif layer.blend == ADD_MODULO:
buffimage = ImageChops.add_modulo(baseimage, blendimage)
elif layer.blend == SUBTRACT_MODULO:
buffimage = Blend().subtract_modulo(baseimage, blendimage)
elif layer.blend == DIFFERENCE:
# buffimage = ImageChops.difference(baseimage, blendimage)
img1 = baseimage.convert("RGB")
img2 = blendimage.convert("RGB")
buffimage = ImageChops.difference(img1, img2)
buffimage = buffimage.convert("RGBA")
del img1, img2
# Buffer a merge between the base and blend
# according to the blend's alpha channel:
# the base shines through where the blend is less opaque.
# Merging the first layer to the transparent canvas
# works slightly different than the other layers.
# buffalpha = buffimage.split()[3]
buffalpha = buffimage.getchannel("A")
basealpha = baseimage.getchannel("A")
if i == 1:
buffimage = Image.composite(baseimage, buffimage, basealpha)
else:
buffimage = Image.composite(buffimage, baseimage, buffalpha)
# The alpha channel becomes a composite of this layer and the base:
            # the base's (optional) transparent background
            # is retained in areas where the blend layer
# is transparent as well.
buffalpha = ImageChops.lighter(buffalpha, basealpha) #baseimage.split()[3])
try:
buffimage.putalpha(buffalpha)
except Exception as err:
if kwdbg:
pdb.set_trace()
# TBD This needs fixing
print("PILLOW ERROR:", err)
# Apply the layer's opacity,
# merging the buff to the base with
# the given layer opacity.
baseimage = Image.blend(baseimage, buffimage, layer.alpha)
# Merge the base to the flattened canvas.
x = max(0, int( round( layer.x )) )
y = max(0, int( round( layer.y )) )
background.img.paste(baseimage, (x,y) )
del baseimage, buffimage, buffalpha, basealpha, blendimage
layers = list(layers)
layers.reverse()
for i in layers:
del self.layers[i].img
del self.layers[i]
img = Image.new("RGBA", (self.w,self.h), (255,255,255,0))
self.layers._set_bg( Layer(self, img, 0, 0, name="_bg") )
if len(self.layers) == 1:
self.layers.append(background)
else:
self.layers.insert(layers[-1], background)
del img
if kwlog:
stop = time.time()
print("Canvas.flatten( %s ) in %.3fsec." % (repr(layers), stop-start))
def export(self, name, ext=".png", format="PNG", unique=False):
"""Exports the flattened canvas.
Flattens the canvas.
PNG retains the alpha channel information.
Other possibilities are JPEG and GIF.
"""
start = time.time()
if not name:
name = "photobot_" + datestring()
if os.sep in name:
name = os.path.abspath( os.path.expanduser( name ))
folder, name = os.path.split( name )
if not folder:
folder = os.path.abspath( os.curdir )
folder = os.path.join( folder, "exports" )
folder = os.path.abspath( folder )
filename = name + ext
if name.endswith( ext ):
filename = name
if not os.path.exists( folder ):
try:
os.makedirs( folder )
except:
pass
try:
path = os.path.join( folder, filename )
path = os.path.abspath( path )
except:
pass
if unique or os.path.exists( path ):
path = uniquepath(folder, name, ext, nfill=2, startindex=1, sep="_", always=unique)
if kwdbg and 0:
# if debugging is on export each layer separately
basename = "photobot_" + datestring() + "_layer_%i_%s" + ext
background = self.layers._get_bg()
background.name = "Background"
layers = xrange(1, len(self.layers) )
for i in layers:
layer = self.layers[i]
# Determine which portion of the canvas
# needs to be updated with the overlaying layer.
x = max(0, layer.x)
y = max(0, layer.y)
w = min(background.w, layer.x+layer.w)
h = min(background.h, layer.y+layer.h)
base = background.img.crop((0, 0, background.w, background.h))
# Determine which piece of the layer
# falls within the canvas.
x = max(0, -layer.x)
y = max(0, -layer.y)
w -= layer.x
h -= layer.y
blend = layer.img.crop((x, y, w, h))
# alpha = blend.split()[3]
alpha = blend.getchannel("A")
buffer = Image.composite(blend, base, alpha)
n = basename % (i, layer.name)
path = os.path.join( folder, n )
buffer.save( path, format=format, optimize=False)
print( "export() DBG: '%s'" % path.encode("utf-8") )
self.flatten()
if format in ("JPEG",):
if self.layers[1].img.mode == "RGBA":
self.layers[1].img = self.layers[1].img.convert("RGB")
self.layers[1].img.save(path, format=format, optimize=False)
if kwlog:
print( "export() %s" % path.encode("utf-8") )
if kwlog:
stop = time.time()
print("Canvas.export(%s) in %.3f sec." % (name, stop-start))
return path
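    # Illustrative call: c.export("poster", ext=".jpg", format="JPEG") writes
    # ./exports/poster.jpg; with unique=True (or if the file already exists)
    # a serial number is appended via uniquepath(), e.g. poster_01.jpg.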
def draw(self, x=0, y=0, name="", ext=".png", format='PNG'):
"""Places the flattened canvas in NodeBox.
Exports to a temporary PNG file.
# Draws the PNG in NodeBox using the image() command.
# Removes the temporary file.
"""
#if not name:
# name = "photobot_" + datestring()
#if not ext:
# ext = ".png"
#folder = os.path.abspath( os.curdir )
#folder = os.path.join( folder, "exports" )
#if not os.path.exists( folder ):
# try:
# os.makedirs( folder )
# except:
# pass
try:
#filename = os.path.join( folder, name + ext )
#filename = os.path.abspath(filename)
# path = self.export(filename)
path = self.export(name, ext, format)
try:
#if nodeboxlib:
_ctx.image(path, x, y)
except NameError as err:
pass
if 0:
os.unlink( path )
return path
except Exception as err:
print(err)
print()
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_tb)
print()
def preferences(self, interpolation=INTERPOLATION):
"""Settings that influence image manipulation.
Currently, only defines the image interpolation, which
can be set to NEAREST, BICUBIC, BILINEAR or LANCZOS.
"""
self.interpolation = interpolation
#
# Some stack operations
#
# some inspiration from a forth wiki page
# dup ( a -- a a )
# drop ( a -- )
# swap ( a b -- b a )
# over ( a b -- a b a )
# rot ( a b c -- b c a )
# nip ( a b -- b ) swap drop ;
# tuck ( a b -- b a b ) swap over ;
@property
def top(self):
"""Interface to top layer.
"""
return self.layers[-1]
@property
def topindex(self):
"""get index of top layer.
"""
return len(self.layers)-1
@property
def dup(self):
"""Duplicate top layer/stackelement.
"""
layer = self.top.copy()
layer.canvas = self
self.layers.append( layer )
return self.top
def copy(self):
"""Returns a copy of the canvas.
"""
_canvas = canvas( self.w, self.h )
_canvas.interpolation = self.interpolation
_canvas.layers = Layers()
_canvas.w = self.w
_canvas.h = self.h
for layer in self.layers:
layercopy = layer.copy()
layercopy.canvas = self
_canvas.layer( layercopy )
return _canvas
def canvas(w, h):
return Canvas(w, h)
class Layers(list):
"""Extends the canvas.layers[] list so it indexes layers names.
When the index is an integer, returns the layer at that index.
When the index is a string, returns the first layer with that name.
The first element, canvas.layers[0],
is the transparent background and must remain untouched.
"""
def __getitem__(self, index):
if type(index) in (int, long):
return list.__getitem__(self, index)
elif type(index) in (pstr, punicode):
for layer in self:
if layer.name == index:
return layer
return None
def _get_bg(self):
return list.__getitem__(self, 0)
def _set_bg(self, layer):
list.__setitem__(self, 0, layer)
class Layer:
"""Implements a layer on the canvas.
A canvas layer stores an image at a given position on the canvas,
and all the Photoshop transformations possible for this layer:
    duplicate(), desaturate(), overlay(), rotate(), and so on.
"""
def __init__(self, canvas, img, x=0, y=0, name=""):
self.canvas = canvas
self.name = name
self.img = img
self.x = x
self.y = y
self.w = img.size[0]
self.h = img.size[1]
self.alpha = 1.0
self.blend = NORMAL
self.pixels = Pixels(self.img, self)
def prnt(self):
# for debugging
print("-" * 20)
print( "name: '%s' " % self.name.encode("utf-8") )
print("xy: %i %i" % (self.x, self.y) )
print("wh: %i %i" % (self.w, self.h) )
print("alpha: %.2f" % self.alpha)
print("blend: %.2f" % self.blend)
print("-" * 20)
def index(self):
"""Returns this layer's index in the canvas.layers[].
Searches the position of this layer in the canvas'
layers list, return None when not found.
"""
for i in xrange(len(self.canvas.layers)):
if self.canvas.layers[i] == self:
break
if self.canvas.layers[i] == self:
return i
else:
return None
def copy(self):
"""Returns a copy of the layer.
This is different from the duplicate() method,
which duplicates the layer as a new layer on the canvas.
The copy() method returns a copy of the layer
that can be added to a different canvas.
"""
layer = Layer(None, self.img.copy(), self.x, self.y, self.name)
layer.w = self.w
layer.h = self.h
layer.alpha = self.alpha
layer.blend = self.blend
return layer
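    # Sketch (hedged): copy() detaches the layer so it can be re-attached to
    # another canvas, for example:
    #
    #   dup = c1.top.copy()
    #   c2.layer(dup.img, dup.x, dup.y, name=dup.name)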
def delete(self):
"""Removes this layer from the canvas.
"""
i = self.index()
if i != None:
del self.canvas.layers[i]
def up(self):
"""Moves the layer up in the stacking order.
"""
i = self.index()
if i != None:
del self.canvas.layers[i]
i = min(len(self.canvas.layers), i+1)
self.canvas.layers.insert(i, self)
def down(self):
"""Moves the layer down in the stacking order.
"""
i = self.index()
if i != None:
del self.canvas.layers[i]
i = max(0, i-1)
self.canvas.layers.insert(i, self)
def bounds(self):
"""Returns the size of the layer.
This is the width and height of the bounding box,
the invisible rectangle around the layer.
"""
return self.img.size
def select(self, path, feather=True):
"""Applies the polygonal lasso tool on a layer.
        The path parameter is a list of points,
either [x1, y1, x2, y2, x3, y3, ...]
or [(x1,y1), (x2,y2), (x3,y3), ...]
The parts of the layer that fall outside
this polygonal area are cut.
The selection is not anti-aliased,
but the feather parameter creates soft edges.
"""
w, h = self.img.size
mask = Image.new("L", (w,h), 0)
draw = ImageDraw.Draw(mask)
draw.polygon(path, fill=255)
if feather:
mask = mask.filter(ImageFilter.SMOOTH_MORE)
mask = mask.filter(ImageFilter.SMOOTH_MORE)
mask = ImageChops.darker(mask, self.img.getchannel("A")) #self.img.split()[3])
self.img.putalpha(mask)
def mask(self):
"""Masks the layer below with this layer.
Commits the current layer to the alpha channel of
the previous layer. Primarily, mask() is useful when
using gradient layers as masks on images below.
For example:
canvas.layer("image.jpg")
canvas.gradient()
canvas.layer(2).flip()
canvas.layer(2).mask()
Adds a white-to-black linear gradient to
the alpha channel of image.jpg,
making it evolve from opaque on
the left to transparent on the right.
"""
if len(self.canvas.layers) < 2:
return
i = self.index()
if i == 0:
return
layer = self.canvas.layers[i-1]
alpha = Image.new("L", layer.img.size, 0)
# Make a composite of the mask layer in grayscale
# and its own alpha channel.
mask = self.canvas.layers[i]
flat = ImageChops.darker(mask.img.convert("L"), mask.img.getchannel("A")) #mask.img.split()[3])
alpha.paste(flat, (mask.x,mask.y))
alpha = ImageChops.darker(alpha, layer.img.getchannel("A")) #layer.img.split()[3])
layer.img.putalpha(alpha)
self.delete()
def duplicate(self):
"""Creates a copy of the current layer.
This copy becomes the top layer on the canvas.
"""
i = self.canvas.layer(self.img.copy(), self.x, self.y, self.name)
clone = self.canvas.layers[i]
clone.alpha = self.alpha
clone.blend = self.blend
def opacity(self, a=100):
self.alpha = a * 0.01
def multiply(self):
self.blend = MULTIPLY
def add(self):
self.blend = ADD
def subtract(self):
self.blend = SUBTRACT
def add_modulo(self):
self.blend = ADD_MODULO
def subtract_modulo(self):
self.blend = SUBTRACT_MODULO
def difference(self):
self.blend = DIFFERENCE
def screen(self):
self.blend = SCREEN
def overlay(self):
self.blend = OVERLAY
def hue(self):
self.blend = HUE
def color(self):
self.blend = COLOR
def brightness(self, value=1.0):
"""Increases or decreases the brightness in the layer.
The given value is a percentage to increase
or decrease the image brightness,
for example 0.8 means brightness at 80%.
"""
if value > 5:
value = value * 0.01
b = ImageEnhance.Brightness(self.img)
self.img = b.enhance(value)
def contrast(self, value=1.0):
"""Increases or decreases the contrast in the layer.
The given value is a percentage to increase
or decrease the image contrast,
for example 1.2 means contrast at 120%.
"""
# this crashes sometimes
try:
if value > 5:
value = value * 0.01
c = ImageEnhance.Contrast(self.img)
self.img = c.enhance(value)
except:
pass
def desaturate(self):
"""Desaturates the layer, making it grayscale.
Instantly removes all color information from the layer,
        while maintaining its alpha channel.
"""
# alpha = self.img.split()[3]
alpha = self.img.getchannel("A")
self.img = self.img.convert("L")
self.img = self.img.convert("RGBA")
self.img.putalpha(alpha)
def colorize(self, black, white, mid=None,
blackpoint=0, whitepoint=255, midpoint=127):
"""Use the ImageOps.colorize() on desaturated layer.
"""
#
# alpha = self.img.split()[3]
alpha = self.img.getchannel("A")
img = self.img.convert("L")
img = ImageOps.colorize(img, black, white, mid,
blackpoint=0, whitepoint=255, midpoint=127)
img = img.convert("RGBA")
img.putalpha(alpha)
self.img = img
def posterize(self, bits=8):
alpha = self.img.getchannel("A")
img = self.img.convert("RGB")
img = ImageOps.posterize(img, bits)
img = img.convert("RGBA")
img.putalpha(alpha)
self.img = img
def solarize(self, threshhold):
# alpha = self.img.split()[3]
alpha = self.img.getchannel("A")
img = self.img.convert("RGB")
img = ImageOps.solarize(img, threshhold)
img = img.convert("RGBA")
img.putalpha(alpha)
self.img = img
def autocontrast(self, cutoff=0, ignore=None):
if 0: #not (1 <= bits <= 8):
return
# alpha = self.img.split()[3]
alpha = self.img.getchannel("A")
img = self.img.convert("RGB")
img = ImageOps.autocontrast(img, cutoff, ignore)
img = img.convert("RGBA")
img.putalpha(alpha)
self.img = img
def deform( self, deformer, resample=BICUBIC ):
self.img = ImageOps.deform(self.img, deformer, resample)
def equalize(self, mask=None):
alpha = self.img.getchannel("A")
img = self.img.convert("RGB")
img = ImageOps.equalize(img, mask)
img = img.convert("RGBA")
img.putalpha(alpha)
self.img = img
def invert(self):
"""Inverts the layer.
"""
self.img = invertimage( self.img )
def translate(self, x, y):
"""Positions the layer at the given coordinates.
The x and y parameters define where to position
the top left corner of the layer,
measured from the top left of the canvas.
"""
self.x = int( round( x ))
self.y = int( round( y ))
def scale(self, w=1.0, h=1.0):
"""Resizes the layer to the given width and height.
When width w or height h is a floating-point number,
        it is interpreted as a percentage of the current size,
otherwise scales to the given size in pixels.
"""
w0, h0 = self.img.size
if type(w) == float:
w = int( round( w*w0 ) )
if type(h) == float:
h = int( round( h*h0 ) )
self.img = self.img.resize((w,h), resample=LANCZOS)
self.w = w
self.h = h
def rotate(self, angle):
"""Rotates the layer.
Rotates the layer by given angle.
Positive numbers rotate counter-clockwise,
negative numbers rotate clockwise.
Rotate commands are executed instantly,
so many subsequent rotates will distort the image.
"""
# When a layer rotates, its corners will fall
# outside of its defined width and height.
# Thus, its bounding box needs to be expanded.
# Calculate the diagonal width, and angle from
# the layer center. This way we can use the
# layers's corners to calculate the bounding box.
def mid( t1, t2, makeint=True ):
# calculate the middle between t1 and t2
return int( round( (t2-t1) / 2.0 ))
w0, h0 = self.img.size
diag0 = sqrt(pow(w0,2) + pow(h0,2))
d_angle = degrees(asin((w0*0.5) / (diag0*0.5)))
angle = angle % 360
if ( angle > 90
and angle <= 270):
d_angle += 180
w = sin(radians(d_angle + angle)) * diag0
w = max(w, sin(radians(d_angle - angle)) * diag0)
w = int( round( abs(w) ))
h = cos(radians(d_angle + angle)) * diag0
h = max(h, cos(radians(d_angle - angle)) * diag0)
h = int( round( abs(h) ))
diag1 = int( round( diag0 ))
# The rotation box's background color
# is the mean pixel value of the rotating image.
# This is the best option to avoid borders around
# the rotated image.
bg = ImageStat.Stat(self.img).mean
bg = (int(bg[0]), int(bg[1]), int(bg[2]), 0)
box = Image.new("RGBA", (diag1,diag1), bg)
dw02 = mid( w0, diag0 ) # (diag0-w0)/2
dh02 = mid( h0, diag0 ) # (diag0-h0)/2
box.paste(self.img, (dw02, dh02))
box = box.rotate(angle, Image.BICUBIC)
dw2 = mid(w, diag0) # int( (diag0-w) / 2.0 )
dh2 = mid(h, diag0) #int( (diag0-h) / 2.0 )
box = box.crop(( dw2+2, dh2, diag1-dw2, diag1-dh2))
self.img = box
# Since rotate changes the bounding box size,
# update the layers' width, height, and position,
# so it rotates from the center.
self.x += mid( w, self.w ) # int( (self.w-w)/2.0 )
self.y += mid( h, self.h ) # int( (self.h-h)/2.0 )
self.w = w
self.h = h
def distort(self, x1=0,y1=0, x2=0,y2=0, x3=0,y3=0, x4=0,y4=0):
"""Distorts the layer.
Distorts the layer by translating
the four corners of its bounding box to the given coordinates:
upper left (x1,y1), upper right(x2,y2),
lower right (x3,y3) and lower left (x4,y4).
"""
w, h = self.img.size
        quad = (-x1,-y1, -x4,h-y4, w-x3,h-y3, w-x2,-y2)
# quad = (x1,y1, x2,y2, x3,y3, x4,y4) #, LANCZOS)
self.img = self.img.transform(self.img.size, Image.QUAD, quad)
def flip(self, axis=HORIZONTAL):
"""Flips the layer, either HORIZONTAL or VERTICAL.
"""
if axis & HORIZONTAL:
self.img = self.img.transpose(Image.FLIP_LEFT_RIGHT)
if axis & VERTICAL:
self.img = self.img.transpose(Image.FLIP_TOP_BOTTOM)
def crop( self, bounds):
"""Crop a pillow image at bounds(left, top, right, bottom)
"""
w0, h0 = self.img.size
x, y = self.x, self.y
left, top, right, bottom = bounds
left = max(x, left)
top = max(y, top)
right = min(right, w0)
bottom = min(bottom, h0)
self.img = self.img.crop( (left, top, right, bottom) )
self.w, self.h = self.img.size
def blur(self):
"""Blurs the layer.
"""
self.img = self.img.filter(ImageFilter.BLUR)
def boxblur(self, radius=2):
"""Blurs the layer.
"""
self.img = self.img.filter( ImageFilter.BoxBlur( radius ) )
# new
def contour(self):
"""Contours the layer.
"""
self.img = self.img.filter(ImageFilter.CONTOUR)
# new
def detail(self):
"""Details the layer.
"""
self.img = self.img.filter(ImageFilter.DETAIL)
# new
def edge_enhance(self):
"""Edge enhances the layer.
"""
self.img = self.img.filter(ImageFilter.EDGE_ENHANCE)
# new
def edge_enhance_more(self):
"""Edge enhances more the layer.
"""
self.img = self.img.filter(ImageFilter.EDGE_ENHANCE_MORE)
# new
def emboss(self):
"""Embosses the layer.
"""
self.img = self.img.filter(ImageFilter.EMBOSS)
# new
def find_edges(self):
"""Find edges in the layer.
"""
alpha = self.img.getchannel("A")
self.img = self.img.filter(ImageFilter.FIND_EDGES)
self.img = self.img.convert("RGBA")
self.img.putalpha(alpha)
# new
def smooth(self):
"""Smoothes the layer.
"""
self.img = self.img.filter(ImageFilter.SMOOTH)
# new
def smooth_more(self):
"""Smoothes the layer more.
"""
self.img = self.img.filter(ImageFilter.SMOOTH_MORE)
def sharpen(self, value=1.0):
"""Increases or decreases the sharpness in the layer.
The given value is a percentage to increase
or decrease the image sharpness,
for example 0.8 means sharpness at 80%.
"""
s = ImageEnhance.Sharpness(self.img)
self.img = s.enhance(value)
def convolute(self, kernel, scale=None, offset=0):
"""A (3,3) or (5,5) convolution kernel.
The kernel argument is a list with either 9 or 25 elements,
        the weights for the surrounding pixels to convolve.
"""
if len(kernel) == 9: size = (3,3)
elif len(kernel) == 25: size = (5,5)
else: return
if scale == None:
scale = 0
for x in kernel:
scale += x
if scale == 0:
scale = 1
f = ImageFilter.Kernel(size, kernel, scale=scale, offset=offset)
# alpha = self.img.split()[3]
alpha = self.img.getchannel("A")
img = self.img.convert("RGB")
# f = ImageFilter.BuiltinFilter()
# f.filterargs = size, scale, offset, kernel
img = img.filter(f)
img = img.convert("RGBA")
img.putalpha( alpha )
self.img = img
def statistics(self):
alpha = self.img.getchannel("A")
return ImageStat.Stat(self.img, alpha) #self.img.split()[3])
def levels(self):
"""Returns a histogram for each RGBA channel.
Returns a 4-tuple of lists, r, g, b, and a.
        Each list has 256 items, a count for each pixel value.
"""
h = self.img.histogram()
        r = h[0:256]
        g = h[256:512]
        b = h[512:768]
        a = h[768:1024]
return r, g, b, a
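    # Hedged example of reading the histogram returned by levels()
    # (a Layer instance `lyr` is assumed):
    #
    #   r, g, b, a = lyr.levels()
    #   dark_red = sum(r[:128])     # pixels with a red value below 128
    #   bright_red = sum(r[128:])   # pixels with a red value of 128 or more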
class Blend:
"""Layer blending modes.
Implements additional blending modes to those present in PIL.
    These blending functions cannot be used separately from
    the canvas.flatten() method, where the alpha compositing
    of two layers is handled.
    Since these blend modes are implemented in pure Python
    rather than in a C library, they are very slow.
"""
def subtract(self, img1, img2, scale=1.0, offset=0):
base = img1.convert("RGB")
blend = img2.convert("RGB")
result = ImageChops.subtract(base, blend, scale=scale, offset=offset)
result = result.convert("RGBA")
return result
def subtract_modulo(self, img1, img2):
base = img1.convert("RGB")
blend = img2.convert("RGB")
result = ImageChops.subtract_modulo(base, blend)
result = result.convert("RGBA")
return result
def overlay(self, img1, img2):
"""Applies the overlay blend mode.
Overlays image img2 on image img1.
The overlay pixel combines multiply and screen:
it multiplies dark pixels values and screen light values.
Returns a composite image with the alpha channel retained.
"""
p1 = list( img1.getdata() )
p2 = list( img2.getdata() )
for i in xrange(len(p1)):
p3 = ()
for j in xrange(len(p1[i])):
a = p1[i][j] / 255.0
b = p2[i][j] / 255.0
# When overlaying the alpha channels,
# take the alpha of the most transparent layer.
if j == 3:
# d = (a+b) * 0.5
# d = a
d = min(a,b)
elif a > 0.5:
d = 2 * (a+b - a*b)-1
else:
d = 2*a*b
p3 += ( int( round(d * 255.0)), )
p1[i] = p3
img = Image.new("RGBA", img1.size, 255)
img.putdata(p1)
return img
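    # Worked per-pixel example for overlay(), with channel values scaled
    # to 0..1: for a base value a=0.25 and blend value b=0.5 the dark branch
    # applies, d = 2*a*b = 0.25; for a=0.75 and b=0.5 the light branch
    # applies, d = 2*(a+b - a*b) - 1 = 2*0.875 - 1 = 0.75.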
def hue(self, img1, img2):
"""Applies the hue blend mode.
Hues image img1 with image img2.
The hue filter replaces the hues of pixels in img1
with the hues of pixels in img2.
Returns a composite image with the alpha channel retained.
"""
p1 = list(img1.getdata())
p2 = list(img2.getdata())
for i in xrange(len(p1)):
r1, g1, b1, a1 = p1[i]
r1 = r1 / 255.0
g1 = g1 / 255.0
b1 = b1 / 255.0
h1, s1, v1 = colorsys.rgb_to_hsv(r1, g1, b1)
r2, g2, b2, a2 = p2[i]
r2 = r2 / 255.0
g2 = g2 / 255.0
b2 = b2 / 255.0
h2, s2, v2 = colorsys.rgb_to_hsv(r2, g2, b2)
r3, g3, b3 = colorsys.hsv_to_rgb(h2, s1, v1)
r3 = int( round( r3*255.0 ))
g3 = int( round( g3*255.0 ))
b3 = int( round( b3*255.0 ))
p1[i] = (r3, g3, b3, a1)
img = Image.new("RGBA", img1.size, 255)
img.putdata(p1)
return img
def color(self, img1, img2):
"""Applies the color blend mode.
Colorize image img1 with image img2.
The color filter replaces the hue and saturation of pixels in img1
with the hue and saturation of pixels in img2.
Returns a composite image with the alpha channel retained.
"""
p1 = list(img1.getdata())
p2 = list(img2.getdata())
for i in xrange(len(p1)):
r1, g1, b1, a1 = p1[i]
r1 = r1 / 255.0
g1 = g1 / 255.0
b1 = b1 / 255.0
h1, s1, v1 = colorsys.rgb_to_hsv(r1, g1, b1)
r2, g2, b2, a2 = p2[i]
r2 = r2 / 255.0
g2 = g2 / 255.0
b2 = b2 / 255.0
h2, s2, v2 = colorsys.rgb_to_hsv(r2, g2, b2)
r3, g3, b3 = colorsys.hsv_to_rgb(h2, s2, v1)
r3 = int( round( r3*255.0 ))
g3 = int( round( g3*255.0 ))
b3 = int( round( b3*255.0 ))
p1[i] = (r3, g3, b3, a1)
img = Image.new("RGBA", img1.size, 255)
img.putdata(p1)
return img
class Pixels:
"""Provides direct access to a layer's pixels.
The layer.pixels[] contains all pixel values
in a 1-dimensional array.
Each pixel is a tuple containing (r,g,b,a) values.
After the array has been updated, layer.pixels.update()
must be called for the changes to commit.
"""
def __init__(self, img, layer):
self.layer = layer
self.img = img
self.data = None
def __getitem__(self, i):
w, h = self.img.size
noofpixels = w * h
if i >= noofpixels:
i -= noofpixels
if i < 0:
i += noofpixels
if self.data == None:
self.data = list(self.img.getdata())
return self.data[i]
def __setitem__(self, i, rgba):
w, h = self.img.size
noofpixels = w * h
if i >= noofpixels:
i -= noofpixels
if i < 0:
i += noofpixels
if self.data == None:
self.data = list(self.img.getdata())
self.data[i] = rgba
def __iter__(self):
for i in xrange(len(self)):
yield self[i]
def __len__(self):
w, h = self.img.size
return w * h
def update(self):
if self.data != None:
self.img.putdata(self.data)
self.data = None
def convolute(self, kernel, scale=None, offset=0):
"""A (3,3) or (5,5) convolution kernel.
The kernel argument is a list with either 9 or 25 elements,
        the weights for the surrounding pixels to convolve.
"""
if len(kernel) == 9: size = (3,3)
elif len(kernel) == 25: size = (5,5)
else: return
if scale == None:
scale = 0
for x in kernel:
scale += x
if scale == 0:
scale = 1
# f = ImageFilter.BuiltinFilter()
# f.filterargs = size, scale, offset, kernel
f = ImageFilter.Kernel(size, kernel, scale=scale, offset=offset)
self.layer.img = self.layer.img.filter(f)
#
# nodebox & standalone pillow tools
#
def makeunicode(s, srcencoding="utf-8", normalizer="NFC"):
typ = type(s)
# convert to str first; for number types etc.
if typ not in (punicode,):
if typ not in (pstr,):
s = str(s)
try:
s = punicode( s, srcencoding )
except TypeError as err:
# pdb.set_trace()
print( "makeunicode(): %s" % repr(err) )
print( "%s - %s" % (type(s), repr(s)) )
return s
if typ in (punicode,):
s = unicodedata.normalize(normalizer, s)
return s
def uniquepath(folder, filenamebase, ext, nfill=1, startindex=1, sep="_", always=False):
folder = os.path.abspath( folder )
if not always:
path = os.path.join(folder, filenamebase + ext )
if not os.path.exists( path ):
return path
n = startindex
while True:
serialstring = str(n).rjust(nfill, "0")
filename = filenamebase + sep + serialstring + ext
fullpath = os.path.join(folder, filename)
if n >= 10**nfill:
nfill = nfill + 1
if not os.path.exists(fullpath):
return fullpath
n += 1
def hashFromString( s ):
h = hashlib.sha1()
    h.update( makeunicode(s).encode("utf-8") )
return h.hexdigest()
def datestring(dt = None, dateonly=False, nospaces=True, nocolons=True):
"""Make an ISO datestring. The defaults are good for using the result of
'datestring()' in a filename.
"""
if not dt:
now = str(datetime.datetime.now())
else:
now = str(dt)
if not dateonly:
now = now[:19]
else:
now = now[:10]
if nospaces:
now = now.replace(" ", "_")
if nocolons:
now = now.replace(":", "")
return now
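# Illustrative outputs (the date itself is hypothetical):
#   datestring()              -> '2021-03-01_120533'
#   datestring(dateonly=True) -> '2021-03-01'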
def grid(cols, rows, colSize=1, rowSize=1, shuffled=False):
"""Returns an iterator that contains coordinate tuples.
The grid can be used to quickly create grid-like structures.
A common way to use them is:
for x, y in grid(10,10,12,12):
rect(x,y, 10,10)
"""
# Prefer using generators.
rowRange = range( int(rows) )
colRange = range( int(cols) )
# Shuffled needs a real list, though.
if (shuffled):
rowRange = list(rowRange)
colRange = list(colRange)
random.shuffle(rowRange)
random.shuffle(colRange)
for y in rowRange:
for x in colRange:
yield (x*colSize, y*rowSize)
#
# image tools section
#
def invertimage( img ):
# alpha = img.split()[3]
alpha = img.getchannel("A")
img = img.convert("RGB")
img = ImageOps.invert(img)
img = img.convert("RGBA")
img.putalpha(alpha)
return img
def cropimage( img, bounds):
"""Crop a pillow image at bounds(left, top, right, bottom)
"""
return img.crop( bounds )
def splitrect( left, top, right, bottom, hor=True, t=0.5 ):
"""Split a PIL image horizontally or vertically.
A split is horizontal if the splitline is horizontal.
Return a list with images.
"""
# w,h = img.size
w = int( round( right-left ))
h = int( round( bottom-top ))
w2 = int( round( w * t ))
h2 = int( round( h * t ))
if hor:
rects = [ (left, top, right, top+h2), (left, top+h2+1, right, bottom) ]
else:
        rects = [ (left, top, left+w2, bottom), (left+w2+1, top, right, bottom) ]
return rects
def splitimage( img ):
pass
# gridsizeh = w // hor
# remainderh = w % hor
# noofmainchunks = noofrecords // chunksize
# remainingrecords = noofrecords % chunksize
"""
with Image.open("hopper.jpg") as im:
# The crop method from the Image module takes four coordinates as input.
# The right can also be represented as (left+width)
# and lower can be represented as (upper+height).
(left, upper, right, lower) = (20, 20, 100, 100)
# Here the image "im" is cropped and assigned to new variable im_crop
im_crop = im.crop((left, upper, right, lower))
"""
def aspectRatio(size, maxsize, height=False, width=False, assize=False):
"""Resize size=(w,h) to maxsize.
use height == maxsize if height==True
use width == maxsize if width==True
use max(width,height) == maxsize if width==height==False
"""
w, h = size
scale = 1.0
if width !=False:
currmax = w
elif height !=False:
currmax = h
else:
currmax = max( (w,h) )
if width and height:
currmax = min( (w,h) )
if currmax == maxsize:
# return 1.0
pass
elif maxsize == 0:
#return 1.0
pass
else:
scale = float(maxsize) / currmax
w = int( round( w*scale ) )
h = int( round( h*scale ) )
size = (w,h)
if assize:
return size
return scale
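# Hedged examples:
#   aspectRatio((800, 600), 400)                            -> 0.5
#   aspectRatio((800, 600), 400, assize=True)               -> (400, 300)
#   aspectRatio((800, 600), 400, height=True, assize=True)  -> (533, 400)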
def innerRect( w0, h0, w1, h1):
"""Create an inner size crop rect (0,0,w1,h1) + translation
"""
pass
def insetRect( rectangle, horInset, vertInset):
"""
"""
x, y, w, h = rectangle
dh = horInset / 2.0
dv = vertInset / 2.0
return x+dh, y+dv, w-horInset, h-vertInset
def cropImageToRatioHorizontal( layer, ratio ):
"""
"""
w, h = layer.bounds()
newwidth = int( round( h*ratio ))
d = int( newwidth / 2.0 )
x,y,w,h = insetRect( (0,0,w,h), d, 0 )
layer.img = layer.img.crop(box=(x,y,x+w,y+h))
return layer
def scaleLayerToHeight( layer, newheight ):
# get current image bounds
w, h = layer.bounds()
# calculate scale & apply
s = aspectRatio( (w,h), newheight, height=True)
layer.scale(s, s)
return layer
def placeImage(canv, path, x, y, maxsize=None, name="", width=True, height=False):
"""Create an image layer.
"""
if maxsize:
img1 = resizeImage(path, maxsize, width=width, height=height)
top = canv.layer(img1, name=name)
else:
top = canv.layer(path, name=name)
canv.top.translate(x, y)
w, h, = canv.top.bounds()
return top, w, h
def resizeImage( filepath, maxsize, orientation=True, width=True, height=True):
"""Get a downsampled image for use in layers.
"""
f = False
try:
img = Image.open(filepath)
except Exception as err:
print("\nresizeImage() Image.open() FAILED '%s'" % filepath.encode("utf-8"))
print(err)
return ""
# downsample the image
if maxsize:
w,h = aspectRatio( (img.size), maxsize,
                            height=height, width=width, assize=True)
img = img.resize( (w,h), resample=Image.LANCZOS)
# respect exif orientation
if orientation:
img = normalizeOrientationImage( img )
if f:
f.close()
return img.convert("RGBA")
def normalizeOrientationImage( img ):
"""Rotate an image according to exif info.
"""
rotation = 0
try:
info = img._getexif()
if 274 in info:
r = info[274]
if r == 3:
rotation = 180
elif r == 6:
rotation = -90
elif r == 8:
rotation = 90
except (Exception, IndexError) as err:
pass
if rotation != 0:
return img.rotate( rotation )
return img
#
# text section
#
def label( canvas, string, x, y, fontsize=18, fontpath="" ):
"""Needs to be written...
"""
# search for a usable font
systemarials = [
"C:\Windows\Fonts\arial.ttf",
"/Library/Fonts/Arial.ttf"]
systemarials.insert(0, fontpath)
font = False
for f in systemarials:
if os.path.exists( f ):
font = f
break
if not font:
return False
w,h = canvas.w, canvas.h
mask = Image.new("L", (w, h), 0)
blatt = Image.new("RGBA", (w, h), (0,0,0,0))
drawtext = ImageDraw.Draw( blatt )
drawmask = ImageDraw.Draw( mask )
# use a bitmap font
font = PIL.ImageFont.truetype(font=font, size=fontsize, index=0, encoding='')
drawtext.text((x, y), string, font=font, fill=(192,192,192,255))
drawmask.text((x, y), string, font=font, fill=192)
drawtext.text((x-1, y-1), string, font=font, fill=(0,0,0,255))
drawmask.text((x-1, y-1), string, font=font, fill=255)
canvas.layer( blatt )
canvas.layer( mask )
canvas.top.mask()
| python |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2019 Fortinet, Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
author:
- Miguel Angel Munoz (@magonzalez)
httpapi : fortios
short_description: HttpApi Plugin for Fortinet FortiOS Appliance or VM
description:
- This HttpApi plugin provides methods to connect to Fortinet FortiOS Appliance or VM via REST API
version_added: "2.9"
"""
import json
from ansible.plugins.httpapi import HttpApiBase
from ansible.module_utils.basic import to_text
from ansible.module_utils.six.moves import urllib
import re
from datetime import datetime
class HttpApi(HttpApiBase):
def __init__(self, connection):
super(HttpApi, self).__init__(connection)
self._conn = connection
self._ccsrftoken = ''
self._system_version = None
self._ansible_fos_version = 'v6.0.0'
self._ansible_galaxy_version = '1.1.8'
self._log = open("/tmp/fortios.ansible.log", "a")
def log(self, msg):
log_message = str(datetime.now())
log_message += ": " + str(msg) + '\n'
self._log.write(log_message)
self._log.flush()
def get_access_token(self):
'''this is only available after a module is initialized'''
token = self._conn.get_option('access_token') if 'access_token' in self._conn._options else None
return token
def set_become(self, become_context):
"""
Elevation is not required on Fortinet devices - Skipped
:param become_context: Unused input.
:return: None
"""
return None
def login(self, username, password):
"""Call a defined login endpoint to receive an authentication token."""
if (username is None or password is None) and self.get_access_token() is None:
raise Exception('Please provide access token or username/password to login')
if self.get_access_token() is None:
self.log('login with username and password')
data = "username=" + urllib.parse.quote(username) + "&secretkey=" + urllib.parse.quote(password) + "&ajax=1"
dummy, result_data = self.send_request(url='/logincheck', data=data, method='POST')
self.log('login with user: %s %s' % (username, 'succeeds' if result_data[0] == '1' else 'fails'))
if result_data[0] != '1':
raise Exception('Wrong credentials. Please check')
# If we succeed to login, we retrieve the system status first
else:
self.log('login with access token')
self.send_request(url='/logincheck')
status, _ = self.send_request(url='/api/v2/cmdb/system/interface?vdom=root&action=schema')
if status == 401:
raise Exception('Invalid access token. Please check')
self.update_system_version()
def logout(self):
""" Call to implement session logout."""
self.log('logout')
self.send_request(url='/logout', method="POST")
def update_auth(self, response, response_text):
"""
Get cookies and obtain value for csrftoken that will be used on next requests
:param response: Response given by the server.
        :param response_text: Unused input.
:return: Dictionary containing headers
"""
if self.get_access_token() is None:
headers = {}
for attr, val in response.getheaders():
if attr == 'Set-Cookie' and 'APSCOOKIE_' in val:
headers['Cookie'] = val
elif attr == 'Set-Cookie' and 'ccsrftoken=' in val:
csrftoken_search = re.search('\"(.*)\"', val)
if csrftoken_search:
self._ccsrftoken = csrftoken_search.group(1)
headers['x-csrftoken'] = self._ccsrftoken
self.log('update x-csrftoken: %s' % (self._ccsrftoken))
return headers
else:
self.log('using access token - setting header')
return {
"Accept": "application/json"
}
def handle_httperror(self, exc):
"""
        Propagate exceptions to the user.
:param exc: Exception
"""
self.log('Exception thrown from handling http: ' + to_text(exc))
return exc
def _concat_token(self, url):
if self.get_access_token():
token_pair = 'access_token=' + self.get_access_token()
return url + '&' + token_pair if '?' in url else url + '?' + token_pair
return url
def _concat_params(self, url, params):
if not params or not len(params):
return url
url = url + '?' if '?' not in url else url
for param_key in params:
param_value = params[param_key]
if url[-1] == '?':
url += '%s=%s' % (param_key, param_value)
else:
url += '&%s=%s' % (param_key, param_value)
return url
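    # Illustrative example (parameter names are arbitrary):
    #   self._concat_params('/api/v2/cmdb/system/global', {'vdom': 'root', 'format': 'name'})
    #   -> '/api/v2/cmdb/system/global?vdom=root&format=name'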
def send_request(self, **message_kwargs):
"""
Responsible for actual sending of data to the connection httpapi base plugin.
:param message_kwargs: A formatted dictionary containing request info: url, data, method
:return: Status code and response data.
"""
url = message_kwargs.get('url', '/')
if self.get_access_token() is not None:
url = self._concat_token(message_kwargs.get('url', '/'))
data = message_kwargs.get('data', '')
method = message_kwargs.get('method', 'GET')
params = message_kwargs.get('params', {})
url = self._concat_params(url, params)
self.log('send request: METHOD:%s URL:%s DATA:%s' % (method, url, data))
try:
response, response_data = self.connection.send(url, data, method=method)
json_formatted = to_text(response_data.getvalue())
return response.status, json_formatted
except Exception as err:
raise Exception(err)
def update_system_version(self):
"""
        Retrieve the system version from the FortiGate device.
"""
url = '/api/v2/cmdb/system/interface?vdom=root&action=schema'
status, result = self.send_request(url=url)
self.log('update sys ver: ' + str(status) + ' len=' + str(len(to_text(result))))
result_json = json.loads(result)
self._system_version = result_json.get('version', 'undefined')
self.log('system version: %s' % (self._system_version))
self.log('ansible version: %s' % (self._ansible_fos_version))
def get_system_version(self):
if not self._system_version:
raise Exception('Wrong calling stack, httpapi must login!')
system_version_words = self._system_version.split('.')
ansible_version_words = self._ansible_fos_version.split('.')
result = dict()
result['system_version'] = self._system_version
result['ansible_collection_version'] = self._ansible_fos_version + ' (galaxy: %s)' % (self._ansible_galaxy_version)
result['matched'] = system_version_words[0] == ansible_version_words[0] and system_version_words[1] == ansible_version_words[1]
if not result['matched']:
result['message'] = 'Please follow steps in FortiOS versioning notes: https://ansible-galaxy-fortios-docs.readthedocs.io/en/latest/version.html'
else:
result['message'] = 'versions match'
return result
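    # Illustrative shape of the returned dictionary (actual values depend on
    # the connected device and the installed collection):
    #   {'system_version': 'v6.0.5',
    #    'ansible_collection_version': 'v6.0.0 (galaxy: 1.1.8)',
    #    'matched': True,
    #    'message': 'versions match'}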
| python |
# =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from ..base_object import BaseObject
class ReorderInstalledStickerSets(BaseObject):
"""
Changes the order of installed sticker sets
:param is_masks: Pass true to change the order of mask sticker sets; pass false to change the order of ordinary sticker sets
:type is_masks: :class:`bool`
:param sticker_set_ids: Identifiers of installed sticker sets in the new correct order
:type sticker_set_ids: :class:`list[int]`
"""
ID: str = Field("reorderInstalledStickerSets", alias="@type")
is_masks: bool
sticker_set_ids: list[int]
@staticmethod
def read(q: dict) -> ReorderInstalledStickerSets:
return ReorderInstalledStickerSets.construct(**q)
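    # Hedged usage sketch (constructing the request object only; how it is
    # sent depends on the surrounding client code, which is not shown here,
    # and the sticker set identifiers are placeholders):
    #
    #   request = ReorderInstalledStickerSets(
    #       is_masks=False,
    #       sticker_set_ids=[111, 222],
    #   )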
| python |
from ..container import container
from ..parallel import rank0_obj
import logging
logger: logging.Logger = rank0_obj(container.get(logging.Logger))
# logger: logging.Logger = container.get(logging.Logger)
| python |
from django.contrib import admin
from personal.models import ToDo
from .to_do import ToDoAdmin
admin.site.register(ToDo, ToDoAdmin)
| python |
from swagger_server.models.beacon_concept import BeaconConcept # noqa: E501
from swagger_server.models.beacon_concept_with_details import BeaconConceptWithDetails # noqa: E501
from swagger_server.models.beacon_concept_detail import BeaconConceptDetail
from swagger_server.models.exact_match_response import ExactMatchResponse # noqa: E501
from beacon_controller import biolink_model as blm
from beacon_controller.providers import rhea
from beacon_controller.providers.xrefs import get_xrefs
from beacon_controller.const import Category, Predicate
def get_concept_details(concept_id): # noqa: E501
"""get_concept_details
    Retrieves details for a specified concept in the system, as specified by a (url-encoded) CURIE identifier of a concept known to the given knowledge source. # noqa: E501
:param concept_id: (url-encoded) CURIE identifier of concept of interest
:type concept_id: str
:rtype: BeaconConceptWithDetails
"""
concept_id = concept_id.upper()
if concept_id.startswith('EC:'):
concept = rhea.get_enzyme(concept_id)
if concept is None:
return None
_, ec_number = concept_id.split(':', 1)
synonyms = concept.get('Synonyms')
if isinstance(synonyms, str):
synonyms = synonyms.split(';')
else:
synonyms = []
return BeaconConceptWithDetails(
id=concept_id,
uri=f'https://enzyme.expasy.org/EC/{ec_number}',
name=concept.get('Name'),
symbol=None,
categories=[Category.protein.name],
description=None,
synonyms=synonyms,
exact_matches=[],
details=[]
)
elif concept_id.startswith('RHEA:'):
records = rhea.get_records(f"""
PREFIX rh:<http://rdf.rhea-db.org/>
SELECT
?equation
?reaction
WHERE {{
?reaction rh:accession "{concept_id}" .
?reaction rh:equation ?equation .
}}
LIMIT 1
""")
for record in records:
return BeaconConceptWithDetails(
id=concept_id,
uri=record['reaction']['value'],
name=record['equation']['value'],
symbol=None,
categories=[Category.molecular_activity.name],
description=None,
synonyms=[],
exact_matches=[],
details=[]
)
else:
records = rhea.get_records(f"""
PREFIX rh:<http://rdf.rhea-db.org/>
SELECT ?compoundAc ?chebi
(count(distinct ?reaction) as ?reactionCount)
?compoundName
WHERE {{
?reaction rdfs:subClassOf rh:Reaction .
?reaction rh:status rh:Approved .
?reaction rh:side ?reactionSide .
?reactionSide rh:contains ?participant .
?participant rh:compound ?compound .
OPTIONAL {{ ?compound rh:chebi ?chebi . }}
?compound rh:name ?compoundName .
?compound rh:accession "{concept_id}" .
}}
LIMIT 1
""")
        for record in records:
            try:
                uri = record['chebi']['value']
            except (KeyError, TypeError):
                uri = None
            return BeaconConceptWithDetails(
                id=concept_id,
                uri=uri,
name=record['compoundName']['value'],
symbol=None,
categories=[Category.chemical_substance.name],
description=None,
synonyms=[],
exact_matches=[],
details=[BeaconConceptDetail(tag='reactionCount', value=record['reactionCount']['value'])]
)
def get_concepts(keywords=None, categories=None, offset=None, size=None): # noqa: E501
"""get_concepts
    Retrieves a list of concepts in the beacon knowledge base whose names and/or synonyms match a set of keywords or substrings. The results should generally be returned in order of match quality: the highest ranked concepts exactly match the most keywords, in the same order as the keywords were given; lower quality hits with fewer or out-of-order keyword matches are returned lower in the list. # noqa: E501
:param keywords: (Optional) array of keywords or substrings against which to match concept names and synonyms
:type keywords: List[str]
    :param categories: (Optional) array of concept categories - specified as Biolink name labels such as gene, pathway, etc. - to which to constrain concepts matched by the main keyword search (see [Biolink Model](https://biolink.github.io/biolink-model) for the full list of terms)
:type categories: List[str]
:param offset: offset (cursor position) to next batch of statements of amount 'size' to return.
:type offset: int
:param size: maximum number of concept entries requested by the client; if this argument is omitted, then the query is expected to returned all the available data for the query
:type size: int
:rtype: List[BeaconConcept]
"""
if size is None:
size = 10
concepts = []
if categories is None or any(a in categories for a in blm.ancestors(Category.protein.name)):
enzymes, total_num_rows = rhea.find_enzymes(keywords, offset, size, metadata=True)
for enzyme in enzymes:
concepts.append(BeaconConcept(
id=f'EC:{enzyme.get("ID")}',
name=enzyme.get('Name'),
categories=[Category.protein.name],
description=None
))
if size is not None and len(concepts) < size:
offset = max(0, offset - total_num_rows) if offset is not None else None
size = size - len(concepts) if size is not None else None
elif size is not None and len(concepts) >= size:
return concepts
if categories is None or any(a in categories for a in blm.ancestors(Category.chemical_substance.name)):
compounds = rhea.find_compounds(keywords, offset=offset, limit=size)
for compound in compounds:
concepts.append(BeaconConcept(
id=compound.get('compoundAc').get('value'),
name=compound.get('compoundName').get('value'),
categories=[Category.chemical_substance.name],
description=None
))
return concepts
def get_exact_matches_to_concept_list(c): # noqa: E501
"""get_exact_matches_to_concept_list
    Given an input array of [CURIE](https://www.w3.org/TR/curie/) identifiers of known exactly matched concepts [*sensa*-SKOS](http://www.w3.org/2004/02/skos/core#exactMatch), retrieves the list of [CURIE](https://www.w3.org/TR/curie/) identifiers of additional concepts that are deemed by the given knowledge source to be exact matches to one or more of the input concepts **plus** whichever concept identifiers from the input list were specifically matched to these additional concepts, thus giving the whole known set of equivalent concepts known to this particular knowledge source. If an empty set is returned, then it can be assumed that the given knowledge source does not know of any new equivalent concepts matching the input set. The caller of this endpoint can then decide whether or not to treat its input identifiers as its own equivalent set. # noqa: E501
:param c: an array set of [CURIE-encoded](https://www.w3.org/TR/curie/) identifiers of concepts thought to be exactly matching concepts, to be used in a search for additional exactly matching concepts [*sensa*-SKOS](http://www.w3.org/2004/02/skos/core#exactMatch).
:type c: List[str]
:rtype: List[ExactMatchResponse]
"""
results = []
for conceptId in c:
if ':' not in conceptId:
continue
xrefs = get_xrefs(conceptId)
if xrefs != []:
results.append(ExactMatchResponse(
id=conceptId,
within_domain=True,
has_exact_matches=xrefs
))
else:
results.append(ExactMatchResponse(
id=conceptId,
within_domain=False,
has_exact_matches=[]
))
return results
| python |
import pyautogui as pt
import pyperclip as pc
from pynput.mouse import Controller, Button
from time import sleep
from whatsapp_responses import response
# Mouse click workaround for macOS
mouse = Controller()
# Instructions for our WhatsApp bot
class WhatsApp:
#define the starting values
def __init__(self, speed=.5, click_speed=.3):
self.speed = speed
self.click_speed = click_speed
self.message = ""
self.last_message = ""
    # Navigate to the green dots for new messages
def nav_green_dot(self):
try:
position = pt.locateOnScreen('green_dot.png', confidence=.7)
print(position)
pt.moveTo(position[0:2], duration =self.speed)
pt.moveRel(-100, 0, duration= self.speed)
pt.doubleClick(interval=self.click_speed)
except Exception as e:
print('Exception (nav_green_dot): ', e)
    # Navigate to our message input box
def nav_input_box(self):
try:
position = pt.locateOnScreen('paperclip.png', confidence=.7)
pt.moveTo(position[0:2], duration =self.speed)
pt.moveRel(100, 10, duration= self.speed)
pt.doubleClick(interval=self.click_speed)
except Exception as e:
print('Exception (nav_input_box): ', e)
    # Navigate to the message we want to respond to
def nav_message(self):
try:
position = pt.locateOnScreen('paperclip.png', confidence=.7)
pt.moveTo(position[0:2], duration =self.speed)
pt.moveRel(35, -50, duration= self.speed)
except Exception as e:
print('Exception (nav_message): ', e)
    # Copies the message that we want to process
def get_message(self):
mouse.click(Button.left, 3)
sleep(self.speed)
mouse.click(Button.right, 1)
sleep(self.speed)
pt.moveRel(10, 10, duration=self.speed)
mouse.click(Button.left, 1)
sleep(1)
self.message = pc.paste()
print('User says: ', self.message)
# send the message to the user
def send_message(self):
try:
#Checks whether the last message was the same
if self.message != self.last_message :
bot_response = response(self.message)
print('You say: ', bot_response)
pt.typewrite(bot_response, interval=.1)
pt.typewrite('\n') #sends the message (disable while testing)
                # then remember the last message
self.last_message = self.message
else:
print('No new message...')
except Exception as e:
print('Exception (send_message): ', e)
#close the response box
def nav_x(self):
try:
position = pt.locateOnScreen('paperclip.png', confidence=.7)
position = pt.locateOnScreen('x.png', confidence=.7)
pt.moveTo(position[0:2], duration =self.speed)
pt.moveRel(3, 10, duration= self.speed)
mouse.click(Button.left, 1)
except Exception as e:
print('Exception (nav_x): ', e)
wa_bot = WhatsApp(speed=.5, click_speed=.4)
sleep(2)
while True:
wa_bot.nav_green_dot()
wa_bot.nav_x()
wa_bot.nav_message()
wa_bot.get_message()
wa_bot.nav_input_box()
wa_bot.send_message()
sleep(20) | python |
import json
import logging
import os
import uuid
from datetime import datetime, timedelta
import boto3
import telegram
from telegram import InlineKeyboardMarkup, InlineKeyboardButton
logger = logging.getLogger()
if logger.handlers:
for handler in logger.handlers:
logger.removeHandler(handler)
logging.basicConfig(level=logging.INFO)
s3 = boto3.client("s3")
OK_RESPONSE = {
"statusCode": 200,
"headers": {"Content-Type": "application/json"},
"body": json.dumps("ok"),
}
ERROR_RESPONSE = {"statusCode": 400, "body": json.dumps("Oops, something went wrong!")}
BOT_USERMAME = os.environ.get("BOT_USERMAME")
def configure_telegram():
"""
Configures the bot with a Telegram Token.
Returns a bot instance.
"""
telegram_token = os.environ.get("TELEGRAM_TOKEN")
if not telegram_token:
logger.error("The TELEGRAM_TOKEN must be set")
raise NotImplementedError
return telegram.Bot(telegram_token)
bot = configure_telegram()
def handler(event, context):
logger.info(f"Event: {event}")
if event.get("httpMethod") == "POST" and event.get("body"):
update = telegram.Update.de_json(json.loads(event.get("body")), bot)
chat_id = update.effective_message.chat.id if update.effective_message else None
text = update.effective_message.text
attachment = update.effective_message.effective_attachment
if text in ["/start", f"/start@{BOT_USERMAME}"]:
bot.send_message(chat_id=chat_id, text="Beep boop I'm under construction!")
elif attachment:
bot.send_message(chat_id=chat_id, text="Processing...")
file_name = uuid.uuid4()
file_path = f"/tmp/{file_name}.mov"
attachment_file = bot.get_file(attachment.file_id)
attachment_file.download(file_path)
            with open(file_path, "rb") as reader:
                body = reader.read()
            # delete the temporary file only after its contents have been read
            os.remove(file_path)
            s3.put_object(
                Bucket=os.environ["INPUT_BUCKET_NAME"],
                Key=f"{file_name}.mov",
                Body=body,
                Expires=datetime.now() + timedelta(hours=1),
                Metadata={
                    "chat-id": str(chat_id),
                    "input-format": "mov",
                    "target-format": "mp4",
                },
            )
return OK_RESPONSE
return ERROR_RESPONSE
def on_convert(event, context):
logger.info(f"Event: {event}")
if "Records" not in event:
logger.info("Not a S3 invocation")
return
for record in event["Records"]:
if "s3" not in record:
logger.info("Not a S3 invocation")
continue
bucket = record["s3"]["bucket"]["name"]
key = record["s3"]["object"]["key"]
if bucket != os.environ["OUTPUT_BUCKET_NAME"]:
logger.info("Not an output bucket invocation")
continue
s3_object = s3.get_object(Bucket=bucket, Key=key)
chat_id = s3_object["Metadata"].get("chat-id")
bot.send_message(
chat_id=chat_id, text=f"https://{bucket}.s3.amazonaws.com/{key}"
)
def set_webhook(event, context):
"""
Sets the Telegram bot webhook.
"""
host = event.get("headers").get("Host")
stage = event.get("requestContext").get("stage")
url = f"https://{host}/{stage}/"
webhook = bot.set_webhook(url)
if webhook:
return OK_RESPONSE
return ERROR_RESPONSE
def build_inline_keyboard(file_name: str, file_extension: str) -> InlineKeyboardMarkup:
keyboard = []
    # target formats to offer; left as an empty placeholder in this version
    formats = []
    for format_name in formats:
        callback_data = f'{file_name}-{file_extension}__{format_name}'
        # InlineKeyboardMarkup expects a list of button rows, so wrap each button in its own row
        keyboard.append([InlineKeyboardButton(format_name, callback_data=callback_data)])
    return InlineKeyboardMarkup(keyboard)
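# Usage sketch (illustrative only; `formats` above is still an empty placeholder,
# and the chat_id / file values below are hypothetical):
#
#   markup = build_inline_keyboard("a1b2c3", "mov")
#   bot.send_message(chat_id=chat_id, text="Choose a target format:", reply_markup=markup)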
| python |
import pandas as pd
from IPython import embed
import sys
if __name__ == '__main__':
if len(sys.argv) < 2:
        print('please input params: <tsne.csv> file')
exit(1)
path = sys.argv[1]
df = pd.read_csv(path)
filtered_df = []
i=0
for idx, r in df.iterrows():
if(r['domain']=='askubuntu' and i < 700):
i+=1
filtered_df.append(r)
elif(r['domain']!='askubuntu'):
filtered_df.append(r)
filtered_df = pd.DataFrame(filtered_df)
filtered_df.to_csv(path+"_filtered")
# embed() | python |
class TimePattern(object):
def __str__(self):
raise NotImplementedError('Please implement __str__ function')
class SimpleTimer(TimePattern):
def __init__(self, seconds=0, minutes=0, hours=0):
self.seconds = seconds
self.minutes = minutes
self.hours = hours
def __str__(self):
total_seconds = self.hours * 3600 + self.minutes * 60 + self.seconds
return 'PT{0:0>2}:{1:0>2}:{2:0>2}'.format(total_seconds // 3600, total_seconds % 3600 // 60, total_seconds % 60)
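# Minimal usage sketch (not part of the original module): printing a SimpleTimer
# yields a normalised 'PThh:mm:ss' duration string.
if __name__ == '__main__':
    print(SimpleTimer(seconds=90))           # PT00:01:30
    print(SimpleTimer(hours=1, minutes=61))  # PT02:01:00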
| python |
from setuptools import setup, find_packages
setup(
name = "cascade",
version = "0.1",
packages = find_packages(),
install_requires = ['progressbar', 'imaplib2'],
author = "Oz Akan",
author_email = "[email protected]",
description = "Cascade copies e-mails between IMAP servers",
license = "Apache Version 2.o",
url = "https://github.com/humankeyboard/cascade",
classifiers = [
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7"
],
entry_points = {
'console_scripts' : [
'cascade = cmd.app:app'
]
}
)
| python |
# Module providing a common interface to the various clustering methods
try:
    # General imports
import numpy as np
import pylab
import sys
import platform
import matplotlib.pyplot as plt
import re
    # Local imports
import kmeans
import rkde
except:
exit(1)
""" Clustering """
# Clusters the data with the requested method
# Input:
# - M : the matrix of pairwise distances between the objects
# - methode : a string giving the name of the method (module name)
# - params : a list of the parameters required by the requested method
# - kmeans : params = [k, n_iter]
# - rkde : params = [bandwidth, prob]
# Output:
# - assign : an array giving, for each integer (object), its cluster number
# - nb_cluster : the number of clusters formed
def make_clusters(M, methode, params):
function = methode + ".do"
assign, nb_clusters = eval(function)(M, params[0], params[1])
return assign, nb_clusters
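# Usage sketch (the parameter values below are illustrative, not from the original file):
#   assign, nb_clusters = make_clusters(M, "kmeans", [3, 100])   # k = 3, n_iter = 100
#   assign, nb_clusters = make_clusters(M, "rkde", [0.5, 0.9])   # bandwidth = 0.5, prob = 0.9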
""" Lecture et affichage de donnees """
# Reads data from a file
# Input:
# - file_name : a string giving the name of the file to open
# - nb_item : number of lines to read (-1 to read everything, default -1)
# Output:
# - data : a list of lists of floats
def read_data(file_name, nb_item = -1):
f = open(file_name,'r')
data = []
cpt = 0
for line in f:
if (0 <= nb_item and nb_item <= cpt):
break
line = re.split('\s+', line) # '\s' matches whitespace characters
line = [float(x) for x in line if x != '']
data.append(line)
cpt += 1
f.close()
return data
# Displays a scatter plot of points
# Input:
# - data : a set of points given as an n*2 matrix
# - assign : an array of size n representing an assignment of [data]
def show(data, assign):
colors = "bgrcmyk"
symbols = ".ov18sp*h+xD_"
nb_clusters = max(assign) + 1
pylab.figure()
    mini = min( min(data[i][0] for i in range(len(data))), min(data[i][1] for i in range(len(data))) )
    maxi = max( max(data[i][0] for i in range(len(data))), max(data[i][1] for i in range(len(data))) )
pylab.xlim([mini, maxi])
pylab.ylim([mini, maxi])
if (nb_clusters < 8):
for i_k in range(nb_clusters):
pylab.plot([data[i][0] for i in range(len(data)) if assign[i] == i_k],
[data[i][1] for i in range(len(data)) if assign[i] == i_k],
colors[i_k] + ".")
else:
for i_k in range(nb_clusters):
pylab.plot( [data[i][0] for i in range(len(data)) if assign[i] == i_k],
[data[i][1] for i in range(len(data)) if assign[i] == i_k],
                        colors[i_k % 7] + symbols[int(i_k / 7)] )
pylab.show()
""" Lecture et ecriture d'une assignation """
# Reads a file in which an assignment is stored.
# Input:
# - file_name : path and name of the file
# Output:
# - assign : a numpy vector of integers
def read_assign(file_name):
f = open(file_name,'r')
assign_tmp = []
i = 0
for line in f:
try:
assign_tmp.append(int(line))
i = i + 1
except ValueError:
continue
f.close()
return np.array(assign_tmp)
# Writes an assignment to a file
# Input:
# - file_name : path and name of a file
# - assign : the assignment to write
# - nb_iter : the number of iterations performed by the algorithm (-1 if it is not
#             based on that principle)
# - s : the seed used for the clustering
def write_cluster(file_name, assign, nb_iter, s):
nb_data = len(assign)
nb_cluster = max(assign) + 1
f = open(file_name, 'w')
f.write('nb_cluster = ' + str(nb_cluster) + '\n')
f.write('nb_iter = ' + str(nb_iter) + '\n')
f.write('nb_data = ' + str(nb_data) + '\n')
f.write('seed = ' + str(s) + '\n')
for i in assign:
f.write(str(i) + '\n')
f.close()
""" Fonctions non encore retravaillees """
# Function to save images:
# data_file = file containing the data
# assign_file = file produced by the clustering, containing the assignment table
# file_figure = name of the file in which the image will be saved
# format = extension of the created file (pdf, svg, png...)
# example: save('cercles/cercles.txt', 'cercles_kmeans', 'figure_cercles_kmeans', 'pdf')
def save(data_file, assign_file,file_figure,format):
data = read_data(data_file)
    assign = read_assign(assign_file)
    nombre_clusters = np.amax(assign) + 1
plt.ioff()
fig = plt.figure()
colors = "bgrcmyk"
symbols = ".ov18sp*h+xD_"
mini = min( min([data[i][0] for i in range(len(data))]), min([data[i][1] for i in range(len(data))]) )
maxi = max( max([data[i][0] for i in range(len(data))]), max([data[i][1] for i in range(len(data))]) )
plt.xlim([mini, maxi])
plt.ylim([mini, maxi])
if (nombre_clusters < 8):
for i_k in range(nombre_clusters):
plt.plot([data[i][0] for i in range(len(data)) if assign[i] == i_k],
[data[i][1] for i in range(len(data)) if assign[i] == i_k],
colors[i_k] + ".")
else:
if (nombre_clusters < 85):
for i_k in range(nombre_clusters):
plt.plot( [data[i][0] for i in range(len(data)) if assign[i] == i_k],
[data[i][1] for i in range(len(data)) if assign[i] == i_k],
colors[i_k % 7] + symbols[int(i_k / 7)] )
else:
print("too many clusters")
if (platform.system() == "Windows"):
plt.savefig('C:/users/alex/documents/Alex/Cours/ENS/M1_Cours/Projet/data/Results/'+file_figure+'.'+format)
else:
plt.savefig('../data/Results/'+file_figure+'.'+format)
plt.close(fig)
| python |
from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, PasswordField
from wtforms import HiddenField, TextAreaField, FileField, SubmitField
from wtforms.validators import DataRequired, NumberRange
ATTR_DATA = 'data'
ATTR_ACTION = 'action'
ATTR_MANAGER = 'manager'
ATTR_KEY_LEN = 'length'
ATTR_KEY_E = 'keyE'
ATTR_KEY_D = 'keyD'
ATTR_KEY_N = 'keyN'
ACTION_GEN_KEY = 'gen_key'
ACTION_SET_KEY = 'set_key'
ACTION_GO = 'go'
ACTION_POST = 'post'
class AuthForm(FlaskForm):
login = StringField('Логин', validators=[DataRequired()])
passw = PasswordField('Пароль', validators=[DataRequired()])
submit = SubmitField('Войти')
class KeyGenForm(FlaskForm):
action = HiddenField(render_kw={'value': ACTION_GEN_KEY})
length = IntegerField('Длина ключа', validators=[DataRequired()])
submit = SubmitField('Сгенерировать')
class KeySetForm(FlaskForm):
action = HiddenField(render_kw={'value': ACTION_SET_KEY})
keyE = IntegerField('E', validators=[DataRequired(), NumberRange()])
keyD = IntegerField('D', validators=[DataRequired(), NumberRange()])
keyN = IntegerField('N', validators=[DataRequired(), NumberRange()])
submit = SubmitField('Установить')
class MessageForm(FlaskForm):
action = HiddenField(render_kw={'value': ACTION_GO})
message = TextAreaField()
inFile = FileField()
submit = SubmitField('Зашифровать')
| python |
# coding=utf-8
import random
from common import constant
from common import errcode
from dao.sms.sms_dao import SmsDao
from handlers.base.base_handler import BaseHandler
from mycelery.tasks import send_sms_task
class SmsChangePhoneHandler(BaseHandler):
methods = ['POST']
def __init__(self):
expect_request_para = {
"phone": None,
"common_param": None,
}
need_para = (
"phone",
"common_param",
)
super(SmsChangePhoneHandler, self).__init__(expect_request_para, need_para)
        # Special case: for this endpoint the sid may be empty
self.sid_control_level = constant.SID_CAN_BE_NULL
def _process_imp(self):
        # Save the verification code to the database
code = random.randint(1000, 9999)
SmsDao.insert(self.para_map["phone"], code, constant.SMS_CHANGE_PHONE)
        # Send the SMS
send_sms_task.send_change_phone_sms.delay(self.para_map["phone"], code)
self.ret_code = errcode.NO_ERROR
self.ret_msg = 'ok'
return
| python |
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
l1_unlinked = []
l2_unlinked = []
while l1 != None:
l1_unlinked.append(l1.val)
l1 = l1.next
while l2 != None:
l2_unlinked.append(l2.val)
l2 = l2.next
l1_unlinked.reverse()
l2_unlinked.reverse()
l1_comb = int(''.join(map(str, l1_unlinked)))
l2_comb = int(''.join(map(str, l2_unlinked)))
add_two = l1_comb + l2_comb
        add_two_list = [int(i) for i in str(add_two)]
        add_two_list.reverse()
        # rebuild a linked list (least-significant digit first), since the signature expects a ListNode
        dummy = ListNode(0)
        node = dummy
        for digit in add_two_list:
            node.next = ListNode(digit)
            node = node.next
        return dummy.next
| python |
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/labelbox/1_CocoExporter.ipynb (unless otherwise specified).
__all__ = ['UnknownFormatError', 'coco_from_json', 'make_coco_metadata', 'add_label', 'append_polygons_as_annotations',
'label_to_polygons', 'LOGGER']
# Cell
"""
Module for converting labelbox.com JSON exports to MS COCO format.
"""
# https://raw.githubusercontent.com/Labelbox/Labelbox/master/exporters/coco-exporter/coco_exporter.py
# Cell
import datetime as dt
import json
import logging
from typing import Any, Dict
from PIL import Image
import requests
from shapely import wkt
from shapely.geometry import Polygon
# Cell
class UnknownFormatError(Exception):
"""Exception raised for unknown label_format"""
def __init__(self, label_format):
Exception.__init__(self)
self.message = "Provided label_format '{}' is unsupported".format(label_format)
LOGGER = logging.getLogger(__name__)
def coco_from_json(labeled_data, coco_output, label_format='XY'):
"Writes labelbox JSON export into MS COCO format."
# read labelbox JSON output
with open(labeled_data, 'r') as file_handle:
label_data = json.loads(file_handle.read())
# setup COCO dataset container and info
coco = make_coco_metadata(label_data[0]['Project Name'], label_data[0]['Created By'],)
for data in label_data:
# Download and get image name
try:
add_label(coco, data['ID'], data['Labeled Data'], data['Label'], label_format)
except requests.exceptions.MissingSchema as exc:
LOGGER.warning(exc)
continue
except requests.exceptions.ConnectionError:
LOGGER.warning('Failed to fetch image from %s, skipping', data['Labeled Data'])
continue
with open(coco_output, 'w+') as file_handle:
file_handle.write(json.dumps(coco))
def make_coco_metadata(project_name: str, created_by: str) -> Dict[str, Any]:
"""Initializes COCO export data structure.
Args:
project_name: name of the project
created_by: email of the project creator
Returns:
The COCO export represented as a dictionary.
"""
return {
'info': {
'year': dt.datetime.now(dt.timezone.utc).year,
'version': None,
'description': project_name,
'contributor': created_by,
'url': 'labelbox.com',
'date_created': dt.datetime.now(dt.timezone.utc).isoformat()
},
'images': [],
'annotations': [],
'licenses': [],
'categories': []
}
def add_label(
coco: Dict[str, Any], label_id: str, image_url: str,
labels: Dict[str, Any], label_format: str):
"""Incrementally updates COCO export data structure with a new label.
Args:
coco: The current COCO export, will be incrementally updated by this method.
label_id: ID for the instance to write
image_url: URL to download image file from
labels: Labelbox formatted labels to use for generating annotation
label_format: Format of the labeled data. Valid options are: "WKT" and
"XY", default is "XY".
Returns:
The updated COCO export represented as a dictionary.
"""
image = {
"id": label_id,
"file_name": image_url,
"license": None,
"flickr_url": image_url,
"coco_url": image_url,
"date_captured": None,
}
response = requests.get(image_url, stream=True, timeout=10.0)
response.raw.decode_content = True
image['width'], image['height'] = Image.open(response.raw).size
coco['images'].append(image)
# remove classification labels (Skip, etc...)
if not callable(getattr(labels, 'keys', None)):
return
# convert label to COCO Polygon format
for category_name, label_data in labels.items():
try:
# check if label category exists in 'categories' field
category_id = [c['id']
for c in coco['categories']
if c['supercategory'] == category_name][0]
except IndexError:
category_id = len(coco['categories']) + 1
category = {
'supercategory': category_name,
'id': category_id,
'name': category_name
}
coco['categories'].append(category)
polygons = label_to_polygons(label_format, label_data)
append_polygons_as_annotations(coco, image, category_id, polygons)
def append_polygons_as_annotations(coco, image, category_id, polygons):
"Adds `polygons` as annotations in the `coco` export"
for polygon in polygons:
segmentation = []
for x_val, y_val in polygon.exterior.coords:
segmentation.extend([x_val, y_val])
annotation = {
"id": len(coco['annotations']) + 1,
"image_id": image['id'],
"category_id": category_id,
"segmentation": [segmentation],
"area": polygon.area, # float
"bbox": [polygon.bounds[0], polygon.bounds[1],
polygon.bounds[2] - polygon.bounds[0],
polygon.bounds[3] - polygon.bounds[1]],
"iscrowd": 0
}
coco['annotations'].append(annotation)
def label_to_polygons(label_format, label_data):
"Converts segmentation `label: String!` into polygons"
if label_format == 'WKT':
if isinstance(label_data, list): # V3
polygons = map(lambda x: wkt.loads(x['geometry']), label_data)
else: # V2
polygons = wkt.loads(label_data)
elif label_format == 'XY':
polygons = []
for xy_list in label_data:
if 'geometry' in xy_list: # V3
xy_list = xy_list['geometry']
# V2 and V3
if not isinstance(xy_list, list):
                    LOGGER.warning('Could not get a point list to construct polygon, skipping')
continue
else: # V2, or non-list
if not isinstance(xy_list, list) or not xy_list or 'x' not in xy_list[0]:
# skip non xy lists
                    LOGGER.warning('Could not get a point list to construct polygon, skipping')
continue
if len(xy_list) > 2: # need at least 3 points to make a polygon
polygons.append(Polygon(map(lambda p: (p['x'], p['y']), xy_list)))
else:
exc = UnknownFormatError(label_format=label_format)
LOGGER.exception(exc.message)
raise exc
return polygons | python |
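# Minimal usage sketch (file names are hypothetical, not taken from the original notebook):
#   coco_from_json('labelbox_export.json', 'coco_annotations.json', label_format='XY')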
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom flag type definitions."""
import gflags
import numpy
def DEFINE_linspace(name, default, help_string,
nonempty=False,
increasing=False,
flag_values=gflags.FLAGS,
**kwargs): # pylint: disable=invalid-name
"""Defines a 'linspace' flag.
The flag value should be specified as <lower>,<upper>,<count>. The
components are used as arguments to numpy.linspace, so they must be
parsable as float, float, and int, respectively. The parsed flag
value will be a 1-dimensional numpy.ndarray.
Args:
name: Name of the flag.
default: Default value (as unparsed string), or None if flag is unset by
default.
help_string: Helpful description of the flag.
nonempty: Indicates whether the flag value is required to be nonempty. If
True, None is still an allowable default. Use gflags.MarkFlagAsRequired
to disallow None.
increasing: Indicates whether the flag value should be an increasing array.
This is only enforced if the parsed value has >=2 elements.
flag_values: The gflags.FlagValues object in which to define the flag.
**kwargs: See gflags.DEFINE.
"""
gflags.DEFINE(_LinspaceParser(), name, default, help_string,
flag_values=flag_values, **kwargs)
if nonempty:
# numpy.array can't be implicitly converted to a boolean.
# pylint: disable=g-explicit-length-test
gflags.RegisterValidator(name, lambda v: len(v) > 0,
'--%s must specify a nonempty range.' % name,
flag_values=flag_values)
if increasing:
gflags.RegisterValidator(name, lambda v: len(v) < 2 or v[-1] > v[0],
                             '--%s must specify an increasing range.' % name,
flag_values=flag_values)
class _LinspaceParser(gflags.ArgumentParser):
"""Parser for 'linspace' flag type."""
def Parse(self, argument):
parts = argument.split(',')
if len(parts) != 3:
raise ValueError('Wrong number of components. Must be of the form '
'<lower>,<upper>,<count>', argument)
try:
lower, upper, count = float(parts[0]), float(parts[1]), int(parts[2])
except ValueError:
raise ValueError('Bad value. Components must be parsable as float, '
'float, and int, respectively', argument)
return numpy.linspace(lower, upper, count)
def Type(self):
return numpy.ndarray
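# Minimal usage sketch (the flag name and values are illustrative, not part of this module):
#
#   DEFINE_linspace('wind_speeds', '3.0,15.0,5', 'Wind speeds [m/s] to sweep over.',
#                   nonempty=True, increasing=True)
#   # After gflags.FLAGS(argv) parses the command line, FLAGS.wind_speeds is
#   # numpy.linspace(3.0, 15.0, 5).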
| python |
"""
authentication/views.py
Created on Oct. 23, 2017
by Jiayao
"""
from __future__ import (absolute_import)
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from django.contrib.auth import (authenticate, login, logout)
from django.contrib.auth.decorators import login_required
from account.models import (User, Tutor)
from .forms import (UserForm, TutorForm, UpdateUserForm, UpdateTutorForm)
class SINGUP_STATUS:
NONE = 0
SUCCESS = 1
EXISTED = 2
FAILED = 3
class ProfileView(generic.TemplateView):
    """Models the profile view."""
model = User
template_name = 'profile.html'
login_required = True
def get_context_data(self, **kwargs):
context = super(ProfileView, self).get_context_data(**kwargs)
context['user_form'] = None
context['tutor_form'] = None
context['tutor_type'] = None
context['tutor'] = None
return context
def get(self, req, *args, **kwargs):
context = self.get_context_data(**kwargs)
user = User.objects.get(username=req.session['username'])
context['user_form'] = UpdateUserForm(prefix='user_form', instance=user)
if user.tutor is not None:
if user.tutor.tutor_type == Tutor.PRIVATE_TUTOR:
context['tutor_type'] = 'Private'
else:
context['tutor_type'] = 'Contracted'
context['tutor_form'] = UpdateTutorForm(
prefix='tutor_form', instance=user.tutor)
context['tutor'] = user.tutor
return self.render_to_response(context)
def post(self, req, *args, **kwargs):
user = User.objects.get(username=req.session['username'])
user_form = UpdateUserForm(req.POST,
prefix='user_form', instance=user)
if user_form.is_valid():
user_form.save()
else:
return render(req, 'message.html', {'message_title': 'Profile Update Failure',
'message_content': 'Please enter valid information.'})
if user.tutor is not None:
tutor_form = UpdateTutorForm(
req.POST, prefix='tutor_form', instance=user.tutor)
# print(tutor_form)
if tutor_form.is_valid():
tutor_form.save()
else:
print(tutor_form.errors)
return render(req, 'message.html', {'message_title': 'Profile Update Failure',
'message_content': 'Please enter valid information.'})
return render(req, 'message.html', {'message_title': 'Profile',
'message_content': 'Update Successful.'})
class IndexView(generic.TemplateView):
    """Models the index view."""
template_name = 'signup.html'
def get_context_data(self, **kwargs):
context = super(IndexView, self).get_context_data(**kwargs)
context['choice'] = True
context['user_form'] = None
context['tutor_form'] = None
context['status'] = SINGUP_STATUS.NONE
context['SIGNUP_STATUS'] = SINGUP_STATUS
return context
class LoginView(generic.TemplateView):
    """Models the login view."""
template_name = 'login.html'
def get_context_data(self, **kwargs):
context = super(LoginView, self).get_context_data(**kwargs)
context['status'] = 1
return context
def post(self, req, *args, **kwargs):
user = authenticate(username=req.POST['username'],
password=req.POST['password'])
if user is not None:
login(req, user)
req.session['username'] = req.POST['username']
if user.is_staff:
return HttpResponseRedirect(reverse('admin:index'))
return HttpResponseRedirect(reverse('homepage'))
else:
return render(req, self.template_name, {'status': 0})
@login_required
def logout_view(req):
logout(req)
req.session['username'] = None
return HttpResponseRedirect(reverse('homepage'))
class StudentFormView(generic.edit.CreateView):
    """Models the sign-up form."""
template_name = 'signup.html'
form_class = UserForm
class TutorFormView(generic.edit.CreateView):
    """Models the sign-up form."""
template_name = 'signup.html'
form_class = TutorForm
class StudentView(IndexView):
def get(self, req, *args, **kwargs):
context = self.get_context_data(**kwargs)
context['choice'] = False
context['user_form'] = UserForm(prefix='user_form')
return self.render_to_response(context)
def post(self, req, *args, **kwargs):
context = self.get_context_data(**kwargs)
form = UserForm(req.POST, prefix='user_form')
if form.is_valid():
username = form.cleaned_data['username']
password = form.cleaned_data['password']
try:
user = User.objects.get(username=username)
context['status'] = SINGUP_STATUS.EXISTED
except User.DoesNotExist:
user = form.save()
user.set_password(password)
user.save()
context['status'] = SINGUP_STATUS.SUCCESS
else:
context['status'] = SINGUP_STATUS.FAILED
return self.render_to_response(context)
class TutorView(IndexView):
def get(self, req, *args, **kwargs):
context = self.get_context_data(**kwargs)
context['choice'] = False
context['user_form'] = UserForm(prefix='user_form')
context['tutor_form'] = TutorForm(prefix='tutor_form')
return self.render_to_response(context)
def post(self, req, *args, **kwargs):
context = self.get_context_data()
form = UserForm(req.POST, prefix='user_form')
tutor_form = TutorForm(req.POST, prefix='tutor_form')
if form.is_valid() and tutor_form.is_valid():
username = form.cleaned_data['username']
password = form.cleaned_data['password']
try:
user = User.objects.get(username=username)
context['status'] = SINGUP_STATUS.EXISTED
except User.DoesNotExist:
user = form.save()
user.set_password(password)
user.save()
tutor_form.cleaned_data['user'] = user
tutor = tutor_form.save(commit=False)
tutor.user = user
tutor.save()
context['status'] = SINGUP_STATUS.SUCCESS
else:
context['status'] = SINGUP_STATUS.FAILED
return self.render_to_response(context)
class BothView(IndexView):
    """Models the sign-up form."""
def get(self, req, *args, **kwargs):
context = self.get_context_data(**kwargs)
context['choice'] = False
context['user_form'] = UserForm(prefix='user_form')
context['tutor_form'] = TutorForm(prefix='tutor_form')
return self.render_to_response(context)
def post(self, req, *args, **kwargs):
context = self.get_context_data()
form = UserForm(req.POST, prefix='user_form')
tutor_form = TutorForm(req.POST, prefix='tutor_form')
if form.is_valid() and tutor_form.is_valid():
username = form.cleaned_data['username']
password = form.cleaned_data['password']
try:
user = User.objects.get(username=username)
context['status'] = SINGUP_STATUS.EXISTED
except User.DoesNotExist:
user = form.save()
user.set_password(password)
user.save()
tutor_form.cleaned_data['user'] = user
tutor = tutor_form.save(commit=False)
tutor.user = user
tutor.save()
context['status'] = SINGUP_STATUS.SUCCESS
else:
context['status'] = SINGUP_STATUS.FAILED
return self.render_to_response(context)
PASSWORD_EMAIL_SENDER = '[email protected]'
PASSWORD_RESET_TOKEN_REGEX = r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$'
PASSWORD_RESET_DONE_MSG = r"""
We've emailed you instructions for setting your password, if an account exists with the email you entered.
You should receive them shortly.
If you don't receive an email, please make sure you've entered the address you registered with,
and check your spam folder.
"""
PASSWORD_RESET_EX_MSG = r"""
The password reset link was invalid, possibly because it has already been used.
Please request a new password reset.
"""
PASSWORD_RESET_COMPLETE = """
Your password has been set.
You may go ahead and login now.
"""
| python |
"""
*
* Author: Juarez Paulino(coderemite)
* Email: [email protected]
*
"""
a,b,c,d=[int(input())for _ in[0]*4]
print('10'[a!=d or (a<1 and d<1 and c>0)]) | python |
from flask import Blueprint
from flask import Response
from flask import abort
from flask import g
from flask import jsonify
from flask import request
from flask import current_app
from gleague.api import admin_required
from gleague.api import login_required
from gleague.core import db
from gleague.models import Match
from gleague.models import PlayerMatchRating
from gleague.match_import import create_match_from_replay
matches_bp = Blueprint("matches", __name__)
@matches_bp.route("/", methods=["POST"])
@admin_required
def create_match():
replay = request.files["file"]
if replay:
base_pts_diff = current_app.config.get("MATCH_BASE_PTS_DIFF", 20)
create_match_from_replay(replay, base_pts_diff)
return Response(status=201)
return abort(400)
@matches_bp.route("/<int:match_id>/ratings/", methods=["GET"])
def get_rates(match_id):
if not Match.is_exists(match_id):
return abort(404)
steam_id = g.user.steam_id if g.user else None
ratings = PlayerMatchRating.get_match_ratings(match_id, steam_id)
return jsonify({"ratings": ratings}), 200
@matches_bp.route(
"/<int:match_id>/ratings/<int:player_match_stats_id>", methods=["POST"]
)
@login_required
def rate_player(match_id, player_match_stats_id):
rating = request.args.get("rating", None)
try:
rating = int(rating)
except Exception:
return abort(400)
match = Match.query.get(match_id)
if not match:
return abort(404)
if rating not in range(1, 6):
return abort(406)
if not match.is_played(g.user.steam_id):
return abort(403)
db.session.add(
PlayerMatchRating(
player_match_stats_id=player_match_stats_id,
rating=rating,
rated_by_steam_id=g.user.steam_id,
)
)
db.session.flush()
return Response(status=200)
| python |
# Notes: copied inspect.py, dis.py, and opcodes.py into Jython dir (replacing stub inspect.py)
# Opcode will not work as using JVM, but required by dis.py, which was required by inspect.py
# only want functionality of getting source lines.
# Also copied textwrap.py?
# support for using tk
import java.io
from java.lang import Class, Runnable, Thread
import javax.swing.filechooser
from javax.swing import SwingUtilities, SwingConstants, \
AbstractAction, BorderFactory, Box, BoxLayout, ImageIcon, \
JDialog, JFrame, JScrollPane, JPanel, JComponent, JSplitPane, JTabbedPane, \
JColorChooser, JOptionPane, JFileChooser, \
JTextArea, JTextField, JLabel, JPasswordField, JEditorPane, JTextPane, \
JButton, JCheckBox, \
JMenuItem, JCheckBoxMenuItem, JMenuBar, JMenu, JPopupMenu, KeyStroke, \
JTree, \
JComboBox, DefaultComboBoxModel, \
JTable, \
JList, ListSelectionModel, DefaultListCellRenderer, DefaultListModel, \
JSlider, \
TransferHandler
from javax.swing.table import DefaultTableModel, DefaultTableCellRenderer
from javax.swing.event import ChangeListener, TreeSelectionListener, ListSelectionListener, HyperlinkEvent, TableModelListener
from java.awt.event import ActionListener, MouseAdapter, MouseMotionAdapter, MouseEvent, WindowFocusListener, MouseListener, KeyAdapter, KeyEvent
from javax.swing.text.html import HTMLEditorKit, FormView, HTML
from javax.swing.text import StyleConstants
from javax.swing.tree import DefaultMutableTreeNode, DefaultTreeModel, DefaultTreeCellRenderer, TreePath
from javax.swing.border import BevelBorder
from java.awt import Color, Cursor, BorderLayout, FlowLayout, Font, Dimension, Rectangle, Component, Polygon, Point, GridLayout, GridBagLayout, BasicStroke, Toolkit
from pawt import GridBag
from java.awt.datatransfer import DataFlavor, Transferable
from java.awt.dnd import DropTarget, DnDConstants, DropTargetAdapter, DragSourceListener, \
DragGestureListener, DragSource, DragSourceAdapter
from java.awt.image import BufferedImage
import os, os.path
############# useful classes that are not Swing specific #########
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "Point(%s, %s)" % (self.x, self.y)
class MouseEvent:
def __init__(self, isMeta, eventName, downPosition, previousPosition, currentPosition, upPosition):
self.isMeta = isMeta
self.eventName = eventName
self.downPosition = downPosition
self.previousPosition = previousPosition
self.currentPosition = currentPosition
self.upPosition = upPosition
# Support for late binding calls to methods for PrototypeMethod references passed to Swing
# This means methods edited in the system will have their changes called by Swing widgets
# even if they are edited after the widget was created
# Example:
# Instead of: self.widget.bind("<ButtonRelease-1>", self.OnMouseEventButton1Up)
# Use: self.widget.bind("<ButtonRelease-1>", LateBindWrapper(self, "OnMouseEventButton1Up"))
class LateBindWrapper:
def __init__(self, receiver, methodName, methodIsOptional=0, extraArg=None):
self.receiver = receiver
self.methodName = methodName
self.methodIsOptional = methodIsOptional
self.extraArg = extraArg
def __call__(self, *args, **kwargs):
if not self.receiver.hasProperty(self.methodName):
if not self.methodIsOptional:
raise AttributeError, self.methodName
return None
function = getattr(self.receiver, self.methodName)
if self.extraArg:
return function(self.extraArg, *args, **kwargs)
else:
return function(*args, **kwargs)
# used to provide "components" attribute in Morph for PythonCard compatability.
class IndirectAttributeAccessor:
def __init__(self, receiver, methodName):
self._receiver = receiver
self._methodName = methodName
def __getattr__(self, name):
function = getattr(self._receiver, self._methodName)
result = function(name)
if result == None:
raise AttributeError, name
return result
# Support for window management
def WorldShouldNoLongerBeUsedAsInspector(root, world):
# make sure no one still using this window as inspector
for window in root.openWindows:
if window.inspectorWorld == world:
window.inspectorWorld = None
def WindowShouldNoLongerHaveInspector(root, windowToClose, otherWindowsToClose):
# make sure no inspector is still hooked to this window
for window in root.openWindows:
#print window, window.inspectorForViewer
# PDF FIX BROKEN
if window.inspectorForViewer and window.inspectorForViewer == windowToClose:
#print "found window"
window.inspectorForViewer = None
window._updateTitle()
otherWindowsToClose.append(window)
def ExposeWindow(root, window):
# keep exposed window at end of list
root.openWindows.remove(window)
root.openWindows.append(window)
# close a window and related inspector (and its inspector's inspector etc.)
def CloseWindow(root, window):
otherWindowsToClose = []
WorldShouldNoLongerBeUsedAsInspector(root, window.world)
WindowShouldNoLongerHaveInspector(root, window, otherWindowsToClose)
window.world.removeMorph(window)
if window in root.openWindows:
root.openWindows.remove(window)
if not root.openWindows:
print "all windows closed -- PataPata application shutting down"
root.quit()
# close related inspectors
if otherWindowsToClose:
for otherWindow in otherWindowsToClose:
CloseWindow(root, otherWindow)
# debugging
class WrappedOutput:
def __init__(self, oldStream):
self.oldStream = oldStream
def write(self, text):
raise "write %s" % text
self.oldStream.write(text)
if text == None or text == "None":
raise "Stop"
# for tree text comparison
# needs improvements
def MyCompare(a, b):
""" ensure that things with brackets sort after text """
if type(a) in (str, unicode):
aText = a
else:
aText = a.GetText()
if type(b) in (str, unicode):
bText = b
else:
bText = b.GetText()
inheritedText = "[Inherited] "
if not aText[0].isalnum() and not bText[0].isalnum():
if aText.find(inheritedText) == 0 and bText.find(inheritedText) == 0:
return MyCompare(aText[len(inheritedText):], bText[len(inheritedText):])
return cmp(aText, bText)
elif not aText[0].isalnum():
return 1
elif not bText[0].isalnum():
return -1
else:
return cmp(aText, bText)
####################################################
# utility function
def GetNewText(parent, oldText="", prompt="Enter the new text", title="Text input"):
# PDF FIX -- does not use title
return JOptionPane.showInputDialog(parent, prompt, oldText)
def ShowMessage(parent, messageText="Something happened", title="Message"):
JOptionPane.showMessageDialog(parent, messageText, title, JOptionPane.PLAIN_MESSAGE)
class OptionsCallbackPopupMenu:
# options should be a list of (name, function, [arg1, [arg2]]) tuples
def __init__(self, parent, x, y, options, world, extraOptions=None):
self.world = world
self.popupMenu = JPopupMenu()
self.options = options
self.addToMenuForOptions(options)
if extraOptions:
self.addToMenuForOptions(extraOptions)
self.popupMenu.show(parent, x, y)
def addToMenuForOptions(self, options, menu=None):
if not menu:
menu = self.popupMenu
for option in options:
if not option or not option[0]:
menu.addSeparator()
else:
if type(option[1]) in [tuple, list]:
# nested menu
submenu = JMenu(option[0])
self.addToMenuForOptions(option[1], submenu)
menu.add(submenu)
else:
menuItem = JMenuItem(option[0], actionPerformed=lambda event, option=option: self.OnChoice(option))
menu.add(menuItem)
def OnChoice(self, option):
print "OnChoice", option
if len(option) == 2:
option[1]()
elif len(option) == 3:
option[1](option[2])
elif len(option) == 4:
option[1](option[2], option[3])
def BindCommonEvents(morph, subwidget=None):
if subwidget:
widget = subwidget
else:
widget = morph.widget
# PDF FIX PORT
"""
widget.bind("<FocusIn>", LateBindWrapper(morph, "gainFocus"))
widget.bind("<FocusOut>", LateBindWrapper(morph, "loseFocus"))
widget.bind("<Enter>", LateBindWrapper(morph, "mouseEnter"))
widget.bind("<Leave>", LateBindWrapper(morph, "mouseLeave"))
$$widget.bind("<Motion>", LateBindWrapper(morph, "mouseMove"))
$$widget.bind("<ButtonPress-1>", LateBindWrapper(morph, "mouseDown"))
$$widget.bind("<ButtonRelease-1>", LateBindWrapper(morph, "mouseUp"))
$$widget.bind("<B1-Motion>", LateBindWrapper(morph, "mouseDrag"))
widget.bind("<Double-Button-1>", LateBindWrapper(morph, "mouseDoubleClick"))
widget.bind("<ButtonPress-2>", LateBindWrapper(morph, "mouseMiddleDown"))
widget.bind("<ButtonRelease-2>", LateBindWrapper(morph, "mouseMiddleUp"))
widget.bind("<Double-Button-2>", LateBindWrapper(morph, "mouseMiddleDoubleClick"))
# these three may depend on meaning of context -- maybe mouse plus another key on Mac?
$$widget.bind("<ButtonPress-3>", LateBindWrapper(morph, "mouseContextDown"))
$$widget.bind("<ButtonRelease-3>", LateBindWrapper(morph, "mouseContextUp"))
widget.bind("<Double-Button-3>", LateBindWrapper(morph, "mouseContextDoubleClick"))
"""
widget.addMouseMotionListener(CallbackMouseMotionListener("", LateBindWrapper(morph, "mouseMove"), LateBindWrapper(morph, "mouseDrag")))
widget.addMouseListener(CallbackLeftMouseButtonListener("", LateBindWrapper(morph, "mouseDown"), LateBindWrapper(morph, "mouseUp")))
widget.addMouseListener(CallbackRightMouseButtonListener("", LateBindWrapper(morph, "mouseContextDown"), LateBindWrapper(morph, "mouseContextUp")))
widget.mouseEntered = LateBindWrapper(morph, "mouseEnter")
widget.mouseExited = LateBindWrapper(morph, "mouseLeave")
# PDF FIX UNFINISHED
class MyTreeNodeWithItem(DefaultMutableTreeNode):
def __init__(self, item):
self.item = item
self.userObject = item.GetText()
self.areChildrenDefined = 0
def getChildCount(self):
if not self.areChildrenDefined:
self.defineChildNodes()
return DefaultMutableTreeNode.getChildCount(self)
def defineChildNodes(self):
self.areChildrenDefined = 1
if self.item.IsExpandable():
childItems = self.item.GetSubList()
for item in childItems:
newNode = MyTreeNodeWithItem(item)
#newNode.setParent(self)
self.add(newNode)
def collapsed(self, tree):
self.removeAllChildren()
self.areChildrenDefined = 0
tree.model.nodeStructureChanged(self)
# Support for an inspector tree node
class PrototypeInspectorTreeItem:
#class PrototypeInspectorTreeItem(TreeWidget.TreeItem):
def __init__(self, parentObject, key, childObject, inheritedFlag):
self.parentObject = parentObject
self.key = key
self.childObject = childObject
self.inheritedFlag = inheritedFlag
def __str__(self):
return self.GetText()
def GetText(self):
childObject = self.childObject
extra = ""
if not hasattr(childObject, "__class__"):
extra = " : " + `childObject`
elif not hasattr(childObject, "__dict__") and not type(childObject) in [dict, list]:
extra = " : " + `childObject`
elif isinstance(childObject, PrototypeClass):
extra = " : <Prototype %s> %s" % (`id(childObject)`, childObject.traits)
elif isinstance(childObject, PrototypeMethod):
#extra = " : <PrototypeMethod %s>" % childObject.source.split("\n")[0]
extra = " : <PrototypeMethod %s>" % id(childObject)
else:
name = "%s" % childObject.__class__
unwantedPrefix = "__main__."
if name.find(unwantedPrefix) == 0:
name = name[len(unwantedPrefix):]
extra = " : %s" % name
if len(extra) > 40:
extra = extra[:40] + "..."
result = "%s" % self.key + extra
if self.inheritedFlag:
result = "[Inherited] " + result
return result
def IsEditable(self):
return 0
def SetText(self, text):
pass
def GetIconName(self):
if not self.IsExpandable():
return "python" # XXX wish there was a "file" icon
def IsExpandable(self):
childObject = self.childObject
result = (hasattr(childObject, "__dict__") and not isinstance(childObject, PrototypeMethod)) or (type(childObject) in [list, dict])
return result
def GetSubList(self):
result = []
nonInheritedNames = None
itemObject = self.childObject
if type(itemObject) == dict:
names = itemObject.keys()
names.sort()
elif type(itemObject) == list:
names = range(len(itemObject))
elif isinstance(itemObject, PrototypeClass):
properties = itemObject.allProperties()
names = properties.keys()
names.sort()
nonInheritedNames = itemObject._attributes.keys()
nonInheritedNames.sort()
else:
names = itemObject.__dict__.keys()
names.sort()
for key in names:
if type(itemObject) in [list, dict]:
childObject = itemObject[key]
else:
# hide the world pointer in all objects, plus other clutter
if key == "world":
continue
elif key in ["function", "prototypeHoldingTheFunction"] and isinstance(itemObject, PrototypeMethod):
continue
try:
childObject = getattr(itemObject, key)
except AttributeError:
# case where property exists, but not local or inherited
print "missing property definition for ", key
continue
inheritedFlag = 0
if nonInheritedNames:
inheritedFlag = not (key in nonInheritedNames)
store = PrototypeInspectorTreeItem(itemObject, key, childObject, inheritedFlag)
result.append(store)
result.sort(MyCompare)
return result
# support function to look through children of a tree node and find a match for the key
def InspectorTree_FindChildNodeWithKey(treeMorph, parentNode, key):
for index in range(0, parentNode.getChildCount()):
childNode = parentNode.getChildAt(index)
if childNode.item.key == key:
return childNode
return None
def InspectorTree_ScrollToAndSelectChildNodeWithKey(treeMorph, parentNode, key, collapseAndExpandParent=1):
if collapseAndExpandParent:
path = TreePath(parentNode.getPath())
treeMorph._tree.collapsePath(path)
parentNode.collapsed(treeMorph._tree)
treeMorph._tree.expandPath(path)
newNode = InspectorTree_FindChildNodeWithKey(treeMorph, parentNode, key)
path = TreePath(newNode.getPath())
treeMorph._tree.makeVisible(path)
treeMorph._tree.setSelectionPath(path)
def InspectorTree_ScrollToAndSelectNode(treeMorph, node, collapseAndExpandNode=1):
if collapseAndExpandNode:
if collapseAndExpandNode != "expandOnly":
treeMorph._tree.collapsePath(TreePath(node.getPath()))
node.collapsed(treeMorph._tree)
treeMorph._tree.expandPath(TreePath(node.getPath()))
path = TreePath(node.getPath())
treeMorph._tree.makeVisible(path)
treeMorph._tree.setSelectionPath(path)
def InspectorTree_FindChildNodeWithValue(treeMorph, parentNode, value):
for index in range(0, parentNode.getChildCount()):
childNode = parentNode.getChildAt(index)
if childNode.item.childObject == value:
return childNode
return None
def InspectorTree_CollapseAndExpandNode(treeMorph, node):
path = TreePath(node.getPath())
treeMorph._tree.collapsePath(path)
node.collapsed(treeMorph._tree)
treeMorph._tree.expandPath(path)
# for CallbackRunnable to be able to get None parameters
class NoParamSpecified:
pass
class CallbackRunnable(Runnable):
def __init__(self, callback, param1=NoParamSpecified, param2=NoParamSpecified):
self.callback = callback
self.param1 = param1
self.param2 = param2
def run(self):
if self.param1 == NoParamSpecified:
self.callback()
else:
if self.param2 == NoParamSpecified:
self.callback(self.param1)
else:
self.callback(self.param1, self.param2)
def invokeLater(self):
SwingUtilities.invokeLater(self)
########## Newer
def GetNativeFont(font):
name = font[0]
# PDF FINISH -- style not handled
style = Font.PLAIN
size = font[1]
return Font(name, style, size)
def GetWidthAndHeightForTextInFont(text, font):
try:
# idea from: http://today.java.net/pub/a/today/2004/04/22/images.html?page=last
buffer = BufferedImage(1, 1, BufferedImage.TYPE_INT_RGB)
g2 = buffer.createGraphics()
        # PDF IMPROVE the correspondence of hints to what is actually used
#g2.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON)
fc = g2.getFontRenderContext()
bounds = font.getStringBounds(text, fc)
# HAD FUNKY ERROR WITH COMMA AND getWidth: return int(bounds.geWidth()), int(bounds,getHeight())
return int(bounds.width), int(bounds.height)
except:
print "GetWidthAndHeightForTextInFont exception"
raise
def hexToColor(text):
r = int(text[0:2], 16)
g = int(text[2:4], 16)
b = int(text[4:6], 16)
return Color(r, g, b)
colorsLookupDictionary = {
#java colors
'white': Color.white,
'black': Color.black,
'blue': Color.blue,
'cyan': Color.cyan,
'dark gray': Color.darkGray,
'gray': Color.gray,
'grey': Color.gray,
'green': Color.green,
'light gray': Color.lightGray,
'light grey': Color.lightGray,
'magenta': Color.magenta,
'orange': Color.orange,
'pink': Color.pink,
'red': Color.red,
'yellow': Color.yellow,
# other colors
'light blue': hexToColor("C0D9D9"),
'green yellow': hexToColor("93DB70"),
'medium violet red': hexToColor("DB7093"),
'medium goldenrod': hexToColor("EAEAAE"),
'plum': hexToColor("EAADEA"),
'tan': hexToColor("DB9370"),
'turquoise': hexToColor("ADEAEA"),
'spring green': hexToColor("00FF7F"),
'orange red': hexToColor("FF2400"),
'goldenrod': hexToColor("DBDB70"),
'purple': hexToColor("800080"),
'light purple': hexToColor("C000C0"),
'sienna': hexToColor("A0522D"),
'slate blue': hexToColor("007FFF"),
'sea green': hexToColor("238E68"),
'very light gray': hexToColor("CDCDCD"),
'gold': hexToColor("FFD700"),
'violet red': hexToColor("CC3299"),
'coral': hexToColor("FF7F00"),
'light steel blue': hexToColor("8F8FBD"),
'silver': hexToColor("E6E8FA"),
'dark turquoise': hexToColor("7093DB"),
'light wood': hexToColor("E9C2A6"),
'feldspar': hexToColor("D19275"),
'thistle': hexToColor("D8BFD8"),
'khaki': hexToColor("F0E68C"),
'cool copper': hexToColor("D98719"),
'firebrick': hexToColor("B22222"),
'forest green': hexToColor("238E23"),
'steel blue': hexToColor("236B8E"),
}
def colorFromName(name):
name = name.lower()
return colorsLookupDictionary[name]
def colorName(color):
for colorName in colorsLookupDictionary.keys():
if colorsLookupDictionary[colorName] == color:
return colorName
return ""
def GetNativeColor(nameOrTuple):
if type(nameOrTuple) == tuple:
return Color(nameOrTuple[0], nameOrTuple[1], nameOrTuple[2])
else:
if nameOrTuple and nameOrTuple[0] == '#':
return hexToColor(nameOrTuple[1:7])
try:
return colorFromName(nameOrTuple)
except KeyError:
# try to return a system color
return Color.getColor(nameOrTuple)
###########
# PDF IMPROVE -- WHEN DRAGGING THIS, OBJECTS DISAPPEAR -- NEED TO BE KEPT COPIED AT MORPH?
class MyImageCanvas(JComponent):
def __init__(self, backdropImage, cursorImage):
# PDF RESOLVE NAME images are actually expected to be icons...
self.clearImages()
self.backdropImage = backdropImage
self.cursorImage = cursorImage
self.doubleBuffered = 1
def paintComponent(self, g):
#JComponent.paintComponent(self, g)
#draw entire component with background
g.setColor(self.backgroundColor)
g.fillRect(0, 0, self.getWidth(), self.getHeight())
if self.backdropImage:
self.backdropImage.paintIcon(self, g, 0, 0)
#g.drawImage(self.backdropImage, 0, 0, self)
for image, position in self.otherImagesWithPosition:
image.paintIcon(self, g, position[0], position[1])
#g.drawImage(image, position[0], position[1], self)
if self.cursorImage:
x = self.cursorImagePosition[0] - self.cursorOriginOffset[0]
y = self.cursorImagePosition[1] - self.cursorOriginOffset[1]
self.cursorImage.paintIcon(self, g, x, y)
#g.drawImage(self.cursorImage, x, y, self)
def getPreferredSize(self):
if self.backdropImage:
try:
return (self.backdropImage.iconWidth, self.backdropImage.iconHeight)
except:
print "problem"
return (100, 100)
def getMinimumSize(self):
return self.getPreferredSize()
def clearImages(self):
self.backdropImage = None
self.cursorImage = None
self.cursorImagePosition = (0, 0)
self.cursorOriginOffset = (0, 0)
self.backgroundColor = Color.white
# list of tuples as (image, position)
self.otherImagesWithPosition = []
def addOtherImage(self, image, position):
self.otherImagesWithPosition.append((image, position))
self.repaint()
def clearOtherImages(self):
self.otherImagesWithPosition = []
self.repaint()
###############
# callbacks that check for the metaKey
def IsEventMatchForFilter(event, filter):
#print "IsEventMatchForFilter", filter, event
modifiers = event.getModifiersExText(event.getModifiersEx())
items = modifiers.split("+")
if filter == "":
if "Alt" in items: return 0
if "Ctrl" in items: return 0
if "Shift" in items: return 0
return 1
elif filter == "Alt":
if "Ctrl" in items: return 0
if "Shift" in items: return 0
if "Alt" in items: return 1
return 0
elif filter == "Control":
if "Shift" in items: return 0
if "Alt" in items: return 0
if "Ctrl" in items: return 1
return 0
elif filter == "Shift":
if "Alt" in items: return 0
if "Ctrl" in items: return 0
if "Shift" in items: return 1
return 0
elif filter == "Shift-Control":
if "Alt" in items: return 0
if "Ctrl" in items and "Shift" in items: return 1
return 0
return 0
class CallbackLeftMouseButtonListener(MouseAdapter):
def __init__(self, modifiersFilter, callbackOnDown, callbackOnUp):
self.modifiersFilter = modifiersFilter
self.callbackOnDown = callbackOnDown
self.callbackOnUp = callbackOnUp
def mousePressed(self, event):
if self.callbackOnDown and IsEventMatchForFilter(event, self.modifiersFilter):
if SwingUtilities.isLeftMouseButton(event):
self.callbackOnDown(event)
def mouseReleased(self, event):
if self.callbackOnUp and IsEventMatchForFilter(event, self.modifiersFilter):
if SwingUtilities.isLeftMouseButton(event):
self.callbackOnUp(event)
class CallbackRightMouseButtonListener(MouseAdapter):
def __init__(self, modifiersFilter, callbackOnDown, callbackOnUp):
self.modifiersFilter = modifiersFilter
self.callbackOnDown = callbackOnDown
self.callbackOnUp = callbackOnUp
def mousePressed(self, event):
if self.callbackOnDown and IsEventMatchForFilter(event, self.modifiersFilter):
if SwingUtilities.isRightMouseButton(event):
self.callbackOnDown(event)
def mouseReleased(self, event):
if self.callbackOnUp and IsEventMatchForFilter(event, self.modifiersFilter):
if SwingUtilities.isRightMouseButton(event):
self.callbackOnUp(event)
class CallbackMouseMotionListener(MouseMotionAdapter):
def __init__(self, modifiersFilter, callback, draggedCallback=None):
self.modifiersFilter = modifiersFilter
self.callback = callback
self.draggedCallback = draggedCallback
def mouseMoved(self, event):
if self.callback and IsEventMatchForFilter(event, self.modifiersFilter):
self.callback(event)
def mouseDragged(self, event):
if IsEventMatchForFilter(event, self.modifiersFilter):
if self.draggedCallback:
self.draggedCallback(event)
else:
self.callback(event)
class CallbackKeyListener(KeyAdapter):
def __init__(self, pressedCallback, releasedCallback):
self.pressedCallback = pressedCallback
self.releasedCallback = releasedCallback
def keyPressed(self, event):
print "CallbackKeyListener", event
if self.pressedCallback:
self.pressedCallback(event)
def keyReleased(self, event):
print "CallbackKeyListener", event
if self.releasedCallback:
self.releasedCallback(event)
####
class FileDialog:
def __init__(self, parent, title="Choose file", loadOrSave="load"):
self.parent = parent
self.title = title
self.loadOrSave = loadOrSave
def go(self, pattern="*.py", default=None):
fileChooser = JFileChooser()
if self.title:
fileChooser.setDialogTitle(self.title)
if default:
fileChooser.setSelectedFile(java.io.File(default))
fileChooser.setCurrentDirectory(java.io.File("."))
if self.loadOrSave == "load":
result = fileChooser.showOpenDialog(self.parent)
else:
result = fileChooser.showSaveDialog(self.parent)
if (result == JFileChooser.APPROVE_OPTION):
fileResult = None
fileAndMaybeDir = fileChooser.getSelectedFile().getAbsoluteFile()
if not fileAndMaybeDir.isDirectory():
fileResult = str(fileAndMaybeDir)
return fileResult
else:
return None
#### COMMON
# Cursor
def Common_GetCursor(widget):
return widget.getCursor()
def Common_SetCursor(widget, cursor):
widget.setCursor(cursor)
def Common_SetCursorByName(widget, cursorName):
if cursorName == "normal":
raise "unfinished"
elif cursorName == "cross":
newCursor = Cursor(Cursor.CROSSHAIR_CURSOR)
else:
raise "Unsupported cursor name"
    widget.setCursor(newCursor)
# Image
def Common_LoadImage(fileName):
return ImageIcon(fileName)
def Common_ImageWidth(image):
return image.iconWidth
def Common_ImageHeight(image):
return image.iconHeight
# Native Event
def Common_NativeEventPositionInWindow(event):
return event.x, event.y
| python |
from selenium.webdriver.support.ui import Select
class ContactHelper:
def __init__(self, app):
self.app = app
def open_contact_page(self):
wd = self.app.wd
        # use find_elements (plural): a missing element then yields an empty list instead of an exception
        if not (len(wd.find_elements_by_xpath("//input[@title='Search for any text']")) > 0
                and len(wd.find_elements_by_name("add")) > 0):
            wd.find_element_by_link_text("home page").click()
def create_contact(self, contact):
wd = self.app.wd
self.open_contact_page()
wd.find_element_by_link_text("add new").click()
# add first name
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(contact.firstname)
# add middle name
wd.find_element_by_name("middlename").click()
wd.find_element_by_name("middlename").clear()
wd.find_element_by_name("middlename").send_keys(contact.middlename)
# add address
wd.find_element_by_name("address").click()
wd.find_element_by_name("address").clear()
wd.find_element_by_name("address").send_keys(contact.address)
# add mobile
wd.find_element_by_name("mobile").click()
wd.find_element_by_name("mobile").clear()
wd.find_element_by_name("mobile").send_keys(contact.mobile)
        # add last name
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(contact.lastname)
# add nickname
wd.find_element_by_name("nickname").click()
wd.find_element_by_name("nickname").clear()
wd.find_element_by_name("nickname").send_keys(contact.nickname)
# add title
wd.find_element_by_name("title").click()
wd.find_element_by_name("title").clear()
wd.find_element_by_name("title").send_keys(contact.title)
# add company
wd.find_element_by_name("company").click()
wd.find_element_by_name("company").clear()
wd.find_element_by_name("company").send_keys(contact.company)
# add home number
wd.find_element_by_name("home").click()
wd.find_element_by_name("home").clear()
wd.find_element_by_name("home").send_keys(contact.home)
        # add work number
wd.find_element_by_name("work").click()
wd.find_element_by_name("work").clear()
wd.find_element_by_name("work").send_keys(contact.work)
# add fax number
wd.find_element_by_name("fax").click()
wd.find_element_by_name("fax").clear()
wd.find_element_by_name("fax").send_keys(contact.fax)
# add email
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").clear()
wd.find_element_by_name("email").send_keys(contact.email_1)
wd.find_element_by_name("email2").click()
wd.find_element_by_name("email2").clear()
wd.find_element_by_name("email2").send_keys(contact.email_2)
wd.find_element_by_name("email3").click()
wd.find_element_by_name("email3").clear()
wd.find_element_by_name("email3").send_keys(contact.email_3)
# add birthday
wd.find_element_by_name("bday").click()
Select(wd.find_element_by_name("bday")).select_by_visible_text(contact.bday)
wd.find_element_by_name("bmonth").click()
Select(wd.find_element_by_name("bmonth")).select_by_visible_text(contact.bmonth)
wd.find_element_by_name("byear").click()
wd.find_element_by_name("byear").clear()
wd.find_element_by_name("byear").send_keys(contact.byear)
# add date anniversary
wd.find_element_by_name("aday").click()
Select(wd.find_element_by_name("aday")).select_by_visible_text(contact.aday)
wd.find_element_by_name("amonth").click()
Select(wd.find_element_by_name("amonth")).select_by_visible_text(contact.amonth)
wd.find_element_by_name("ayear").click()
wd.find_element_by_name("ayear").clear()
wd.find_element_by_name("ayear").send_keys(contact.ayear)
# add address 2
wd.find_element_by_name("address2").click()
wd.find_element_by_name("address2").clear()
wd.find_element_by_name("address2").send_keys(contact.address2)
# add phone number 2
wd.find_element_by_name("phone2").click()
wd.find_element_by_name("phone2").clear()
wd.find_element_by_name("phone2").send_keys(contact.phone2)
# add notes
wd.find_element_by_name("notes").click()
wd.find_element_by_name("notes").clear()
wd.find_element_by_name("notes").send_keys(contact.notes)
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
wd.find_element_by_link_text("Logout").click()
def delete_first_contact(self):
wd = self.app.wd
self.open_contact_page()
        # select first contact
wd.find_element_by_name("selected[]").click()
# submit deletion
wd.find_element_by_name("DeleteSel()").click()
wd.switch_to_alert().accept()
wd.find_element_by_link_text("home").click()
def count(self):
wd = self.app.wd
self.open_contact_page()
        # each contact row on the home page has a "selected[]" checkbox; count those
        return len(wd.find_elements_by_name("selected[]"))
| python |
def XXX(self, root: TreeNode) -> int:
if root is None:
return 0
        m = 10 ** 5  # m holds the minimum depth; start from an upper bound
def bfs(d, node):
nonlocal m
if node.left is None and node.right is None:
m = min(m, d)
return
bfs(d + 1, node.left) if node.left else None
bfs(d + 1, node.right) if node.right else None
bfs(1, root)
return m
| python |
# Copyright 2022 The Balsa Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from balsa.util import postgres
class CardEst(object):
"""Base class for cardinality estimators."""
def __call__(self, node, join_conds):
raise NotImplementedError()
class PostgresCardEst(CardEst):
def __init__(self):
self._cache = {}
def _HashKey(self, node):
"""Computes a hash key based on the logical contents of 'node'.
Specifically, hash on the sorted sets of table IDs and their filters.
        NOTE: Postgres can produce slightly different cardinality estimates
        for queries that are identical except for the ordering of tables in
        the FROM list.  Here, we ignore this slight difference.
"""
sorted_filters = '\n'.join(sorted(node.GetFilters()))
sorted_leaves = '\n'.join(sorted(node.leaf_ids()))
return sorted_leaves + sorted_filters
def __call__(self, node, join_conds):
key = self._HashKey(node)
card = self._cache.get(key)
if card is None:
sql_str = node.to_sql(join_conds)
card = postgres.GetCardinalityEstimateFromPg(sql=sql_str)
self._cache[key] = card
return card
| python |
#!/usr/bin/python
# Copyright 2017 Telstra Open Source
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import requests
import json
import pprint
#For the following mn topo
#mn --controller=remote,ip=172.18.0.1,port=6653 --switch ovsk,protocols=OpenFlow13 --topo torus,3,3
#h1x1 ping h3x2
url = "http://localhost/api/v1/flow"
headers = {'Content-Type': 'application/json'}
j_data = {"src_switch":"00:00:00:00:00:00:01:01", "src_port":1, "src_vlan":0, "dst_switch":"00:00:00:00:00:00:03:02", "dst_port":1, "dst_vlan":0, "bandwidth": 2000}
result = requests.post(url, json=j_data, headers=headers)
print result.text
| python |
# Copyright 2020 University of Groningen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test graph related functions
"""
import pytest
import networkx as nx
import polyply
from .example_fixtures import example_meta_molecule
@pytest.mark.parametrize('source, max_length, min_length, expected',(
(4, 1, 1, [4, 1, 9, 10]),
(4, 2, 1, [4, 1, 9, 10, 0, 3]),
(4, 3, 3, [0, 3, 7, 8, 2]),
(0, 1, 1, [0, 1, 2])
))
def test_neighbourhood(source, max_length, min_length, expected):
graph = nx.balanced_tree(r=2, h=3)
neighbours = polyply.src.graph_utils.neighborhood(graph,
source,
max_length,
min_length=min_length)
assert set(neighbours) == set(expected)
@pytest.mark.parametrize('edges, expected',(
# simple linear
([(0, 1), (1, 2), (2, 3)], False),
# simple cyclic
([(0, 1), (1, 2), (2, 3), (3, 0)], False),
# simple branched
([(0, 1), (1, 2), (1, 3), (3, 4)], True),
# cyclic branched
([(0, 1), (1, 2), (2, 3), (3, 0), (0, 5)], True),
# no nodes
([], False)
))
def test_is_branched(edges, expected):
graph = nx.Graph()
graph.add_edges_from(edges)
result = polyply.src.graph_utils.is_branched(graph)
assert result == expected
@pytest.mark.parametrize('nodes, expected',(
((0, 1), [(1, 4)]),
# central residue
((1, 2), [(6, 9)]),
))
def test_find_connecting_edges(example_meta_molecule, nodes, expected):
result = polyply.src.graph_utils.find_connecting_edges(example_meta_molecule,
example_meta_molecule.molecule,
nodes)
assert result == expected
| python |
import pandas as pd
from koapy import KiwoomOpenApiContext
from koapy.backend.cybos.CybosPlusComObject import CybosPlusComObject
kiwoom = KiwoomOpenApiContext()
cybos = CybosPlusComObject()
kiwoom.EnsureConnected()
cybos.EnsureConnected()
kiwoom_codes = kiwoom.GetCommonCodeList()
cybos_codes = cybos.GetCommonCodeList()
cybos_codes = [code[1:] for code in cybos_codes]
kiwoom_codes = pd.DataFrame(kiwoom_codes, columns=['code'])
kiwoom_codes['kiwoom'] = 'TRUE'
cybos_codes = pd.DataFrame(cybos_codes, columns=['code'])
cybos_codes['cybos'] = 'TRUE'
df = pd.merge(kiwoom_codes, cybos_codes, how='outer', on='code')
df.to_excel('output.xlsx')
| python |
xs = [1, 2] | python |
# Copyright 2010-2011 Josh Kearney
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Event-based IRC Class"""
import random
import re
import time
import urllib
from pyhole import irclib
from pyhole import plugin
class IRC(irclib.SimpleIRCClient):
"""An IRC connection"""
def __init__(self, config, network, log, version, conf_file):
irclib.SimpleIRCClient.__init__(self)
self.log = log
self.version = version
self.conf_file = conf_file
self.admins = config.get("admins", type="list")
self.command_prefix = config.get("command_prefix")
self.reconnect_delay = config.get("reconnect_delay", type="int")
self.rejoin_delay = config.get("rejoin_delay", type="int")
self.plugin_dir = config.get("plugin_dir")
self.server = network.get("server")
self.password = network.get("password", default="")
self.port = network.get("port", type="int", default=6667)
self.ssl = network.get("ssl", type="bool", default=False)
self.ipv6 = network.get("ipv6", type="bool", default=False)
self.nick = network.get("nick")
self.identify_password = network.get("identify_password", default="")
self.channels = network.get("channels", type="list")
self.addressed = False
self.load_plugins()
self.log.info("Connecting to %s:%d as %s" % (self.server, self.port,
self.nick))
self.connect(self.server, self.port, self.nick, self.password,
ssl=self.ssl, ipv6=self.ipv6)
def load_plugins(self, reload_plugins=False):
"""Load plugins and their commands respectively"""
if reload_plugins:
plugin.reload_plugins(self.plugin_dir, irc=self,
conf_file=self.conf_file)
else:
plugin.load_plugins(self.plugin_dir, irc=self,
conf_file=self.conf_file)
self.log.info("Loaded Plugins: %s" % active_plugins())
def run_hook_command(self, mod_name, f, arg, **kwargs):
"""Make a call to a plugin hook"""
try:
f(arg, **kwargs)
if arg:
self.log.debug("Calling: %s.%s(\"%s\")" % (mod_name,
f.__name__, arg))
else:
self.log.debug("Calling: %s.%s(None)" % (mod_name,
f.__name__))
except Exception, e:
self.log.error(e)
def run_msg_regexp_hooks(self, message, private):
"""Run regexp hooks"""
for mod_name, f, msg_regex in plugin.hook_get_msg_regexs():
m = re.search(msg_regex, message, re.I)
if m:
self.run_hook_command(mod_name, f, m, private=private,
full_message=message)
def run_keyword_hooks(self, message, private):
"""Run keyword hooks"""
words = message.split(" ")
for mod_name, f, kw in plugin.hook_get_keywords():
for word in words:
m = re.search("^%s(.+)" % kw, word, re.I)
if m:
self.run_hook_command(mod_name, f, m.group(1),
private=private, full_message=message)
def run_command_hooks(self, message, private):
"""Run command hooks"""
for mod_name, f, cmd in plugin.hook_get_commands():
self.addressed = False
if private:
m = re.search("^%s$|^%s\s(.*)$" % (cmd, cmd), message, re.I)
if m:
self.run_hook_command(mod_name, f, m.group(1),
private=private, addressed=self.addressed,
full_message=message)
if message.startswith(self.command_prefix):
# Strip off command prefix
msg_rest = message[len(self.command_prefix):]
else:
# Check for command starting with nick being addressed
msg_start_upper = message[:len(self.nick) + 1].upper()
if msg_start_upper == self.nick.upper() + ":":
# Get rest of string after "nick:" and white spaces
msg_rest = re.sub("^\s+", "",
message[len(self.nick) + 1:])
else:
continue
self.addressed = True
m = re.search("^%s$|^%s\s(.*)$" % (cmd, cmd), msg_rest, re.I)
if m:
self.run_hook_command(mod_name, f, m.group(1), private=private,
addressed=self.addressed, full_message=message)
def poll_messages(self, message, private=False):
"""Watch for known commands"""
self.addressed = False
self.run_command_hooks(message, private)
self.run_keyword_hooks(message, private)
self.run_msg_regexp_hooks(message, private)
def reply(self, msg):
"""Send a privmsg"""
if not hasattr(msg, "encode"):
try:
msg = str(msg)
except Exception:
self.log.error("msg cannot be converted to string")
return
msg = msg.encode("utf-8").split("\n")
# 10 is completely arbitrary for now
if len(msg) > 10:
msg = msg[0:8]
msg.append("...")
for line in msg:
if self.addressed:
source = self.source.split("!")[0]
self.connection.privmsg(self.target, "%s: %s" % (source, line))
self.log.info("-%s- <%s> %s: %s" % (self.target, self.nick,
source, line))
else:
self.connection.privmsg(self.target, line)
if irclib.is_channel(self.target):
self.log.info("-%s- <%s> %s" % (self.target, self.nick,
line))
else:
self.log.info("<%s> %s" % (self.nick, line))
def privmsg(self, target, msg):
"""Send a privmsg"""
self.connection.privmsg(target, msg)
def op_user(self, params):
"""Op a user"""
params = params.split(" ", 1)
self.connection.mode(params[0], "+o %s" % params[1])
def deop_user(self, params):
"""De-op a user"""
params = params.split(" ", 1)
self.connection.mode(params[0], "-o %s" % params[1])
def set_nick(self, params):
"""Set IRC nick"""
self.nick = params
self.connection.nick(params)
def join_channel(self, params):
"""Join a channel"""
channel = params.split(" ", 1)
self.reply("Joining %s" % channel[0])
if irclib.is_channel(channel[0]):
self.channels.append(channel[0])
if len(channel) > 1:
self.connection.join(channel[0], channel[1])
else:
self.connection.join(channel[0])
def part_channel(self, params):
"""Part a channel"""
self.channels.remove(params)
self.reply("Parting %s" % params)
self.connection.part(params)
def fetch_url(self, url, name):
"""Fetch a URL"""
class PyholeURLopener(urllib.FancyURLopener):
version = self.version
urllib._urlopener = PyholeURLopener()
try:
return urllib.urlopen(url)
except IOError:
self.reply("Unable to fetch %s data" % name)
return None
def on_nicknameinuse(self, connection, event):
"""Ensure the use of unique IRC nick"""
random_int = random.randint(1, 100)
self.log.info("IRC nick '%s' is currently in use" % self.nick)
self.nick = "%s%d" % (self.nick, random_int)
self.log.info("Setting IRC nick to '%s'" % self.nick)
connection.nick("%s" % self.nick)
# Try to prevent nick flooding
time.sleep(1)
def on_welcome(self, connection, event):
"""Join channels upon successful connection"""
if self.identify_password:
self.privmsg("NickServ", "IDENTIFY %s" % self.identify_password)
for channel in self.channels:
c = channel.split(" ", 1)
if irclib.is_channel(c[0]):
if len(c) > 1:
connection.join(c[0], c[1])
else:
connection.join(c[0])
def on_disconnect(self, connection, event):
"""Attempt to reconnect after disconnection"""
self.log.info("Disconnected from %s:%d" % (self.server, self.port))
self.log.info("Reconnecting in %d seconds" % self.reconnect_delay)
time.sleep(self.reconnect_delay)
self.log.info("Connecting to %s:%d as %s" % (self.server, self.port,
self.nick))
self.connect(self.server, self.port, self.nick, self.password,
ssl=self.ssl)
def on_kick(self, connection, event):
"""Automatically rejoin channel if kicked"""
source = irclib.nm_to_n(event.source())
target = event.target()
nick, reason = event.arguments()
if nick == self.nick:
self.log.info("-%s- kicked by %s: %s" % (target, source, reason))
self.log.info("-%s- rejoining in %d seconds" % (target,
self.rejoin_delay))
time.sleep(self.rejoin_delay)
connection.join(target)
else:
self.log.info("-%s- %s was kicked by %s: %s" % (target, nick,
source, reason))
def on_invite(self, connection, event):
"""Join a channel upon invitation"""
source = event.source().split("@", 1)[0]
if source in self.admins:
self.join_channel(event.arguments()[0])
def on_ctcp(self, connection, event):
"""Respond to CTCP events"""
source = irclib.nm_to_n(event.source())
ctcp = event.arguments()[0]
if ctcp == "VERSION":
self.log.info("Received CTCP VERSION from %s" % source)
connection.ctcp_reply(source, "VERSION %s" % self.version)
elif ctcp == "PING":
if len(event.arguments()) > 1:
self.log.info("Received CTCP PING from %s" % source)
connection.ctcp_reply(source,
"PING %s" % event.arguments()[1])
def on_join(self, connection, event):
"""Handle joins"""
target = event.target()
source = irclib.nm_to_n(event.source())
self.log.info("-%s- %s joined" % (target, source))
def on_part(self, connection, event):
"""Handle parts"""
target = event.target()
source = irclib.nm_to_n(event.source())
self.log.info("-%s- %s left" % (target, source))
def on_quit(self, connection, event):
"""Handle quits"""
source = irclib.nm_to_n(event.source())
self.log.info("%s quit" % source)
def on_action(self, connection, event):
"""Handle IRC actions"""
target = event.target()
source = irclib.nm_to_n(event.source())
msg = event.arguments()[0]
self.log.info(unicode("-%s- * %s %s" % (target, source, msg), "utf-8"))
def on_privnotice(self, connection, event):
"""Handle private notices"""
source = irclib.nm_to_n(event.source())
msg = event.arguments()[0]
self.log.info(unicode("-%s- %s" % (source, msg), "utf-8"))
def on_pubnotice(self, connection, event):
"""Handle public notices"""
target = event.target()
source = irclib.nm_to_n(event.source())
msg = event.arguments()[0]
self.log.info(unicode("-%s- <%s> %s" % (target, source, msg),
"utf-8"))
def on_privmsg(self, connection, event):
"""Handle private messages"""
self.source = event.source().split("@", 1)[0]
self.target = irclib.nm_to_n(event.source())
msg = event.arguments()[0]
if self.target != self.nick:
self.log.info(unicode("<%s> %s" % (self.target, msg), "utf-8"))
self.poll_messages(msg, private=True)
def on_pubmsg(self, connection, event):
"""Handle public messages"""
self.source = event.source().split("@", 1)[0]
self.target = event.target()
nick = irclib.nm_to_n(event.source())
msg = event.arguments()[0]
self.log.info(unicode("-%s- <%s> %s" % (self.target, nick, msg),
"utf-8"))
self.poll_messages(msg)
def active_plugins():
"""List active plugins"""
return ", ".join(sorted(plugin.active_plugins()))
def active_commands():
"""List active commands"""
return ", ".join(sorted(plugin.active_commands()))
def active_keywords():
"""List active keywords"""
return ", ".join(sorted(plugin.active_keywords()))
| python |
from django import forms
from django.forms import ModelForm
from auctions.models import Listing, Comment, Bid, Category
categories = Category.objects.all().values_list('slug_name', 'name')
class CreateListing(ModelForm):
name = forms.ChoiceField(choices=categories, required=False)
class Meta:
model = Listing
fields = ['title', 'description', 'price', 'image']
class CreateComment(ModelForm):
class Meta:
model = Comment
fields = ['comment']
class CreateBid(ModelForm):
class Meta:
model = Bid
fields = ['price'] | python |
from .utils import find_closest_equivalent, Snapshot
from .find_init_weights import find_weights
| python |
import schema229
import os
'''
Unit tests
'''
def test_resolve_ref():
schema = schema229.A229Schema(os.path.join(os.path.dirname(__file__),'..','build',"schema","ASHRAE229.schema.json"))
node = schema.resolve_ref("ASHRAE229.schema.json#/definitions/ASHRAE229")
assert('title' not in node)
def test_get_schema_node():
schema = schema229.A229Schema(os.path.join(os.path.dirname(__file__),'..','build',"schema","ASHRAE229.schema.json"))
# Root node
node = schema.get_schema_node([])
assert('version' in node)
| python |
#!/usr/bin/python2
from math import sqrt
from decimal import Decimal
def check_prime(num):
    if num < 2:
        return False
    if num == 2:
        return True
    if not num % 2:
        return False
    for i in xrange(3, int(sqrt(num) + 1), 2):
        if not num % i:
            return False
    return True
def reverse(num):
rev_num = 0
while num:
rev_num = 10 * rev_num + num % 10
num /= 10
return rev_num
def reverse_s(s):
try:
if not isinstance(s, str):
s = str(s)
return s[::-1]
except:
return None
def check_palindrome(num):
return (True if num == int(reverse_s(num))
else False)
def prime_sieve(limit):
sieve = [True] * int(limit)
sieve[0], sieve[1] = [False] * 2
for i, v in enumerate(sieve):
if v:
sieve[i**2::i] = ([False] * (((limit - 1) / i) - (i - 1)))
return sieve
def multiples(number, factor):
counter = 0
while not number % factor:
number = number / factor
counter += 1
return (number, counter)
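# NOTE: prime_factors() below calls factor_length(), which is not defined in this
# snippet. A plausible (assumed) implementation returns the number of prime
# factors counted with multiplicity; adjust if the original helper counted
# distinct factors instead.
def factor_length(multiplicities):
    return sum(multiplicities)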
def prime_factors(number, limit=None):
original = number
factors, current = ({}, 3)
if not number % 2:
number, factors[2] = multiples(number, 2)
if limit and factor_length(factors.values()) >= limit:
return factors
max_factor = int(sqrt(number)) + 1
while number > 1 and current <= max_factor:
if not number % current:
number, factors[current] = multiples(number, current)
if limit and factor_length(factors.values()) >= limit:
break
max_factor = int(sqrt(number)) + 1
current += 2
if number != 1 and number != original:
factors[number] = 1
return factors
def factors(number):
return set(factor for factors in ((i, number/i) for i in
xrange(1, int(sqrt(number) + 1))
if not number % i)
for factor in factors)
def fibn(n):
n = Decimal(n)
root5 = Decimal(sqrt(5))
return int(((1 + root5) ** n - (1 - root5) ** n) /
((2 ** n) * root5))
| python |
# Space: O(n)
# Time: O(n log n) due to sorting the unique elements
import collections
class Solution:
def topKFrequent(self, nums, k):
counts = collections.Counter(nums)
res = sorted(counts.keys(), key=lambda x: counts[x], reverse=True)[:k]
return res
| python |
import multiprocessing
import os
import signal
import sys
import time
import WarBackend as War
from blessings import Terminal
def cleanexit(sig, frame):
if os.system("clear") != 0:
os.system("cls")
print("\nStopping...")
sys.exit()
signal.signal(signal.SIGINT, cleanexit) # Catches ^c and stops
term = Terminal()
global needscreenclear
needscreenclear = False
os.system("clear")
starttime = time.time()  # Record the start time so total runtime can be reported later.
# TODO: add some terminal configuration options
options = {
"avthreads": 0,
"numberofgames": 5,
"createouput": False,
"outputfilename": ""
}
passed_arguments = sys.argv[1:]
continuetorun = True
if '-h' in passed_arguments:
print(''
'-h | prints this help thing :)\n'
          '-c | Number of threads\n'
'-g | Number of games to play')
continuetorun = False
else:
if '-c' in passed_arguments:
threadarg = passed_arguments.index('-c')
try:
threadarg_perm = passed_arguments[threadarg + 1]
options["avthreads"] = float(threadarg_perm)
except IndexError or ValueError:
print('Invalid perameter')
continuetorun = False
else:
options["avthreads"] = multiprocessing.cpu_count() - 1
if '-g' in passed_arguments:
gamesarg = passed_arguments.index('-g')
try:
gamesarg_perm = passed_arguments[gamesarg + 1]
options["numberofgames"] = int(gamesarg_perm)
        except (IndexError, ValueError):
            print('Invalid parameter')
continuetorun = False
else:
options["numberofgames"] = 1000000
# Playing functions
def warthread(numgames, threadnum, statlist):
if os.path.isfile(os.path.join(".",str(threadnum)+"-drawreport.csv")):
os.remove(os.path.join(".",str(threadnum)+"-drawreport.csv"))
tmpfile = open(os.path.join(".",str(threadnum)+"-drawreport.csv"),'w')
tmpfile.close()
else:
tmpfile = open(os.path.join(".", str(threadnum) + "-drawreport.csv"), 'w')
tmpfile.close()
for i in range(0, numgames):
result = War.playwar(fileoutput=os.path.join(".",str(threadnum)+"-drawreport.csv"))
if result == 1:
statlist[threadnum][0] += 1
elif result == 2:
statlist[threadnum][1] += 1
elif result == 3:
statlist[threadnum][2] += 1
statlist[threadnum][3] += 1
def totalup(statlist):
'''
:param statlist: The current real time statistic list
:return: A list of totaled data from this rt list
'''
outputstlist = []
for i in range(0, 4):
outputstlist.append(0) # Putting in values that way we can add to them
for dive in statlist:
for subdive in range(0, 4):
outputstlist[subdive] += dive[subdive]
return outputstlist
# Main Event
last_run = False
if (options["numberofgames"] > 0) and(continuetorun):
print("Playing %i games." % (options["numberofgames"]))
rtstatlist = []
for loops in range(0, options["avthreads"]):
        stat = multiprocessing.Array('i', range(4))  # creating a statistic list for a thread to utilize
for kount in range(0, 4):
stat[kount] = 0
rtstatlist.append(stat)
# Creating the thread list and spawning the threads
threads = []
if options["avthreads"] == 1:
        wthread = multiprocessing.Process(target=warthread, args=(options["numberofgames"], 0, rtstatlist))
        threads.append(wthread)
        threads[0].start()  # start the single worker so the progress loop below can finish
else:
tmpgames_playing = options["numberofgames"]
for count in range(0, options["avthreads"] - 1):
wthread = multiprocessing.Process(target=warthread, args=(
options["numberofgames"] // options["avthreads"], count, rtstatlist))
tmpgames_playing -= options["numberofgames"] // options["avthreads"]
threads.append(wthread)
threads[count].start()
wthread = multiprocessing.Process(target=warthread, args=((tmpgames_playing, count + 1, rtstatlist)))
threads.append(wthread)
threads[count + 1].start()
while (totalup(rtstatlist))[3] != options["numberofgames"]:
statlist = totalup(rtstatlist)
        # Work from a snapshot (statlist) so the display stays consistent even if a worker updates rtstatlist while printing
        if statlist[3] > 0:  # Prevents a divide-by-zero error if the display code runs before any thread has finished a game
if needscreenclear:
os.system("clear")
needscreenclear = False
with term.location(0, 5):
print("Press Esc to clear the screen (Just in case you accidentally typed garbage)")
print("Player One has won %f percent of the time. " % float(statlist[0] * 100 / statlist[3]))
print("Player Two has won %f percent of the time. " % float(statlist[1] * 100 / statlist[3]))
print("There has been a draw %f percent of the time. \n" % float(statlist[2] / statlist[3]))
print("Player One has won %i time(s)." % statlist[0])
print("Player Two has won %i time(s)." % statlist[1])
print("There have been %i draws" % statlist[2])
print("The game has been played %i time(s)." % statlist[3])
print("We are %f percent done." % (statlist[3] * 100 / options["numberofgames"]))
elapsted_seconds = time.time() - starttime
# elapsted_seconds = 602263 #Debug time amount. Should be 6 days, 23 hours, 17 minutes, and 43 seconds
days = int(elapsted_seconds // 86400)
hours = int(elapsted_seconds // 3600 - (days * 24))
minutes = int(elapsted_seconds // 60 - (hours * 60) - (days * 1440))
seconds = int(elapsted_seconds - (minutes * 60) - (hours * 3600) - (days * 86400))
print("Time Elapsed: ", days, " ", ":", hours, " ", ":", minutes, " ", ":", seconds, " ")
adverage_games_per_second = statlist[3] / elapsted_seconds
tremaining = (options["numberofgames"] - statlist[3]) / adverage_games_per_second
advdays = int(tremaining // 86400)
advhours = int(tremaining // 3600 - (advdays * 24))
advminutes = int(tremaining // 60 - (advhours * 60) - (advdays * 1440))
advseconds = int(tremaining - (advminutes * 60) - (advhours * 3600) - (advdays * 86400))
print("Time Remaining: ", advdays, " ", ":", advhours, " ", ":", advminutes, " ", ":",
advseconds, " ")
os.system("clear")
statlist = totalup(rtstatlist)
with term.location(0, 10):
print("Player One has won %f percent of the time. " % float(statlist[0] * 100 / statlist[3]))
print("Player Two has won %f percent of the time. " % float(statlist[1] * 100 / statlist[3]))
print("There has been a draw %f percent of the time. \n" % float(statlist[2] / statlist[3]))
print("Player One has won %i times." % statlist[0])
print("Player Two has won %i times." % statlist[1])
print("There have been %i draws" % statlist[2])
print("The game has been played %i time(s)" % statlist[3])
elapsted_seconds = time.time() - starttime
# elapsted_seconds = 602263 #Debug time amount. Should be 6 days, 23 hours, 17 minutes, and 43 seconds
days = int(elapsted_seconds // 86400)
hours = int(elapsted_seconds // 3600 - (days * 24))
minutes = int(elapsted_seconds // 60 - (hours * 60) - (days * 1440))
seconds = int(elapsted_seconds - (minutes * 60) - (hours * 3600) - (days * 86400))
print("Time Elapsed: ", days, " ", ":", hours, " ", ":", minutes, " ", ":", seconds, " ")
| python |
# -*- coding: utf-8 -*-
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Evaluation script for CMRC 2018
version: v5
Note:
v5 formatted output, add usage description
v4 fixed segmentation issues
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import Counter, OrderedDict
import string
import re
import argparse
import json
import sys
import nltk
import pdb
# split Chinese with English
def mixed_segmentation(in_str, rm_punc=False):
in_str = in_str.lower().strip()
segs_out = []
temp_str = ""
sp_char = [
'-', ':', '_', '*', '^', '/', '\\', '~', '`', '+', '=', ',', '。', ':',
'?', '!', '“', '”', ';', '’', '《', '》', '……', '·', '、', '「', '」', '(',
')', '-', '~', '『', '』'
]
for char in in_str:
if rm_punc and char in sp_char:
continue
if re.search(r'[\u4e00-\u9fa5]', char) or char in sp_char:
if temp_str != "":
ss = nltk.word_tokenize(temp_str)
segs_out.extend(ss)
temp_str = ""
segs_out.append(char)
else:
temp_str += char
#handling last part
if temp_str != "":
ss = nltk.word_tokenize(temp_str)
segs_out.extend(ss)
return segs_out
# remove punctuation
def remove_punctuation(in_str):
in_str = in_str.lower().strip()
sp_char = [
'-', ':', '_', '*', '^', '/', '\\', '~', '`', '+', '=', ',', '。', ':',
'?', '!', '“', '”', ';', '’', '《', '》', '……', '·', '、', '「', '」', '(',
')', '-', '~', '『', '』'
]
out_segs = []
for char in in_str:
if char in sp_char:
continue
else:
out_segs.append(char)
return ''.join(out_segs)
# find longest common string
def find_lcs(s1, s2):
m = [[0 for i in range(len(s2) + 1)] for j in range(len(s1) + 1)]
mmax = 0
p = 0
for i in range(len(s1)):
for j in range(len(s2)):
if s1[i] == s2[j]:
m[i + 1][j + 1] = m[i][j] + 1
if m[i + 1][j + 1] > mmax:
mmax = m[i + 1][j + 1]
p = i + 1
return s1[p - mmax:p], mmax
#
def evaluate(ground_truth_file, prediction_file):
f1 = 0
em = 0
total_count = 0
skip_count = 0
for instances in ground_truth_file["data"]:
for instance in instances["paragraphs"]:
context_text = instance['context'].strip()
for qas in instance['qas']:
total_count += 1
query_id = qas['id'].strip()
query_text = qas['question'].strip()
answers = [ans["text"] for ans in qas["answers"]]
if query_id not in prediction_file:
sys.stderr.write('Unanswered question: {}\n'.format(
query_id))
skip_count += 1
continue
prediction = prediction_file[query_id]
f1 += calc_f1_score(answers, prediction)
em += calc_em_score(answers, prediction)
f1_score = 100.0 * f1 / total_count
em_score = 100.0 * em / total_count
return f1_score, em_score, total_count, skip_count
def calc_f1_score(answers, prediction):
f1_scores = []
for ans in answers:
ans_segs = mixed_segmentation(ans, rm_punc=True)
prediction_segs = mixed_segmentation(prediction, rm_punc=True)
lcs, lcs_len = find_lcs(ans_segs, prediction_segs)
if lcs_len == 0:
f1_scores.append(0)
continue
precision = 1.0 * lcs_len / len(prediction_segs)
recall = 1.0 * lcs_len / len(ans_segs)
f1 = (2 * precision * recall) / (precision + recall)
f1_scores.append(f1)
return max(f1_scores)
def calc_em_score(answers, prediction):
em = 0
for ans in answers:
ans_ = remove_punctuation(ans)
prediction_ = remove_punctuation(prediction)
if ans_ == prediction_:
em = 1
break
return em
def eval_file(dataset_file, prediction_file):
ground_truth_file = json.load(open(dataset_file, 'r'))
prediction_file = json.load(open(prediction_file, 'r'))
F1, EM, TOTAL, SKIP = evaluate(ground_truth_file, prediction_file)
AVG = (EM + F1) * 0.5
return EM, F1, AVG, TOTAL
if __name__ == '__main__':
EM, F1, AVG, TOTAL = eval_file(sys.argv[1], sys.argv[2])
print(EM)
print(F1)
print(TOTAL)
| python |
#!/usr/bin/env python
""" This is the base class to start the RESTful web service hosting the Blackboard API. """
import logging.config
from logging.handlers import RotatingFileHandler
from time import strftime
from flask import Flask, Blueprint, request, jsonify
from blackboard_api import settings
from blackboard_api.api_1_0.blackboard import ns as blackboard
from blackboard_api.api_1_0.restplus import api
from blackboard_api.database import db
__author__ = 'Manfred von Teichman'
__version__ = '1.0'
__maintainer__ = 'Manfred von Teichman'
__email__ = '[email protected]'
__status__ = 'Development'
app = Flask(__name__)
# Setup the logging functionality
handler = RotatingFileHandler('app.log', maxBytes=1000000, backupCount=3)
logging.config.fileConfig('logging.conf')
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
log.addHandler(handler)
# Catch any 404 error and return it as a json response
@app.errorhandler(404)
def not_found(error):
return jsonify(error=str(error)), 404
# Registers the logging functionality to run after each request.
@app.after_request
def after_request(response):
timestamp = strftime('[%Y-%b-%d %H:%M]')
log.info('%s %s %s %s %s %s',
timestamp, request.remote_addr, request.method,
request.scheme, request.full_path, response.status)
return response
def configure_app(flask_app):
flask_app.config['SQLALCHEMY_DATABASE_URI'] = settings.SQLALCHEMY_DATABASE_URI
flask_app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = settings.SQLALCHEMY_TRACK_MODIFICATIONS
flask_app.config['SWAGGER_UI_DOC_EXPANSION'] = settings.RESTPLUS_SWAGGER_UI_DOC_EXPANSION
flask_app.config['RESTPLUS_VALIDATE'] = settings.RESTPLUS_VALIDATE
flask_app.config['RESTPLUS_MASK_SWAGGER'] = settings.RESTPLUS_MASK_SWAGGER
flask_app.config['ERROR_404_HELP'] = settings.RESTPLUS_ERROR_404_HELP
# Create the app using a factory, setup its dependencies and the base url given the set prefix.
def initialize_app(flask_app):
configure_app(flask_app)
blueprint = Blueprint('api', __name__, url_prefix='/api/v1')
api.init_app(blueprint)
api.add_namespace(blackboard)
flask_app.register_blueprint(blueprint)
db.init_app(flask_app)
# Initialize the app and run it on the pre-configured hostname and port.
def main():
initialize_app(app)
app.run(debug=settings.FLASK_DEBUG, host=settings.FLASK_HOST, port=settings.FLASK_PORT)
if __name__ == '__main__':
main()
| python |
"""
decoded AUTH_HEADER (newlines added for readability):
{
"identity": {
"account_number": "1234",
"internal": {
"org_id": "5678"
},
"type": "User",
"user": {
"email": "[email protected]",
"first_name": "Firstname",
"is_active": true,
"is_internal": true,
"is_org_admin": false,
"last_name": "Lastname",
"locale": "en_US",
"username": "test_username"
}
    },
"entitlements": {
"insights": {
"is_entitled": true
}
}
}
"""
AUTH_HEADER = {
"X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJhY2NvdW50X251bWJlciI6Ij"
"EyMzQiLCJpbnRlcm5hbCI6eyJvcmdfaWQiOiI1Njc4In0sInR5cGUiOiJVc"
"2VyIiwidXNlciI6eyJlbWFpbCI6InRlc3RAZXhhbXBsZS5jb20iLCJmaXJz"
"dF9uYW1lIjoiRmlyc3RuYW1lIiwiaXNfYWN0aXZlIjp0cnVlLCJpc19pbnR"
"lcm5hbCI6dHJ1ZSwiaXNfb3JnX2FkbWluIjpmYWxzZSwibGFzdF9uYW1lIj"
"oiTGFzdG5hbWUiLCJsb2NhbGUiOiJlbl9VUyIsInVzZXJuYW1lIjoidGVzd"
"F91c2VybmFtZSJ9fSwiZW50aXRsZW1lbnRzIjp7Imluc2lnaHRzIjp7Imlz"
"X2VudGl0bGVkIjp0cnVlfX19Cg=="
}
AUTH_HEADER_NO_ENTITLEMENTS = {
"X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJhY2NvdW50X251bWJlciI6Ij"
"EyMzQiLCJ0eXBlIjoiVXNlciIsInVzZXIiOnsidXNl"
"cm5hbWUiOiJ0ZXN0X3VzZXJuYW1lIiwiZW1haWwiOi"
"J0ZXN0QGV4YW1wbGUuY29tIiwiZmlyc3RfbmFtZSI6"
"IkZpcnN0bmFtZSIsImxhc3RfbmFtZSI6Ikxhc3RuYW"
"1lIiwiaXNfYWN0aXZlIjp0cnVlLCJpc19vcmdfYWRt"
"aW4iOmZhbHNlLCJpc19pbnRlcm5hbCI6dHJ1ZSwibG"
"9jYWxlIjoiZW5fVVMifSwiaW50ZXJuYWwiOnsib3Jn"
"X2lkIjoiNTY3OCJ9fX0KCg=="
}
AUTH_HEADER_SMART_MGMT_FALSE = {
"X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJhY2NvdW50X251bWJlciI6"
"IjEyMzQiLCJpbnRlcm5hbCI6eyJvcmdfaWQiOiAi"
"NTY3OCJ9LCJ0eXBlIjogIlVzZXIiLCJ1c2VyIjp7"
"ImVtYWlsIjoidGVzdEBleGFtcGxlLmNvbSIsImZp"
"cnN0X25hbWUiOiJGaXJzdG5hbWUiLCJpc19hY3Rp"
"dmUiOnRydWUsImlzX2ludGVybmFsIjp0cnVlLCJp"
"c19vcmdfYWRtaW4iOmZhbHNlLCJsYXN0X25hbWUi"
"OiJMYXN0bmFtZSIsImxvY2FsZSI6ImVuX1VTIiwi"
"dXNlcm5hbWUiOiJ0ZXN0X3VzZXJuYW1lIn19LCJl"
"bnRpdGxlbWVudHMiOnsic21hcnRfbWFuYWdlbWVu"
"dCI6eyJpc19lbnRpdGxlZCI6IGZhbHNlfX19Cg=="
}
# this can't happen in real life, adding test anyway
AUTH_HEADER_NO_ACCT_BUT_HAS_ENTS = {
"X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJpbnRlcm5hbCI6eyJvcmdf"
"aWQiOiAiNTY3OCJ9LCJ0eXBlIjogIlVzZXIiLCJ1"
"c2VyIjp7ImVtYWlsIjoidGVzdEBleGFtcGxlLmNv"
"bSIsImZpcnN0X25hbWUiOiJGaXJzdG5hbWUiLCJp"
"c19hY3RpdmUiOnRydWUsImlzX2ludGVybmFsIjp0"
"cnVlLCJpc19vcmdfYWRtaW4iOmZhbHNlLCJsYXN0"
"X25hbWUiOiJMYXN0bmFtZSIsImxvY2FsZSI6ImVu"
"X1VTIiwidXNlcm5hbWUiOiJ0ZXN0X3VzZXJuYW1l"
"In19LCJlbnRpdGxlbWVudHMiOnsic21hcnRfbWFu"
"YWdlbWVudCI6eyJpc19lbnRpdGxlZCI6IHRydWV9"
"fX0K"
}
"""
decoded AUTH_HEADER_NO_ACCT (newlines added for readability):
{
"identity": {
"internal": {
"org_id": "9999"
},
"type": "User",
"user": {
"email": "[email protected]",
"first_name": "No",
"is_active": true,
"is_internal": true,
"is_org_admin": false,
"last_name": "Number",
"locale": "en_US",
"username": "nonumber"
}
}
}
"""
AUTH_HEADER_NO_ACCT = {
"X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJ0eXBlIjoiVXNlciIsInVzZXIiO"
"nsidXNlcm5hbWUiOiJub251bWJlciIsImVtYWlsIjoibm"
"9udW1iZXJAZXhhbXBsZS5jb20iLCJmaXJzdF9uYW1lIjo"
"iTm8iLCJsYXN0X25hbWUiOiJOdW1iZXIiLCJpc19hY3Rp"
"dmUiOnRydWUsImlzX29yZ19hZG1pbiI6ZmFsc2UsImlzX"
"2ludGVybmFsIjp0cnVlLCJsb2NhbGUiOiJlbl9VUyJ9LC"
"JpbnRlcm5hbCI6eyJvcmdfaWQiOiI5OTk5In19fQo="
}
FETCH_BASELINES_RESULT = [
{
"id": "ff35596c-f98e-11e9-aea9-98fa9b07d419",
"account": "1212729",
"display_name": "baseline1",
"fact_count": 1,
"created": "2019-10-17T16:23:34.238952Z",
"updated": "2019-10-17T16:25:34.041645Z",
"baseline_facts": [{"name": "fqdn", "value": "test.example1.com"}],
},
{
"id": "89df6310-f98e-11e9-8a65-98fa9b07d419",
"account": "1212729",
"display_name": "baseline2",
"fact_count": 1,
"created": "2019-10-17T16:23:34.238952Z",
"updated": "2019-10-17T16:25:34.041645Z",
"baseline_facts": [{"name": "arch", "value": "golden"}],
},
]
FETCH_SYSTEMS_WITH_PROFILES_CAPTURED_DATE_RESULT = [
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fa",
"created": "2019-01-31T13:00:00.100010Z",
"display_name": None,
"fqdn": "fake_system_99.example.com",
"id": "fc1e497a-28ae-11e9-afd9-c85b761454fa",
"insights_id": "01791a58-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "2620:52:0:2598:5054:ff:fecd:ae15"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"system_profile": {
"captured_date": "2020-03-30T18:42:23+00:00",
"salutation": "hello",
"fqdn": "hostname_two",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
"enabled_services": ["insights_client"],
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
"cpu_flags": ["maryland"],
"system_memory_bytes": 640,
"yum_repos": [{"name": "yummy", "enabled": False}, {"no_name": "bleh"}],
"network_interfaces": [
{
"name": "eth99",
"mtu": 3,
"ipv4_addresses": ["8.7.6.5"],
"ipv6_addresses": ["00:00:02"],
},
{"no_name": "foo"},
],
"system_profile_exists": True,
"id": "fc1e497a-28ae-11e9-afd9-c85b761454fa",
},
"tags": [],
"updated": "2019-01-31T14:00:00.500000Z",
},
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb",
"created": "2018-01-31T13:00:00.100010Z",
"display_name": "hello",
"fqdn": "fake_system_99.example.com",
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"insights_id": "00000000-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "2620:52:0:2598:5054:ff:fecd:ae15"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"system_profile": {
"captured_date": "2020-03-30T18:42:23+00:00",
"salutation": "hi",
"fqdn": "hostname_one",
"system_profile_exists": True,
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
"enabled_services": ["insights_client"],
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
"network_interfaces": [
{
"name": "eth99",
"mtu": 3,
"ipv4_addresses": ["8.7.6.5"],
"ipv6_addresses": ["00:00:01"],
},
{"no_name": "foo"},
],
},
"tags": [],
"updated": "2018-01-31T14:00:00.500000Z",
},
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb",
"created": "2018-01-31T13:00:00.100010Z",
"display_name": None,
"fqdn": "hostname_one",
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"insights_id": "00000000-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "2620:52:0:2598:5054:ff:fecd:ae15"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"system_profile": {
"captured_date": "2020-03-30T18:42:23+00:00",
"salutation": "hi",
"fqdn": "hostname_one",
"system_profile_exists": False,
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
"enabled_services": ["insights_client"],
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
"network_interfaces": [
{
"name": "eth99",
"mtu": 3,
"ipv4_addresses": ["8.7.6.5"],
"ipv6_addresses": ["00:00:01"],
},
{"no_name": "foo"},
],
},
"tags": [],
"updated": "2018-01-31T14:00:00.500000Z",
},
]
FETCH_SYSTEMS_WITH_PROFILES_RESULT = [
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fa",
"created": "2019-01-31T13:00:00.100010Z",
"display_name": None,
"fqdn": "fake_system_99.example.com",
"id": "fc1e497a-28ae-11e9-afd9-c85b761454fa",
"insights_id": "01791a58-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "2620:52:0:2598:5054:ff:fecd:ae15"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"system_profile": {
"salutation": "hello",
"fqdn": "hostname_two",
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
"cpu_flags": ["maryland"],
"system_memory_bytes": 640,
"yum_repos": [{"name": "yummy", "enabled": False}, {"no_name": "bleh"}],
"network_interfaces": [
{
"name": "eth99",
"mtu": 3,
"ipv4_addresses": ["8.7.6.5"],
"ipv6_addresses": ["00:00:02"],
},
{"no_name": "foo"},
],
"enabled_services": ["insights_client"],
"system_profile_exists": True,
"id": "fc1e497a-28ae-11e9-afd9-c85b761454fa",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
},
"tags": [],
"updated": "2019-01-31T14:00:00.500000Z",
},
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb",
"created": "2018-01-31T13:00:00.100010Z",
"display_name": "hello",
"fqdn": "fake_system_99.example.com",
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"insights_id": "00000000-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "2620:52:0:2598:5054:ff:fecd:ae15"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"system_profile": {
"salutation": "hi",
"fqdn": "hostname_one",
"system_profile_exists": True,
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
"enabled_services": ["insights_client"],
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
"network_interfaces": [
{
"name": "eth99",
"mtu": 3,
"ipv4_addresses": ["8.7.6.5"],
"ipv6_addresses": ["00:00:01"],
},
{"no_name": "foo"},
],
},
"tags": [],
"updated": "2018-01-31T14:00:00.500000Z",
},
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb",
"created": "2018-01-31T13:00:00.100010Z",
"display_name": None,
"fqdn": "hostname_one",
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"insights_id": "00000000-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "2620:52:0:2598:5054:ff:fecd:ae15"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"system_profile": {
"salutation": "hi",
"fqdn": "hostname_one",
"system_profile_exists": False,
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
"enabled_services": ["insights_client"],
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
"network_interfaces": [
{
"name": "eth99",
"mtu": 3,
"ipv4_addresses": ["8.7.6.5"],
"ipv6_addresses": ["00:00:01"],
},
{"no_name": "foo"},
],
},
"tags": [],
"updated": "2018-01-31T14:00:00.500000Z",
},
]
FETCH_SYSTEM_PROFILES_INV_SVC = """
{
"count": 1,
"total": 1,
"page": 1,
"per_page": 50,
"results": [
{
"id": "243926fa-262f-11e9-a632-c85b761454fa",
"system_profile": {
"arch": "x86_64",
"bios_vendor": "SeaBIOS",
"bios_version": "?-20180531_142017-buildhw-08.phx2.fedoraproject.org-1.fc28",
"cores_per_socket": 1,
"cpu_flags": [ "fpu", "vme" ],
"enabled_services": ["auditd", "chronyd", "crond" ],
"infrastructure_type": "virtual",
"infrastructure_vendor": "kvm",
"installed_packages": ["0:bash-4.4.19-7.el8", "0:chrony-3.3-3.el8",
"0:dnf-4.0.9.2-4.el8", "1:NetworkManager-1.14.0-14.el8"],
"installed_services": [ "arp-ethers", "auditd", "autovt@", "chronyd", "cpupower"],
"kernel_modules": [ "kvm", "pcspkr", "joydev", "xfs"],
"last_boot_time": "2019-03-25T19:32:18",
"network_interfaces": [
{
"ipv4_addresses": ["127.0.0.1"],
"ipv6_addresses": ["::1"],
"mac_address": "00:00:00:00:00:00",
"mtu": 65536,
"name": "lo",
"state": "UNKNOWN",
"type": "loopback"
},
{
"ipv4_addresses": ["192.168.0.1"],
"ipv6_addresses": ["fe80::5054:ff::0001"],
"mac_address": "52:54:00:00:00:00",
"mtu": 1500,
"name": "eth0",
"state": "UP",
"type": "ether"
}
],
"number_of_cpus": 2,
"number_of_sockets": 2,
"os_kernel_version": "4.18.0",
"running_processes": [ "watchdog/1", "systemd-logind", "md", "ksmd", "sshd" ],
"system_memory_bytes": 1917988864,
"yum_repos": [
{
"base_url": "https://cdn.example.com/content/freedos/1.0/i386/os",
"enabled": true,
"gpgcheck": true,
"name": "freedos 1.0 repo i386"
},
{
"base_url": "https://cdn.example.com/content/freedos/1.0/z80/os",
"enabled": false,
"gpgcheck": true,
"name": "freedos 1.0 repo z80"
}
]
}
}
],
"total": 1
}
"""
FETCH_SYSTEMS_WITH_PROFILES_SAME_FACTS_RESULT = [
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fa",
"created": "2019-01-31T13:00:00.100010Z",
"display_name": None,
"system_profile": {
"salutation": "howdy",
"system_profile_exists": True,
"id": "fc1e497a-28ae-11e9-afd9-c85b761454fa",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
"enabled_services": ["insights_client"],
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
},
"fqdn": "fake_system_99.example.com",
"id": "fc1e497a-28ae-11e9-afd9-c85b761454fa",
"insights_id": "01791a58-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "2620:52:0:2598:5054:ff:fecd:ae15"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"tags": [],
"updated": "2019-01-31T14:00:00.500000Z",
},
{
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb",
"created": "2018-01-31T13:00:00.100010Z",
"display_name": None,
"system_profile": {
"salutation": "howdy",
"system_profile_exists": True,
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z",
"enabled_services": ["insights_client"],
"installed_packages": [
"0:bash-4.4.23-6.fc29.x86_64",
"this isn't parsable",
"no_epoch-1.0-1.fc99.8088",
],
},
"fqdn": "fake_system_99.example.com",
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"insights_id": "00000000-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "2620:52:0:2598:5054:ff:fecd:ae15"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"tags": [],
"updated": "2018-01-31T14:00:00.500000Z",
},
]
FETCH_SYSTEM_TAGS = """
{
"total": 1,
"count": 1,
"page": 1,
"per_page": 50,
"results": {
"ec67f65c-2bc8-4ce8-82e2-6a27cada8d31": [
{
"namespace": "insights-client",
"key": "group",
"value": "XmygroupX"
}
]
}
}
"""
FETCH_SYSTEMS_INV_SVC = """
{
"count": 2,
"total": 2,
"page": 1,
"per_page": 50,
"results": [
{
"account": "1234567",
"bios_uuid": "dc43976c263411e9bcf0c85b761454fa",
"created": "2018-12-01T12:00:00.000000Z",
"display_name": "system1.example.com",
"fqdn": "system.example.com",
"id": "243926fa-262f-11e9-a632-c85b761454fa",
"insights_id": "TEST-ID00-0000-0000",
"ip_addresses": [
"10.0.0.1",
"10.0.0.2"
],
"mac_addresses": [
"c2:00:d0:c8:00:01"
],
"subscription_manager_id": "1234FAKE1234",
"tags": [],
"updated": "2018-12-31T12:00:00.000000Z",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z"
},
{
"account": "1234567",
"bios_uuid": "ec43976c263411e9bcf0c85b761454fa",
"created": "2018-12-01T12:00:00.000000Z",
"display_name": "system2.example.com",
"fqdn": "system2.example.com",
"id": "264fb5b2-262f-11e9-9b12-c85b761454fa",
"insights_id": "TEST-ID22-2222-2222",
"ip_addresses": [
"10.0.0.3",
"10.0.0.4"
],
"mac_addresses": [
"ec2:00:d0:c8:00:01"
],
"subscription_manager_id": "2222FAKE2222",
"tags": [],
"updated": "2018-12-31T12:00:00.000000Z",
"stale_warning_timestamp": "2018-12-31T12:00:00.000000Z"
}
]}"""
SYSTEM_NOT_FOUND_TEMPLATE = """
{
"count": 0,
"page": 1,
"per_page": 50,
"results": [],
"total": 0
}
"""
| python |
"""
Plot a traced WE trajectory onto 2D plots.
# TODO: integrate into h5_plot
"""
import numpy as np
import matplotlib.pyplot as plt
import h5py
def get_parents(walker_tuple, h5_file):
it, wlk = walker_tuple
parent = h5_file[f"iterations/iter_{it:08d}"]["seg_index"]["parent_id"][wlk]
return it-1, parent
def trace_walker(walker_tuple, h5_file):
# Unroll the tuple into iteration/walker
it, wlk = walker_tuple
# Initialize our path
path = [(it,wlk)]
# And trace it
while it > 1:
it, wlk = get_parents((it, wlk), h5_file)
path.append((it,wlk))
return np.array(sorted(path, key=lambda x: x[0]))
def get_aux(path, h5_file, aux_name):
# Initialize a list for the pcoords
aux_coords = []
# Loop over the path and get the pcoords for each walker
for it, wlk in path:
# Here we are taking every 10 time points, feel free to adjust to see what that does
aux_coords.append(h5_file[f'iterations/iter_{it:08d}/auxdata/{str(aux_name)}'][wlk][::10])
#pcoords.append(h5_file[f'iterations/iter_{it:08d}']['pcoord'][wlk][::10,:])
return np.array(aux_coords)
def plot_trace(h5, walker_tuple, aux_x, aux_y=None, evolution=False, ax=None):
"""
    Plot the trace of a single walker, identified by walker_tuple, onto a 2D
    aux_x/aux_y plot. When evolution=True, aux_x is instead expected to be a
    pre-loaded pcoord array and the trace is plotted against iteration number.
"""
if ax is None:
fig, ax = plt.subplots(figsize=(7,5))
else:
fig = plt.gcf()
it, wlk = walker_tuple
with h5py.File(h5, "r") as w:
# adjustments for plothist evolution of only aux_x data
if evolution:
# split iterations up to provide y-values for each x-value (pcoord)
iter_split = [i + (j/aux_x.shape[1])
for i in range(0, it)
for j in range(0, aux_x.shape[1])]
ax.plot(aux_x[:,0], iter_split, c="black", lw=2)
ax.plot(aux_x[:,0], iter_split, c="white", lw=1)
return
path = trace_walker((it, wlk), w)
# And pull aux_coords for the path calculated
aux_x = get_aux(path, w, aux_x)
aux_y = get_aux(path, w, aux_y)
ax.plot(aux_x[:,0], aux_y[:,0], c="black", lw=2)
ax.plot(aux_x[:,0], aux_y[:,0], c="cyan", lw=1)
# from h5_plot_main import *
# data_options = {"data_type" : "average",
# "p_max" : 20,
# "p_units" : "kcal",
# "last_iter" : 200,
# "bins" : 100
# }
# h5 = "1a43_v02/wcrawl/west_i200_crawled.h5"
# aux_x = "1_75_39_c2"
# aux_y = "M2Oe_M1He1"
# X, Y, Z = pdist_to_normhist(h5, aux_x, aux_y, **data_options)
# levels = np.arange(0, data_options["p_max"] + 1, 1)
# plt.contour(X, Y, Z, levels=levels, colors="black", linewidths=1)
# plt.contourf(X, Y, Z, levels=levels, cmap="gnuplot_r")
# plt.colorbar()
# from search_aux import *
# # for 1A43 V02: C2 and Dist M2-M1 - minima at val = 53° and 2.8A is alt minima = i173 s70
# iter, seg = search_aux_xy_nn(h5, aux_x, aux_y, 53, 2.8, data_options["last_iter"])
# plot_trace(h5, (iter,seg), aux_x, aux_y)
# plt.show()
| python |
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
class Passthrough(BaseEstimator, TransformerMixin):
"""
Class for passing through features that require no preprocessing.
https://stackoverflow.com/questions/54592115/appending-the-columntransformer-result-to-the-original-data-within-a-pipeline
"""
def fit(self, X, y=None):
return self
def transform(self, X):
# Single-column data frames are Pandas series, which Scikit-learn doesn't know how to deal with. Make sure that
# result is always a data frame.
X = pd.DataFrame(X)
return X
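# Hypothetical usage sketch (not part of the original module): Passthrough can sit
# inside a ColumnTransformer next to real preprocessors; the column names below
# ("age", "customer_id") are illustrative only.
#
#   from sklearn.compose import ColumnTransformer
#   from sklearn.preprocessing import StandardScaler
#
#   ct = ColumnTransformer([
#       ("scaled", StandardScaler(), ["age"]),
#       ("as_is", Passthrough(), ["customer_id"]),
#   ])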
| python |
from bs4 import BeautifulSoup as soup
html = """
<html>
<body>
<ul>
<li><a href="http://www.naver.com">NAVER</a></li>
<li><a href="http://www.daum.net">DAUM</a></li>
</ul>
</body>
</html>
"""
content = soup(html, "html.parser")
links = content.find_all("a")
for a in links:
print(a.string, " > ", a.attrs["href"])
| python |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
# pylint: disable=too-many-statements
from azure.cli.core.commands.parameters import (
tags_type,
get_enum_type,
resource_group_name_type,
get_location_type
)
def load_arguments(self, _):
with self.argument_context('internet-analyzer profile create') as c:
c.argument('resource_group', resource_group_name_type)
c.argument('name', id_part=None, help='The name of the Internet Analyzer profile to be created')
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('tags', tags_type)
c.argument('enabled_state', arg_type=get_enum_type(['Enabled', 'Disabled']), id_part=None, help='The state of the Experiment')
c.argument('etag', id_part=None, help='Gets a unique read-only string that changes whenever the resource is updated.')
with self.argument_context('internet-analyzer profile update') as c:
c.argument('resource_group', resource_group_name_type)
c.argument('name', id_part=None, help='The name of the Internet Analyzer profile to be updated')
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('tags', tags_type)
c.argument('enabled_state', arg_type=get_enum_type(['Enabled', 'Disabled']), id_part=None, help='The state of the Experiment')
c.argument('etag', id_part=None, help='Gets a unique read-only string that changes whenever the resource is updated.')
with self.argument_context('internet-analyzer profile delete') as c:
c.argument('resource_group', resource_group_name_type)
c.argument('name', id_part=None, help='The name of the Internet Analyzer profile to be deleted')
with self.argument_context('internet-analyzer profile list') as c:
c.argument('resource_group', resource_group_name_type)
with self.argument_context('internet-analyzer profile show') as c:
c.argument('resource_group', resource_group_name_type)
c.argument('name', id_part=None, help='The name of the Internet Analyzer profile to show')
with self.argument_context('internet-analyzer preconfigured-endpoint list') as c:
c.argument('resource_group', resource_group_name_type)
c.argument('profile_name', id_part=None, help='The name of the Internet Analyzer profile for which to list preconfigured endpoints')
with self.argument_context('internet-analyzer test create') as c:
c.argument('resource_group', resource_group_name_type)
c.argument('profile_name', id_part=None, help='The name of the Internet Analyzer profile under which the new test should be created')
c.argument('name', id_part=None, help='The name of the Internet Analyzer test to be created')
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('tags', tags_type)
c.argument('description', id_part=None, help='The description of the details or intents of the test')
c.argument('endpoint_a_name', id_part=None, help='The name of the control endpoint')
c.argument('endpoint_a_endpoint', id_part=None, help='The URL of the control endpoint in <hostname>[/<custom-path>] format (e.g., www.contoso.com or www.contoso.com/some/path/to/trans.gif). Must support HTTPS. If an object path isn\'t specified explicitly, Internet Analyzer will use "/apc/trans.gif" as the object path by default, which is where the preconfigured endpoints are hosting the one-pixel image.')
c.argument('endpoint_b_name', id_part=None, help='The name of the other endpoint')
c.argument('endpoint_b_endpoint', id_part=None, help='The URL of the other endpoint in <hostname>[/<custom-path>] format (e.g., www.contoso.com or www.contoso.com/some/path/to/trans.gif). Must support HTTPS. If an object path isn\'t specified explicitly, Internet Analyzer will use "/apc/trans.gif" as the object path by default, which is where the preconfigured endpoints are hosting the one-pixel image.')
        c.argument('enabled_state', arg_type=get_enum_type(['Enabled', 'Disabled']), id_part=None, help='The initial state of the test')
with self.argument_context('internet-analyzer test update') as c:
c.argument('resource_group', resource_group_name_type)
c.argument('profile_name', id_part=None, help='The name of the Internet Analyzer profile under which the test exists')
c.argument('name', id_part=None, help='The name of the Internet Analyzer test to be updated')
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('tags', tags_type)
c.argument('description', id_part=None, help='The description of the details or intents of the test')
c.argument('endpoint_a_name', id_part=None, help='The name of the control endpoint')
c.argument('endpoint_a_endpoint', id_part=None, help='The URL of the control endpoint in <hostname>[/<custom-path>] format (e.g., www.contoso.com or www.contoso.com/some/path/to/trans.gif). Must support HTTPS. If an object path isn\'t specified explicitly, Internet Analyzer will use "/apc/trans.gif" as the object path by default, which is where the preconfigured endpoints are hosting the one-pixel image.')
c.argument('endpoint_b_name', id_part=None, help='The name of the other endpoint')
c.argument('endpoint_b_endpoint', id_part=None, help='The URL of the other endpoint in <hostname>[/<custom-path>] format (e.g., www.contoso.com or www.contoso.com/some/path/to/trans.gif). Must support HTTPS. If an object path isn\'t specified explicitly, Internet Analyzer will use "/apc/trans.gif" as the object path by default, which is where the preconfigured endpoints are hosting the one-pixel image.')
c.argument('enabled_state', arg_type=get_enum_type(['Enabled', 'Disabled']), id_part=None, help='The state of the Experiment')
with self.argument_context('internet-analyzer test delete') as c:
c.argument('resource_group', resource_group_name_type)
c.argument('profile_name', id_part=None, help='The name of the Internet Analyzer profile under which the test exists')
c.argument('name', id_part=None, help='The name of the Internet Analyzer test to delete')
with self.argument_context('internet-analyzer test list') as c:
c.argument('resource_group', resource_group_name_type)
c.argument('profile_name', id_part=None, help='The name of the Internet Analyzer profile for which to list tests')
with self.argument_context('internet-analyzer test show') as c:
c.argument('resource_group', resource_group_name_type)
c.argument('profile_name', id_part=None, help='The name of the Internet Analyzer profile under which the test exists')
c.argument('name', id_part=None, help='The name of the Internet Analyzer test to show')
| python |
from .base import AttackMetric
from ...tags import *
from ...text_process.tokenizer import Tokenizer
class JaccardWord(AttackMetric):
NAME = "Jaccard Word Similarity"
def __init__(self, tokenizer : Tokenizer):
"""
Args:
tokenizer: A tokenizer that will be used in this metric. Must be an instance of :py:class:`.Tokenizer`
"""
self.tokenizer = tokenizer
@property
def TAGS(self):
if hasattr(self.tokenizer, "TAGS"):
return self.tokenizer.TAGS
return set()
def calc_score(self, sentA : str, sentB : str) -> float:
"""
Args:
sentA: First sentence.
sentB: Second sentence.
Returns:
Jaccard word similarity of two sentences.
"""
tokenA = self.tokenizer.tokenize(sentA, pos_tagging=False)
tokenB = self.tokenizer.tokenize(sentB, pos_tagging=False)
        AS = set(tokenA)
        BS = set(tokenB)
        return len(AS & BS) / len(AS | BS)
def after_attack(self, input, adversarial_sample):
if adversarial_sample is not None:
return self.calc_score( input["x"], adversarial_sample )
return None
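# Minimal usage sketch (hypothetical tokenizer class name; any concrete Tokenizer from the
# text_process package should work):
#   metric = JaccardWord(PunctTokenizer())
#   metric.calc_score("the cat sat", "the cat ran")   # 2 shared tokens / 4 total -> 0.5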
| python |
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route("/")
def main():
return render_template("main_better.html")
# getting basic user data
@app.route('/ask/', methods=['POST', 'GET'])
def ask():
if request.method == 'GET':
return render_template('ask.html')
else:
try:
return render_template('ask.html', name=request.form['name'], student=request.form['student'])
        except KeyError:  # form fields missing; fall back to the empty form
            return render_template('ask.html')
#
@app.route('/profile/<name>/')
def hello_name(name):
return render_template('profile.html', name=name)
if __name__ == "__main__":
    app.run()
| python |
from keras.layers import Layer
from keras_contrib.layers.normalization.instancenormalization import InputSpec
import numpy as np
import matplotlib.image as mpimg
from progress.bar import Bar
import datetime
import time
import json
import csv
import os
import keras.backend as K
import tensorflow as tf
from skimage.transform import resize
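# Keras layer that pads the two spatial dimensions by reflection (via tf.pad in 'REFLECT' mode)
# instead of zero-padding.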
class ReflectionPadding2D(Layer):
def __init__(self, padding=(1, 1), **kwargs):
self.padding = tuple(padding)
self.input_spec = [InputSpec(ndim=4)]
super(ReflectionPadding2D, self).__init__(**kwargs)
def compute_output_shape(self, s):
return (s[0], s[1] + 2 * self.padding[0], s[2] + 2 * self.padding[1], s[3])
def call(self, x, mask=None):
w_pad, h_pad = self.padding
return tf.pad(x, [[0, 0], [h_pad, h_pad], [w_pad, w_pad], [0, 0]], 'REFLECT')
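# History buffer of generated images: query() returns the incoming batch, but once the pool
# is full each image is, with 50% probability, swapped for an older stored synthetic image
# (a common trick for stabilising GAN discriminator training).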
class ImagePool():
def __init__(self, pool_size):
self.pool_size = pool_size
if self.pool_size > 0:
self.num_imgs = 0
self.images = []
def query(self, images):
if self.pool_size == 0:
return images
return_images = []
for image in images:
if len(image.shape) == 3:
image = image[np.newaxis, :, :, :]
if self.num_imgs < self.pool_size: # fill up the image pool
self.num_imgs = self.num_imgs + 1
if len(self.images) == 0:
self.images = image
else:
self.images = np.vstack((self.images, image))
if len(return_images) == 0:
return_images = image
else:
return_images = np.vstack((return_images, image))
else: # 50% chance that we replace an old synthetic image
p = np.random.rand()
if p > 0.5:
random_id = np.random.randint(0, self.pool_size)
tmp = self.images[random_id, :, :, :]
tmp = tmp[np.newaxis, :, :, :]
self.images[random_id, :, :, :] = image[0, :, :, :]
if len(return_images) == 0:
return_images = tmp
else:
return_images = np.vstack((return_images, tmp))
else:
if len(return_images) == 0:
return_images = image
else:
return_images = np.vstack((return_images, image))
return return_images
def load_data(subfolder='', generator=False):
def create_image_array(image_list, image_path, image_size, nr_of_channels):
bar = Bar('Loading...', max=len(image_list))
# Define image array
image_array = np.empty((len(image_list),) + (image_size) + (nr_of_channels,))
i = 0
for image_name in image_list:
# If file is image...
if image_name[-1].lower() == 'g': # to avoid e.g. thumbs.db files
# Load image and convert into np.array
image = mpimg.imread(os.path.join(image_path, image_name)) # Normalized to [0,1]
# image = np.array(Image.open(os.path.join(image_path, image_name)))
image = resize(image,(200,200))
# Add third dimension if image is 2D
if nr_of_channels == 1: # Gray scale image -> MR image
image = image[:, :, np.newaxis]
                # Normalize image from [0, 1] to [-1, 1]
                image = image * 2 - 1
                # image = image / 127.5 - 1  # (use this instead for 8-bit [0, 255] images)
# Add image to array
image_array[i, :, :, :] = image
i += 1
bar.next()
bar.finish()
return image_array
# Image paths
trainA_path = os.path.join('data', subfolder, 'trainA')
trainB_path = os.path.join('data', subfolder, 'trainB')
testA_path = os.path.join('data', subfolder, 'testA')
testB_path = os.path.join('data', subfolder, 'testB')
# Image file names
trainA_image_names = sorted(os.listdir(trainA_path))
trainB_image_names = sorted(os.listdir(trainB_path))
testA_image_names = sorted(os.listdir(testA_path))
testB_image_names = sorted(os.listdir(testB_path))
# Examine one image to get size and number of channels
im_test = mpimg.imread(os.path.join(trainA_path, trainA_image_names[0]))
# im_test = np.array(Image.open(os.path.join(trainA_path, trainA_image_names[0])))
if len(im_test.shape) == 2:
image_size = im_test.shape
nr_of_channels = 1
else:
image_size = im_test.shape[0:-1]
nr_of_channels = im_test.shape[-1]
trainA_images = create_image_array(trainA_image_names, trainA_path, (200,200), nr_of_channels)
trainB_images = create_image_array(trainB_image_names, trainB_path, (200,200), nr_of_channels)
testA_images = create_image_array(testA_image_names, testA_path, (200,200), nr_of_channels)
testB_images = create_image_array(testB_image_names, testB_path, (200,200), nr_of_channels)
return {"image_size": image_size, "nr_of_channels": nr_of_channels,
"trainA_images": trainA_images, "trainB_images": trainB_images,
"testA_images": testA_images, "testB_images": testB_images,
"trainA_image_names": trainA_image_names,
"trainB_image_names": trainB_image_names,
"testA_image_names": testA_image_names,
"testB_image_names": testB_image_names}
def write_metadata_to_JSON(model, opt):
# Save meta_data
data = {}
data['meta_data'] = []
data['meta_data'].append({
'img shape: height,width,channels': opt['img_shape'],
'batch size': opt['batch_size'],
'save training img interval': opt['save_training_img_interval'],
'normalization function': str(model['normalization']),
'lambda_ABA': opt['lambda_ABA'],
'lambda_BAB': opt['lambda_BAB'],
'lambda_adversarial': opt['lambda_adversarial'],
'learning_rate_D': opt['learning_rate_D'],
'learning rate G': opt['learning_rate_G'],
'epochs': opt['epochs'],
'use linear decay on learning rates': opt['use_linear_decay'],
'epoch where learning rate linear decay is initialized (if use_linear_decay)': opt['decay_epoch'],
'generator iterations': opt['generator_iterations'],
'discriminator iterations': opt['discriminator_iterations'],
'use patchGan in discriminator': opt['use_patchgan'],
'beta 1': opt['beta_1'],
'beta 2': opt['beta_2'],
'REAL_LABEL': opt['REAL_LABEL'],
'number of A train examples': len(opt['A_train']),
'number of B train examples': len(opt['B_train']),
'number of A test examples': len(opt['A_test']),
'number of B test examples': len(opt['B_test']),
'discriminator sigmoid': opt['discriminator_sigmoid'],
'resize convolution': opt['use_resize_convolution'],
})
with open('{}/meta_data.json'.format(opt['out_dir']), 'w') as outfile:
json.dump(data, outfile, sort_keys=True)
def write_loss_data_to_file(opt, history):
keys = sorted(history.keys())
with open('images/{}/loss_output.csv'.format(opt['date_time']), 'w') as csv_file:
writer = csv.writer(csv_file, delimiter=',')
writer.writerow(keys)
writer.writerows(zip(*[history[key] for key in keys]))
def join_and_save(opt, images, save_path):
# Join images
image = np.hstack(images)
# Save images
if opt['channels'] == 1:
image = image[:, :, 0]
mpimg.imsave(save_path, image, vmin=-1, vmax=1, cmap='gray')
def save_epoch_images(model, opt, epoch, num_saved_images=1):
# Save training images
nr_train_im_A = opt['A_train'].shape[0]
nr_train_im_B = opt['B_train'].shape[0]
rand_ind_A = np.random.randint(nr_train_im_A)
rand_ind_B = np.random.randint(nr_train_im_B)
real_image_A = opt['A_train'][rand_ind_A]
real_image_B = opt['B_train'][rand_ind_B]
synthetic_image_B = model['G_A2B'].predict(real_image_A[np.newaxis])[0]
synthetic_image_A = model['G_B2A'].predict(real_image_B[np.newaxis])[0]
reconstructed_image_A = model['G_B2A'].predict(synthetic_image_B[np.newaxis])[0]
reconstructed_image_B = model['G_A2B'].predict(synthetic_image_A[np.newaxis])[0]
save_path_A = '{}/train_A/epoch{}.png'.format(opt['out_dir'], epoch)
save_path_B = '{}/train_B/epoch{}.png'.format(opt['out_dir'], epoch)
if opt['paired_data']:
real_image_Ab = opt['B_train'][rand_ind_A]
real_image_Ba = opt['A_train'][rand_ind_B]
join_and_save(opt, (real_image_Ab, real_image_A, synthetic_image_B, reconstructed_image_A), save_path_A)
join_and_save(opt, (real_image_Ba, real_image_B, synthetic_image_A, reconstructed_image_B), save_path_B)
else:
join_and_save(opt, (real_image_A, synthetic_image_B, reconstructed_image_A), save_path_A)
join_and_save(opt, (real_image_B, synthetic_image_A, reconstructed_image_B), save_path_B)
# Save test images
real_image_A = opt['A_test'][0]
real_image_B = opt['B_test'][0]
synthetic_image_B = model['G_A2B'].predict(real_image_A[np.newaxis])[0]
synthetic_image_A = model['G_B2A'].predict(real_image_B[np.newaxis])[0]
reconstructed_image_A = model['G_B2A'].predict(synthetic_image_B[np.newaxis])[0]
reconstructed_image_B = model['G_A2B'].predict(synthetic_image_A[np.newaxis])[0]
save_path_A = '{}/test_A/epoch{}.png'.format(opt['out_dir'], epoch)
save_path_B = '{}/test_B/epoch{}.png'.format(opt['out_dir'], epoch)
if opt['paired_data']:
real_image_Ab = opt['B_test'][0]
real_image_Ba = opt['A_test'][0]
join_and_save(opt, (real_image_Ab, real_image_A, synthetic_image_B, reconstructed_image_A), save_path_A)
join_and_save(opt, (real_image_Ba, real_image_B, synthetic_image_A, reconstructed_image_B), save_path_B)
else:
join_and_save(opt, (real_image_A, synthetic_image_B, reconstructed_image_A), save_path_A)
join_and_save(opt, (real_image_B, synthetic_image_A, reconstructed_image_B), save_path_B)
def save_tmp_images(model, opt, real_image_A, real_image_B, synthetic_image_A, synthetic_image_B):
try:
reconstructed_image_A = model['G_B2A'].predict(synthetic_image_B[np.newaxis])[0]
reconstructed_image_B = model['G_A2B'].predict(synthetic_image_A[np.newaxis])[0]
real_images = np.vstack((real_image_A, real_image_B))
synthetic_images = np.vstack((synthetic_image_B, synthetic_image_A))
reconstructed_images = np.vstack((reconstructed_image_A, reconstructed_image_B))
save_path = '{}/tmp.png'.format(opt['out_dir'])
join_and_save(opt, (real_images, synthetic_images, reconstructed_images), save_path)
except: # Ignore if file is open
pass
def get_lr_linear_decay_rate(opt):
# Calculate decay rates
# max_nr_images = max(len(opt['A_train']), len(opt['B_train']))
nr_train_im_A = opt['A_train'].shape[0]
nr_train_im_B = opt['B_train'].shape[0]
nr_batches_per_epoch = int(np.ceil(np.max((nr_train_im_A, nr_train_im_B)) / opt['batch_size']))
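    # The discriminators receive twice as many optimizer updates per epoch as the generators,
    # so their per-update decay step is computed from 2 * nr_batches_per_epoch.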
updates_per_epoch_D = 2 * nr_batches_per_epoch
updates_per_epoch_G = nr_batches_per_epoch
nr_decay_updates_D = (opt['epochs'] - opt['decay_epoch'] + 1) * updates_per_epoch_D
nr_decay_updates_G = (opt['epochs'] - opt['decay_epoch'] + 1) * updates_per_epoch_G
decay_D = opt['learning_rate_D'] / nr_decay_updates_D
decay_G = opt['learning_rate_G'] / nr_decay_updates_G
return decay_D, decay_G
def update_lr(model, decay):
new_lr = K.get_value(model.optimizer.lr) - decay
if new_lr < 0:
new_lr = 0
# print(K.get_value(model.optimizer.lr))
K.set_value(model.optimizer.lr, new_lr)
def print_ETA(opt, start_time, epoch, nr_im_per_epoch, loop_index):
passed_time = time.time() - start_time
iterations_so_far = ((epoch - 1) * nr_im_per_epoch + loop_index) / opt['batch_size']
iterations_total = opt['epochs'] * nr_im_per_epoch / opt['batch_size']
iterations_left = iterations_total - iterations_so_far
eta = round(passed_time / (iterations_so_far + 1e-5) * iterations_left)
passed_time_string = str(datetime.timedelta(seconds=round(passed_time)))
eta_string = str(datetime.timedelta(seconds=eta))
print('Elapsed time', passed_time_string, ': ETA in', eta_string)
def save_model(opt, model, epoch):
# Create folder to save model architecture and weights
directory = os.path.join('saved_models', opt['date_time'])
if not os.path.exists(directory):
os.makedirs(directory)
weights_path = '{}/{}_weights_epoch_{}.hdf5'.format(directory, model.name, epoch)
model.save_weights(weights_path)
    model_path = '{}/{}_model_epoch_{}.json'.format(directory, model.name, epoch)
json_string = model.to_json()
with open(model_path, 'w') as outfile:
json.dump(json_string, outfile)
print('{} has been saved in saved_models/{}/'.format(model.name, opt['date_time']))
| python |
import time
from umqtt.simple import MQTTClient
def sub_cb(topic, msg):
print((topic, msg))
c = MQTTClient("umqtt_client", "localhost")
c.set_callback(sub_cb)  # register the callback so wait_msg() can deliver messages
c.connect()
c.subscribe(b"foo_topic")
c.publish(b"foo_topic", b"hello")
while 1:
c.wait_msg()
c.disconnect()
| python |
import math
import os
import pickle
import sys
import gym
import numpy as np
import quaternion
import torch
from torch.nn import functional as F
from torchvision import transforms
import skimage.morphology
from PIL import Image
import matplotlib
if matplotlib.get_backend() == "agg":
print("matplot backend is {}".format(matplotlib.get_backend()))
# matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from .utils.map_builder import MapBuilder
from .utils.fmm_planner import FMMPlanner
from .utils.noisy_actions import CustomActionSpaceConfiguration
from .utils.supervision import HabitatMaps
from .utils.grid import get_grid, get_grid_full
from .utils import pose as pu
from .utils import visualizations as vu
import habitat
from habitat import logger
from habitat.config.default import get_config as cfg_env
from habitat.datasets.pointnav.pointnav_dataset import PointNavDatasetV1
from habitat_baselines.config.default import get_config as cfg_baseline
import onpolicy
def _preprocess_depth(depth):
    depth = depth[:, :, 0] * 1
    # Treat readings at (or beyond) the maximum sensor range as missing
    mask2 = depth > 0.99
    depth[mask2] = 0.
    # Fill missing pixels column-wise with that column's maximum valid depth
    for i in range(depth.shape[1]):
        depth[:, i][depth[:, i] == 0.] = depth[:, i].max()
    # Columns with no valid readings stay 0 -> mark them as NaN
    mask1 = depth == 0
    depth[mask1] = np.NaN
    # Rescale the normalized depth for the mapper
    depth = depth * 1000.
    return depth
class Exploration_Env(habitat.RLEnv):
def __init__(self, args, config_env, config_baseline, dataset, run_dir):
self.args = args
self.run_dir = run_dir
self.num_agents = args.num_agents
self.use_restrict_map = args.use_restrict_map
self.use_complete_reward = args.use_complete_reward
self.use_time_penalty = args.use_time_penalty
self.use_repeat_penalty = args.use_repeat_penalty
self.reward_decay = args.reward_decay
self.use_render = args.use_render
self.render_merge = args.render_merge
self.save_gifs = args.save_gifs
self.map_resolution = args.map_resolution
self.map_size_cm = args.map_size_cm
self.num_actions = 3
self.dt = 10
self.reward_gamma = 1
self.sensor_noise_fwd = \
pickle.load(open(onpolicy.__path__[0] + "/envs/habitat/model/noise_models/sensor_noise_fwd.pkl", 'rb'))
self.sensor_noise_right = \
pickle.load(open(onpolicy.__path__[0] + "/envs/habitat/model/noise_models/sensor_noise_right.pkl", 'rb'))
self.sensor_noise_left = \
pickle.load(open(onpolicy.__path__[0] + "/envs/habitat/model/noise_models/sensor_noise_left.pkl", 'rb'))
habitat.SimulatorActions.extend_action_space("NOISY_FORWARD")
habitat.SimulatorActions.extend_action_space("NOISY_RIGHT")
habitat.SimulatorActions.extend_action_space("NOISY_LEFT")
config_env.defrost()
config_env.SIMULATOR.ACTION_SPACE_CONFIG = "CustomActionSpaceConfiguration"
config_env.freeze()
super().__init__(config_env, dataset)
self.scene_name = self.habitat_env.sim.config.SCENE
if "replica" in self.scene_name:
self.scene_id = self.scene_name.split("/")[-3]
else:
self.scene_id = self.scene_name.split("/")[-1].split(".")[0]
self.action_space = gym.spaces.Discrete(self.num_actions)
self.observation_space = gym.spaces.Box(0, 255,
(3, args.frame_height,
args.frame_width),
dtype='uint8')
self.share_observation_space = gym.spaces.Box(0, 255,
(3, args.frame_height,
args.frame_width),
dtype='uint8')
self.mapper = []
for _ in range(self.num_agents):
self.mapper.append(self.build_mapper())
self.curr_loc = []
self.last_loc = []
self.curr_loc_gt = []
self.last_loc_gt = []
self.last_sim_location = []
self.map = []
self.explored_map = []
self.episode_no = 0
self.res = transforms.Compose([transforms.ToPILImage(),
transforms.Resize((args.frame_height, args.frame_width),
interpolation=Image.NEAREST)])
self.maps_dict = []
for _ in range(self.num_agents):
self.maps_dict.append({})
if self.use_render:
plt.ion()
self.figure, self.ax = plt.subplots(self.num_agents, 3, figsize=(6*16/9, 6),
facecolor="whitesmoke",
num="Scene {} Map".format(self.scene_id))
if args.render_merge:
self.figure_m, self.ax_m = plt.subplots(1, 2, figsize=(6*16/9, 6),
facecolor="whitesmoke",
num="Scene {} Merge Map".format(self.scene_id))
def randomize_env(self):
self._env._episode_iterator._shuffle_iterator()
def save_trajectory_data(self):
traj_dir = '{}/trajectory/{}/'.format(self.run_dir, self.scene_id)
if not os.path.exists(traj_dir):
os.makedirs(traj_dir)
for agent_id in range(self.num_agents):
filepath = traj_dir + 'episode' + str(self.episode_no) +'_agent' + str(agent_id) + ".txt"
with open(filepath, "w+") as f:
f.write(self.scene_name + "\n")
                for state in self.trajectory_states[agent_id]:
f.write(str(state)+"\n")
f.flush()
def save_position(self):
self.agent_state = []
for agent_id in range(self.num_agents):
self.agent_state.append(self._env.sim.get_agent_state())
self.trajectory_states[agent_id].append([self.agent_state[agent_id].position,
self.agent_state[agent_id].rotation])
def reset(self):
self.reward_gamma = 1
self.episode_no += 1
self.timestep = 0
self._previous_action = None
self.trajectory_states = [[] for _ in range(self.num_agents)]
self.explored_ratio_step = np.ones(self.num_agents) * (-1.0)
self.merge_explored_ratio_step = -1.0
self.explored_ratio_threshold = 0.9
self.merge_ratio = 0
self.ratio = np.zeros(self.num_agents)
if self.args.randomize_env_every > 0:
if np.mod(self.episode_no, self.args.randomize_env_every) == 0:
self.randomize_env()
# Get Ground Truth Map
self.explorable_map = []
self.n_rot = []
self.n_trans = []
self.init_theta = []
self.agent_n_rot = [[] for agent_id in range(self.num_agents)]
self.agent_n_trans = [[] for agent_id in range(self.num_agents)]
self.agent_st = []
obs = super().reset()
full_map_size = self.map_size_cm//self.map_resolution # 480
for agent_id in range(self.num_agents):
mapp, n_rot, n_trans, init_theta = self._get_gt_map(full_map_size, agent_id)
self.explorable_map.append(mapp)
self.n_rot.append(n_rot)
self.n_trans.append(n_trans)
self.init_theta.append(init_theta)
for aa in range(self.num_agents):
for a in range(self.num_agents):
delta_st = self.agent_st[a] - self.agent_st[aa]
delta_rot_mat, delta_trans_mat, delta_n_rot_mat, delta_n_trans_mat =\
get_grid_full(delta_st, (1, 1, self.grid_size, self.grid_size), (1, 1, full_map_size, full_map_size), torch.device("cpu"))
self.agent_n_rot[aa].append(delta_n_rot_mat.numpy())
self.agent_n_trans[aa].append(delta_n_trans_mat.numpy())
self.merge_pred_map = np.zeros_like(self.explorable_map[0])
self.prev_merge_exlored_map = np.zeros_like(self.explorable_map[0])
self.prev_explored_area = [0. for _ in range(self.num_agents)]
self.prev_merge_explored_area = 0
# Preprocess observations
rgb = [obs[agent_id]['rgb'].astype(np.uint8) for agent_id in range(self.num_agents)]
self.obs = rgb # For visualization
if self.args.frame_width != self.args.env_frame_width:
rgb = [np.asarray(self.res(rgb[agent_id])) for agent_id in range(self.num_agents)]
state = [rgb[agent_id].transpose(2, 0, 1) for agent_id in range(self.num_agents)]
depth = [_preprocess_depth(obs[agent_id]['depth']) for agent_id in range(self.num_agents)]
# Initialize map and pose
self.curr_loc = []
self.curr_loc_gt = []
self.last_loc_gt = []
self.last_loc = []
self.last_sim_location = []
for agent_id in range(self.num_agents):
self.mapper[agent_id].reset_map(self.map_size_cm)
self.curr_loc.append([self.map_size_cm/100.0/2.0,
self.map_size_cm/100.0/2.0, 0.])
self.curr_loc_gt.append([self.map_size_cm/100.0/2.0,
self.map_size_cm/100.0/2.0, 0.])
self.last_loc_gt.append([self.map_size_cm/100.0/2.0,
self.map_size_cm/100.0/2.0, 0.])
self.last_loc.append(self.curr_loc[agent_id])
self.last_sim_location.append(self.get_sim_location(agent_id))
# Convert pose to cm and degrees for mapper
mapper_gt_pose = []
for agent_id in range(self.num_agents):
mapper_gt_pose.append(
(self.curr_loc_gt[agent_id][0]*100.0,
self.curr_loc_gt[agent_id][1]*100.0,
np.deg2rad(self.curr_loc_gt[agent_id][2]))
)
fp_proj = []
fp_explored = []
self.map = []
self.explored_map = []
self.current_explored_gt = []
# Update ground_truth map and explored area
for agent_id in range(self.num_agents):
fp_proj_t, map_t, fp_explored_t, explored_map_t, current_explored_gt = \
self.mapper[agent_id].update_map(depth[agent_id], mapper_gt_pose[agent_id])
fp_proj.append(fp_proj_t)
self.map.append(map_t)
fp_explored.append(fp_explored_t)
self.explored_map.append(explored_map_t)
self.current_explored_gt.append(current_explored_gt)
# Initialize variables
self.merge_pred_map = np.zeros_like(self.explorable_map[0])
self.scene_name = self.habitat_env.sim.config.SCENE
self.visited = [np.zeros(self.map[0].shape)
for _ in range(self.num_agents)]
self.visited_vis = [np.zeros(self.map[0].shape)
for _ in range(self.num_agents)]
self.visited_gt = [np.zeros(self.map[0].shape)
for _ in range(self.num_agents)]
self.collison_map = [np.zeros(self.map[0].shape)
for _ in range(self.num_agents)]
self.col_width = [1 for _ in range(self.num_agents)]
# Set info
self.info = {
'time': [],
'fp_proj': [],
'fp_explored': [],
'sensor_pose': [],
'pose_err': [],
}
for agent_id in range(self.num_agents):
self.info['time'].append(self.timestep)
self.info['fp_proj'].append(fp_proj[agent_id])
self.info['fp_explored'].append(fp_explored[agent_id])
self.info['sensor_pose'].append([0., 0., 0.])
self.info['pose_err'].append([0., 0., 0.])
self.info['trans'] = self.n_trans
self.info['rotation'] = self.n_rot
self.info['theta'] = self.init_theta
self.info['agent_trans'] = self.agent_n_trans
self.info['agent_rotation'] = self.agent_n_rot
self.info['explorable_map'] = self.explorable_map
self.info['scene_id'] = self.scene_id
self.save_position()
return state, self.info
def step(self, action):
self.timestep += 1
noisy_action = []
# Action remapping
for agent_id in range(self.num_agents):
if action[agent_id] == 2: # Forward
action[agent_id] = 1
noisy_action.append(habitat.SimulatorActions.NOISY_FORWARD)
elif action[agent_id] == 1: # Right
action[agent_id] = 3
noisy_action.append(habitat.SimulatorActions.NOISY_RIGHT)
elif action[agent_id] == 0: # Left
action[agent_id] = 2
noisy_action.append(habitat.SimulatorActions.NOISY_LEFT)
for agent_id in range(self.num_agents):
self.last_loc[agent_id] = np.copy(self.curr_loc[agent_id])
self.last_loc_gt[agent_id] = np.copy(self.curr_loc_gt[agent_id])
self._previous_action = action
obs = []
rew = []
done = []
info = []
for agent_id in range(self.num_agents):
if self.args.noisy_actions:
obs_t, rew_t, done_t, info_t = super().step(noisy_action[agent_id], agent_id)
else:
obs_t, rew_t, done_t, info_t = super().step(action[agent_id], agent_id)
obs.append(obs_t)
rew.append(rew_t)
done.append(done_t)
info.append(info_t)
# Preprocess observations
rgb = [obs[agent_id]['rgb'].astype(np.uint8) for agent_id in range(self.num_agents)]
self.obs = rgb # For visualization
if self.args.frame_width != self.args.env_frame_width:
rgb = [np.asarray(self.res(rgb[agent_id]))
for agent_id in range(self.num_agents)]
state = [rgb[agent_id].transpose(2, 0, 1) for agent_id in range(self.num_agents)]
depth = [_preprocess_depth(obs[agent_id]['depth']) for agent_id in range(self.num_agents)]
# Get base sensor and ground-truth pose
dx_gt = []
dy_gt = []
do_gt = []
for agent_id in range(self.num_agents):
dx_gt_t, dy_gt_t, do_gt_t = self.get_gt_pose_change(agent_id)
dx_gt.append(dx_gt_t)
dy_gt.append(dy_gt_t)
do_gt.append(do_gt_t)
dx_base = []
dy_base = []
do_base = []
for agent_id in range(self.num_agents):
dx_base_t, dy_base_t, do_base_t = self.get_base_pose_change(
action[agent_id], (dx_gt[agent_id], dy_gt[agent_id], do_gt[agent_id]))
dx_base.append(dx_base_t)
dy_base.append(dy_base_t)
do_base.append(do_base_t)
for agent_id in range(self.num_agents):
self.curr_loc[agent_id] = pu.get_new_pose(self.curr_loc[agent_id],
(dx_base[agent_id], dy_base[agent_id], do_base[agent_id]))
for agent_id in range(self.num_agents):
self.curr_loc_gt[agent_id] = pu.get_new_pose(self.curr_loc_gt[agent_id],
(dx_gt[agent_id], dy_gt[agent_id], do_gt[agent_id]))
if not self.args.noisy_odometry:
self.curr_loc = self.curr_loc_gt
dx_base, dy_base, do_base = dx_gt, dy_gt, do_gt
# Convert pose to cm and degrees for mapper
mapper_gt_pose = []
for agent_id in range(self.num_agents):
mapper_gt_pose.append(
(self.curr_loc_gt[agent_id][0] * 100.0,
self.curr_loc_gt[agent_id][1] * 100.0,
np.deg2rad(self.curr_loc_gt[agent_id][2]))
)
fp_proj = []
fp_explored = []
self.map = []
self.explored_map = []
self.current_explored_gt = []
# Update ground_truth map and explored area
for agent_id in range(self.num_agents):
fp_proj_t, map_t, fp_explored_t, explored_map_t, current_explored_gt = \
self.mapper[agent_id].update_map(depth[agent_id], mapper_gt_pose[agent_id])
fp_proj.append(fp_proj_t)
self.map.append(map_t)
fp_explored.append(fp_explored_t)
self.explored_map.append(explored_map_t)
self.current_explored_gt.append(current_explored_gt)
# Update collision map
for agent_id in range(self.num_agents):
if action[agent_id] == 1:
x1, y1, t1 = self.last_loc[agent_id]
x2, y2, t2 = self.curr_loc[agent_id]
if abs(x1 - x2) < 0.05 and abs(y1 - y2) < 0.05:
self.col_width[agent_id] += 2
self.col_width[agent_id] = min(self.col_width[agent_id], 9)
else:
self.col_width[agent_id] = 1
dist = pu.get_l2_distance(x1, x2, y1, y2)
if dist < self.args.collision_threshold: # Collision
length = 2
width = self.col_width[agent_id]
buf = 3
for i in range(length):
for j in range(width):
wx = x1 + 0.05*((i+buf) * np.cos(np.deg2rad(t1)) +
(j-width//2) * np.sin(np.deg2rad(t1)))
wy = y1 + 0.05*((i+buf) * np.sin(np.deg2rad(t1)) -
(j-width//2) * np.cos(np.deg2rad(t1)))
r, c = wy, wx
r, c = int(r*100/self.map_resolution), \
int(c*100/self.map_resolution)
[r, c] = pu.threshold_poses([r, c],
self.collison_map[agent_id].shape)
self.collison_map[agent_id][r, c] = 1
# Set info
self.info = {
'time': [],
'fp_proj': [],
'fp_explored': [],
'sensor_pose': [],
'pose_err': [],
'explored_reward': [],
'explored_ratio': [],
'merge_explored_reward': 0.0,
'merge_explored_ratio': 0.0,
}
for agent_id in range(self.num_agents):
self.info['time'].append(self.timestep)
self.info['fp_proj'].append(fp_proj[agent_id])
self.info['fp_explored'].append(fp_explored[agent_id])
self.info['sensor_pose'].append([dx_base[agent_id], dy_base[agent_id], do_base[agent_id]])
self.info['pose_err'].append([dx_gt[agent_id] - dx_base[agent_id],
dy_gt[agent_id] - dy_base[agent_id],
do_gt[agent_id] - do_base[agent_id]])
agent_explored_area, agent_explored_ratio, merge_explored_area, merge_explored_ratio, curr_merge_explored_map = self.get_global_reward()
# log step
self.merge_ratio += merge_explored_ratio
if self.merge_ratio >= self.explored_ratio_threshold and self.merge_explored_ratio_step == -1.0:
self.merge_explored_ratio_step = self.timestep
self.info['merge_explored_ratio_step'] = self.timestep
for agent_id in range(self.num_agents):
self.ratio[agent_id] += agent_explored_ratio[agent_id]
if self.ratio[agent_id] >= self.explored_ratio_threshold and self.explored_ratio_step[agent_id] == -1.0:
self.explored_ratio_step[agent_id] = self.timestep
self.info["agent{}_explored_ratio_step".format(agent_id)] = self.timestep
agents_explored_map = np.zeros_like(self.explored_map[0])
self.info['merge_explored_reward'] = merge_explored_area
self.info['merge_explored_ratio'] = merge_explored_ratio
for agent_id in range(self.num_agents):
self.info['explored_reward'].append(agent_explored_area[agent_id])
self.info['explored_ratio'].append(agent_explored_ratio[agent_id])
if self.timestep % self.args.num_local_steps == 0:
agents_explored_map = np.maximum(agents_explored_map, self.transform(self.current_explored_gt[agent_id], agent_id))
if self.timestep % self.args.num_local_steps == 0 and self.merge_ratio < self.explored_ratio_threshold and self.use_repeat_penalty:
self.info['merge_explored_reward'] -= (agents_explored_map[self.prev_merge_exlored_map == 1].sum() * (25./10000) * 0.02)
self.prev_merge_exlored_map = curr_merge_explored_map
self.save_position()
if self.info['time'][0] >= self.args.max_episode_length:
done = [True for _ in range(self.num_agents)]
if self.merge_ratio >= self.explored_ratio_threshold and self.use_complete_reward:
self.info['merge_explored_reward'] += 1.0
if self.args.save_trajectory_data:
self.save_trajectory_data()
else:
done = [False for _ in range(self.num_agents)]
return state, rew, done, self.info
def get_reward_range(self):
# This function is not used, Habitat-RLEnv requires this function
return (0., 1.0)
def get_reward(self, observations, agent_id):
# This function is not used, Habitat-RLEnv requires this function
return 0.
def get_global_reward(self):
agent_explored_rewards = []
agent_explored_ratios = []
# calculate individual reward
curr_merge_explored_map = np.zeros_like(self.explored_map[0]) # global
merge_explorable_map = np.zeros_like(self.explored_map[0]) # global
for agent_id in range(self.num_agents):
curr_agent_explored_map = self.explored_map[agent_id] * self.explorable_map[agent_id]
curr_merge_explored_map = np.maximum(curr_merge_explored_map, self.transform(curr_agent_explored_map, agent_id))
merge_explorable_map = np.maximum(merge_explorable_map, self.transform(self.explorable_map[agent_id], agent_id))
curr_agent_explored_area = curr_agent_explored_map.sum()
agent_explored_reward = (curr_agent_explored_area - self.prev_explored_area[agent_id]) * 1.0
self.prev_explored_area[agent_id] = curr_agent_explored_area
            # convert explored cells to m^2 (25 cm^2 per cell at 5 cm resolution), then apply reward scaling (0.02) and the time-decay factor
            agent_explored_rewards.append(agent_explored_reward * (25./10000) * 0.02 * self.reward_gamma)
reward_scale = self.explorable_map[agent_id].sum()
agent_explored_ratios.append(agent_explored_reward/reward_scale)
# calculate merge reward
curr_merge_explored_area = curr_merge_explored_map.sum()
merge_explored_reward_scale = merge_explorable_map.sum()
merge_explored_reward = (curr_merge_explored_area - self.prev_merge_explored_area) * 1.0
self.prev_merge_explored_area = curr_merge_explored_area
merge_explored_ratio = merge_explored_reward / merge_explored_reward_scale
merge_explored_reward = merge_explored_reward * (25./10000.) * 0.02 * self.reward_gamma
if self.use_time_penalty:
self.reward_gamma *= self.reward_decay
return agent_explored_rewards, agent_explored_ratios, merge_explored_reward, merge_explored_ratio, curr_merge_explored_map
def get_done(self, observations, agent_id):
# This function is not used, Habitat-RLEnv requires this function
return False
def get_info(self, observations, agent_id):
# This function is not used, Habitat-RLEnv requires this function
info = {}
return info
def seed(self, seed):
self._env.seed(seed)
self.rng = np.random.RandomState(seed)
def get_spaces(self):
return self.observation_space, self.action_space
def build_mapper(self):
params = {}
params['frame_width'] = self.args.env_frame_width
params['frame_height'] = self.args.env_frame_height
params['fov'] = self.args.hfov
params['resolution'] = self.map_resolution
params['map_size_cm'] = self.map_size_cm
params['agent_min_z'] = 25
params['agent_max_z'] = 150
params['agent_height'] = self.args.camera_height * 100
params['agent_view_angle'] = 0
params['du_scale'] = self.args.du_scale
params['vision_range'] = self.args.vision_range
params['visualize'] = self.use_render
params['obs_threshold'] = self.args.obs_threshold
params['num_local_steps'] = self.args.num_local_steps
self.selem = skimage.morphology.disk(self.args.obstacle_boundary /
self.map_resolution)
mapper = MapBuilder(params)
return mapper
def get_sim_location(self, agent_id):
agent_state = super().habitat_env.sim.get_agent_state(agent_id)
x = -agent_state.position[2]
y = -agent_state.position[0]
axis = quaternion.as_euler_angles(agent_state.rotation)[0]
if (axis % (2*np.pi)) < 0.1 or (axis % (2*np.pi)) > 2*np.pi - 0.1:
o = quaternion.as_euler_angles(agent_state.rotation)[1]
else:
o = 2*np.pi - quaternion.as_euler_angles(agent_state.rotation)[1]
if o > np.pi:
o -= 2 * np.pi
return x, y, o
def get_gt_pose_change(self, agent_id):
curr_sim_pose = self.get_sim_location(agent_id)
dx, dy, do = pu.get_rel_pose_change(
curr_sim_pose, self.last_sim_location[agent_id])
self.last_sim_location[agent_id] = curr_sim_pose
return dx, dy, do
def get_base_pose_change(self, action, gt_pose_change):
dx_gt, dy_gt, do_gt = gt_pose_change
if action == 1: # Forward
x_err, y_err, o_err = self.sensor_noise_fwd.sample()[0][0]
elif action == 3: # Right
x_err, y_err, o_err = self.sensor_noise_right.sample()[0][0]
elif action == 2: # Left
x_err, y_err, o_err = self.sensor_noise_left.sample()[0][0]
else: # Stop
x_err, y_err, o_err = 0., 0., 0.
x_err = x_err * self.args.noise_level
y_err = y_err * self.args.noise_level
o_err = o_err * self.args.noise_level
return dx_gt + x_err, dy_gt + y_err, do_gt + np.deg2rad(o_err)
def transform(self, inputs, agent_id):
inputs = torch.from_numpy(inputs)
n_rotated = F.grid_sample(inputs.unsqueeze(0).unsqueeze(
0).float(), self.n_rot[agent_id].float(), align_corners=True)
n_map = F.grid_sample(
n_rotated.float(), self.n_trans[agent_id].float(), align_corners=True)
n_map = n_map[0, 0, :, :].numpy()
return n_map
def get_short_term_goal(self, inputs):
args = self.args
self.extrinsic_rew = []
self.intrinsic_rew = []
self.relative_angle = []
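        # Discretise a metric distance into bins of increasing width:
        # 5 cm bins up to 0.25 m, 25 cm bins up to 3 m, 1 m bins up to 10 m.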
def discretize(dist):
dist_limits = [0.25, 3, 10]
dist_bin_size = [0.05, 0.25, 1.]
if dist < dist_limits[0]:
ddist = int(dist/dist_bin_size[0])
elif dist < dist_limits[1]:
ddist = int((dist - dist_limits[0])/dist_bin_size[1]) + \
int(dist_limits[0]/dist_bin_size[0])
elif dist < dist_limits[2]:
ddist = int((dist - dist_limits[1])/dist_bin_size[2]) + \
int(dist_limits[0]/dist_bin_size[0]) + \
int((dist_limits[1] - dist_limits[0])/dist_bin_size[1])
else:
ddist = int(dist_limits[0]/dist_bin_size[0]) + \
int((dist_limits[1] - dist_limits[0])/dist_bin_size[1]) + \
int((dist_limits[2] - dist_limits[1])/dist_bin_size[2])
return ddist
# Get Map prediction
map_pred = inputs['map_pred']
exp_pred = inputs['exp_pred']
output = [np.zeros((args.goals_size + 1))
for _ in range(self.num_agents)]
for agent_id in range(self.num_agents):
grid = np.rint(map_pred[agent_id])
explored = np.rint(exp_pred[agent_id])
# Get pose prediction and global policy planning window
start_x, start_y, start_o, gx1, gx2, gy1, gy2 = inputs['pose_pred'][agent_id]
gx1, gx2, gy1, gy2 = int(gx1), int(gx2), int(gy1), int(gy2)
planning_window = [gx1, gx2, gy1, gy2]
# Get last loc
last_start_x, last_start_y = self.last_loc[agent_id][0], self.last_loc[agent_id][1]
r, c = last_start_y, last_start_x
last_start = [int(r * 100.0/self.map_resolution - gx1),
int(c * 100.0/self.map_resolution - gy1)]
last_start = pu.threshold_poses(last_start, grid.shape)
# Get curr loc
self.curr_loc[agent_id] = [start_x, start_y, start_o]
r, c = start_y, start_x
start = [int(r * 100.0/self.map_resolution - gx1),
int(c * 100.0/self.map_resolution - gy1)]
start = pu.threshold_poses(start, grid.shape)
# TODO: try reducing this
self.visited[agent_id][gx1:gx2, gy1:gy2][start[0]-2:start[0]+3,
start[1]-2:start[1]+3] = 1
steps = 25 # ! wrong
for i in range(steps):
x = int(last_start[0] + (start[0] -
last_start[0]) * (i+1) / steps)
y = int(last_start[1] + (start[1] -
last_start[1]) * (i+1) / steps)
self.visited_vis[agent_id][gx1:gx2, gy1:gy2][x, y] = 1
# Get last loc ground truth pose
last_start_x, last_start_y = self.last_loc_gt[agent_id][0], self.last_loc_gt[agent_id][1]
r, c = last_start_y, last_start_x
last_start = [int(r * 100.0/self.map_resolution),
int(c * 100.0/self.map_resolution)]
last_start = pu.threshold_poses(
last_start, self.visited_gt[agent_id].shape)
# Get ground truth pose
start_x_gt, start_y_gt, start_o_gt = self.curr_loc_gt[agent_id]
r, c = start_y_gt, start_x_gt
start_gt = [int(r * 100.0/self.map_resolution),
int(c * 100.0/self.map_resolution)]
start_gt = pu.threshold_poses(start_gt, self.visited_gt[agent_id].shape)
steps = 25 # ! wrong
for i in range(steps):
x = int(last_start[0] + (start_gt[0] -
last_start[0]) * (i+1) / steps)
y = int(last_start[1] + (start_gt[1] -
last_start[1]) * (i+1) / steps)
self.visited_gt[agent_id][x, y] = 1
# Get goal
goal = inputs['goal'][agent_id]
goal = pu.threshold_poses(goal, grid.shape)
# Get intrinsic reward for global policy
# Negative reward for exploring explored areas i.e.
# for choosing explored cell as long-term goal
self.extrinsic_rew.append(-pu.get_l2_distance(10, goal[0], 10, goal[1]))
self.intrinsic_rew.append(-exp_pred[agent_id][goal[0], goal[1]])
# Get short-term goal
stg = self._get_stg(grid, explored, start, np.copy(goal), planning_window, agent_id)
# Find GT action
if self.args.use_eval or self.args.use_render or not self.args.train_local:
gt_action = 0
else:
gt_action = self._get_gt_action(1 - self.explorable_map[agent_id], start,
[int(stg[0]), int(stg[1])],
planning_window, start_o, agent_id)
(stg_x, stg_y) = stg
relative_dist = pu.get_l2_distance(stg_x, start[0], stg_y, start[1])
relative_dist = relative_dist*5./100.
angle_st_goal = math.degrees(math.atan2(stg_x - start[0],
stg_y - start[1]))
angle_agent = (start_o) % 360.0
if angle_agent > 180:
angle_agent -= 360
relative_angle = (angle_agent - angle_st_goal) % 360.0
if relative_angle > 180:
relative_angle -= 360
output[agent_id][0] = int((relative_angle % 360.)/5.)
output[agent_id][1] = discretize(relative_dist)
output[agent_id][2] = gt_action
self.relative_angle.append(relative_angle)
if self.use_render:
gif_dir = '{}/gifs/{}/episode_{}/all/'.format(self.run_dir, self.scene_id, self.episode_no)
if not os.path.exists(gif_dir):
os.makedirs(gif_dir)
self.render(inputs, grid, map_pred, gif_dir)
if self.render_merge:
gif_dir = '{}/gifs/{}/episode_{}/merge/'.format(self.run_dir, self.scene_id, self.episode_no)
if not os.path.exists(gif_dir):
os.makedirs(gif_dir)
self.render_merged_map(inputs, grid, map_pred, gif_dir)
return output
def _get_gt_map(self, full_map_size, agent_id):
self.scene_name = self.habitat_env.sim.config.SCENE
# logger.error('Computing map for %s', self.scene_name)
# Get map in habitat simulator coordinates
self.map_obj = HabitatMaps(self.habitat_env)
if self.map_obj.size[0] < 1 or self.map_obj.size[1] < 1:
logger.error("Invalid map: {}/{}".format(self.scene_name, self.episode_no))
return None
print(self._env.sim.get_agent_state(agent_id).position.tolist())
agent_y = self._env.sim.get_agent_state(agent_id).position.tolist()[1]*100. # cm
if self.use_restrict_map:
sim_map = self.map_obj.get_restrict_map(agent_y, -50., 50.0)
else:
sim_map = self.map_obj.get_map()
sim_map[sim_map > 0] = 1.
# Transform the map to align with the agent
min_x, min_y = self.map_obj.origin/100.0
x, y, o = self.get_sim_location(agent_id)
x, y = -x - min_x, -y - min_y
range_x, range_y = self.map_obj.max/100. - self.map_obj.origin/100.
map_size = sim_map.shape
scale = 2.
self.grid_size = int(scale*max(map_size))
grid_map = np.zeros((self.grid_size, self.grid_size))
grid_map[(self.grid_size - map_size[0])//2:
(self.grid_size - map_size[0])//2 + map_size[0],
(self.grid_size - map_size[1])//2:
(self.grid_size - map_size[1])//2 + map_size[1]] = sim_map
if map_size[0] > map_size[1]:
self.agent_st.append(torch.tensor([[
(x - range_x/2.) * 2. / (range_x * scale) \
* map_size[1] * 1. / map_size[0],
(y - range_y/2.) * 2. / (range_y * scale),
180.0 + np.rad2deg(o)
]]))
else:
self.agent_st.append(torch.tensor([[
(x - range_x/2.) * 2. / (range_x * scale),
(y - range_y/2.) * 2. / (range_y * scale)
* map_size[0] * 1. / map_size[1],
180.0 + np.rad2deg(o)
]]))
rot_mat, trans_mat, n_rot_mat, n_trans_mat = get_grid_full(self.agent_st[agent_id], (1, 1,
self.grid_size, self.grid_size), (1, 1,
full_map_size, full_map_size), torch.device("cpu"))
grid_map = torch.from_numpy(grid_map).float()
grid_map = grid_map.unsqueeze(0).unsqueeze(0)
translated = F.grid_sample(grid_map, trans_mat, align_corners=True)
rotated = F.grid_sample(translated, rot_mat, align_corners=True)
episode_map = torch.zeros((full_map_size, full_map_size)).float()
if full_map_size > self.grid_size:
episode_map[(full_map_size - self.grid_size)//2:
(full_map_size - self.grid_size)//2 + self.grid_size,
(full_map_size - self.grid_size)//2:
(full_map_size - self.grid_size)//2 + self.grid_size] = \
rotated[0, 0]
else:
episode_map = rotated[0, 0,
(self.grid_size - full_map_size)//2:
(self.grid_size - full_map_size)//2 + full_map_size,
(self.grid_size - full_map_size)//2:
(self.grid_size - full_map_size)//2 + full_map_size]
episode_map = episode_map.numpy()
episode_map[episode_map > 0] = 1.
return episode_map, n_rot_mat, n_trans_mat, 180.0 + np.rad2deg(o)
def _get_stg(self, grid, explored, start, goal, planning_window, agent_id):
[gx1, gx2, gy1, gy2] = planning_window
x1 = min(start[0], goal[0])
x2 = max(start[0], goal[0])
y1 = min(start[1], goal[1])
y2 = max(start[1], goal[1])
dist = pu.get_l2_distance(goal[0], start[0], goal[1], start[1])
buf = max(20., dist)
x1 = max(1, int(x1 - buf))
x2 = min(grid.shape[0]-1, int(x2 + buf))
y1 = max(1, int(y1 - buf))
y2 = min(grid.shape[1]-1, int(y2 + buf))
rows = explored.sum(1)
rows[rows > 0] = 1
ex1 = np.argmax(rows)
ex2 = len(rows) - np.argmax(np.flip(rows))
cols = explored.sum(0)
cols[cols > 0] = 1
ey1 = np.argmax(cols)
ey2 = len(cols) - np.argmax(np.flip(cols))
ex1 = min(int(start[0]) - 2, ex1)
ex2 = max(int(start[0]) + 2, ex2)
ey1 = min(int(start[1]) - 2, ey1)
ey2 = max(int(start[1]) + 2, ey2)
x1 = max(x1, ex1)
x2 = min(x2, ex2)
y1 = max(y1, ey1)
y2 = min(y2, ey2)
traversible = skimage.morphology.binary_dilation(
grid[x1:x2, y1:y2],
self.selem) != True
traversible[self.collison_map[agent_id]
[gx1:gx2, gy1:gy2][x1:x2, y1:y2] == 1] = 0
traversible[self.visited[agent_id]
[gx1:gx2, gy1:gy2][x1:x2, y1:y2] == 1] = 1
traversible[int(start[0]-x1)-1:int(start[0]-x1)+2,
int(start[1]-y1)-1:int(start[1]-y1)+2] = 1
if goal[0]-2 > x1 and goal[0]+3 < x2\
and goal[1]-2 > y1 and goal[1]+3 < y2:
traversible[int(goal[0]-x1)-2:int(goal[0]-x1)+3,
int(goal[1]-y1)-2:int(goal[1]-y1)+3] = 1
else:
goal[0] = min(max(x1, goal[0]), x2)
goal[1] = min(max(y1, goal[1]), y2)
def add_boundary(mat):
h, w = mat.shape
new_mat = np.ones((h+2, w+2))
new_mat[1:h+1, 1:w+1] = mat
return new_mat
traversible = add_boundary(traversible)
planner = FMMPlanner(traversible, 360//self.dt)
reachable = planner.set_goal([goal[1]-y1+1, goal[0]-x1+1])
stg_x, stg_y = start[0] - x1 + 1, start[1] - y1 + 1
for i in range(self.args.short_goal_dist):
stg_x, stg_y, replan = planner.get_short_term_goal([stg_x, stg_y])
if replan:
stg_x, stg_y = start[0], start[1]
else:
stg_x, stg_y = stg_x + x1 - 1, stg_y + y1 - 1
return (stg_x, stg_y)
def _get_gt_action(self, grid, start, goal, planning_window, start_o, agent_id):
[gx1, gx2, gy1, gy2] = planning_window
x1 = min(start[0], goal[0])
x2 = max(start[0], goal[0])
y1 = min(start[1], goal[1])
y2 = max(start[1], goal[1])
dist = pu.get_l2_distance(goal[0], start[0], goal[1], start[1])
buf = max(5., dist)
x1 = max(0, int(x1 - buf))
x2 = min(grid.shape[0], int(x2 + buf))
y1 = max(0, int(y1 - buf))
y2 = min(grid.shape[1], int(y2 + buf))
path_found = False
goal_r = 0
while not path_found:
traversible = skimage.morphology.binary_dilation(
grid[gx1:gx2, gy1:gy2][x1:x2, y1:y2],
self.selem) != True
traversible[self.visited[agent_id]
[gx1:gx2, gy1:gy2][x1:x2, y1:y2] == 1] = 1
traversible[int(start[0]-x1)-1:int(start[0]-x1)+2,
int(start[1]-y1)-1:int(start[1]-y1)+2] = 1
traversible[int(goal[0]-x1)-goal_r:int(goal[0]-x1)+goal_r+1,
int(goal[1]-y1)-goal_r:int(goal[1]-y1)+goal_r+1] = 1
scale = 1
planner = FMMPlanner(traversible, 360//self.dt, scale)
reachable = planner.set_goal([goal[1]-y1, goal[0]-x1])
stg_x_gt, stg_y_gt = start[0] - x1, start[1] - y1
for i in range(1):
stg_x_gt, stg_y_gt, replan = \
planner.get_short_term_goal([stg_x_gt, stg_y_gt])
if replan and buf < 100.:
buf = 2*buf
x1 = max(0, int(x1 - buf))
x2 = min(grid.shape[0], int(x2 + buf))
y1 = max(0, int(y1 - buf))
y2 = min(grid.shape[1], int(y2 + buf))
elif replan and goal_r < 50:
goal_r += 1
else:
path_found = True
stg_x_gt, stg_y_gt = stg_x_gt + x1, stg_y_gt + y1
angle_st_goal = math.degrees(math.atan2(stg_x_gt - start[0],
stg_y_gt - start[1]))
angle_agent = (start_o) % 360.0
if angle_agent > 180:
angle_agent -= 360
relative_angle = (angle_agent - angle_st_goal) % 360.0
if relative_angle > 180:
relative_angle -= 360
if relative_angle > 15.:
gt_action = 1
elif relative_angle < -15.:
gt_action = 0
else:
gt_action = 2
return gt_action
def render(self, inputs, grid, map_pred, gif_dir):
for agent_id in range(self.num_agents):
goal = inputs['goal'][agent_id]
goal = pu.threshold_poses(goal, grid.shape)
start_x, start_y, start_o, gx1, gx2, gy1, gy2 = inputs['pose_pred'][agent_id]
gx1, gx2, gy1, gy2 = int(gx1), int(gx2), int(gy1), int(gy2)
start_x_gt, start_y_gt, start_o_gt = self.curr_loc_gt[agent_id]
# predicted map and pose
vis_grid_local = vu.get_colored_map(np.rint(map_pred[agent_id]),
self.collison_map[agent_id][gx1:gx2, gy1:gy2],
self.visited_vis[agent_id][gx1:gx2, gy1:gy2],
self.visited_gt[agent_id][gx1:gx2, gy1:gy2],
[goal],
self.explored_map[agent_id][gx1:gx2, gy1:gy2],
self.explorable_map[agent_id][gx1:gx2, gy1:gy2],
self.map[agent_id][gx1:gx2, gy1:gy2] *
self.explored_map[agent_id][gx1:gx2, gy1:gy2])
vis_grid_local = np.flipud(vis_grid_local)
pos_local = (start_x - gy1 * self.map_resolution/100.0,
start_y - gx1 * self.map_resolution/100.0,
start_o)
pos_gt_local = (start_x_gt - gy1 * self.map_resolution/100.0,
start_y_gt - gx1 * self.map_resolution/100.0,
start_o_gt)
# ground truth map and pose
vis_grid_gt = vu.get_colored_map(self.map[agent_id],
self.collison_map[agent_id],
self.visited_gt[agent_id],
self.visited_gt[agent_id],
[(goal[0] + gx1,
goal[1] + gy1)],
self.explored_map[agent_id],
self.explorable_map[agent_id],
self.map[agent_id]*self.explored_map[agent_id])
vis_grid_gt = np.flipud(vis_grid_gt)
pos = (start_x, start_y, start_o)
pos_gt = (start_x_gt, start_y_gt, start_o_gt)
ax = self.ax[agent_id] if self.num_agents > 1 else self.ax
vu.visualize_all(agent_id, self.figure, ax,
self.obs[agent_id],
vis_grid_local[:, :, ::-1],
vis_grid_gt[:, :, ::-1],
pos_local,
pos_gt_local,
pos,
pos_gt,
gif_dir,
self.timestep,
self.use_render, self.save_gifs)
def render_merged_map(self, inputs, grid, map_pred, gif_dir):
merge_map = np.zeros_like(self.explored_map[0])
merge_collision_map = np.zeros_like(self.explored_map[0])
merge_visited_gt = np.zeros_like(self.explored_map[0])
merge_visited_vis = np.zeros_like(self.explored_map[0])
merge_explored_map = np.zeros_like(self.explored_map[0])
merge_explorable_map = np.zeros_like(self.explored_map[0])
merge_gt_explored = np.zeros_like(self.explored_map[0])
all_pos = []
all_pos_gt = []
all_goals = []
for agent_id in range(self.num_agents):
start_x, start_y, start_o, gx1, gx2, gy1, gy2 = inputs['pose_pred'][agent_id]
gx1, gx2, gy1, gy2 = int(gx1), int(gx2), int(gy1), int(gy2)
goal = inputs['goal'][agent_id]
goal = pu.threshold_poses(goal, grid.shape)
start_x_gt, start_y_gt, start_o_gt = self.curr_loc_gt[agent_id]
pos_map = np.zeros_like(self.explored_map[0])
pos_gt_map = np.zeros_like(self.explored_map[0])
goal_map = np.zeros_like(self.explored_map[0])
pos_map[int(start_y * 100.0/5.0), int(start_x * 100.0/5.0)] = 1
pos_gt_map[int(start_y_gt * 100.0/5.0), int(start_x_gt * 100.0/5.0)] = 1
goal_map[int(goal[0] + gx1), int(goal[1] + gy1)] = 1
pos_map = self.transform(pos_map, agent_id)
pos_gt_map = self.transform(pos_gt_map, agent_id)
goal_map = self.transform(goal_map, agent_id)
(index_b, index_a) = np.unravel_index(np.argmax(pos_map, axis=None), pos_map.shape)
(index_gt_b, index_gt_a) = np.unravel_index(np.argmax(pos_gt_map, axis=None), pos_gt_map.shape)
(index_goal_a, index_goal_b) = np.unravel_index(np.argmax(goal_map, axis=None), goal_map.shape)
pos = (index_a * 5.0/100.0, index_b * 5.0/100.0, start_o + self.init_theta[agent_id])
pos_gt = (index_gt_a * 5.0/100.0, index_gt_b * 5.0/100.0, start_o_gt + self.init_theta[agent_id])
goal = (index_goal_a, index_goal_b, 0)
all_pos.append(pos)
all_pos_gt.append(pos_gt)
all_goals.append(goal)
pred_map = np.zeros_like(self.explored_map[0])
pred_map[gx1:gx2, gy1:gy2]= np.rint(map_pred[agent_id])
self.merge_pred_map = np.maximum(self.merge_pred_map, self.transform(pred_map, agent_id))
merge_map = np.maximum(merge_map, self.transform(self.map[agent_id], agent_id))
merge_visited_gt = np.maximum(merge_visited_gt, self.transform(self.visited_gt[agent_id], agent_id))
merge_visited_vis = np.maximum(merge_visited_vis, self.transform(self.visited_vis[agent_id], agent_id))
merge_collision_map[self.transform(self.collison_map[agent_id], agent_id) == 1] = 1
merge_explorable_map[self.transform(self.explorable_map[agent_id], agent_id) == 1] = 1
merge_explored_map = np.maximum(merge_explored_map, self.transform(self.explored_map[agent_id], agent_id))
merge_gt_explored = np.maximum(merge_gt_explored, self.transform(self.map[agent_id] * self.explored_map[agent_id], agent_id))
vis_grid_gt = vu.get_colored_map(merge_map,
merge_collision_map,
merge_visited_gt,
merge_visited_gt,
all_goals,
merge_explored_map,
merge_explorable_map,
merge_gt_explored)
vis_grid_pred = vu.get_colored_map(self.merge_pred_map,
merge_collision_map,
merge_visited_vis,
merge_visited_gt,
all_goals,
merge_explored_map,
merge_explorable_map,
merge_gt_explored)
vis_grid_gt = np.flipud(vis_grid_gt)
vis_grid_pred = np.flipud(vis_grid_pred)
vu.visualize_map(self.figure_m, self.ax_m, vis_grid_gt[:, :, ::-1], vis_grid_pred[:, :, ::-1],
all_pos_gt, all_pos, gif_dir,
self.timestep,
self.use_render,
self.save_gifs) | python |
from django.apps import AppConfig
class StandardizingApiConfig(AppConfig):
name = 'standardizing_api'
| python |
# Generated by Django 3.2.9 on 2021-11-28 04:44
from django.db import migrations, models
import tinymce.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Place',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, verbose_name='Заголовок')),
('description_short', models.TextField(verbose_name='Краткое описание')),
('description_long', tinymce.models.HTMLField(verbose_name='Полное описание')),
('lng', models.FloatField(verbose_name='Долгота')),
('lat', models.FloatField(verbose_name='Широта')),
],
),
]
| python |
import torch
import torch.nn as nn
from graphgallery.nn.layers.pytorch import GCNConv, Sequential, activations, InnerProductDecoder
class GAE(nn.Module):
def __init__(self,
in_features,
*,
out_features=16,
hids=[32],
acts=['relu'],
dropout=0.,
bias=False):
super().__init__()
encoder = []
encoder.append(nn.Dropout(dropout))
for hid, act in zip(hids, acts):
encoder.append(GCNConv(in_features,
hid,
bias=bias))
encoder.append(activations.get(act))
encoder.append(nn.Dropout(dropout))
in_features = hid
encoder.append(GCNConv(in_features, out_features, bias=bias))
encoder = Sequential(*encoder)
self.encoder = encoder
self.decoder = InnerProductDecoder()
def forward(self, x, adj):
z = self.encoder(x, adj)
return z
class VGAE(nn.Module):
def __init__(self,
in_features,
*,
out_features=16,
hids=[32],
acts=['relu'],
dropout=0.,
bias=False):
super().__init__()
conv = []
conv.append(nn.Dropout(dropout))
for hid, act in zip(hids, acts):
conv.append(GCNConv(in_features,
hid,
bias=bias))
conv.append(activations.get(act))
conv.append(nn.Dropout(dropout))
in_features = hid
self.mu_conv = GCNConv(in_features, out_features, bias=bias)
self.logstd_conv = GCNConv(in_features, out_features, bias=bias)
self.conv = Sequential(*conv)
self.decoder = InnerProductDecoder()
def forward(self, x, adj):
h = self.conv(x, adj)
mu = self.mu_conv(h, adj)
if self.training:
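            # Reparameterization trick: z = mu + eps * exp(logstd), keeping the sampling step differentiable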
logstd = self.logstd_conv(h, adj)
std = torch.exp(logstd)
eps = torch.randn_like(std)
z = eps.mul(std).add_(mu)
return z, mu, logstd
else:
return mu
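# Minimal usage sketch (hypothetical tensor shapes; assumes `adj` is already in whatever
# normalized adjacency format GCNConv expects):
#   model = VGAE(in_features=x.size(1))
#   z, mu, logstd = model(x, adj)   # training mode: sampled latent plus distribution parameters
#   emb = model.eval()(x, adj)      # eval mode: returns mu only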
| python |
"""
TODAS AS QUESTÕES SENDO COMPUTADAS BEM COMO AS SUAS ALTERNATIVAS
E A SUA DEVIDA RESPOSTA CORRETA. DICIONÁRIO EM PYTHON.
"""
questionsX = {
'Pergunta 1': {
'pergunta': 'Qual é o século que ocorreu o período chamado iluminismo, o século das luzes?',
'alternativas': {'a': 'XIX -> Século 19', 'b': 'XVI -> Século 16', 'c': 'XVIII -> Século 18', 'd': 'XV -> Século 15'},
'resposta_correta': 'c',
},
'Pergunta 2': {
'pergunta': 'Quem é considerado o rei do futebol?',
'alternativas': {'a': 'Pelé', 'b': 'Zico', 'c': 'Cruijff', 'd': 'Beckenbauer'},
'resposta_correta': 'a',
},
'Pergunta 3': {
'pergunta': 'Qual é o país que lembra o formato de uma bota no mapa?',
'alternativas': {'a': 'Espanha', 'b': 'Itália', 'c': 'Brasil', 'd': 'Portugal'},
'resposta_correta': 'b',
},
'Pergunta 4': {
'pergunta': 'Onde é a capital do Canadá?',
'alternativas': {'a': 'Toronto', 'b': 'Vancouver', 'c': 'Alberta', 'd': 'Ottawa'},
'resposta_correta': 'd',
},
'Pergunta 5': {
'pergunta': 'Quem é conhecido por ser o inventor da lâmpada?',
'alternativas': {'a': 'Albert Einstein', 'b': 'Thomas Edison', 'c': 'Isaac Newton', 'd': 'Charles Darwin'},
'resposta_correta': 'b',
},
}
questionsY = {
'Pergunta 1': {
'pergunta': 'Quem é o grande nome na história da Microsoft?',
'alternativas': {'a': 'Bill Gates', 'b': 'Steve Jobs', 'c': 'Jeff Bezos', 'd': 'Elon Musk'},
'resposta_correta': 'a',
},
'Pergunta 2': {
'pergunta': 'Na série The Office (USA), qual é o nome do personagem da área de Relações Humanas?',
'alternativas': {'a': 'Kevin Malone', 'b': 'Andy Bernard', 'c': 'Kelly Kapoor', 'd': 'Toby Flenderson'},
'resposta_correta': 'd',
},
'Pergunta 3': {
'pergunta': 'A famosa grande barreira de coral fica situada próximo de qual região?',
'alternativas': {'a': 'Haiti', 'b': 'México', 'c': 'Austrália', 'd': 'Madagascar'},
'resposta_correta': 'c',
},
'Pergunta 4': {
'pergunta': 'Quem foi o aluno que morreu dentro da literatura Harry Potter - Cálice de fogo, durante o torneio tribruxo?',
'alternativas': {'a': 'Cedrico Diggory', 'b': 'Neville Longbottom', 'c': 'Rony Weasley', 'd': 'Cho Chang'},
'resposta_correta': 'a',
},
'Pergunta 5': {
'pergunta': 'Quem é o grande líder da Amazon?',
'alternativas': {'a': 'Steve Ballmer', 'b': 'Jeff Bezos', 'c': 'Jack Dorsey', 'd': 'Mark Zuckerberg'},
'resposta_correta': 'b',
},
}
questionsW = {
'Pergunta 1': {
'pergunta': 'Qual desses países não interliga alguma fronteira com o Brasil? Considerando a América do Sul.',
'alternativas': {'a': 'Peru', 'b': 'Bolívia', 'c': 'Chile', 'd': 'Uruguai'},
'resposta_correta': 'c',
},
'Pergunta 2': {
'pergunta': 'Qual é o nome daquele clássico bicho verde em Star Wars?',
'alternativas': {'a': 'Capitão fantástico', 'b': 'Hulk', 'c': 'Barney', 'd': 'Yoda'},
'resposta_correta': 'd',
},
'Pergunta 3': {
'pergunta': 'Qual é o país mais populoso do planeta?',
'alternativas': {'a': 'Estados Unidos', 'b': 'Índia', 'c': 'China', 'd': 'Rússia'},
'resposta_correta': 'c',
},
'Pergunta 4': {
'pergunta': 'Roma fica em qual país?',
'alternativas': {'a': 'Itália', 'b': 'França', 'c': 'Suécia', 'd': 'Inglaterra'},
'resposta_correta': 'a',
},
'Pergunta 5': {
'pergunta': 'Cristiano Ronaldo é um atleta profissional de qual esporte?',
'alternativas': {'a': 'Tênis', 'b': 'Futebol', 'c': 'Beisebol', 'd': 'Basquetebol'},
'resposta_correta': 'b',
},
}
THEFINAL = {
'Pergunta 1': {
'pergunta': 'Qual é a empresa que está causando o maior impacto na educação do país?',
'alternativas': {'a': 'Latam', 'b': 'Razer', 'c': 'Jovens Gênios', 'd': 'Unilever'},
'resposta_correta': 'c',
},
}
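# Hedged usage sketch (added for illustration): one possible way to run a round
# of questions from one of the dictionaries above. The helper below is an
# assumption, not part of the original quiz program.
def ask_round(questions):
    score = 0
    for key, question in questions.items():
        print(f"{key}: {question['pergunta']}")
        for letter, option in question['alternativas'].items():
            print(f"  {letter}) {option}")
        answer = input("Resposta: ").strip().lower()
        if answer == question['resposta_correta']:
            score += 1
    return score
if __name__ == '__main__':
    print(f"Acertos: {ask_round(questionsX)} de {len(questionsX)}")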
| python |
# flag = 'r2con{Sit down next to my friendLight matchStay}'
var_60h = 0xDEADBEEFDEADBEEFCAFE1337CAFE13370102030405060708090A.to_bytes(26, 'big')
var_40h = 0xDEADBEEFCAFE13371337CAFE133713370102030405060708090A.to_bytes(26, 'little')
First_arr = [ 0x97, 0xCD, 0xD2, 0xD6, 0xC0, 0xC7, 0xCD, 0x84, 0xEC, 0x91, 0xAD, 0x62, 0xF5, 0xF1, 0x65, 0x22, 0x58, 0x82, 0xB1, 0x37, 0x61, 0x3E, 0x5D, 0x2B, 0x14, 0x4C ]
Second_arr = [ 0x9C, 0xCD, 0xE1, 0x8E, 0xB0, 0x92, 0xD7, 0x91, 0xC0, 0x9E, 0xB2 ]
Third_arr = [ 0x97, 0xE2, 0xE7, 0x9D ]
print('r2con{', end='')
for arr in (First_arr, Second_arr, Third_arr):
    for i in range(len(arr)):
        print(chr(((arr[i] - var_40h[i]) ^ var_60h[i]) & 0xff), end='')
print('}')
| python |
import tensorflow as tf
from absl import flags, app
from libs.inference import YoloInf
from libs.evals.coco import GetCocoEval
FLAGS = flags.FLAGS
flags.DEFINE_string('ckpt', default=None, help='Checkpoint file path')
flags.DEFINE_string('img_prefix', default=None, help='Image directory path to evaluate', short_name='i')
flags.DEFINE_string('coco_gt', default=None, help='COCO GT file path', short_name='g')
flags.DEFINE_float('conf_thr', default=0.05, help='Inference confidence threshold')
flags.DEFINE_list('img_exts', default=['.png', '.jpg', '.jpeg'], help='Image extensions')
flags.mark_flag_as_required('ckpt')
flags.mark_flag_as_required('img_prefix')
flags.mark_flag_as_required('coco_gt')
flags.mark_flag_as_required('conf_thr')
flags.mark_flag_as_required('img_exts')
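# Hedged example invocation (added; the script name and paths below are illustrative only):
#   python eval_coco.py \
#       --ckpt checkpoints/yolo-ckpt \
#       --img_prefix data/val2017 \
#       --coco_gt data/annotations/instances_val2017.json \
#       --conf_thr 0.05 \
#       --img_exts .jpg,.png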
# Save some gpu memories
physical_devices = tf.config.list_physical_devices('GPU')
for physical_device in physical_devices:
tf.config.experimental.set_memory_growth(device=physical_device, enable=True)
def main(_argv):
yolo_inf = YoloInf(ckpt_path=FLAGS.ckpt)
coco_eval = GetCocoEval(
img_prefix=FLAGS.img_prefix,
coco_gt_path=FLAGS.coco_gt,
yolo_inf=yolo_inf,
conf_thr=FLAGS.conf_thr,
img_exts=FLAGS.img_exts,
)
coco_eval.get(verbose=True)
if __name__ == '__main__':
app.run(main)
| python |
import torch
import torch.nn as nn
import numpy as np
import sys
sys.path.append('..')
from networks import HSwish, HSigmoid, Swish, Sigmoid
def compute_memory(module, inp, out):
if isinstance(module, (nn.ReLU, nn.ReLU6, nn.ELU, nn.LeakyReLU)):
return compute_ReLU_memory(module, inp, out)
elif isinstance(module, nn.PReLU):
return compute_PReLU_memory(module, inp, out)
elif isinstance(module, (Sigmoid, HSigmoid)):
return compute_Sigmoid_memory(module, inp, out)
elif isinstance(module, (Swish, HSwish)):
return compute_Swish_memory(module, inp, out)
elif isinstance(module, nn.Conv2d):
return compute_Conv2d_memory(module, inp, out)
elif isinstance(module, nn.ConvTranspose2d):
return compute_ConvTranspose2d_memory(module, inp, out)
elif isinstance(module, nn.BatchNorm2d):
return compute_BatchNorm2d_memory(module, inp, out)
elif isinstance(module, nn.Linear):
return compute_Linear_memory(module, inp, out)
elif isinstance(module, (
nn.AvgPool2d, nn.MaxPool2d, nn.AdaptiveAvgPool2d,
nn.AdaptiveMaxPool2d)):
return compute_Pool2d_memory(module, inp, out)
else:
print("[Memory]: {} is not supported!".format(type(module).__name__))
return 0, 0
def num_params(module):
    return sum(p.numel() for p in module.parameters() if p.requires_grad)  # counts only trainable parameters (requires_grad=True)
def compute_ReLU_memory(module, inp, out):
assert isinstance(module, (nn.ReLU, nn.ReLU6, nn.ELU, nn.LeakyReLU))
batch_size = inp.size()[0]
mread = batch_size * inp.size()[1:].numel()
mwrite = batch_size * inp.size()[1:].numel()
return (mread, mwrite)
def compute_PReLU_memory(module, inp, out):
assert isinstance(module, (nn.PReLU))
batch_size = inp.size()[0]
mread = batch_size * (inp.size()[1:].numel() + num_params(module))
mwrite = batch_size * inp.size()[1:].numel()
return (mread, mwrite)
def compute_Sigmoid_memory(module, inp, out):
assert isinstance(module, (Sigmoid, HSigmoid))
batch_size = inp.size()[0]
mread = batch_size * inp.size()[1:].numel()
mwrite = batch_size * inp.size()[1:].numel()
return (mread, mwrite)
def compute_Swish_memory(module, inp, out):
assert isinstance(module, (Swish, HSwish))
batch_size = inp.size()[0]
mread = batch_size * (inp.size()[1:].numel() + inp.size()[1:].numel())
mwrite = batch_size * inp.size()[1:].numel()
return (mread, mwrite)
def compute_Conv2d_memory(module, inp, out):
assert isinstance(module, nn.Conv2d)
assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())
batch_size = inp.size()[0]
in_c = inp.size()[1]
out_c, out_h, out_w = out.size()[1:]
# This includes weighs with bias if the module contains it.
mread = batch_size * (inp.size()[1:].numel() + num_params(module))
mwrite = batch_size * out_c * out_h * out_w
return (mread, mwrite)
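# Hedged worked example (added): for nn.Conv2d(3, 16, 3) on a (1, 3, 32, 32) input,
# num_params = 16*3*3*3 + 16 = 448, so mread = 1 * (3*32*32 + 448) = 3520 elements
# and mwrite = 1 * 16 * 30 * 30 = 14400 elements (counts are tensor elements, not bytes).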
def compute_ConvTranspose2d_memory(module, inp, out):
assert isinstance(module, nn.ConvTranspose2d)
assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())
batch_size = inp.size()[0]
in_c = inp.size()[1]
out_c, out_h, out_w = out.size()[1:]
# This includes weighs with bias if the module contains it.
mread = batch_size * (inp.size()[1:].numel() + num_params(module))
mwrite = batch_size * out_c * out_h * out_w
return (mread, mwrite)
def compute_BatchNorm2d_memory(module, inp, out):
assert isinstance(module, nn.BatchNorm2d)
assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())
batch_size, in_c, in_h, in_w = inp.size()
mread = batch_size * (inp.size()[1:].numel() + 2 * in_c)
mwrite = inp.size().numel()
return (mread, mwrite)
def compute_Linear_memory(module, inp, out):
assert isinstance(module, nn.Linear)
assert len(inp.size()) == 2 and len(out.size()) == 2
batch_size = inp.size()[0]
mread = batch_size * (inp.size()[1:].numel() + num_params(module))
mwrite = out.size().numel()
return (mread, mwrite)
def compute_Pool2d_memory(module, inp, out):
assert isinstance(module, (
nn.MaxPool2d, nn.AvgPool2d, nn.AdaptiveAvgPool2d, nn.AdaptiveMaxPool2d))
assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())
batch_size = inp.size()[0]
mread = batch_size * inp.size()[1:].numel()
mwrite = batch_size * out.size()[1:].numel()
return (mread, mwrite)
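# Hedged usage sketch (added for illustration; the demo model and the hook-based
# wiring below are assumptions, not part of the original module).
# compute_memory(module, inp, out) returns a (reads, writes) tuple counted in
# tensor elements, so it is convenient to call it from forward hooks.
def profile_memory(model, x):
    totals = []
    def hook(module, inp, out):
        totals.append(compute_memory(module, inp[0], out))
    # Only attach hooks to leaf modules so containers are not double counted.
    handles = [m.register_forward_hook(hook)
               for m in model.modules() if len(list(m.children())) == 0]
    with torch.no_grad():
        model(x)
    for h in handles:
        h.remove()
    reads = sum(r for r, _ in totals)
    writes = sum(w for _, w in totals)
    return reads, writes
if __name__ == '__main__':
    demo = nn.Sequential(nn.Conv2d(3, 16, 3), nn.ReLU(), nn.BatchNorm2d(16))
    print(profile_memory(demo, torch.randn(1, 3, 32, 32)))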
| python |
#!/usr/bin/env python
# =============================================================================
# MODULE DOCSTRING
# =============================================================================
"""
This script is used to generate graphs from SMILES for the D-GIN publication.
"""
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
import logging
log = logging.getLogger(__name__)
import random
import os
from itertools import repeat
from multiprocessing import Pool
from functools import partial
from pathlib import Path
import argparse
import datetime
import graph_networks
from graph_networks.AtomGraph import AtomGraph
from graph_networks.utilities import readChemblXls, CDPLmolFromSmiles, pickleGraphs, LOG_LEVELS
# =============================================================================
# GLOBAL FIELDS
# =============================================================================
PROJECT_PATH = Path(os.path.dirname(graph_networks.__file__)).parent.absolute()
# =============================================================================
# Methods
# =============================================================================
def multi_threading(data_combined,featurization):
'''
PRIVATE METHOD
    method for the pool instance used in various scripts (e.g. during graph generation). \n
Input \n
data_combined (tuple): tuple of two lists: first is a list of the data (name,smiles,properties), \n
second is a list of property names. \n
Returns: \n
(AtomGraph): graph instances of the molecule.
'''
try:
property_names = data_combined[-1]
data = data_combined[:-1]
indices = [i for i, x in enumerate(data) if x == '']
mol = CDPLmolFromSmiles(data[1],False)
if mol is None:
logging.debug("Could not process "+str(data[0])+" "+str(data[2])+" because of its multi comp!")
return None
graph = AtomGraph()
graph(mol,featurization=featurization)
graph.setName(data[0])
graph.setSmiles(data[1])
for i,property_name in enumerate(property_names[2:]):
if 'logs' in property_name.lower():
if float(data[2+i]) >0.0 or float(data[2+i]) < (-10.0):
return None
graph.setProperty(property_name.lower(),(float(data[2+i])+10.0))
elif 'logp' in property_name.lower():
graph.setProperty(property_name.lower(),(float(data[2+i])+3.0))
elif 'logd' in property_name.lower():
graph.setProperty(property_name.lower(),(float(data[2+i])+1.60))
else:
graph.setProperty('other',float(data[2+i]))
if not 'logd' in graph.properties:
graph.setProperty('logd',False)
if not 'logp' in graph.properties:
graph.setProperty('logp',False)
if not 'logs' in graph.properties:
graph.setProperty('logs',False)
if not 'other' in graph.properties:
graph.setProperty('other',False)
except Exception as e:
logging.debug("Could not process "+str(data[0])+" "+str(data[2])+" because of "+str(e))
return None
return graph
# =============================================================================
# Main Run Method
# =============================================================================
def run(args):
'''
    The main method for the graph generation.
    Reads the input file, builds the molecular graphs and pickles the train/test splits.
'''
try:
if not os.path.isdir(args.output_path_train):
raise FileExistsError("The output path does not exist - please create one with the corresponding name.")
logging.debug("Start read FILE and generate data!")
data = readChemblXls(path_to_xls=args.input_file_path,col_entries=args.columns,sheet_index=args.sheet_index,skip_rows=args.skip_rows,n_entries=args.n_entries)
logging.debug("Finished FILE and data reading with overall nr of entries: "+str(len(data)))
print("Finished FILE and data generation with overall nr of entries: "+str(len(data)))
graph_list = []
print("Start graph generation.")
pool = Pool(processes=int(args.n_processes))
        logging.debug("Start multi threading and graph list generation!")
graph_list = pool.starmap(partial(multi_threading),zip(data, repeat(args.featurization)))
        logging.debug("Finished multi threading and graph list generation!")
pool.close()
pool.join()
graph_list = list(filter(None, graph_list))
print("Finished graph generation with overall nr of entries: "+str(len(graph_list)))
logging.info("Finished graph generation with overall nr of entries: "+str(len(graph_list)))
split= int(len(graph_list)*args.train_test_split)
random.seed(1)
random.shuffle(graph_list)
# logd_train = list()
# with open('/home/owieder/projects/old_logd_train.txt') as f:
# lines = f.readlines()
# for line in lines:
# logd_train.append(line.split(' ')[0])
# logd_test = list()
# with open('/home/owieder/projects/old_logd_test.txt') as f:
# lines = f.readlines()
# for line in lines:
# logd_test.append(line.split(' ')[0])
# sorted_graph_list_train = list()
# sorted_graph_list_test = list()
# for name in logd_train:
# for graph in graph_list:
# if name == graph.name:
# sorted_graph_list_train.append(graph)
# for name in logd_test:
# for graph in graph_list:
# if name == graph.name:
# sorted_graph_list_test.append(graph)
# logs_train = list()
# with open('/home/owieder/projects/old_logs_train.txt') as f:
# lines = f.readlines()
# for line in lines:
# logs_train.append(line.split(' ')[0])
# logs_test = list()
# with open('/home/owieder/projects/old_logs_test.txt') as f:
# lines = f.readlines()
# for line in lines:
# logs_test.append(line.split(' ')[0])
# sorted_graph_list_train = list()
# sorted_graph_list_test = list()
# for name in logs_train:
# for graph in graph_list:
# if name == graph.name:
# sorted_graph_list_train.append(graph)
# for name in logs_test:
# for graph in graph_list:
# if name == graph.name:
# sorted_graph_list_test.append(graph)
logD_graph_list_train_eval = graph_list[:split]
logD_graph_list_test = graph_list[split:]
logging.info("Train/Evaluation graph list length: "+str(len(logD_graph_list_train_eval)))
logging.info("Test graph list length: "+str(len(logD_graph_list_test)))
print("Start pickling...")
logging.debug("Start pickling graph lists!")
pickleGraphs(args.output_path_train,logD_graph_list_train_eval,args.pickle_split)
logging.debug("Finished train/eval pickling!")
pickleGraphs(args.output_path_test,logD_graph_list_test,args.pickle_split)
logging.debug("Finished test pickling!")
except Exception as e:
logging.error("Could not finish the graph generation due to "+str(e))
# =============================================================================
# MAIN
# =============================================================================
if __name__ == "__main__":
    parser = argparse.ArgumentParser("Graph Generation Tool",description="Uses xls files with the names, smiles and different properties in each column to generate pickled graph representations for the D-GIN publication. The xls file needs to contain in the first row the name/description for each column. These names are used as the property names.")
parser.add_argument('--input_file_path',required=True,help='REQUIRED! The path to the xls file.',type=str)
parser.add_argument('--output_path_train',required=True,help='REQUIRED! The path to the output folder FOR TRAINING.')
parser.add_argument('--output_path_test',required=True,help='REQUIRED! The path to the output folder FOR TESTING.')
    parser.add_argument('--columns',required=True,nargs='+', type=int,help='REQUIRED! Select the column for the name, smiles and other properties. The first two entries here need to be the name and smiles! Other property names are extracted from the first row. e.g. if names are in column 0, smiles in column 7 and logD/logS endpoints in column 8 and 3 then use --columns 0 7 8 3')
parser.add_argument('--log_dir',help='REQUIRED! The log directory for the graph generation script.',required=True)
parser.add_argument('--featurization',type=str,help="Define the featurization type of the graph. Allowed featurizations are: " +
"'DMPNN','DGIN', 'DGIN3', 'DGIN4', 'DGIN5', 'DGIN6', 'DGIN7', 'DGIN8', 'DGIN9' ")
parser.add_argument('--skip_rows',type=int,help='How many rows should be skipped in addition to the first row of names/descriptions. So e.g. --skip_rows 2 skips one additional row. Default = 1',default=1)
    parser.add_argument('--sheet_index',type=int,help="Sheet_index (int): Which sheet should be addressed. Default: 0 ",default=0)
parser.add_argument('--n_entries',type=int,help="Number of entries to be considered in the xls file. Default: 10000 ",default=10000)
parser.add_argument('--n_processes',type=int,help="Number of processes used on your machine. Default: 3 ",default=3)
parser.add_argument('--train_test_split',type=float,help="Split for training/testing. e.g. 0.9 means that 90 percent of the " +
"data is taken as training, the rest (10 percent) as testing data. Default: 0.9 ",default=0.9)
    parser.add_argument('--log_verbosity', default=2, type=int,
                        help="Verbosity level between 0 and 4, with higher values leading to more "
                             "verbose logging. CRITICAL=0, ERROR=1, WARN=2, INFO=3, "
                             "DEBUG=4 - Default=2")
parser.add_argument('--pickle_split',type=int,help="Number of pickled data instances. Default: 5 ",default=5)
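    # Hedged example invocation (added; the script name and file paths below are
    # illustrative assumptions, not values required by this tool):
    #   python generate_graphs.py \
    #       --input_file_path data/compounds.xls \
    #       --output_path_train data/graphs/train \
    #       --output_path_test data/graphs/test \
    #       --columns 0 1 2 \
    #       --log_dir logs \
    #       --featurization DGIN3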
args = parser.parse_args()
#Path(args.log_dir).mkdir(parents=True, exist_ok=False)
logging.basicConfig(filename=args.log_dir+'/gen_graph.log', level=LOG_LEVELS[args.log_verbosity])
logging.info("NEW!!!! Start graph generation. "+ datetime.datetime.now().strftime('%D:%H.%f')[:-4])
logging.info("input_file_path:"+str(args.input_file_path))
logging.info("output_path_train:"+str(args.output_path_train))
logging.info("output_path_test:"+str(args.output_path_test))
logging.info("train_test_split:"+str(args.train_test_split))
logging.info("featurization:"+str(args.featurization))
print("Start graph generation - might take some time, depending on the amount of data!")
run(args)
print("Finished! For more details look into the log file: "+str(args.log_dir))
    logging.info("Finished graph generation. "+ datetime.datetime.now().strftime('%D:%H.%f')[:-4]+'\n')
| python |
import math
import itertools
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import scipy.stats as ss
import scikit_posthocs as sp
from dash_table.Format import Format, Scheme
from Bio import Phylo
from ete3 import Tree
from plotly.subplots import make_subplots
# -------------------------------------------------------------------------------------
# --------------------------------------- Classes -------------------------------------
class DrawTree():
def __init__(self, newicktree, template, topology, color_map, branch_len, font_family):
self.newicktree = Phylo.read(newicktree, "newick")
self.template = template
self.topology = topology
self.color_map = color_map
self.branch_len = branch_len
self.font_family = font_family
def create_square_tree(self):
def get_x_coordinates(tree):
"""Associates to each clade an x-coord.
returns dict {clade: x-coord}
"""
if self.branch_len:
xcoords = tree.depths(unit_branch_lengths=True)
else:
xcoords = tree.depths()
# tree.depth() maps tree clades to depths (by branch length).
# returns a dict {clade: depth} where clade runs over all Clade instances of the tree, and depth is the distance from root to clade
# If there are no branch lengths, assign unit branch lengths
if not max(xcoords.values()):
xcoords = tree.depths(unit_branch_lengths=True)
return xcoords
def get_y_coordinates(tree, dist=1.3):
"""
returns dict {clade: y-coord}
The y-coordinates are (float) multiple of integers (i*dist below)
dist depends on the number of tree leafs
"""
maxheight = tree.count_terminals() # Counts the number of tree leafs.
# Rows are defined by the tips/leafs
ycoords = dict(
(leaf, maxheight - i * dist)
for i, leaf in enumerate(reversed(tree.get_terminals()))
)
def calc_row(clade):
for subclade in clade:
if subclade not in ycoords:
calc_row(subclade)
# This is intermediate placement of internal nodes
ycoords[clade] = (ycoords[clade.clades[0]] + ycoords[clade.clades[-1]]) / 2
if tree.root.clades:
calc_row(tree.root)
return ycoords
def get_clade_lines(
orientation="horizontal",
y_curr=0,
x_start=0,
x_curr=0,
y_bot=0,
y_top=0,
line_color="white",
line_width=2,
root_clade = False
):
"""define a shape of type 'line', for branch
"""
branch_line = dict(
type="line", layer="below", line=dict(color=line_color, width=line_width)
)
if root_clade:
branch_line.update(x0=-0.01, y0=y_curr, x1=-0.01, y1=y_curr)
return branch_line
elif orientation == "horizontal":
branch_line.update(x0=x_start, y0=y_curr, x1=x_curr, y1=y_curr)
elif orientation == "vertical":
branch_line.update(x0=x_curr, y0=y_bot, x1=x_curr, y1=y_top)
else:
raise ValueError("Line type can be 'horizontal' or 'vertical'")
return branch_line
def draw_clade(
clade,
x_start,
line_shapes,
line_color="white",
line_width=2,
x_coords=0,
y_coords=0,
init_clade=False,
):
"""Recursively draw the tree branches, down from the given clade"""
x_curr = x_coords[clade]
y_curr = y_coords[clade]
# Draw a horizontal line from start to here
if init_clade:
branch_line = get_clade_lines(
orientation="horizontal",
y_curr=y_curr,
x_start=x_start,
x_curr=x_curr,
line_color=line_color,
line_width=line_width,
root_clade=True,
)
else:
branch_line = get_clade_lines(
orientation="horizontal",
y_curr=y_curr,
x_start=x_start,
x_curr=x_curr,
line_color=line_color,
line_width=line_width,
root_clade=False,
)
line_shapes.append(branch_line)
if clade.clades:
# Draw a vertical line connecting all children
y_top = y_coords[clade.clades[0]]
y_bot = y_coords[clade.clades[-1]]
line_shapes.append(
get_clade_lines(
orientation="vertical",
x_curr=x_curr,
y_bot=y_bot,
y_top=y_top,
line_color=line_color,
line_width=line_width,
)
)
# Draw descendants
for child in clade:
draw_clade(child, x_curr, line_shapes,
x_coords=x_coords, y_coords=y_coords,
line_color=line_color)
if 'dark' in self.template:
text_color = 'white'
else:
text_color = 'black'
line_color = self.color_map[self.topology]
tree = self.newicktree
tree.ladderize()
x_coords = get_x_coordinates(tree)
y_coords = get_y_coordinates(tree)
line_shapes = []
draw_clade(
tree.root,
0,
line_shapes,
line_color=line_color,
line_width=2,
x_coords=x_coords,
y_coords=y_coords,
init_clade=True,
)
my_tree_clades = x_coords.keys()
X = []
Y = []
text = []
for cl in my_tree_clades:
X.append(x_coords[cl])
Y.append(y_coords[cl])
            # Use a blank label for unnamed internal nodes
            if not cl.name:
                text.append(" ")
            else:
                text.append(cl.name)
axis = dict(
showline=False,
visible=False,
zeroline=False,
showgrid=False,
showticklabels=False,
title="", # y title
)
label_legend = ["Tree_1"]
nodes = []
for elt in label_legend:
node = dict(
type="scatter",
x=X,
y=Y,
mode="markers+text",
marker=dict(color=text_color, size=5),
text=text, # vignet information of each node
textposition='middle right',
textfont=dict(color=text_color, size=12),
showlegend=False,
name=elt,
)
nodes.append(node)
# Set graph x-range
if self.branch_len:
x_range = [-0.5, (max(x_coords.values())+2)]
show_xaxis = False
elif max(x_coords.values()) < 0.1:
x_range = [0, (max(x_coords.values())+(max(x_coords.values())*1.25))]
show_xaxis = True
elif max(x_coords.values()) < 0.5:
x_range = [0, 0.5]
show_xaxis = True
elif max(x_coords.values()) < 1:
x_range = [0, 1]
show_xaxis = True
elif max(x_coords.values()) == 1:
x_range = [0, max(x_coords.values())+2]
show_xaxis = False
else:
x_range = [0, max(x_coords.values())+2]
show_xaxis = False
layout = dict(
autosize=True,
showlegend=False,
template=self.template,
dragmode="pan",
margin=dict(t=20, b=10, r=20, l=10),
xaxis=dict(
showline=True,
zeroline=False,
visible=show_xaxis,
showgrid=False,
showticklabels=True,
range=x_range,
),
yaxis=axis,
hovermode="closest",
shapes=line_shapes,
font=dict(family=self.font_family,size=14),
)
fig = go.Figure(data=nodes, layout=layout)
return fig
def create_angular_tree(self):
def get_x_coordinates(tree):
"""Associates to each clade an x-coord.
returns dict {clade: x-coord}
"""
# xcoords = tree.depths(unit_branch_lengths=True)
# print("===========================")
# nodes = [n for n in tree.find_clades()]
# nodes = tree.get_terminals() + tree.get_nonterminals()
# print(tree.root.clades)
# root_xcoord = {tree.root.clades[1]:0}
terminal_nodes = tree.get_terminals()
internal_nodes = tree.get_nonterminals()
terminal_xcoords = dict((leaf, i) for i, leaf in enumerate(terminal_nodes))
internal_xcoords = dict(
(leaf, i+0.5) for leaf, i in zip(internal_nodes, range(1, len(internal_nodes)))
)
xcoords = {**terminal_xcoords, **internal_xcoords}
# print(xcoords)
# print("===========================")
# tree.depth() maps tree clades to depths (by branch length).
# returns a dict {clade: depth} where clade runs over all Clade instances of the tree, and depth
# is the distance from root to clade
# If there are no branch lengths, assign unit branch lengths
if not max(xcoords.values()):
xcoords = tree.depths(unit_branch_lengths=True)
return xcoords
def get_y_coordinates(tree, dist=1):
"""
returns dict {clade: y-coord}
The y-coordinates are (float) multiple of integers (i*dist below)
dist depends on the number of tree leafs
"""
maxheight = tree.count_terminals() # Counts the number of tree leafs.
# Rows are defined by the tips/leafs
# root_ycoord = {tree.root:maxheight}
terminal_nodes = tree.get_terminals()
internal_nodes = tree.get_nonterminals()
terminal_ycoords = dict((leaf, 1) for _, leaf in enumerate(terminal_nodes))
internal_ycoords = dict(
(leaf, i) for leaf, i in zip(internal_nodes, reversed(range(1, len(internal_nodes))))
)
ycoords = {**terminal_ycoords, **internal_ycoords}
def calc_row(clade):
for subclade in clade:
if subclade not in ycoords:
calc_row(subclade)
ycoords[clade] = (ycoords[clade.clades[0]] +
ycoords[clade.clades[-1]]) / 2
if tree.root.clades:
calc_row(tree.root)
return ycoords
def get_clade_lines(
orientation="horizontal",
y_curr=0,
last_y_curr=0,
x_start=0,
x_curr=0,
y_bot=0,
y_top=0,
line_color="rgb(25,25,25)",
line_width=0.5,
init_flag=False,
):
"""define a shape of type 'line', for branch
"""
branch_line = dict(
type="line", layer="below", line=dict(color=line_color, width=line_width)
)
if orientation == "horizontal":
if init_flag:
branch_line.update(x0=x_start, y0=y_curr,
x1=x_curr, y1=y_curr)
else:
branch_line.update(
x0=x_start, y0=last_y_curr, x1=x_curr, y1=y_curr)
elif orientation == "vertical":
branch_line.update(x0=x_curr, y0=y_bot, x1=x_curr, y1=y_top)
else:
raise ValueError("Line type can be 'horizontal' or 'vertical'")
return branch_line
def draw_clade(
clade,
x_start,
line_shapes,
line_color="rgb(15,15,15)",
line_width=1,
x_coords=0,
y_coords=0,
last_clade_y_coord=0,
init_flag=True
):
"""Recursively draw the tree branches, down from the given clade"""
x_curr = x_coords[clade]
y_curr = y_coords[clade]
# Draw a horizontal line from start to here
branch_line = get_clade_lines(
orientation="horizontal",
y_curr=y_curr,
last_y_curr=last_clade_y_coord,
x_start=x_start,
x_curr=x_curr,
line_color=line_color,
line_width=line_width,
init_flag=init_flag,
)
line_shapes.append(branch_line)
if clade.clades:
# Draw descendants
for child in clade:
draw_clade(child, x_curr, line_shapes, x_coords=x_coords,
y_coords=y_coords, last_clade_y_coord=y_coords[clade],
init_flag=False, line_color=line_color)
if 'dark' in self.template:
text_color = 'white'
else:
text_color = 'black'
line_color = self.color_map[self.topology]
# Load in Tree object and ladderize
tree = self.newicktree
tree.ladderize()
# Get coordinates + put into dictionary
# dict(keys=clade_names, values=)
x_coords = get_x_coordinates(tree)
y_coords = get_y_coordinates(tree)
line_shapes = []
draw_clade(
tree.root,
0,
line_shapes,
line_color=line_color,
line_width=2,
x_coords=x_coords,
y_coords=y_coords,
)
#
my_tree_clades = x_coords.keys()
X = []
Y = []
text = []
for cl in my_tree_clades:
X.append(x_coords[cl])
Y.append(y_coords[cl])
# Add confidence values if internal node
if not cl.name:
text.append(cl.confidence)
else:
text.append(cl.name)
axis = dict(
showline=False,
zeroline=False,
showgrid=False,
visible=False,
showticklabels=False,
)
label_legend = ["Tree_1"]
nodes = []
for elt in label_legend:
node = dict(
type="scatter",
x=X,
y=Y,
mode="markers+text",
marker=dict(color=text_color, size=5),
text=text, # vignet information of each node
                textposition='middle right',
textfont=dict(color=text_color, size=25),
showlegend=False,
name=elt,
)
nodes.append(node)
layout = dict(
template=self.template,
dragmode="select",
autosize=True,
showlegend=True,
xaxis=dict(
showline=True,
zeroline=False,
visible=False,
showgrid=False,
showticklabels=True,
range=[0, (max(x_coords.values())+2)]
),
yaxis=axis,
hovermode="closest",
shapes=line_shapes,
legend={"x": 0, "y": 1},
font=dict(family="Open Sans"),
)
fig = dict(data=nodes, layout=layout)
return fig
def create_circular_tree(self):
def get_circular_tree_data(tree, order='level', dist=1, start_angle=0, end_angle=360, start_leaf='first'):
"""Define data needed to get the Plotly plot of a circular tree
Source code found at: https://chart-studio.plotly.com/~empet/14834.embed
"""
# tree: an instance of Bio.Phylo.Newick.Tree or Bio.Phylo.PhyloXML.Phylogeny
# order: tree traversal method to associate polar coordinates to its nodes
# dist: the vertical distance between two consecutive leafs in the associated rectangular tree layout
# start_angle: angle in degrees representing the angle of the first leaf mapped to a circle
# end_angle: angle in degrees representing the angle of the last leaf
# the list of leafs mapped in anticlockwise direction onto circles can be tree.get_terminals()
# or its reversed version tree.get_terminals()[::-1].
            # start_leaf: a keyword with two possible values:
# 'first': to map the leafs in the list tree.get_terminals() onto a circle,
# in the counter-clockwise direction
# 'last': to map the leafs in the list, tree.get_terminals()[::-1]
start_angle *= np.pi/180 # conversion to radians
end_angle *= np.pi/180
def get_radius(tree):
"""
Associates to each clade root its radius, equal to the distance from that clade to the tree root
returns dict {clade: node_radius}
"""
if self.branch_len:
node_radius = tree.depths(unit_branch_lengths=True)
else:
node_radius = tree.depths()
# If the tree did not record the branch lengths assign the unit branch length
# (ex: the case of a newick tree "(A, (B, C), (D, E))")
                if not np.count_nonzero(list(node_radius.values())):
node_radius = tree.depths(unit_branch_lengths=True)
return node_radius
def get_vertical_position(tree):
"""
returns a dict {clade: ycoord}, where y-coord is the cartesian y-coordinate
of a clade root in a rectangular phylogram
"""
n_leafs = tree.count_terminals() # Counts the number of tree leafs.
# Assign y-coordinates to the tree leafs
if start_leaf == 'first':
node_ycoord = dict((leaf, k) for k, leaf in enumerate(tree.get_terminals()))
elif start_leaf == 'last':
node_ycoord = dict((leaf, k) for k, leaf in enumerate(reversed(tree.get_terminals())))
else:
raise ValueError("start leaf can be only 'first' or 'last'")
def assign_ycoord(clade):#compute the y-coord for the root of this clade
for subclade in clade:
if subclade not in node_ycoord: # if the subclade root hasn't a y-coord yet
assign_ycoord(subclade)
node_ycoord[clade] = 0.5 * (node_ycoord[clade.clades[0]] + node_ycoord[clade.clades[-1]])
if tree.root.clades:
assign_ycoord(tree.root)
return node_ycoord
node_radius = get_radius(tree)
node_ycoord = get_vertical_position(tree)
y_vals = node_ycoord.values()
ymin, ymax = min(y_vals), max(y_vals)
ymin -= dist # this dist subtraction is necessary to avoid coincidence of the first and last leaf angle
# when the interval [ymin, ymax] is mapped onto [0, 2pi],
def ycoord2theta(y):
# maps an y in the interval [ymin-dist, ymax] to the interval [radian(start_angle), radian(end_angle)]
return start_angle + (end_angle - start_angle) * (y-ymin) / float(ymax-ymin)
def get_points_on_lines(linetype='radial', x_left=0, x_right=0, y_right=0, y_bot=0, y_top=0):
"""
- define the points that generate a radial branch and the circular arcs, perpendicular to that branch
- a circular arc (angular linetype) is defined by 10 points on the segment of ends
(x_bot, y_bot), (x_top, y_top) in the rectangular layout,
mapped by the polar transformation into 10 points that are spline interpolated
- returns for each linetype the lists X, Y, containing the x-coords, resp y-coords of the
line representative points
"""
if linetype == 'radial':
theta = ycoord2theta(y_right)
X = [x_left*np.cos(theta), x_right*np.cos(theta), None]
Y = [x_left*np.sin(theta), x_right*np.sin(theta), None]
elif linetype == 'angular':
theta_b = ycoord2theta(y_bot)
theta_t = ycoord2theta(y_top)
t = np.linspace(0,1, 10)# 10 points that span the circular arc
theta = (1-t) * theta_b + t * theta_t
X = list(x_right * np.cos(theta)) + [None]
Y = list(x_right * np.sin(theta)) + [None]
else:
raise ValueError("linetype can be only 'radial' or 'angular'")
return X,Y
def get_line_lists(clade, x_left, xlines, ylines, xarc, yarc):
"""Recursively compute the lists of points that span the tree branches"""
# xlines, ylines - the lists of x-coords, resp y-coords of radial edge ends
# xarc, yarc - the lists of points generating arc segments for tree branches
x_right = node_radius[clade]
y_right = node_ycoord[clade]
X,Y = get_points_on_lines(linetype='radial', x_left=x_left, x_right=x_right, y_right=y_right)
xlines.extend(X)
ylines.extend(Y)
if clade.clades:
y_top = node_ycoord[clade.clades[0]]
y_bot = node_ycoord[clade.clades[-1]]
X,Y = get_points_on_lines(linetype='angular', x_right=x_right, y_bot=y_bot, y_top=y_top)
xarc.extend(X)
yarc.extend(Y)
                    # get and append the lists of points representing the branches of the descendants
for child in clade:
get_line_lists(child, x_right, xlines, ylines, xarc, yarc)
xlines = []
ylines = []
xarc = []
yarc = []
get_line_lists(tree.root, 0, xlines, ylines, xarc, yarc)
xnodes = []
ynodes = []
for clade in tree.find_clades(order='preorder'): #it was 'level'
theta = ycoord2theta(node_ycoord[clade])
xnodes.append(node_radius[clade]*np.cos(theta))
ynodes.append(node_radius[clade]*np.sin(theta))
return xnodes, ynodes, xlines, ylines, xarc, yarc
if 'dark' in self.template:
text_color = 'white'
else:
text_color = 'black'
line_color = self.color_map[self.topology]
tree = self.newicktree
tree.ladderize()
traverse_order = 'preorder'
all_clades=list(tree.find_clades(order=traverse_order))
for k in range(len((all_clades))):
all_clades[k].id=k
xnodes, ynodes, xlines, ylines, xarc, yarc = get_circular_tree_data(tree, order=traverse_order, start_leaf='last')
tooltip=[]
clade_names=[]
color=[]
for clade in tree.find_clades(order=traverse_order):
if self.branch_len:
branch_length = 1
else:
branch_length = clade.branch_length
if clade.name and clade.confidence and clade.branch_length:
tooltip.append(f"name: {clade.name}<br>branch-length: {branch_length}\
<br>confidence: {int(clade.confidence)}")
                color.append(clade.confidence)
clade_names.append(clade.name)
elif clade.name is None and clade.branch_length is not None and clade.confidence is not None:
color.append(clade.confidence)
clade_names.append(clade.name)
tooltip.append(f"branch-length: {branch_length}\
<br>confidence: {int(clade.confidence)}")
elif clade.name and clade.branch_length and clade.confidence is None:
tooltip.append(f"name: {clade.name}<br>branch-length: {branch_length}")
color.append(-1)
clade_names.append(clade.name)
else:
tooltip.append('')
color.append(-1)
clade_names.append(clade.name)
trace_nodes=dict(type='scatter',
x=xnodes,
y= ynodes,
mode='markers+text',
marker=dict(color=text_color, size=8),
text=clade_names,
textposition='top center',
textfont=dict(color=text_color, size=12),
hoverinfo='text',
hovertemplate=tooltip,
)
trace_radial_lines=dict(type='scatter',
x=xlines,
y=ylines,
mode='lines',
line=dict(color=line_color, width=1),
hoverinfo='none',
)
trace_arcs=dict(type='scatter',
x=xarc,
y=yarc,
mode='lines',
line=dict(color=line_color, width=1, shape='spline'),
hoverinfo='none',
)
layout=dict(
font=dict(family=self.font_family,size=14),
autosize=True,
showlegend=False,
template=self.template,
xaxis=dict(visible=False),
yaxis=dict(visible=False),
hovermode='closest',
margin=dict(t=20, b=10, r=20, l=10, pad=20),
)
fig = go.Figure(data=[trace_radial_lines, trace_arcs, trace_nodes], layout=layout)
return fig
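# Hedged usage sketch (added; the file name, template, and colors below are
# illustrative assumptions, not values used by this module):
#   dt = DrawTree("topology1.nwk", template="plotly_dark", topology="Topology1",
#                 color_map={"Topology1": "#1f77b4"}, branch_len=False,
#                 font_family="Arial, sans-serif")
#   fig = dt.create_square_tree()   # or create_angular_tree() / create_circular_tree()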
class RFDistance():
def __init__(self, t1, t2):
self.t1 = Tree(t1)
self.t2 = Tree(t2)
self.compare = self.t1.compare(self.t2)
def NormRF(self):
return self.compare['norm_rf']
def RF(self):
return self.compare['rf']
def MaxRF(self):
return self.compare['max_rf']
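# Hedged usage sketch (added; the newick strings are illustrative): ete3's
# Tree.compare() supplies the values returned by the helpers above.
#   rf = RFDistance("((A,B),(C,D));", "((A,C),(B,D));")
#   rf.RF()      # raw Robinson-Foulds distance
#   rf.MaxRF()   # maximum possible RF distance for these two trees
#   rf.NormRF()  # RF / MaxRF, i.e. normalized to [0, 1]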
# -------------------------------------------------------------------------------------
# ------------------------------ Alt Data Graph Functions -----------------------------
def make_alt_data_str_figure(
alt_data_to_graph,
chromosome_df,
color_mapping,
topology_df,
window_size,
template,
dataRange,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
whole_genome,
):
# sort dataframe
topology_df.sort_values(by=["Window"], inplace=True)
topology_df.fillna("NULL", inplace=True)
# Build graph
if whole_genome:
fig = px.histogram(
topology_df,
x="Window",
y=[1]*len(topology_df),
category_orders={"Chromosome": chromosome_df['Chromosome']},
color=alt_data_to_graph,
color_discrete_sequence=list(color_mapping.values()),
nbins=int(chromosome_df["End"].max()/window_size),
facet_row="Chromosome",
)
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig.update_layout(
template=template,
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0
),
title={
'text': str(alt_data_to_graph),
'x':0.5,
'xanchor': 'center',
'yanchor': 'top',
},
hovermode="x unified",
font=dict(family=font_family,),
height=100*len(topology_df["Chromosome"].unique())
)
else:
fig = px.histogram(
topology_df,
x="Window",
y=[1]*len(topology_df),
color=alt_data_to_graph,
color_discrete_sequence=list(color_mapping.values()),
nbins=int(chromosome_df["End"].max()/window_size),
)
fig.update_layout(
template=template,
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0
),
title={
'text': str(alt_data_to_graph),
'x':0.5,
'xanchor': 'center',
'yanchor': 'top',
},
hovermode="x unified",
font=dict(family=font_family,),
)
if dataRange:
fig.update_xaxes(
title="Position",
range=dataRange,
showline=True,
showgrid=xaxis_gridlines,
linewidth=axis_line_width,
)
else:
fig.update_xaxes(
title="Position",
showline=True,
showgrid=xaxis_gridlines,
linewidth=axis_line_width,
)
fig.update_yaxes(
title="y-axis",
range=[0, 1],
nticks=1,
showline=True,
showgrid=yaxis_gridlines,
linewidth=axis_line_width,
)
return fig
def make_alt_data_int_figure(
alt_data_to_graph,
color_mapping,
topology_df,
chromosome_df,
template,
dataRange,
y_max,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
whole_genome,
):
# sort dataframe
topology_df = topology_df.sort_values(by=["Window"])
y_range = [0, (y_max*1.1)]
# Build graph
if whole_genome:
fig = px.line(
topology_df,
x="Window",
y=alt_data_to_graph,
category_orders={"Chromosome": chromosome_df['Chromosome']},
color_discrete_sequence=list(color_mapping.values()),
facet_row="Chromosome",
)
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig.update_layout(
template=template,
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
title={
'text': str(alt_data_to_graph),
'x':0.5,
'xanchor': 'center',
'yanchor': 'top',
},
hovermode="x unified",
font=dict(family=font_family,),
height=100*len(topology_df["Chromosome"].unique()),
)
else:
fig = px.line(
topology_df,
x="Window",
y=alt_data_to_graph,
color_discrete_sequence=list(color_mapping.values()),
)
fig.update_layout(
template=template,
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
title={
'text': str(alt_data_to_graph),
'x':0.5,
'xanchor': 'center',
'yanchor': 'top',
},
hovermode="x unified",
font=dict(family=font_family,),
)
# Update X-axis
if dataRange:
fig.update_xaxes(
title="Position",
range=dataRange,
showline=True,
showgrid=xaxis_gridlines,
linewidth=axis_line_width,
)
else:
fig.update_xaxes(
title="Position",
showline=True,
showgrid=xaxis_gridlines,
linewidth=axis_line_width,
)
if y_max < 0.1:
fig.update_yaxes(
fixedrange=True,
linewidth=axis_line_width,
range=y_range,
showgrid=yaxis_gridlines,
showline=True,
            title=alt_data_to_graph,
showexponent = 'all',
exponentformat = 'e',
)
else:
fig.update_yaxes(
fixedrange=True,
linewidth=axis_line_width,
range=y_range,
showgrid=yaxis_gridlines,
showline=True,
            title=alt_data_to_graph,
)
return fig
# ----------------------------------------------------------------------------------------
# -------------------------- Single Chromosome Graph Functions ---------------------------
def build_histogram_with_rug_plot(
topology_df,
chromosome,
chromosome_df,
template,
current_topologies,
window_size,
color_mapping,
dataRange,
topoOrder,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
# --- Set up topology data ---
# Extract current topology data
if (type(current_topologies) == str) or (type(current_topologies) == int):
wanted_rows = topology_df[topology_df["TopologyID"] == current_topologies]
elif type(current_topologies) == list:
wanted_rows = topology_df[topology_df["TopologyID"].isin(current_topologies)]
    # Add in pseudodata for missing current_topologies (fixes issue where topology is dropped from legend)
if len(wanted_rows['TopologyID'].unique()) < len(current_topologies):
missing_topologies = [t for t in current_topologies if t not in wanted_rows['TopologyID'].unique()]
for mt in missing_topologies:
missing_row_data = [chromosome, 0, 'NA', mt] + ['NULL']*(len(wanted_rows.columns)-4)
missing_row = pd.DataFrame(data={i:j for i,j in zip(wanted_rows.columns, missing_row_data)}, index=[0])
wanted_rows = pd.concat([wanted_rows, missing_row])
# Group data by topology ID
grouped_topology_df = wanted_rows.sort_values(['TopologyID'],ascending=False).groupby(by='TopologyID')
# Set row heights based on number of current_topologies being shown
if len(current_topologies) <= 6:
subplot_row_heights = [1, 1]
elif len(current_topologies) <= 8:
subplot_row_heights = [4, 2]
else:
subplot_row_heights = [8, 2]
# Build figure
# fig = make_subplots(rows=2, cols=1, row_heights=subplot_row_heights, vertical_spacing=0.05, shared_xaxes=True)
fig = make_subplots(rows=2, cols=1, vertical_spacing=0.05, shared_xaxes=True)
for topology, data in grouped_topology_df:
fig.add_trace(
go.Scatter(
x=data['Window'],
y=data['TopologyID'],
name=topology,
legendgroup=topology,
mode='markers',
marker_symbol='line-ns-open',
marker_line_width=1,
marker_color=[color_mapping[topology]]*len(data),
),
# go.Box(
# x=data['Window'],
# y=data['TopologyID'],
# boxpoints='all',
# jitter=0,
# legendgroup=topology,
# marker_symbol='line-ns-open',
# marker_color=color_mapping[topology],
# name=topology,
# ),
row=1, col=1,
)
fig.add_trace(
go.Bar(
x=data['Window'],
y=[1]*len(data),
name=topology,
legendgroup=topology,
showlegend=False,
marker_color=color_mapping[topology],
marker_line_width=0,
),
row=2, col=1
)
# Update layout + axes
fig.update_layout(
template=template,
legend_title_text='Topology',
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
itemsizing='constant'
),
hovermode="x unified",
font=dict(family=font_family,),
)
fig.update_xaxes(
rangemode="tozero",
range=dataRange,
linewidth=axis_line_width,
showgrid=xaxis_gridlines,
row=1,
col=1
)
fig.update_xaxes(
rangemode="tozero",
range=dataRange,
linewidth=axis_line_width,
title='Position',
showgrid=xaxis_gridlines,
row=2,
col=1,
)
fig.update_yaxes(
rangemode="tozero",
categoryarray=topoOrder,
linewidth=axis_line_width,
showgrid=yaxis_gridlines,
showticklabels=False,
fixedrange=True,
ticklen=0,
title="",
type='category',
row=1,
col=1,
)
fig.update_yaxes(
rangemode="tozero",
fixedrange=True,
linewidth=axis_line_width,
nticks=1,
showgrid=yaxis_gridlines,
showticklabels=False,
ticklen=0,
title="",
row=2,
col=1,
)
return fig
def build_rug_plot(
topology_df,
chromosome,
template,
current_topologies,
color_mapping,
dataRange,
topoOrder,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
# --- Group wanted data ---
if (type(current_topologies) == str) or (type(current_topologies) == int):
wanted_rows = topology_df[topology_df["TopologyID"] == current_topologies]
elif type(current_topologies) == list:
wanted_rows = topology_df[topology_df["TopologyID"].isin(current_topologies)]
    # Add in pseudodata for missing current_topologies (fixes issue where topology is dropped from legend)
if len(wanted_rows['TopologyID'].unique()) < len(current_topologies):
missing_topologies = [t for t in current_topologies if t not in wanted_rows['TopologyID'].unique()]
for mt in missing_topologies:
missing_row_data = [chromosome, 0, 'NA', mt] + ['NULL']*(len(wanted_rows.columns)-4)
missing_row = pd.DataFrame(data={i:j for i,j in zip(wanted_rows.columns, missing_row_data)}, index=[0])
wanted_rows = pd.concat([wanted_rows, missing_row])
else:
pass
# --- Group data by topology ID
grouped_topology_df = wanted_rows.groupby(by='TopologyID')
# --- Build figure ---
fig = go.Figure()
for topology, data in grouped_topology_df:
fig.add_trace(go.Scatter(
x=data['Window'],
y=data['TopologyID'],
name=topology,
legendgroup=topology,
mode='markers',
marker_symbol='line-ns-open',
marker_size=int(100/len(grouped_topology_df)),
marker_line_width=1,
marker_color=[color_mapping[topology]]*len(data),
))
# Update figure layout + axes
fig.update_layout(
template=template,
legend_title_text='Topology',
xaxis_title_text='Position',
margin=dict(
l=60,
r=60,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
),
hovermode="x unified",
font=dict(family=font_family,),
)
fig.update_xaxes(
rangemode="tozero",
range=dataRange,
linewidth=axis_line_width,
showgrid=xaxis_gridlines,
showline=True,
)
fig.update_yaxes(
fixedrange=True,
title="",
showline=True,
showgrid=yaxis_gridlines,
linewidth=axis_line_width,
showticklabels=False,
type='category',
categoryarray=topoOrder,
)
fig.for_each_annotation(lambda a: a.update(text=""))
return fig
def build_tile_plot(
topology_df_filtered,
chromosome_df,
template,
current_topologies,
color_mapping,
dataRange,
window_size,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
# Extract current topology data
if (type(current_topologies) == str) or (type(current_topologies) == int):
wanted_rows = topology_df_filtered[topology_df_filtered["TopologyID"] == current_topologies]
elif type(current_topologies) == list:
wanted_rows = topology_df_filtered[topology_df_filtered["TopologyID"].isin(current_topologies)]
# fig = px.histogram(
# wanted_rows,
# x="Window",
# y=[1]*len(wanted_rows),
# color="TopologyID",
# color_discrete_map=color_mapping,
# nbins=int(chromosome_df["End"].max()/window_size)
# )
grouped_topology_df = wanted_rows.groupby(by='TopologyID')
# Build figure
fig = go.Figure()
for topology, data in grouped_topology_df:
fig.add_trace(
go.Scatter(
x=data['Window'],
y=[1]*len(data),
name=topology,
legendgroup=topology,
mode='markers',
marker_symbol='line-ns-open',
marker_size=225,
# marker_line_width=2,
marker_color=[color_mapping[topology]]*len(data),
# showlegend = False
),
)
# Update layout + axes
fig.update_layout(
template=template,
legend_title_text='Topology',
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
),
hovermode="x unified",
font=dict(family=font_family,),
)
fig.update_xaxes(
linewidth=axis_line_width,
rangemode="tozero",
range=dataRange,
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
fixedrange=True,
linewidth=axis_line_width,
# range=[0, 1],
showline=False,
showgrid=yaxis_gridlines,
showticklabels=False,
ticklen=0,
title="",
)
return fig
def build_alt_data_graph(
alt_data_to_graph,
chromosome_df,
color_mapping,
topology_df,
window_size,
template,
dataRange,
y_max,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
# Check input type and graph accordingly
try:
input_type = type(topology_df[alt_data_to_graph].dropna().to_list()[0])
except IndexError:
return no_data_graph(template)
if input_type == str:
alt_data_graph_data = make_alt_data_str_figure(
alt_data_to_graph,
chromosome_df,
color_mapping,
topology_df,
window_size,
template,
dataRange,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
False,
)
else:
alt_data_graph_data = make_alt_data_int_figure(
alt_data_to_graph,
color_mapping,
topology_df,
chromosome_df,
template,
dataRange,
y_max,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
False,
)
return alt_data_graph_data
def build_whole_genome_alt_data_graph(
alt_data_to_graph,
chromosome_df,
color_mapping,
topology_df,
window_size,
template,
y_max,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
# Check input type and graph accordingly
try:
input_type = type(topology_df[alt_data_to_graph].dropna().to_list()[0])
except IndexError:
return no_data_graph(template)
if input_type == str:
alt_data_graph_data = make_alt_data_str_figure(
alt_data_to_graph,
chromosome_df,
color_mapping,
topology_df,
window_size,
template,
None,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
True,
)
else:
alt_data_graph_data = make_alt_data_int_figure(
alt_data_to_graph,
color_mapping,
topology_df,
chromosome_df,
template,
None,
y_max,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
True,
)
return alt_data_graph_data
def build_gff_figure(
data,
dataRange,
template,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
regionStart, regionEnd = dataRange
    # Show gene names if the visible region is 10 Mb or less
    if abs(regionEnd - regionStart) <= 10000000:
show_gene_names = True
else:
show_gene_names = False
# Separate
# group data by feature and gene name
attr_group = data.groupby(by=['feature', 'attribute', 'strand'])
positive_text_pos = "top center"
negative_text_pos = "top center"
features_graphed = list()
fig = go.Figure()
y_idx = 1
curr_feature = dict()
for fg, gene_data in attr_group:
feature, gene, strand = fg
feature_strand = f"{feature} ({strand})"
x_values = sorted(gene_data['start'].to_list() + gene_data['end'].to_list())
# Update y-axis value if new feature
if not curr_feature:
curr_feature[feature_strand] = y_idx
y_idx += 1
elif feature_strand in curr_feature.keys():
pass
else:
curr_feature[feature_strand] = y_idx
y_idx += 1
# Set legend show if feature in list already
if feature_strand in features_graphed:
show_legend = False
else:
show_legend = True
features_graphed.append(feature_strand)
# Set color, y-values, and arrow direction
if strand == '+':
colorValue = 'red'
y_values = [curr_feature[feature_strand]]*len(x_values)
markerSymbol = ['square']*(len(x_values)-1) + ['triangle-right']
text_pos = positive_text_pos
text_val = [gene] + ['']*(len(x_values)-1)
if positive_text_pos == "top center":
positive_text_pos = "bottom center"
elif positive_text_pos == "bottom center":
positive_text_pos = "top center"
else:
colorValue = '#009BFF'
y_values = [curr_feature[feature_strand]]*len(x_values)
markerSymbol = ['triangle-left'] + ['square']*(len(x_values)-1)
text_pos = negative_text_pos
text_val = ['']*(len(x_values)-1) + [gene]
if negative_text_pos == "top center":
negative_text_pos = "bottom center"
elif negative_text_pos == "bottom center":
negative_text_pos = "top center"
if show_gene_names:
fig.add_trace(go.Scatter(
x=x_values,
y=y_values,
name=feature_strand,
legendgroup=feature_strand,
mode='markers+lines+text',
marker_symbol=markerSymbol,
marker_size=8,
marker_color=colorValue,
text=text_val,
textposition=text_pos,
textfont=dict(
size=10,
),
hovertemplate=None,
showlegend=show_legend,
))
else:
fig.add_trace(go.Scatter(
x=x_values,
y=y_values,
name=feature_strand,
legendgroup=feature_strand,
mode='markers+lines',
marker_symbol=markerSymbol,
marker_size=8,
marker_color=colorValue,
# hoverinfo=['all'],
hovertemplate=None,
showlegend=show_legend,
))
fig.update_layout(
hovermode="x unified",
showlegend=True,
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
),
template=template,
title='',
margin=dict(
l=62,
r=50,
b=20,
t=20,
),
height=150*len(features_graphed),
font=dict(family=font_family,),
)
fig.update_xaxes(
range=dataRange,
title='Position',
matches="x",
rangemode="tozero",
linewidth=axis_line_width,
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
range=[0, len(features_graphed)+1],
fixedrange=True,
showticklabels=False,
showgrid=yaxis_gridlines,
title='',
linewidth=axis_line_width,
)
return fig
# ----------------------------------------------------------------------------------------
# ------------------------------- Quantile Graph Functions -------------------------------
def get_quantile_coordinates(
chromLengths,
QUANTILES,
WINDOWSIZE,
):
quantileCoordinates = pd.DataFrame(columns=chromLengths["Chromosome"], index=range(1, QUANTILES+1))
for row in chromLengths.itertuples(index=False):
chrom, _, end = row
chunkSize = end // QUANTILES
for i in range(QUANTILES):
q = i + 1
if q == 1:
quantileCoordinates.at[q, chrom] = [0, chunkSize]
else:
quantileCoordinates.at[q, chrom] = [chunkSize*(q-1) + WINDOWSIZE, chunkSize*q]
return quantileCoordinates
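# Hedged worked example (added): with End=1000, QUANTILES=4 and WINDOWSIZE=50,
# chunkSize is 250, so the quantile coordinates become
#   q=1: [0, 250], q=2: [300, 500], q=3: [550, 750], q=4: [800, 1000]
# i.e. each chromosome is split into QUANTILES equal chunks, with every chunk
# after the first shifted by one window size to avoid overlapping windows.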
def calculateFrequencies(
quantileCoordinates,
input_df,
chromLengths,
QUANTILES,
):
quantileFrequencies = pd.DataFrame(columns=chromLengths["Chromosome"], index=range(1, QUANTILES+1))
topos = input_df["TopologyID"].unique()
for chrom in quantileCoordinates.columns:
for q, quantile in enumerate(quantileCoordinates[chrom], 1):
quantileData = input_df[(input_df['Window'] >= quantile[0]) & (input_df['Window'] <= quantile[1]) & (input_df['Chromosome'] == chrom)]
topoQD = quantileData['TopologyID'].value_counts().to_dict()
# Add missing topologies as count=0
for i in topos:
if i not in topoQD.keys():
topoQD[i] = 0
quantileFrequencies.at[q, chrom] = topoQD
continue
return quantileFrequencies
def plot_frequencies(
quantileFrequencies,
n_quantiles,
template,
color_mapping,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
):
def reorganizeDF(df):
new_df = pd.DataFrame(columns=['Chr', 'Quantile', 'TopologyID', 'Frequency'])
nidx = 0
for c in df.columns:
for idx in df.index:
chromTotal = sum([v for v in df.at[idx, c].values()])
for topo, freq in zip(df.at[idx, c].keys(), df.at[idx, c].values()):
new_df.at[nidx, 'TopologyID'] = topo
new_df.at[nidx, 'Chr'] = c
new_df.at[nidx, 'Quantile'] = idx
try:
new_df.at[nidx, 'Frequency'] = int(freq)/chromTotal
except ZeroDivisionError:
new_df.at[nidx, 'Frequency'] = 0.0
nidx += 1
return new_df
# Organize DataFrame
organizedDF= reorganizeDF(quantileFrequencies)
# Create line graph
fig = px.line(
organizedDF,
x='Quantile',
y='Frequency',
color='TopologyID',
facet_col='Chr',
facet_col_wrap=1,
facet_row_spacing=0.01,
color_discrete_map=color_mapping,
)
fig.update_traces(texttemplate='%{text:.3}', textposition='top center')
if len(organizedDF["Chr"].unique()) == 1:
fig.update_layout(
uniformtext_minsize=12,
template=template,
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
),
height=300,
)
else:
fig.update_layout(
uniformtext_minsize=12,
template=template,
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
),
height=100*len(organizedDF["Chr"].unique()),
)
fig.update_xaxes(
range=[1, n_quantiles],
rangemode="tozero",
linewidth=axis_line_width,
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
range=[0, 1],
fixedrange=True,
showgrid=yaxis_gridlines,
linewidth=axis_line_width,
)
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
return fig
def calculate_topo_quantile_frequencies(df, current_topologies, additional_data, n_quantiles):
final_df = pd.DataFrame(columns=["TopologyID", "Frequency", "Quantile"])
for topology in current_topologies:
topo_df = pd.DataFrame(columns=["TopologyID", "Frequency", "Quantile"])
tidx = 0
df = df.sort_values(by=additional_data)
df = df.assign(Quantile = pd.qcut(df[additional_data].rank(method='first'), q=n_quantiles, labels=False))
df['Quantile'] = df['Quantile'].apply(lambda x: x+1)
df_group = df.groupby(by="Quantile")
for rank, data in df_group:
counts = data["TopologyID"].value_counts()
for t, f in zip(counts.index, counts):
if t == topology:
topo_df.at[tidx, "TopologyID"] = t
topo_df.at[tidx, "Frequency"] = f/len(df)
topo_df.at[tidx, "Quantile"] = rank
tidx += 1
break
else:
continue
# -- Concat dfs --
final_df = pd.concat([final_df, topo_df])
return final_df
def plot_frequencies_topo_quantile(
final_df,
template,
color_mapping,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
graph_title,
additional_data
):
fig = px.line(
final_df,
x="Quantile", y="Frequency",
color="TopologyID",
color_discrete_map=color_mapping,
markers=True,
)
fig.update_layout(
template=template,
title=graph_title,
title_x=0.5,
margin=dict(
t=80
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
# itemsizing='constant'
),
)
fig.update_xaxes(
title=f"{additional_data} Quantiles",
linewidth=axis_line_width,
showgrid=xaxis_gridlines,
tick0=0,
dtick=1,
)
fig.update_yaxes(
rangemode="tozero",
linewidth=axis_line_width,
showgrid=yaxis_gridlines,
title='% Windows Observed',
)
return fig
# ---------------------------------------------------------------------------------
# -------------------------------- Whole Genome Graph Functions -------------------------------
def build_topology_frequency_pie_chart(
df,
template,
color_mapping,
font_family,
):
"""Returns pie graph for whole genome topology frequencies"""
fig = px.pie(
df,
values='Frequency',
names='TopologyID',
color="TopologyID",
color_discrete_map=color_mapping,
template=template,
title='Whole Genome Topology Frequencies',
)
fig.update_traces(textposition='inside')
fig.update_layout(
margin=dict(l=120, r=20, t=40, b=10),
uniformtext_minsize=12,
uniformtext_mode='hide',
legend=dict(itemclick=False, itemdoubleclick=False),
title_x=0.5,
font=dict(family=font_family,),
)
return fig
def build_rf_graph(
df,
ref_topo,
template,
color_mapping,
axis_line_width,
font_family,
):
fig = px.bar(
df, x="TopologyID", y="normRF-Distance",
color="TopologyID", color_discrete_map=color_mapping,
text='normRF-Distance')
fig.update_traces(texttemplate='%{text:.2f}', textposition='inside')
fig.update_layout(
title=f"Normalized RF-Distance from {ref_topo}",
title_x=0.5,
template=template,
font=dict(family=font_family,),
)
fig.update_xaxes(linewidth=axis_line_width)
fig.update_yaxes(linewidth=axis_line_width, range=[0, 1])
return fig
def build_whole_genome_rug_plot(
df,
chrom_df,
chromGroup,
template,
color_mapping,
currTopologies,
topoOrder,
window_size,
axis_line_width,
xaxis_gridlines,
yaxis_gridlines,
wg_squish_expand,
font_family,
):
df = df[(df['TopologyID'].isin(currTopologies)) & (df['Chromosome'].isin(chromGroup))]
grouped_topology_df = df.groupby(by='TopologyID')
num_chroms = len(df['Chromosome'].unique())
chrom_row_dict = {chrom:i for chrom, i in zip(sorted(df['Chromosome'].unique()), range(1, len(df['Chromosome'].unique())+1, 1))}
chrom_shapes = []
row_height = [2]*num_chroms
# --- Build figure ---
    # If the longest chromosome name is longer than 5 characters,
    # use subplot titles instead of row titles
if df.Chromosome.map(len).max() > 5:
fig = make_subplots(
rows=num_chroms,
subplot_titles=chrom_row_dict.keys(),
shared_xaxes=True,
cols=1,
row_heights=row_height,
)
else:
fig = make_subplots(
rows=num_chroms,
row_titles=[c for c in chrom_row_dict.keys()],
shared_xaxes=True,
cols=1,
row_heights=row_height,
)
for topology, data in grouped_topology_df:
add_legend = True
for chrom in chrom_row_dict.keys():
chrom_data = data[data["Chromosome"] == chrom]
chrom_length_data = chrom_df[chrom_df['Chromosome'] == chrom]
chrom_length = chrom_length_data['End'].max()
if len(chrom_data) == 0:
fig.add_trace(
go.Scatter(
x=[0],
y=[topology],
name=topology,
legendgroup=topology,
mode='markers',
marker_symbol='line-ns-open',
marker_color=[color_mapping[topology]]*len(chrom_data),
showlegend = False,
),
row=chrom_row_dict[chrom], col=1,
)
elif add_legend:
fig.add_trace(
go.Scatter(
x=chrom_data['Window'],
y=chrom_data['TopologyID'],
name=topology,
legendgroup=topology,
mode='markers',
# marker_size=int(25/len(grouped_topology_df)),
marker_symbol='line-ns-open',
marker_color=[color_mapping[topology]]*len(chrom_data),
),
# go.Box(
# x=chrom_data['Window'],
# y=chrom_data['TopologyID'],
# boxpoints='all',
# jitter=0,
# legendgroup=topology,
# marker_symbol='line-ns-open',
# marker_color=color_mapping[topology],
# name=topology,
# ),
row=chrom_row_dict[chrom], col=1,
)
chrom_shapes.append(dict(type="line", xref="x", yref="y", x0=chrom_length, x1=chrom_length, y0=-1, y1=len(currTopologies), line_width=2))
add_legend = False
else:
fig.add_trace(
go.Scatter(
x=chrom_data['Window'],
y=chrom_data['TopologyID'],
name=topology,
legendgroup=topology,
mode='markers',
# marker_size=int(25/len(grouped_topology_df)),
marker_symbol='line-ns-open',
marker_color=[color_mapping[topology]]*len(chrom_data),
showlegend = False,
),
# go.Box(
# x=chrom_data['Window'],
# y=chrom_data['TopologyID'],
# boxpoints='all',
# jitter=0,
# marker_symbol='line-ns-open',
# marker_color=color_mapping[topology],
# legendgroup=topology,
# showlegend = False,
# name=topology,
# ),
row=chrom_row_dict[chrom], col=1,
)
chrom_ref = chrom_row_dict[chrom]
chrom_shapes.append(dict(type="rect", xref=f"x{chrom_ref}", yref=f"y{chrom_ref}", x0=chrom_length, x1=chrom_length, y0=-1, y1=len(currTopologies), line_width=2))
# Update layout + axes
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig.update_xaxes(
rangemode="tozero",
range=[0, (chrom_df['End'].max()+(2*window_size))],
fixedrange=True,
linewidth=axis_line_width,
ticklen=0,
matches="x",
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
fixedrange=True,
title="",
showgrid=yaxis_gridlines,
showticklabels=False,
linewidth=axis_line_width,
categoryarray=topoOrder,
)
if wg_squish_expand == 'expand':
if num_chroms < 5:
fig.update_layout(
template=template,
legend_title_text='Topology',
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
height=160*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
font=dict(family=font_family,),
)
else:
fig.update_layout(
template=template,
legend_title_text='Topology',
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
height=100*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
font=dict(family=font_family,),
)
elif wg_squish_expand == 'squish':
if num_chroms < 5:
fig.update_layout(
template=template,
legend_title_text='Topology',
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
height=125*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
font=dict(family=font_family,),
)
else:
fig.update_layout(
template=template,
legend_title_text='Topology',
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
height=50*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
font=dict(family=font_family,),
)
else:
if num_chroms < 5:
fig.update_layout(
template=template,
legend_title_text='Topology',
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
height=105*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
font=dict(family=font_family,),
)
else:
fig.update_layout(
template=template,
legend_title_text='Topology',
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
height=20*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
margin=dict(
t=10,
b=30,
),
font=dict(family=font_family,),
)
# Rotate chromosome names to 0-degrees
for annotation in fig['layout']['annotations']:
annotation['textangle']=0
annotation['align']="center"
return fig
def build_whole_genome_tile_plot(
df,
chrom_df,
template,
color_mapping,
currTopologies,
topoOrder,
window_size,
axis_line_width,
chromGroup,
xaxis_gridlines,
yaxis_gridlines,
wg_squish_expand,
font_family,
):
"""
Max chromosomes per graph if # current_topologies <= 3: 20
Max chromosomes per graph if # current_topologies > 3: 20/2
Returns: List of figures to display
"""
df = df[df['TopologyID'].isin(currTopologies)]
df = df[df['Chromosome'].isin(chromGroup)]
grouped_topology_df = df.groupby(by='TopologyID')
num_chroms = len(df['Chromosome'].unique())
chrom_row_dict = {chrom:i for chrom, i in zip(sorted(df['Chromosome'].unique()), range(1, len(df['Chromosome'].unique())+1, 1))}
chrom_shapes = []
# --- Build figure ---
# If longest chromosome name longer
# than 5 characters, use subplot titles
# instead of row titles
if df.Chromosome.map(len).max() > 5:
fig = make_subplots(
rows=num_chroms,
cols=1,
shared_xaxes=True,
subplot_titles=chrom_row_dict.keys(),
vertical_spacing=0.03,
)
else:
fig = make_subplots(
rows=num_chroms,
cols=1,
shared_xaxes=True,
row_titles=[c for c in chrom_row_dict.keys()],
vertical_spacing=0.001,
)
for topology, data in grouped_topology_df:
add_legend = True
for chrom in chrom_row_dict.keys():
chrom_data = data[data["Chromosome"] == chrom]
chrom_length_data = chrom_df[chrom_df['Chromosome'] == chrom]
chrom_length = chrom_length_data['End'].max()
if add_legend:
fig.add_trace(
go.Histogram(
x=chrom_data['Window'],
y=[1]*len(chrom_data),
nbinsx=int(chrom_length/window_size),
name=topology,
legendgroup=topology,
marker_line_width=0,
marker_color=color_mapping[topology],
),
row=chrom_row_dict[chrom], col=1,
)
chrom_shapes.append(dict(type="line", xref="x", yref="y", x0=chrom_length, x1=chrom_length, y0=0, y1=1, line_width=2))
add_legend = False
else:
fig.add_trace(
go.Histogram(
x=chrom_data['Window'],
y=[1]*len(chrom_data),
nbinsx=int(chrom_length/window_size),
name=topology,
legendgroup=topology,
marker_line_width=0,
marker_color=color_mapping[topology],
showlegend = False
),
row=chrom_row_dict[chrom], col=1,
)
chrom_ref = chrom_row_dict[chrom]
chrom_shapes.append(dict(type="rect", xref=f"x{chrom_ref}", yref=f"y{chrom_ref}", x0=chrom_length, x1=chrom_length, y0=0, y1=1, line_width=2))
# Update layout + axes
if wg_squish_expand == 'expand':
if num_chroms < 5:
fig.update_layout(
barmode="relative",
template=template,
legend_title_text='Topology',
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
hovermode="x unified",
height=130*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
font=dict(family=font_family,),
)
else:
fig.update_layout(
barmode="relative",
template=template,
legend_title_text='Topology',
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
hovermode="x unified",
height=100*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
font=dict(family=font_family,),
)
elif wg_squish_expand == 'squish':
if num_chroms < 5:
fig.update_layout(
barmode="relative",
template=template,
legend_title_text='Topology',
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
hovermode="x unified",
height=80*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
font=dict(family=font_family,),
)
else:
fig.update_layout(
barmode="relative",
template=template,
legend_title_text='Topology',
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
hovermode="x unified",
height=50*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
font=dict(family=font_family,),
)
else:
if num_chroms < 5:
fig.update_layout(
barmode="relative",
template=template,
legend_title_text='Topology',
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
hovermode="x unified",
height=55*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
font=dict(family=font_family,),
)
else:
fig.update_layout(
barmode="relative",
template=template,
legend_title_text='Topology',
margin=dict(
l=60,
r=50,
b=40,
t=40,
),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
itemsizing='constant',
),
hovermode="x unified",
height=20*num_chroms,
shapes=chrom_shapes,
title_x=0.5,
font=dict(family=font_family,),
)
fig.update_xaxes(
linewidth=axis_line_width,
fixedrange=True,
rangemode="tozero",
range=[0, chrom_df['End'].max()],
ticklen=0,
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
# categoryarray=topoOrder,
range=[0, 1],
fixedrange=True,
linewidth=axis_line_width,
showgrid=yaxis_gridlines,
showticklabels=False,
title="",
ticklen=0,
)
# Rotate chromosome names to 0-degrees
for annotation in fig['layout']['annotations']:
annotation['textangle']=0
annotation['align']="center"
return fig
def build_whole_genome_bar_plot(
df,
template,
color_mapping,
currTopologies,
axis_line_width,
chromGroup,
xaxis_gridlines,
yaxis_gridlines,
font_family,
):
# Filter df to chromosomes in group
df = df[df['Chromosome'].isin(chromGroup)]
df = df[df['TopologyID'].isin(currTopologies)]
    number_of_chrom_rows = math.ceil(len(df["Chromosome"].unique()) / 3)
fig = px.bar(
df,
x='TopologyID',
y='Frequency',
facet_col='Chromosome',
facet_col_wrap=3,
facet_row_spacing=0.05,
color='TopologyID',
template=template,
color_discrete_map=color_mapping,
text='Frequency',
height=int(500*number_of_chrom_rows),
)
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig.update_traces(texttemplate='%{text:.2}', textposition='outside')
# Remove y-axis labels
for axis in fig.layout:
if type(fig.layout[axis]) == go.layout.YAxis:
fig.layout[axis].title.text = ''
fig.update_layout(
uniformtext_minsize=12,
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="left",
x=0,
traceorder='normal',
),
margin=dict(l=10, r=10, t=10, b=10),
title="",
annotations = list(fig.layout.annotations) +
[go.layout.Annotation(
x=-0.07,
y=0.5,
font=dict(
size=12,
# color='white',
),
showarrow=False,
text="Frequency",
textangle=-90,
xref="paper",
yref="paper"
)
],
title_x=0.5,
font=dict(family=font_family,),
)
fig.update_xaxes(
title="",
linewidth=axis_line_width,
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
range=[0, 1.1],
matches='y',
linewidth=axis_line_width,
showgrid=yaxis_gridlines,
)
return fig
def build_whole_genome_pie_charts(
df,
template,
color_mapping,
chromGroup,
font_family,
):
# Filter df to chromosomes in group
df = df[df['Chromosome'].isin(chromGroup)]
    number_of_chrom_rows = math.ceil(len(df["Chromosome"].unique()) / 3)
specs = [[{'type':'domain'}, {'type':'domain'}, {'type':'domain'}] for _ in range(number_of_chrom_rows)]
fig = make_subplots(
rows=number_of_chrom_rows,
cols=3,
specs=specs,
vertical_spacing=0.03,
horizontal_spacing=0.001,
subplot_titles=sorted(df["Chromosome"].unique()),
column_widths=[2]*3,
)
col_pos = 1
row_num = 1
for c in sorted(df['Chromosome'].unique()):
chrom_df = df[df["Chromosome"] == c]
fig.add_trace(go.Pie(labels=chrom_df["TopologyID"], values=chrom_df['Frequency'], marker_colors=list(color_mapping.values())), row=row_num, col=col_pos)
if col_pos == 3:
col_pos = 1
row_num += 1
else:
col_pos += 1
fig.update_traces(textposition='inside')
fig.update_layout(
uniformtext_minsize=12,
showlegend=True,
template=template,
height=int(200*number_of_chrom_rows),
font=dict(family=font_family,),
)
return fig
# ---------------------------------------------------------------------------------
# --------------------------- Stats DataFrame Generators --------------------------
def _get_valid_cols(topology_df):
valid_cols = list()
for i in topology_df.columns[4:]:
data = topology_df[i].unique()
flag = None
for j in data:
if type(j) == str:
flag = False
break
else:
flag = True
if flag:
valid_cols.append(i)
else:
continue
return valid_cols
def basic_stats_dfs(topology_df):
"""Generate dataframes of basic statistics
:param topology_df: Current View Tree Viewer input file dataframe
:type topology_df: Object
"""
# Calculate current view topologies
topo_freq_df = pd.DataFrame(topology_df["TopologyID"].value_counts()/len(topology_df))
if len(topo_freq_df) > 25: # If more than 25 topologies loaded, just show top 25
topo_freq_df = topo_freq_df.head(25)
remainder_freq = 1.0 - sum(topo_freq_df['TopologyID'])
topo_freq_df.at["Other", "TopologyID"] = remainder_freq
topo_names = [i for i in topo_freq_df.index]
topo_freqs = [round(i, 4) for i in topo_freq_df["TopologyID"]]
# Calculate median + average of additional data
if len(topology_df.columns) > 4:
valid_cols = _get_valid_cols(topology_df)
additional_dt_names = [i for i in valid_cols]
additional_dt_avg = [topology_df[i].mean() for i in valid_cols]
additional_dt_std = [topology_df[i].std() for i in valid_cols]
topo_freq_df = pd.DataFrame(
{
"TopologyID": topo_names,
"Frequency": topo_freqs,
}
)
additional_data_df = pd.DataFrame(
{
"Additional Data": additional_dt_names,
"Average": additional_dt_avg,
"Std Dev": additional_dt_std,
}
)
return topo_freq_df, additional_data_df
else: # No additional data types present in file
topo_freq_df = pd.DataFrame(
{
"TopologyID": topo_names,
"Frequency": topo_freqs,
}
)
return topo_freq_df, pd.DataFrame()
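# Illustrative use of basic_stats_dfs (variable names are assumptions):
#   topo_freq_df, additional_data_df = basic_stats_dfs(topology_df)
#   # topo_freq_df       -> columns ["TopologyID", "Frequency"]; when more than 25
#   #                       topologies are present, only the top 25 plus an "Other"
#   #                       bucket are reported
#   # additional_data_df -> columns ["Additional Data", "Average", "Std Dev"], or an
#   #                       empty dataframe when only the four required columns exist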
def current_view_topo_freq_chart(basic_stats_topo_freqs, template, color_mapping):
"""Return pie chart figure object for local topology frequencies
:param basic_stats_topo_freqs: Dataframe of topology frequencies
:type basic_stats_topo_freqs: DataFrame
:return: Plotly express pie chart
:rtype: Figure object
"""
if "Other" in basic_stats_topo_freqs["TopologyID"].to_list():
fig = px.bar(
basic_stats_topo_freqs,
x='TopologyID',
y="Frequency",
color="TopologyID",
color_discrete_map=color_mapping,
text="Frequency",
)
fig.update_layout(
template=template,
uniformtext_minsize=12,
uniformtext_mode='hide',
)
fig.update_traces(textposition='outside')
return fig
else:
fig = px.pie(
basic_stats_topo_freqs,
values="Frequency",
names="TopologyID",
color="TopologyID",
color_discrete_map=color_mapping,
template=template,
title="Current View Topology Frequencies",
)
fig.update_layout(
legend=dict(itemclick=False, itemdoubleclick=False),
margin=dict(l=120, r=20, t=40, b=10),
uniformtext_minsize=12,
uniformtext_mode='hide',
title_x=0.5,
)
fig.update_traces(textposition='inside')
return fig
def whole_genome_datatable(tv_df):
    valid_cols = _get_valid_cols(tv_df)
for i in tv_df.columns.to_list()[4:]:
if i in valid_cols:
continue
else:
tv_df.drop(labels=i, axis=1, inplace=True)
df_group = tv_df.groupby(by="TopologyID")
out_df = pd.DataFrame(columns=["TopologyID", "Additional Data", "Num. Windows", "Average", "Std Dev"])
idx = 0
for topology, data in df_group:
additional_datatypes = [i for i in data.columns[4:]]
for datatype in additional_datatypes:
dt_data = data[datatype]
mean = dt_data.mean()
stdev = dt_data.std()
out_df.at[idx, "TopologyID"] = topology
out_df.at[idx, "Additional Data"] = datatype
out_df.at[idx, "Num. Windows"] = len(dt_data)
out_df.at[idx, "Average"] = mean
out_df.at[idx, "Std Dev"] = stdev
idx += 1
continue
columns = [{'id': c, 'name': ["Per-Topology Whole Genome Comparison", c], 'type': 'numeric', 'format': Format(precision=4, scheme=Scheme.decimal)} for c in out_df.columns]
data = out_df.to_dict('records')
return data, columns
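# The (data, columns) pair returned above is shaped for a Dash DataTable. A minimal,
# assumed wiring (the dash_table import is not shown in this module) could look like:
#   data, columns = whole_genome_datatable(tv_df)
#   table = dash_table.DataTable(data=data, columns=columns)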
# --- post-hoc tests ---
def mann_whitney_posthoc(tv_df, additional_data_type, pval_adjustment):
return sp.posthoc_mannwhitney(tv_df, val_col=additional_data_type, group_col='TopologyID', p_adjust=pval_adjustment)
def dunns_test_posthoc(tv_df, additional_data_type, pval_adjustment):
return sp.posthoc_dunn(tv_df, val_col=additional_data_type, group_col='TopologyID', p_adjust=pval_adjustment)
def tukeyHSD_posthoc(tv_df, additional_data_type, pval_adjustment, alpha):
return sp.posthoc_tukey_hsd(tv_df[additional_data_type], tv_df["TopologyID"], alpha=alpha)
# --- Significance tests ---
def kruskal_wallis_H_test(tv_df, additional_data_type, posthoc_type, pval_adjustment, alpha):
"""Return dataframe with Kruskal-Wallis H test information for each topology
"""
d = [tv_df.loc[ids, additional_data_type].values for ids in tv_df.groupby('TopologyID').groups.values()]
H, p = ss.kruskal(*d, nan_policy='omit')
if posthoc_type == "Mann-Whitney rank test":
posthoc = mann_whitney_posthoc(tv_df, additional_data_type, pval_adjustment)
posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
idx = 0
for c1 in posthoc.columns:
for c2, pval in zip(posthoc.index, posthoc[c1]):
if c1 == c2: # Remove self-self comparisons
continue
posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
posthoc_df.at[idx, "p-value"] = float(pval)
idx += 1
data = posthoc_df.to_dict('records')
columns = [
{'id': posthoc_type, 'name': posthoc_type},
{'id': 'p-value', 'name': 'p-value', 'type': 'numeric', 'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
]
elif posthoc_type == "Dunn's test":
posthoc = dunns_test_posthoc(tv_df, additional_data_type, pval_adjustment)
posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
idx = 0
for c1 in posthoc.columns:
for c2, pval in zip(posthoc.index, posthoc[c1]):
if c1 == c2: # Remove self-self comparisons
continue
posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
posthoc_df.at[idx, "p-value"] = float(pval)
idx += 1
data = posthoc_df.to_dict('records')
columns = [
{'id': posthoc_type, 'name': posthoc_type},
{'id': 'p-value', 'name': 'p-value', 'type': 'numeric', 'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
]
elif posthoc_type == "TukeyHSD":
posthoc = tukeyHSD_posthoc(tv_df, additional_data_type, pval_adjustment, alpha)
posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
idx = 0
for c1 in posthoc.columns:
for c2, pval in zip(posthoc.index, posthoc[c1]):
if c1 == c2: # Remove self-self comparisons
continue
posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
posthoc_df.at[idx, "p-value"] = float(pval)
idx += 1
data = posthoc_df.to_dict('records')
columns = [
{'id': posthoc_type, 'name': posthoc_type},
{'id': 'p-value', 'name': 'p-value', 'type': 'numeric', 'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
]
else:
pass
return posthoc, data, columns, H, p
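# Hedged example call (the column name, post-hoc choice, and adjustment are assumptions):
#   posthoc, data, columns, H, p = kruskal_wallis_H_test(
#       tv_df, additional_data_type="GC_content",
#       posthoc_type="Dunn's test", pval_adjustment="bonferroni", alpha=0.05)
#   # `posthoc` feeds stats_test_heatmap(); `data`/`columns` feed a Dash DataTable.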
def one_way_anova(tv_df, additional_data_type, posthoc_type, pval_adjustment, alpha):
d = [tv_df.loc[ids, additional_data_type].values for ids in tv_df.groupby('TopologyID').groups.values()]
F, p = ss.f_oneway(*d)
if posthoc_type == "Mann-Whitney rank test":
posthoc = mann_whitney_posthoc(tv_df, additional_data_type, pval_adjustment)
posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
idx = 0
for c1 in posthoc.columns:
for c2, pval in zip(posthoc.index, posthoc[c1]):
posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
posthoc_df.at[idx, "p-value"] = float(pval)
idx += 1
data = posthoc_df.to_dict('records')
columns = [
{'id': posthoc_type, 'name': posthoc_type},
{'id': 'p-value', 'name': 'p-value', 'type': 'numeric', 'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
]
elif posthoc_type == "Dunn's test":
posthoc = dunns_test_posthoc(tv_df, additional_data_type, pval_adjustment)
posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
idx = 0
for c1 in posthoc.columns:
for c2, pval in zip(posthoc.index, posthoc[c1]):
posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
posthoc_df.at[idx, "p-value"] = float(pval)
idx += 1
data = posthoc_df.to_dict('records')
columns = [
{'id': posthoc_type, 'name': posthoc_type},
{'id': 'p-value', 'name': 'p-value', 'type': 'numeric', 'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
]
elif posthoc_type == "TukeyHSD":
posthoc = tukeyHSD_posthoc(tv_df, additional_data_type, pval_adjustment, alpha)
posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
idx = 0
for c1 in posthoc.columns:
for c2, pval in zip(posthoc.index, posthoc[c1]):
posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
posthoc_df.at[idx, "p-value"] = float(pval)
idx += 1
data = posthoc_df.to_dict('records')
columns = [
{'id': posthoc_type, 'name': posthoc_type},
{'id': 'p-value', 'name': 'p-value', 'type': 'numeric', 'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
]
else:
pass
return posthoc, data, columns, F, p
def stats_test_heatmap(posthoc, template):
fig = go.Figure(data=go.Heatmap(
z=posthoc.values,
x=posthoc.columns,
y=posthoc.index,
zmin=0,
zmax=1,
colorscale='Viridis',
colorbar=dict(title='p-value'),
hovertemplate = 'p-value: %{z}<extra></extra>',
))
fig.update_layout(
template=template,
coloraxis_colorbar=dict(title="log(p-value)"),
margin=dict(
t=60,
),
)
return fig
def frequency_distribution(data, name, template):
"""Return frequency density distribution"""
fig = px.histogram(data, x=name, histnorm='density')
fig.update_layout(template=template, margin=dict(t=20, pad=30))
return fig
def mean_frequency_of_alt_data_per_topology(tv_df, topologies, additional_data_type):
out_df = pd.DataFrame(columns=["TopologyID", "Total Windows", f"Mean ({additional_data_type})"])
idx = 1
for i in topologies:
topo_df = tv_df[tv_df["TopologyID"] == i]
additional_data_mean = topo_df[f"{additional_data_type}"].mean()
out_df.at[idx, "TopologyID"] = i
out_df.at[idx, "Total Windows"] = len(topo_df)
out_df.at[idx, f"Mean ({additional_data_type})"] = additional_data_mean
idx += 1
continue
return out_df.to_dict('records')
# ---------------------------------------------------------------------------------
# ------------------------- Graph Customization Functions -------------------------
def set_topology_colors(data, color):
df = pd.read_json(data)
# Set colors to current_topologies
sorted_topologies = df.assign(freq=df.groupby('TopologyID')['TopologyID'].transform('count')).sort_values(by=['freq','TopologyID'],ascending=[False,True]).loc[:,['TopologyID']]
unique_topos = sorted_topologies["TopologyID"].unique()
color_list = (color * ((len(unique_topos) // len(color))))+ color[:len(unique_topos) % len(color)]
output_dict = dict()
for s, c in zip(unique_topos, color_list):
output_dict[s] = c
return output_dict
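# Sketch of the color cycling (the palette below is illustrative, not a project constant):
#   palette = ["#1f77b4", "#ff7f0e", "#2ca02c"]
#   color_map = set_topology_colors(data_json, palette)
#   # Topologies are ordered by frequency (most common first); with five topologies the
#   # palette wraps, so the 4th and 5th topologies reuse "#1f77b4" and "#ff7f0e".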
def get_RFxpos(hoverdata, df):
hoverdata = hoverdata['points'][0]
if ('customdata' in hoverdata.keys()) or ('marker.color' in hoverdata.keys()):
return int(hoverdata['x'])
else:
return df.loc[hoverdata['binNumber']]['Window']
def get_Treexpos(hoverdata, df):
hoverdata = hoverdata['points'][0]
if ('customdata' in hoverdata.keys()) or ('marker.color' in hoverdata.keys()):
return int(hoverdata['x'])
else:
return int(hoverdata['x'])
# ---------------------------------------------------------------------------------
# ------------------------- Init + Empty Graph Functions --------------------------
def no_data_graph(template):
"""This function returns a blank figure with a "NO DATA" watermark"""
fig = go.Figure()
fig.update_layout(
template=template,
title='',
annotations=[
dict(
name="draft watermark",
text="NO DATA",
textangle=0,
opacity=0.5,
font=dict(color="white", size=50),
xref="paper",
yref="paper",
x=0.5,
y=0.5,
showarrow=False,
)
],
)
fig.update_xaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
fig.update_yaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
return fig
def init_data_graph(template):
"""
This function returns a blank figure with a "NO DATA LOADED" watermark.
"""
fig = go.Figure()
fig.update_layout(
template=template,
annotations=[
dict(
name="draft watermark",
text="NO DATA LOADED",
textangle=0,
opacity=0.9,
font=dict(color="white", size=50),
xref="paper",
yref="paper",
x=0.5,
y=0.5,
showarrow=False,
)
],
)
fig.update_xaxes(range=[0.2, 1], showgrid=False, visible=False, zeroline=False)
fig.update_yaxes(range=[0.2, 1], showgrid=False, visible=False, zeroline=False)
return fig
def init_stats_graph(template):
"""
This function returns a blank figure with a "NO DATA" watermark.
"""
fig = go.Figure()
fig.update_layout(
template=template,
annotations=[
dict(
name="draft watermark",
text="NO DATA",
textangle=0,
opacity=0.9,
font=dict(color="white", size=35),
xref="paper",
yref="paper",
x=0.5,
y=0.5,
showarrow=False,
)
],
)
fig.update_xaxes(range=[0.2, 1], showgrid=False, visible=False, zeroline=False)
fig.update_yaxes(range=[0.2, 1], showgrid=False, visible=False, zeroline=False)
return fig
def loading_data_graph(template):
"""
This function returns a blank figure with a "NO DATA" watermark.
"""
fig = go.Figure()
fig.update_layout(
template=template,
annotations=[
dict(
name="draft watermark",
text="GATHERING DATA...",
textangle=0,
opacity=0.9,
font=dict(color="white", size=100),
xref="paper",
yref="paper",
x=0.5,
y=0.5,
showarrow=False,
)
],
)
fig.update_xaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
fig.update_yaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
return fig
def init_RF_graph(template):
"""
This function returns a blank figure with a "NO DATA" watermark.
"""
fig = go.Figure()
fig.update_layout(
template=template,
annotations=[
dict(
name="draft watermark",
text="Hover Over Data to Activate",
textangle=0,
opacity=0.9,
font=dict(color="white", size=100),
xref="paper",
yref="paper",
x=0.5,
y=0.5,
showarrow=False,
)
],
)
fig.update_xaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
fig.update_yaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
return fig
def no_tree_data(template, msg):
"""
This function returns a blank figure with a "NO DATA" watermark.
"""
fig = go.Figure()
fig.update_layout(
template=template,
annotations=[
dict(
name="draft watermark",
text=msg,
textangle=0,
opacity=0.9,
font=dict(size=25),
xref="paper",
yref="paper",
x=0.5,
y=0.5,
showarrow=False,
)
],
)
fig.update_xaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
fig.update_yaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
return fig
def zoom_in_gff(template):
"""
This function returns a blank figure with a "NO DATA" watermark.
"""
fig = go.Figure()
fig.update_layout(
height=300,
template=template,
annotations=[
dict(
name="draft watermark",
text="Zoom in to minimum 5Mb to view",
textangle=0,
opacity=0.9,
font=dict(color="white", size=25),
xref="paper",
yref="paper",
x=0.5,
y=0.5,
showarrow=False,
)
],
)
fig.update_xaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
fig.update_yaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
return fig
# ---------------------------------------------------------------------------------
# --------------------------- Input File Verification -----------------------------
def validate_chrom_lengths(chromDF, tvDF):
    """Ensure all chromosomes in tvDF are present in chromDF.
    The chromosome length file may contain more chromosomes than the TV file,
    but not the other way around.
    Returns (msg, valid): msg is an error message when validation fails, otherwise
    None; valid is True when every Tree Viewer chromosome is found."""
chrom_names = chromDF['Chromosome'].unique()
tv_chrom_names = tvDF['Chromosome'].unique()
missing_chromosomes = []
valid = True
issue_files = []
# Check chromosome length file against TV file
# for c in chrom_names:
# if c not in tv_chrom_names:
# missing_chromosomes.append(c)
# valid = False
# issue_files.append("Chromosome Length File")
# continue
# else:
# continue
# Check TV file against chromosome length file
for c in tv_chrom_names:
if c not in chrom_names:
missing_chromosomes.append(c)
valid = False
issue_files.append("Tree Viewer File")
continue
else:
continue
try:
if not valid:
missing_chroms = ", ".join(missing_chromosomes)
if len(issue_files) > 1:
missing_files = " & ".join(list(set(issue_files)))
else:
missing_files = issue_files[0]
            msg = f"ERROR: Chromosome(s) {missing_chroms} missing from {missing_files}; please ensure chromosome names are consistent between files"
return msg, False
else:
return None, True
except UnboundLocalError:
return None, True
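# Minimal usage sketch (dataframes are illustrative):
#   chrom_df = pd.DataFrame({"Chromosome": ["chr1", "chr2"]})
#   tv_df = pd.DataFrame({"Chromosome": ["chr1", "chr2", "chr3"]})
#   msg, ok = validate_chrom_lengths(chrom_df, tv_df)
#   # ok is False because "chr3" appears in the TV file but not in the
#   # chromosome length file, and msg lists the offending chromosome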
def get_taxa_from_tree(tree):
"""Collect leaf names from tree"""
if tree == "NoTree":
return "NoTree"
tree = Tree(tree)
taxa = []
for leaf in tree.iter_leaves():
taxa.append(leaf.name)
return sorted(taxa)
def get_valid_init_tree(trees):
"""Returns first NewickTree entry that is not NoTree"""
for i in range(len(trees)):
if trees[i] == "NoTree":
continue
else:
return trees[i]
def validate_gff_gtf_filename(f):
"""Ensure file extension is gff or gtf"""
if "gtf" in f.lower():
return True
elif "gff" in f.lower():
return True
else:
return False
def get_y_max_list(alt_dropdown_options, topology_df):
    """Generate list of max y-values for additional data"""
y_maxes = []
for i in alt_dropdown_options:
try:
data_type = type(topology_df[i][0])
except KeyError:
data_type = str
if data_type == str:
y_maxes.append(1)
else:
y_maxes.append(topology_df[i].max())
return y_maxes
def validate_tree_viewer_input(df):
"""Return False when required headers are not present/correct"""
def fix_column_names(columns):
""" Fix column names """
if columns[:4] == ["Chromosome", "Window", "NewickTree", "TopologyID"]:
return columns
else:
return ["Chromosome", "Window", "NewickTree", "TopologyID"] + columns[4:]
def check_newick(df):
"""Check if string contains basic newick characters"""
if "(" not in df["NewickTree"][0]:
return False
elif ")" not in df["NewickTree"][0]:
return False
elif ";" not in df["NewickTree"][0]:
return False
else:
return True
def check_window(df):
"""Return False if row type is not int"""
if type(df["Window"][0]) == np.int32:
return True
elif type(df["Window"][0]) == np.int64:
return True
else:
return False
# Fix required headers if needed
cols = fix_column_names(list(df.columns))
df.columns = cols
    # Check required column types
newick_check = check_newick(df)
window_check = check_window(df)
if not newick_check:
return False
elif not window_check:
return False
else:
return df
def tv_header_validation(df):
"""Return False if first four required column headers are not valid"""
required_cols = list(df.columns[:4])
try:
assert required_cols == ["Chromosome", "Window", "NewickTree", "TopologyID"]
return True
except AssertionError:
return False
# ---------------------------------------------------------------------------------
# --------------------------- Tree Prune Export Tools -----------------------------
def prune_tree(x, prune_taxa_choices):
if x == "NoTree":
return "NoTree"
else:
tree = Tree(x)
try:
tree.prune(prune_taxa_choices, preserve_branch_length=True)
except ValueError:
            # A ValueError means one or more taxa in the dropdown selection
            # are not present in this particular tree. Filter the selection
            # down to the taxa the tree actually contains and prune again.
tree_taxa = tree.get_leaf_names()
trimmed_taxa_list = [t for t in prune_taxa_choices if t in tree_taxa]
tree.prune(trimmed_taxa_list, preserve_branch_length=True)
return tree.write()
def remove_heterotachy_info(l):
"""Remove any information in brackets - ete3
does not support this format of newick"""
    # --- If the tree is a NaN value, return "NoTree" ---
if type(l) == float:
return "NoTree"
if ("[" not in l) and ("]" not in l):
return l
open_brackets = [i for i, x in enumerate(l) if x == "["]
close_brackets = [i for i, x in enumerate(l) if x == "]"]
final_string = f'{l[:open_brackets[0]]}'
for ob, cb in zip(open_brackets[1:], close_brackets[:-1]):
final_string += l[cb+1:ob]
final_string += l[close_brackets[-1]+1:]
return final_string
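# Example of the bracket stripping (illustrative newick string):
#   remove_heterotachy_info("((A:1[0.3],B:1[0.7]):1,C:2);")
#   # -> "((A:1,B:1):1,C:2);"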
def tv_topobinner(df):
"""Bin tree topologies that have RF-distance of 0"""
trees = df['NewickTree']
topologies = dict()
topoCount = 1
for n, t in enumerate(trees):
if t == "NoTree":
continue
elif len(topologies.keys()) == 0:
topologies[n] = {'count': 1, 'idx': [n]}
continue
else:
# Iterate through topology list
# add new topology if no rf == 0
# increase count if rf == 0 with topology
new_topology = True
for idx in topologies.keys():
t1 = Tree(remove_heterotachy_info(t))
t2 = Tree(remove_heterotachy_info(df.at[idx, 'NewickTree']))
comparison = t1.compare(t2)
rf = comparison['rf']
if rf == 0:
topologies[idx]['count'] += 1
topologies[idx]['idx'].append(n)
new_topology = False
break
else:
continue
if new_topology:
topologies[n] = {'count': 1, 'idx': [n]}
continue
else:
continue
# Sort topologies dictionary by 'count'
topologies = {k: v for k, v in sorted(topologies.items(), key=lambda item: item[1]['count'], reverse=True)}
# Update DataFrame TopologyID column with results
for topology in topologies.keys():
idx = topologies[topology]['idx']
topoName = f'topology{topoCount}'
for i in idx:
df.at[i, 'TopologyID'] = topoName
continue
topoCount += 1
return df
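# Hedged usage sketch: tv_topobinner takes the Tree Viewer dataframe and rewrites its
# TopologyID column so trees with an RF distance of 0 share a label, ordered by abundance:
#   df = tv_topobinner(df)
#   df["TopologyID"].value_counts()   # topology1 is the most frequent bin, then topology2, ...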
def mygrouper(n, iterable):
args = [iter(iterable)] * n
    return ([e for e in t if e is not None] for t in itertools.zip_longest(*args))
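# mygrouper yields the iterable in groups of n, dropping the None padding added by
# itertools.zip_longest in the final group, e.g.:
#   list(mygrouper(3, ["chr1", "chr2", "chr3", "chr4"]))
#   # -> [["chr1", "chr2", "chr3"], ["chr4"]]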
def make_topo_freq_table(df_grouped):
dataTableDF = pd.DataFrame(columns=["Chromosome", "TopologyID", 'Frequency'], index=range(len(df_grouped)))
idx = 0
for chrom, data in df_grouped:
chromFreqs = data["TopologyID"].value_counts()/len(data)
freqTopoOrder = [i for i in chromFreqs.index]
freqs = [f for f in chromFreqs]
for t, f in zip(freqTopoOrder, freqs):
dataTableDF.at[idx, 'Chromosome'] = chrom
dataTableDF.at[idx, 'TopologyID'] = t
dataTableDF.at[idx, 'Frequency'] = round(f, 3)
idx += 1
continue
return dataTableDF
def get_gridline_bools(axis_gridlines):
"""If gridlines ON, return True else False"""
if 'xaxis' in axis_gridlines:
xaxis_gridlines = True
else:
xaxis_gridlines = False
if 'yaxis' in axis_gridlines:
yaxis_gridlines = True
else:
yaxis_gridlines = False
return xaxis_gridlines, yaxis_gridlines
# ---------------------------------------------------------------------------------
# ----------------------------- Template Generaters -------------------------------
def project_ini_template():
content = """[MAIN]\nProjectDir = /path/to/Project\nTreeViewerFile = /path/to/TreeViewerInput.xlsx\nChromLengths = /path/to/ChromosomeLengths.bed\n\n[ADDITIONAL]\n# Load multiple gff/gtf files by listing them with ";" separating the files\nGFF_GTF = None"""
return content
def tree_viewer_template():
content = pd.DataFrame(columns=["Chromosome", "Window", "NewickTree", "TopologyID"])
return content
def chrom_len_template():
content = pd.DataFrame({"Chromosome": ["chr1", "chr2", "chr3"], "Start": [0, 0, 0], "Stop": [1000000, 1500000, 2000000]})
return content
# ---------------------------------------------------------------------------------
# ------------------------------- Misc. Functions ---------------------------------
def divide_input_into_cpu_size_chunks(l, n):
"""Divides chromosomes into sets of size n, where n
is the number of cores available to use"""
for i in range(0, len(l), n):
yield l[i:i + n]
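# Example (illustrative values):
#   list(divide_input_into_cpu_size_chunks(["chr1", "chr2", "chr3", "chr4", "chr5"], 2))
#   # -> [["chr1", "chr2"], ["chr3", "chr4"], ["chr5"]]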
def filter_numeric_dtypes(df):
filtered_names = []
for name, data_type in zip(df.dtypes.index[4:], df.dtypes[4:]):
if str(data_type) == 'object':
continue
else:
filtered_names.append(name)
return filtered_names
| python |
import logging
import os
import subprocess
from datetime import datetime, timezone, timedelta
from pathlib import Path
import django_rq
import novaclient
import vm_manager
from vm_manager.constants import INSTANCE_DELETION_RETRY_WAIT_TIME, \
INSTANCE_DELETION_RETRY_COUNT, \
INSTANCE_CHECK_SHUTOFF_RETRY_WAIT_TIME, \
INSTANCE_CHECK_SHUTOFF_RETRY_COUNT, LINUX
from vm_manager.models import VMStatus
from vm_manager.utils.utils import get_nectar, generate_hostname_url
from guacamole.models import GuacamoleConnection
logger = logging.getLogger(__name__)
def delete_vm_worker(instance):
logger.info(f"About to delete vm at addr: {instance.get_ip_addr()} "
f"for user {instance.user.username}")
if instance.guac_connection:
GuacamoleConnection.objects.filter(instance=instance).delete()
instance.guac_connection = None
instance.save()
n = get_nectar()
try:
n.nova.servers.stop(instance.id)
except novaclient.exceptions.NotFound:
logger.error(f"Trying to delete an instance that's missing "
f"from OpenStack {instance}")
# Check if the Instance is Shutoff before requesting OS to Delete it
    logger.info(f"Checking whether {instance} is SHUTOFF after "
                f"{INSTANCE_CHECK_SHUTOFF_RETRY_WAIT_TIME} seconds "
                f"and deleting it if so")
scheduler = django_rq.get_scheduler('default')
scheduler.enqueue_in(
timedelta(seconds=INSTANCE_CHECK_SHUTOFF_RETRY_WAIT_TIME),
_check_instance_is_shutoff_and_delete, instance,
INSTANCE_CHECK_SHUTOFF_RETRY_COUNT,
_delete_volume_once_instance_is_deleted,
(instance, INSTANCE_DELETION_RETRY_COUNT))
def _check_instance_is_shutoff_and_delete(
instance, retries, func, func_args):
scheduler = django_rq.get_scheduler('default')
if not instance.check_shutdown_status() and retries > 0:
# If the instance is not Shutoff, schedule the recheck
logger.info(f"{instance} is not shutoff yet! Will check again in "
f"{INSTANCE_CHECK_SHUTOFF_RETRY_WAIT_TIME} seconds")
scheduler.enqueue_in(
timedelta(seconds=INSTANCE_CHECK_SHUTOFF_RETRY_WAIT_TIME),
_check_instance_is_shutoff_and_delete, instance,
retries - 1, func, func_args)
return
if retries <= 0:
# TODO - not sure we should delete the instance anyway ...
        logger.info(f"Ran out of retries. {instance} shutoff took too long. "
                    f"Proceeding to delete OpenStack instance anyway!")
# Delete the instance
vm_status = VMStatus.objects.get_vm_status_by_instance(
instance, instance.boot_volume.requesting_feature)
vm_status.status_progress = 66
# Hack: since this won't be displayed when we are deleting a
# desktop, use the progress message for the shelving case.
vm_status.status_message = 'Instance shelving'
vm_status.save()
_delete_instance_worker(instance)
# The 'func' will do the next step; e.g. delete the volume
# or mark the volume as shelved.
scheduler.enqueue_in(
timedelta(seconds=INSTANCE_DELETION_RETRY_WAIT_TIME),
func, *func_args)
def _delete_instance_worker(instance):
n = get_nectar()
try:
n.nova.servers.delete(instance.id)
logger.info(f"Instructed OpenStack to delete {instance}")
except novaclient.exceptions.NotFound:
logger.info(f"Instance {instance} already deleted")
except Exception as e:
logger.error(f"something went wrong with the instance deletion "
f"call for {instance}, it raised {e}")
def _delete_volume_once_instance_is_deleted(instance, retries):
n = get_nectar()
try:
my_instance = n.nova.servers.get(instance.id)
logger.debug(f"Instance delete status is retries: {retries} "
f"openstack instance: {my_instance}")
except novaclient.exceptions.NotFound:
logger.info(f"Instance {instance.id} successfully deleted, "
f"we can delete the volume now!")
instance.deleted = datetime.now(timezone.utc)
instance.save()
_delete_volume(instance.boot_volume)
return
except Exception as e:
logger.error(f"something went wrong with the instance get "
f"call for {instance}, it raised {e}")
return
# Openstack still has the instance, and was able to return it to us
if retries == 0:
_delete_instance_worker(instance)
scheduler = django_rq.get_scheduler('default')
# Note in this case I'm using `minutes=` not `seconds=` to give
# a long wait time that should be sufficient
scheduler.enqueue_in(
timedelta(minutes=INSTANCE_DELETION_RETRY_WAIT_TIME),
_delete_volume_once_instance_is_deleted, instance,
retries - 1)
return
if retries <= 0:
        error_message = "ran out of retries trying to delete"
instance.error(error_message)
instance.boot_volume.error(error_message)
logger.error(f"{error_message} {instance}")
return
_delete_instance_worker(instance)
scheduler = django_rq.get_scheduler('default')
scheduler.enqueue_in(
timedelta(seconds=INSTANCE_DELETION_RETRY_WAIT_TIME),
_delete_volume_once_instance_is_deleted, instance, retries - 1)
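# Retry semantics of the function above: while the retry count is positive, the delete is
# re-issued and re-checked after INSTANCE_DELETION_RETRY_WAIT_TIME seconds; at exactly
# zero, one final attempt is scheduled with a much longer (minutes) wait; once the count
# goes negative, the instance and its boot volume are marked as errored.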
def _delete_volume(volume):
n = get_nectar()
delete_result = str(n.cinder.volumes.delete(volume.id))
volume.deleted = datetime.now(timezone.utc)
volume.save()
logger.debug(f"Delete result is {delete_result}")
return
| python |