from unittest import TestCase

from src.chunker import Chunker


class TestChunker(TestCase):
    def test_chunker_yields_list_with_buffered_size(self):
        chunks = Chunker(range(5), 3)
        chunk = next(chunks)
        self.assertEqual(len(chunk), 3)
        self.assertListEqual(chunk, [0, 1, 2])
        next_chunk = next(chunks)
        self.assertListEqual(next_chunk, [3, 4])

    def test_setting_neg1_on_buffer_yields_entire_list(self):
        chunks = Chunker(range(5), -1)
        chunk = next(chunks)
        self.assertListEqual(chunk, [0, 1, 2, 3, 4])
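
# The tests above import src.chunker.Chunker, which is not included in this
# file. A minimal generator sketch consistent with the assertions (an
# illustration, not the original implementation): a buffer size of -1 yields
# the entire input as a single list.
def chunker_sketch(iterable, size):
    items = list(iterable)
    if size == -1:
        yield items
        return
    for start in range(0, len(items), size):
        yield items[start:start + size]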
|
python
|
#!/usr/bin/env python3
import os
import sys
from setuptools import setup, find_packages
VERSION = os.environ.get('GITHUB_REF', '0.0.4').replace('refs/tags/v', '')
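# e.g. GITHUB_REF='refs/tags/v1.2.3' yields VERSION '1.2.3'; when the variable is unset, the '0.0.4' fallback is used as-is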
is_wheel = 'bdist_wheel' in sys.argv
_license = ""
if os.path.exists('LICENSE'):
with open('LICENSE') as lf:
_license = lf.readline().rstrip()
description = ""
if os.path.exists('README.md'):
with open('README.md') as df:
description = df.read()
requirements = []
if os.path.exists('requirements.txt'):
with open('requirements.txt') as rf:
        requirements = rf.read().splitlines()  # splitlines() drops the trailing newlines that readlines() would keep
setup_info = dict(
name='qab_core',
version=VERSION,
packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
py_modules=['qab_core'],
license=_license,
description="QAB framework, high performance, secure, easy to learn, fast to code, ready for production",
long_description=description,
long_description_content_type="text/markdown",
url="https://github.com/MaJyxSoftware/qab_core",
author="Benjamin Schwald",
author_email="[email protected]",
python_requires='>=3.7',
classifiers=[
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Development Status :: 3 - Alpha',
'Intended Audience :: System Administrators',
'Intended Audience :: Developers',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
'Topic :: Software Development :: Libraries',
'Topic :: Utilities'
],
)
if is_wheel:
setup_info['install_requires'] = requirements
setup(**setup_info)
|
python
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torchvision import transforms
import numpy as np
import cv2
from models.single_track import SiamRPNPP as base_model
from dataset.util import generate_anchor
class SiamRPNPP(nn.Module):
def __init__(self, tracker_name = ''):
super(SiamRPNPP, self).__init__()
self.cfg = {'lr': 0.45, 'window_influence': 0.44, 'penalty_k': 0.04, 'instance_size': 255, 'adaptive': False} # 0.355
self.tracker_name = tracker_name
self.model = base_model()
def temple(self, z):
zf = self.model.features(z)
zf = self.model.neck(zf)
self.zf = zf
def forward(self, x):
xf = self.model.features(x)
xf = self.model.neck(xf)
cls, loc = self.model.head(self.zf, xf)
return loc, cls
class TrackerConfig(object):
# These are the default hyper-params for DaSiamRPN 0.3827
windowing = 'cosine' # to penalize large displacements [cosine/uniform]
# Params from the network architecture, have to be consistent with the training
exemplar_size = 127 # input z size
instance_size = 255 # input x size (search region)
total_stride = 8
# score_size = (instance_size-exemplar_size)/total_stride+1 # for siamrpn
score_size = 25 # for siamrpn++
# print(score_size)
context_amount = 0.5 # context amount for the exemplar
ratios = [0.33, 0.5, 1, 2, 3]
scales = [8, ]
anchor_num = len(ratios) * len(scales)
anchor = []
penalty_k = 0.055
window_influence = 0.42
lr = 0.295
def update(self, cfg):
for k, v in cfg.items():
setattr(self, k, v)
# self.score_size = (self.instance_size - self.exemplar_size) / self.total_stride + 1 # for siamrpn
def tracker_eval(net, x_crop, target_pos, target_sz, window, scale_z, p):
delta, score = net(x_crop)
delta = delta.permute(1, 2, 3, 0).contiguous().view(4, -1).data.cpu().numpy()
score = F.softmax(score.permute(1, 2, 3, 0).contiguous().view(2, -1), dim=0).data[1, :].cpu().numpy()
delta[0, :] = delta[0, :] * p.anchor[:, 2] + p.anchor[:, 0]
delta[1, :] = delta[1, :] * p.anchor[:, 3] + p.anchor[:, 1]
delta[2, :] = np.exp(delta[2, :]) * p.anchor[:, 2]
delta[3, :] = np.exp(delta[3, :]) * p.anchor[:, 3]
def change(r):
return np.maximum(r, 1./r)
def sz(w, h):
pad = (w + h) * 0.5
sz2 = (w + pad) * (h + pad)
return np.sqrt(sz2)
def sz_wh(wh):
pad = (wh[0] + wh[1]) * 0.5
sz2 = (wh[0] + pad) * (wh[1] + pad)
return np.sqrt(sz2)
# size penalty
s_c = change(sz(delta[2, :], delta[3, :]) / (sz_wh(target_sz))) # scale penalty
r_c = change((target_sz[0] / target_sz[1]) / (delta[2, :] / delta[3, :])) # ratio penalty
penalty = np.exp(-(r_c * s_c - 1.) * p.penalty_k)
pscore = penalty * score
    # cosine window: blend the score with the motion prior by window_influence
pscore = pscore * (1 - p.window_influence) + window * p.window_influence
best_pscore_id = np.argmax(pscore)
# print('###################### {}'.format(best_pscore_id))
target = delta[:, best_pscore_id] / scale_z
target_sz = target_sz / scale_z
lr = penalty[best_pscore_id] * score[best_pscore_id] * p.lr
res_x = target[0] + target_pos[0]
res_y = target[1] + target_pos[1]
res_w = target_sz[0] * (1 - lr) + target[2] * lr
res_h = target_sz[1] * (1 - lr) + target[3] * lr
target_pos = np.array([res_x, res_y])
target_sz = np.array([res_w, res_h])
return target_pos, target_sz, score[best_pscore_id]
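
# im_to_torch is used by get_subwindow_tracking below but is neither defined
# nor imported in this file. A minimal sketch matching the usual DaSiamRPN
# helper (an assumption, not necessarily the original code):
def im_to_torch(img):
    # HWC uint8 image -> CHW float tensor
    return torch.from_numpy(np.transpose(img, (2, 0, 1))).float()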
def get_subwindow_tracking(im, pos, model_sz, original_sz, avg_chans, out_mode='torch', new=False):
if isinstance(pos, float):
pos = [pos, pos]
sz = original_sz
im_sz = im.shape
c = (original_sz+1) / 2
context_xmin = round(pos[0] - c) # floor(pos(2) - sz(2) / 2);
context_xmax = context_xmin + sz - 1
context_ymin = round(pos[1] - c) # floor(pos(1) - sz(1) / 2);
context_ymax = context_ymin + sz - 1
left_pad = int(max(0., -context_xmin))
top_pad = int(max(0., -context_ymin))
right_pad = int(max(0., context_xmax - im_sz[1] + 1))
bottom_pad = int(max(0., context_ymax - im_sz[0] + 1))
context_xmin = context_xmin + left_pad
context_xmax = context_xmax + left_pad
context_ymin = context_ymin + top_pad
context_ymax = context_ymax + top_pad
    # zzp: a simpler, faster version
r, c, k = im.shape
if any([top_pad, bottom_pad, left_pad, right_pad]):
te_im = np.zeros((r + top_pad + bottom_pad, c + left_pad + right_pad, k), np.uint8) # 0 is better than 1 initialization
te_im[top_pad:top_pad + r, left_pad:left_pad + c, :] = im
if top_pad:
te_im[0:top_pad, left_pad:left_pad + c, :] = avg_chans
if bottom_pad:
te_im[r + top_pad:, left_pad:left_pad + c, :] = avg_chans
if left_pad:
te_im[:, 0:left_pad, :] = avg_chans
if right_pad:
te_im[:, c + left_pad:, :] = avg_chans
im_patch_original = te_im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :]
else:
im_patch_original = im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :]
if not np.array_equal(model_sz, original_sz):
        im_patch = cv2.resize(im_patch_original, (model_sz, model_sz))  # zzp: cv2 resize for speed
else:
im_patch = im_patch_original
    return im_to_torch(im_patch) if out_mode == 'torch' else im_patch
def SiamRPN_init(im, target_pos, target_sz, net):
state = dict()
p = TrackerConfig()
p.update(net.cfg)
state['im_h'] = im.shape[0]
state['im_w'] = im.shape[1]
p.anchor = generate_anchor(p.total_stride, p.scales, p.ratios, int(p.score_size))
avg_chans = np.mean(im, axis=(0, 1))
wc_z = target_sz[0] + p.context_amount * sum(target_sz)
hc_z = target_sz[1] + p.context_amount * sum(target_sz)
s_z = round(np.sqrt(wc_z * hc_z))
# initialize the exemplar
z_crop = get_subwindow_tracking(im, target_pos, p.exemplar_size, s_z, avg_chans, out_mode='np')
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
z = Variable(transform(z_crop).unsqueeze(0))
# net.temple(z.cuda())
net.temple(z)
window = np.outer(np.hanning(p.score_size), np.hanning(p.score_size))
window = np.tile(window.flatten(), p.anchor_num)
state['p'] = p
state['net'] = net
state['avg_chans'] = avg_chans
state['window'] = window
state['target_pos'] = target_pos
state['target_sz'] = target_sz
return state
def SiamRPN_track(state, im):
p = state['p']
net = state['net']
avg_chans = state['avg_chans']
window = state['window']
target_pos = state['target_pos']
target_sz = state['target_sz']
wc_z = target_sz[1] + p.context_amount * sum(target_sz)
hc_z = target_sz[0] + p.context_amount * sum(target_sz)
s_z = np.sqrt(wc_z * hc_z)
scale_z = p.exemplar_size / s_z
d_search = (p.instance_size - p.exemplar_size) / 2
pad = d_search / scale_z
s_x = s_z + 2 * pad
# extract scaled crops for search region x at previous target position
x_crop = get_subwindow_tracking(im, target_pos, p.instance_size, round(s_x), avg_chans, out_mode='np')
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
x_crop = Variable(transform(x_crop).unsqueeze(0))
# target_pos, target_sz, score = tracker_eval(net, x_crop.cuda(), target_pos, target_sz * scale_z, window, scale_z, p)
target_pos, target_sz, score = tracker_eval(net, x_crop, target_pos, target_sz * scale_z, window, scale_z, p)
target_pos[0] = max(0, min(state['im_w'], target_pos[0]))
target_pos[1] = max(0, min(state['im_h'], target_pos[1]))
target_sz[0] = max(10, min(state['im_w'], target_sz[0]))
target_sz[1] = max(10, min(state['im_h'], target_sz[1]))
state['target_pos'] = target_pos
state['target_sz'] = target_sz
state['score'] = score
return state
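
# Hypothetical wiring of the two entry points above (the weight path, initial
# box and frame source are placeholders, not values from the source):
# net = SiamRPNPP()
# net.load_state_dict(torch.load('siamrpnpp_weights.pth'))
# net.eval()
# state = SiamRPN_init(first_frame, np.array([cx, cy]), np.array([w, h]), net)
# for frame in remaining_frames:
#     state = SiamRPN_track(state, frame)
#     cx, cy = state['target_pos']
#     w, h = state['target_sz']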
|
python
|
import argparse
from utils import levenshtein
import pdb
import re
def clean(label):
alphabet = [a for a in '0123456789abcdefghijklmnopqrstuvwxyz* ']
label = label.replace('-', '*')
nlabel = ""
for each in label.lower():
if each in alphabet:
nlabel += each
return nlabel
parser = argparse.ArgumentParser()
parser.add_argument('--preds', type=str, default='../misc/preds/temp.txt', help='path to preds file')
parser.add_argument('--vocab', type=str, required=True)
parser.add_argument('--mode', type=str, default='word', help="evaluation mode: 'word' compares line pairs as whole words, anything else splits lines into words")
parser.add_argument('--lower', action='store_true', help='convert strings to lowercase before comparison')
parser.add_argument('--alnum', action='store_true', help='convert strings to alphanumeric before comparison')
opt = parser.parse_args()
train_vocab = []
with open(opt.vocab) as f:
for line in f:
train_vocab.append(line.strip())
f = open(opt.preds, 'r')
tw = 0
ww = 0
tc = 0
wc = 0
word_lens = []
if opt.mode == 'word':
for i , line in enumerate(f):
print(line)
if i%2==0:
pred = line.strip()
else:
gt = line.strip()
if gt in train_vocab:
continue
if opt.lower:
gt = gt.lower()
pred = pred.lower()
if opt.alnum:
                pattern = re.compile(r'[\W_]+')
gt = pattern.sub('', gt)
pred = pattern.sub('', pred)
# pdb.set_trace()
# gt =
# print('before')
if gt != pred:
ww += 1
wc += levenshtein(gt, pred)
word_lens.append(len(gt))
print(gt, pred, wc)
tc += len(gt)
tw += 1
else:
for i , line in enumerate(f):
if i%2==0:
pred = line.strip()
else:
gt = line.strip()
gt = clean(gt)
pred = clean(pred)
gt_w = gt.split()
pred_w = pred.split()
for j in range(len(gt_w)):
try:
if gt_w[j] != pred_w[j]:
# print(gt_w[j], pred_w[j])
ww += 1
except IndexError:
ww += 1
tw += len(gt.split())
wc += levenshtein(gt, pred)
tc += len(gt)
print(ww, tw)
print('WER: ', (ww/tw)*100)
print('CER: ', (wc/tc)*100)
# Length statistics over the incorrectly recognised words (populated in word mode only).
if word_lens:
    print('Incorrect Avg Len: ', sum(word_lens)/len(word_lens))
    print('Incorrect Max Len: ', max(word_lens))
    print('Incorrect Min Len: ', min(word_lens))
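
# utils.levenshtein is imported at the top but not shown here. For reference, a
# standard dynamic-programming edit distance with the behaviour this script
# relies on (a sketch; the real utils implementation may differ):
def levenshtein_sketch(a, b):
    if len(a) < len(b):
        a, b = b, a
    previous = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        current = [i]
        for j, cb in enumerate(b, 1):
            current.append(min(previous[j] + 1,                # deletion
                               current[j - 1] + 1,             # insertion
                               previous[j - 1] + (ca != cb)))  # substitution
        previous = current
    return previous[-1]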
|
python
|
"""
Create a pedestal file from an event file using the target_calib Pedestal
class
"""
from targetpipe.io.camera import Config
Config('checs')
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, \
FuncFormatter, AutoMinorLocator
from traitlets import Dict, List
from ctapipe.core import Tool, Component
from ctapipe.io.eventfilereader import EventFileReaderFactory
from targetpipe.calib.camera.makers import PedestalMaker
from targetpipe.calib.camera.r1 import TargetioR1Calibrator
from targetpipe.calib.camera.tf import TFApplier
from targetpipe.io.eventfilereader import TargetioFileReader
from targetpipe.plots.official import ThesisPlotter
from tqdm import tqdm, trange
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import patches
from os.path import join, dirname
from IPython import embed
import pandas as pd
from scipy.stats import norm
from targetpipe.utils.dactov import checm_dac_to_volts
from glob import glob
import re
class WaveformPlotter(ThesisPlotter):
name = 'WaveformPlotter'
def __init__(self, config, tool, **kwargs):
"""
Parameters
----------
config : traitlets.loader.Config
Configuration specified by config file or cmdline arguments.
Used to set traitlet values.
Set to None if no configuration to pass.
tool : ctapipe.core.Tool
Tool executable that is calling this component.
Passes the correct logger to the component.
Set to None if no Tool to pass.
kwargs
"""
super().__init__(config=config, tool=tool, **kwargs)
def add(self, waveform):
self.ax.plot(waveform)
def save(self, output_path=None):
self.ax.set_title("Waveforms for one channel, incrementing VPED")
self.ax.set_xlabel("Time (ns)")
self.ax.set_ylabel("Amplitude (ADC Pedestal-Subtracted)")
self.ax.xaxis.set_major_locator(MultipleLocator(16))
super().save(output_path)
class TFInvestigator(Tool):
name = "TFInvestigator"
description = "Produce plots associated with the " \
"transfer function calibration"
aliases = Dict(dict())
classes = List([])
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.df_file = None
self.tf = None
self.r1 = None
self.n_pixels = None
self.n_samples = None
self.p_vi = None
def setup(self):
self.log_format = "%(levelname)s: %(message)s [%(name)s.%(funcName)s]"
kwargs = dict(config=self.config, tool=self)
ped_path = "/Volumes/gct-jason/data_checs/tf/ac_tf_tmSN0074/Pedestal.tcal"
self.r1 = TargetioR1Calibrator(pedestal_path=ped_path,
**kwargs,
)
dfl = []
file_list = glob("/Volumes/gct-jason/data_checs/tf/ac_tf_tmSN0074/Amplitude_*_r0.tio")
pattern = 'Amplitude_(.+?)_r0.tio'
for p in file_list:
amplitude = int(re.search(pattern, p).group(1))
print(amplitude)
dfl.append(dict(path=p, amplitude=amplitude))
for d in dfl:
d['reader'] = TargetioFileReader(input_path=d['path'], **kwargs)
self.df_file = pd.DataFrame(dfl)
self.df_file = self.df_file.sort_values('amplitude')
first_event = dfl[0]['reader'].get_event(0)
telid = list(first_event.r0.tels_with_data)[0]
r1 = first_event.r1.tel[telid].pe_samples[0]
self.n_pixels, self.n_samples = r1.shape
        p_kwargs = dict(kwargs)  # copy so the shared kwargs dict is not mutated
        p_kwargs['script'] = "ac_transfer_function_wfs"
        p_kwargs['figure_name'] = "amplitude_increments"
        self.p_vi = WaveformPlotter(**p_kwargs)
def start(self):
desc1 = 'Looping through files'
n_rows = len(self.df_file.index)
t = tqdm(self.df_file.iterrows(), total=n_rows, desc=desc1)
for index, row in t:
path = row['path']
reader = row['reader']
amplitude = row['amplitude']
source = reader.read()
n_events = reader.num_events
event = reader.get_event(0)
self.r1.calibrate(event)
wf = event.r1.tel[0].pe_samples[0, 0]
# if amplitude < 2000:
self.p_vi.add(wf)
def finish(self):
self.p_vi.save()
exe = TFInvestigator()
exe.run()
|
python
|
import django_filters
import htmlgenerator as hg
from django import forms
from django.utils.html import mark_safe
from django.utils.translation import gettext as _
from django_countries.widgets import LazySelect
from .button import Button
from .notification import InlineNotification
class Form(hg.FORM):
@staticmethod
def from_django_form(form, **kwargs):
return Form.from_fieldnames(form, form.fields, **kwargs)
@staticmethod
def from_fieldnames(form, fieldnames, **kwargs):
return Form.wrap_with_form(
form, *[FormField(fieldname) for fieldname in fieldnames], **kwargs
)
@staticmethod
def wrap_with_form(form, *elements, submit_label=None, **kwargs):
if kwargs.get("standalone", True) is True:
elements += (
hg.DIV(
Button(submit_label or _("Save"), type="submit"),
_class="bx--form-item",
style="margin-top: 2rem",
),
)
return Form(form, *elements, **kwargs)
def __init__(self, form, *children, use_csrf=True, standalone=True, **attributes):
"""
form: lazy evaluated value which should resolve to the form object
children: any child elements, can be formfields or other
use_csrf: add a CSRF input, but only for POST submission and standalone forms
standalone: if true, will add a CSRF token and will render enclosing FORM-element
"""
self.form = form
self.standalone = standalone
defaults = {"method": "POST", "autocomplete": "off"}
defaults.update(attributes)
if (
defaults["method"].upper() == "POST"
and use_csrf is not False
and standalone is True
):
children = (CsrfToken(),) + children
super().__init__(*children, **defaults)
def formfieldelements(self):
return self.filter(
lambda elem, parents: isinstance(elem, FormChild)
and not any((isinstance(p, Form) for p in parents[1:]))
)
def render(self, context):
form = hg.resolve_lazy(self.form, context, self)
for formfield in self.formfieldelements():
formfield.form = form
for error in form.non_field_errors():
self.insert(0, InlineNotification(_("Form error"), error, kind="error"))
for hidden in form.hidden_fields():
for error in hidden.errors:
self.insert(
0,
InlineNotification(
_("Form error: "), hidden.name, error, kind="error"
),
)
if self.standalone:
if form.is_multipart() and "enctype" not in self.attributes:
self.attributes["enctype"] = "multipart/form-data"
return super().render(context)
return super().render_children(context)
class FormChild:
"""Used to mark elements which need the "form" attribute set by the parent form before rendering"""
class FormField(FormChild, hg.BaseElement):
"""Dynamic element which will resolve the field with the given name
and return the correct HTML, based on the widget of the form field or on the passed argument 'fieldtype'"""
    def __init__(
        self,
        fieldname,
        fieldtype=None,
        hidelabel=False,
        elementattributes=None,
        widgetattributes=None,
    ):
        self.fieldname = fieldname
        self.fieldtype = fieldtype
        # None defaults avoid the shared-mutable-default-argument pitfall
        self.widgetattributes = widgetattributes or {}
        self.elementattributes = elementattributes or {}
        self.form = None  # will be set by the render method of the parent form
        self.hidelabel = hidelabel
def render(self, context):
element = _mapwidget(
self.form[self.fieldname],
self.fieldtype,
self.elementattributes,
self.widgetattributes,
)
if self.hidelabel:
element._replace(
lambda e, ancestors: isinstance(e, hg.LABEL), None, all=True
)
return element.render(context)
def __repr__(self):
return f"FormField({self.fieldname})"
class FormsetField(FormChild, hg.BaseElement):
def __init__(
self,
fieldname,
*children,
containertag=hg.DIV,
formsetinitial=None,
**formsetfactory_kwargs,
):
super().__init__(*children)
self.fieldname = fieldname
self.formsetfactory_kwargs = formsetfactory_kwargs
self.formsetinitial = formsetinitial
self.containertag = containertag
def render(self, context):
formset = self.form[self.fieldname].formset
# Detect internal fields like the delete-checkbox, the order-widget, id fields, etc and add their
# HTML representations. But we never show the "delete" checkbox, it should be manually added via InlineDeleteButton
declared_fields = [
f.fieldname
for f in self.filter(lambda e, ancestors: isinstance(e, FormField))
]
internal_fields = [
field
for field in formset.empty_form.fields
if field not in declared_fields
and field != forms.formsets.DELETION_FIELD_NAME
]
for field in internal_fields:
self.append(FormField(field))
skeleton = hg.DIV(
Form.from_django_form(formset.management_form, standalone=False),
self.containertag(
hg.Iterator(
formset,
loopvariable="formset_form",
content=Form(hg.C("formset_form"), *self, standalone=False),
),
id=f"formset_{formset.prefix}_container",
),
hg.DIV(
Form(formset.empty_form, *self, standalone=False),
id=f"empty_{ formset.prefix }_form",
_class="template-form",
style="display:none;",
),
hg.SCRIPT(
mark_safe(
f"""document.addEventListener("DOMContentLoaded", e => init_formset("{ formset.prefix }"));"""
)
),
)
yield from skeleton.render(context)
def __repr__(self):
return f"Formset({self.fieldname}, {self.formsetfactory_kwargs})"
class FormsetAddButton(FormChild, Button):
def __init__(self, fieldname, label=_("Add"), **kwargs):
defaults = {
"icon": "add",
"notext": True,
"buttontype": "tertiary",
}
defaults.update(kwargs)
self.fieldname = fieldname
super().__init__(label, **defaults)
def render(self, context):
formset = self.form[self.fieldname].formset
self.attributes["id"] = f"add_{formset.prefix}_button"
self.attributes[
"onclick"
] = f"formset_add('{ formset.prefix }', '#formset_{ formset.prefix }_container');"
return super().render(context)
class InlineDeleteButton(FormChild, Button):
def __init__(self, parentcontainerselector, label=_("Delete"), **kwargs):
"""
Show a delete button for the current inline form. This element needs to be inside a FormsetField
parentcontainerselector: CSS-selector which will be passed to element.closest in order to select the parent container which should be hidden on delete
"""
defaults = {
"notext": True,
"small": True,
"icon": "trash-can",
"buttontype": "ghost",
"onclick": f"delete_inline_element(this.querySelector('input[type=checkbox]'), this.closest('{parentcontainerselector}'))",
}
defaults.update(kwargs)
super().__init__(
label,
FormField(
forms.formsets.DELETION_FIELD_NAME,
elementattributes={"style": "display: none"},
),
**defaults,
)
class HiddenInput(FormChild, hg.INPUT):
def __init__(self, fieldname, widgetattributes, **attributes):
self.fieldname = fieldname
super().__init__(type="hidden", **{**widgetattributes, **attributes})
    def render(self, context):
        # Only touch the attributes when a bound field has been attached
        if self.boundfield is not None:
            self.attributes["id"] = self.boundfield.auto_id
            self.attributes["name"] = self.boundfield.html_name
            if self.boundfield.value() is not None:
                self.attributes["value"] = self.boundfield.value()
        return super().render(context)
class CsrfToken(FormChild, hg.INPUT):
def __init__(self):
super().__init__(type="hidden")
def render(self, context):
self.attributes["name"] = "csrfmiddlewaretoken"
self.attributes["value"] = context["csrf_token"]
return super().render(context)
def _mapwidget(
    field, fieldtype, elementattributes=None, widgetattributes=None, only_initial=False
):
from .checkbox import Checkbox
from .date_picker import DatePicker
from .file_uploader import FileUploader
from .multiselect import MultiSelect
from .select import Select
from .text_area import TextArea
from .text_input import PasswordInput, TextInput
WIDGET_MAPPING = {
forms.TextInput: TextInput,
forms.NumberInput: TextInput, # TODO HIGH
forms.EmailInput: TextInput, # TODO
forms.URLInput: TextInput, # TODO
forms.PasswordInput: PasswordInput,
forms.HiddenInput: HiddenInput,
forms.DateInput: DatePicker,
forms.DateTimeInput: TextInput, # TODO
forms.TimeInput: TextInput, # TODO HIGH
forms.Textarea: TextArea,
forms.CheckboxInput: Checkbox,
forms.Select: Select,
forms.NullBooleanSelect: Select,
forms.SelectMultiple: MultiSelect, # TODO HIGH
forms.RadioSelect: TextInput, # TODO HIGH
forms.CheckboxSelectMultiple: TextInput, # TODO HIGH
forms.FileInput: FileUploader,
forms.ClearableFileInput: FileUploader, # TODO HIGH
forms.MultipleHiddenInput: TextInput, # TODO
forms.SplitDateTimeWidget: TextInput, # TODO
forms.SplitHiddenDateTimeWidget: TextInput, # TODO
forms.SelectDateWidget: TextInput, # TODO
# 3rd party widgets
django_filters.widgets.DateRangeWidget: TextInput, # TODO
LazySelect: Select,
}
# The following is a bit of magic to play nicely with the django form processing
# TODO: This can be simplified, and improved
if field.field.localize:
field.field.widget.is_localized = True
    attrs = dict(field.field.widget.attrs)
    attrs.update(widgetattributes or {})
attrs = field.build_widget_attrs(attrs)
if getattr(field.field.widget, "allow_multiple_selected", False):
attrs["multiple"] = True
attrs["style"] = "height: 16rem"
if field.auto_id and "id" not in field.field.widget.attrs:
attrs.setdefault("id", field.html_initial_id if only_initial else field.auto_id)
if "name" not in attrs:
attrs["name"] = field.html_initial_name if only_initial else field.html_name
value = field.field.widget.format_value(field.value())
if value is not None and "value" not in attrs:
attrs["value"] = value
    elementattributes = {
        **getattr(field.field, "layout_kwargs", {}),
        **(elementattributes or {}),
    }
if isinstance(field.field.widget, forms.CheckboxInput):
attrs["checked"] = field.value()
if isinstance(field.field.widget, forms.Select):
if isinstance(field.field.widget, forms.SelectMultiple):
return hg.DIV(
MultiSelect(
field.field.widget.optgroups(
field.name,
field.field.widget.get_context(field.name, field.value(), {})[
"widget"
]["value"],
),
label=field.label,
help_text=field.help_text,
errors=field.errors,
disabled=field.field.disabled,
required=field.field.required,
widgetattributes=attrs,
**elementattributes,
),
_class="bx--form-item",
)
return hg.DIV(
Select(
field.field.widget.optgroups(
field.name,
field.field.widget.get_context(field.name, field.value(), {})[
"widget"
]["value"],
),
label=field.label,
help_text=field.help_text,
errors=field.errors,
disabled=field.field.disabled,
required=field.field.required,
widgetattributes=attrs,
**elementattributes,
),
_class="bx--form-item",
)
fieldtype = (
fieldtype
or getattr(field.field, "layout", None)
or WIDGET_MAPPING[type(field.field.widget)]
)
if isinstance(fieldtype, type) and issubclass(fieldtype, hg.BaseElement):
ret = fieldtype(
fieldname=field.name, widgetattributes=attrs, **elementattributes
)
else:
ret = fieldtype
ret.boundfield = field
    if (
        field.field.show_hidden_initial and fieldtype != HiddenInput
    ):  # special case, prevent infinite recursion
return hg.BaseElement(
ret,
_mapwidget(field, HiddenInput, only_initial=True),
)
return ret
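
# A minimal usage sketch (illustrative only: the view and form are invented,
# and hg.render is assumed to be htmlgenerator's render entry point):
# from django import forms
#
# class ContactForm(forms.Form):
#     name = forms.CharField()
#
# def contact_view(request):
#     djform = ContactForm(request.POST or None)
#     layout = Form.from_django_form(djform)  # fields + submit button + CSRF input
#     html = hg.render(layout, {"csrf_token": request.META.get("CSRF_COOKIE", "")})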
|
python
|
import sys
import pytest
sys.path.append(".")
sys.path.append("..")
sys.path.append("../..")
from Hologram.Network import Network
class TestNetwork(object):
def test_create_network(self):
network = Network()
    def test_get_invalid_connection_status(self):
        network = Network()
        # pytest 4+ removed the `message` kwarg; `match` asserts on the exception text.
        with pytest.raises(Exception, match='Must instantiate a defined Network type'):
            network.getConnectionStatus()
    def test_get_invalid_signal_strength(self):
        network = Network()
        with pytest.raises(Exception, match='Must instantiate a defined Network type'):
            network.getSignalStrength()
|
python
|
from django.urls import path
from rest_framework_simplejwt.views import TokenRefreshView
from . import views
urlpatterns = [
path('login/', views.loginView.as_view(), name='obtain_token'),
path('nlogin/', views.adminTokenObtainPairView.as_view(), name='obtain_token_new'),
path('login/refresh/', TokenRefreshView.as_view(), name="refresh_token"),
path('test/', views.testView.as_view(), name='test'),
path('register/', views.registerView.as_view(), name='register'),
path('change_password/<int:pk>/', views.changePasswordView.as_view(),
name="change_password"),
path('update_profile/<int:pk>/', views.updateProfileView.as_view(),
name="update_profile"),
path('logout/', views.logoutView.as_view(), name='logout'),
path('logout_all/', views.logoutAllView.as_view(), name='logout_all'),
path('adminlogin/', views.adminTokenObtainPairView.as_view(),
name='admin_obtain_token'),
path('adminlogin/refresh/', TokenRefreshView.as_view(),
name="admin_refresh_token"),
# Staff management:
path('staff/register/', views.staffRegisterView.as_view(), name="staff_register"),
path('staff/update_profile/<int:pk>/', views.staffProfileUpdate.as_view(), name="staff_profile_update"),
    path('staff/modify_pay/<int:pk>/', views.staffPayView.as_view(), name="staff_modify_pay"),
    path('staff/staff_delete/<int:pk>/', views.staffDeleteView.as_view(), name="staff_delete"),
    path('staff/stafflist/', views.staffListView.as_view(), name="staff_list"),
    path('staff/staff_payment_list/', views.staffPaymentListView.as_view(), name="staff_payment_list"),
    path('staff/staffdetail/<int:pk>/', views.staffDetailView.as_view(), name="staff_detail"),
    path('staff/paystaff/', views.staffPayRecordView.as_view(), name="staff_pay_record"),
    path('staff/totalMoneyPaidToStaff/', views.totalMoneyPaidToStaff.as_view(), name="staff_payment_analysis"),
    # Customer:
    path('customer/customerlist/', views.customerListView.as_view(), name="customer_list"),
]
|
python
|
"""The managers for the models
"""
from django.contrib.auth.models import UserManager as BaseUserManager
class UserManager(BaseUserManager):
"""The user manager
"""
def create_user(self, username, email=None, password=None, **extra_fields):
"""Create a user.
:param username: The user name.
:param email: The user email.
:param password: The user password.
        :param extra_fields: Extra fields for the user.
:return: The created user.
"""
return super().create_user(username, email, password, is_admin=False, **extra_fields)
def create_superuser(self, username, email=None, password=None, **extra_fields):
"""Create a superuser.
:param username: The user name.
:param email: The user email.
:param password: The user password.
        :param extra_fields: Extra fields for the user.
:return: The created superuser.
"""
return self._create_user(username, email, password, is_admin=True, **extra_fields)
|
python
|
"""
Project: RadarBook
File: ecef_to_lla.py
Created by: Lee A. Harrison
On: 3/18/2018
Created with: PyCharm
Copyright (C) 2019 Artech House ([email protected])
This file is part of Introduction to Radar Using Python and MATLAB
and can not be copied and/or distributed without the express permission of Artech House.
"""
from numpy import sqrt, sin, cos, arctan2, mod
from scipy.constants import pi
def convert(ecef_x, ecef_y, ecef_z):
"""
Convert coordinates in ECEF to LLA.
:param ecef_x: The x coordinate of the point (m).
:param ecef_y: The y coordinate of the point (m).
:param ecef_z: The z coordinate of the point (m).
:return: The LLA coordinates of the point (rad, rad, m).
"""
# Earth constants
effective_earth_radius = 6378137
earth_eccentricity = 8.1819190842622e-2
# Effective polar radius
earth_radius_polar = sqrt(effective_earth_radius**2 * (1. - earth_eccentricity**2))
ep = sqrt((effective_earth_radius**2 - earth_radius_polar**2) / earth_radius_polar**2)
# Radius in xy plane
r = sqrt(ecef_x * ecef_x + ecef_y * ecef_y)
# Angle from the xy plane
theta = arctan2(effective_earth_radius * ecef_z, earth_radius_polar * r)
# Calculate the coordinates
lat = arctan2((ecef_z + ep**2 * earth_radius_polar * sin(theta)**3),
(r - earth_eccentricity**2 * effective_earth_radius * cos(theta)**3))
lon = mod(arctan2(ecef_y, ecef_x), 2. * pi)
alt = r / cos(lat) - effective_earth_radius / sqrt(1. - earth_eccentricity**2 * sin(lat)**2)
return lat, lon, alt
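
# A quick sanity check (an added example, not from the source): a point on the
# equator at the prime meridian, ECEF (6378137, 0, 0) m, should map to
# lat = 0 rad, lon = 0 rad, alt = 0 m.
if __name__ == '__main__':
    print(convert(6378137.0, 0.0, 0.0))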
|
python
|
# Print the numbers in [x, y) that equal the sum of the cubes of their digits
# (the Armstrong property for three-digit numbers); note range(x, y) excludes y.
x, y = map(int, input().split())
for j in range(x, y):
    z = j
    a = 0
    for i in range(len(str(j))):
        r = j % 10
        a = a + r ** 3
        j = j // 10
    if a == z:
        print(a, end=" ")
print()
|
python
|
import os
from .base import *
API_DB_URL = os.environ.get("API_DB_URL", "sqlite+aiosqlite:///db.sqlite")
|
python
|
# -*- coding: utf-8 -*-
from django.conf.urls import url
from django.contrib import admin
from .views import ChatRoomTokenView, ChatRoomView
admin.autodiscover()
# django.conf.urls.patterns was removed in Django 1.10; a plain list works on all versions.
urlpatterns = [
    url(r'^room/(?P<token>\w{32})$', ChatRoomView.as_view(), name='chat-room'),
    url(r'^new-room/$', ChatRoomTokenView.as_view(), name='chat-room-token'),
]
|
python
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class TranscodeInfo(object):
def __init__(self, videoCodec=None, videoCodeRate=None, videoFrameRate=None, width=None, height=None, template=None, templateName=None, audioCodec=None, audioFormat=None, audioSampleRate=None, audioChannel=None, audioCodeRate=None, jdchd=None, audioComfort=None):
"""
        :param videoCodec: (Optional) Video codec.
            - Values: h264, h265; default: h264
        :param videoCodeRate: (Optional) Output video bitrate.
            - Range: [128, 15000]
            - Unit: kbps
        :param videoFrameRate: (Optional) Output video frame rate.
            - Range: [1, 30]
        :param width: (Optional) Output video width.
            - Range: [128, 4096]
            - Proportional: if only one of width/height is set, the output is scaled by that ratio
            - Same as source: if neither is set, the output keeps the source aspect ratio
        :param height: (Optional) Output video height.
            - Range: [128, 4096]
            - Proportional: if only one of width/height is set, the output is scaled by that ratio
            - Same as source: if neither is set, the output keeps the source aspect ratio
        :param template: (Optional) Custom transcoding template name.
            - Custom template: validated as an enumeration, case-insensitive, spaces removed;
              allowed characters are digits, letters and the hyphen ("-"),
              which must not appear at the start or end
            - Note: must not duplicate a standard transcoding template or an already defined name
        :param templateName: (Optional) Transcoding template display name.
        :param audioCodec: (Optional) Output audio codec.
            - Values: aac, mp3
            - Case-insensitive
        :param audioFormat: (Optional) Output audio format.
            - Values: aac_lc, aac_low, aac_he, aac_he_v2
            - Case-insensitive
        :param audioSampleRate: (Optional) Output audio sample rate.
            - Values: [44100, 48000]
        :param audioChannel: (Optional) Output audio channels.
            - 1: mono
            - 2: stereo
        :param audioCodeRate: (Optional) Output audio bitrate.
            - Range: [16, 128]
            - Unit: kbps
        :param jdchd: (Optional) JD ultra-HD enhancement.
            - Values: jdchd-1.0, off
        :param audioComfort: (Optional) Comfort audio.
            - Values: on, off
        """
self.videoCodec = videoCodec
self.videoCodeRate = videoCodeRate
self.videoFrameRate = videoFrameRate
self.width = width
self.height = height
self.template = template
self.templateName = templateName
self.audioCodec = audioCodec
self.audioFormat = audioFormat
self.audioSampleRate = audioSampleRate
self.audioChannel = audioChannel
self.audioCodeRate = audioCodeRate
self.jdchd = jdchd
self.audioComfort = audioComfort
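
# Illustrative construction only (values chosen to satisfy the documented ranges):
# info = TranscodeInfo(videoCodec='h264', videoCodeRate=2500, videoFrameRate=30,
#                      width=1280, height=720, audioCodec='aac',
#                      audioSampleRate=44100, audioChannel=2, audioCodeRate=128)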
|
python
|
from panflute import run_filter, Header
def increase_header_level(elem, doc):
    # Push each header one level deeper; headers already at the deepest
    # level (6) are removed, since returning [] deletes the element.
    if isinstance(elem, Header):
        if elem.level < 6:
            elem.level += 1
        else:
            return []
def main(doc=None):
return run_filter(increase_header_level, doc=doc)
if __name__ == "__main__":
main()
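
# One way to try the filter without a pandoc pipeline (a sketch; assumes
# panflute's convert_text helper and a pandoc binary on PATH):
# import panflute as pf
# doc = pf.convert_text("# Title\n\ntext", standalone=True)
# doc = doc.walk(increase_header_level, doc=doc)
# print(pf.convert_text(doc, input_format='panflute', output_format='markdown'))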
|
python
|
# -*- coding: utf-8 -*-
import time
import json
import requests
import logging
import pika
from config import PROXY
from twython import TwythonStreamer
class sampleStreamer(TwythonStreamer):
"""
Retrieve data from the Twitter Streaming API.
The streaming API requires
`OAuth 1.0 <http://en.wikipedia.org/wiki/OAuth>`_ authentication.
"""
def __init__(self, rabbit_host, rabbit_port, app_key, app_secret, oauth_token, oauth_token_secret, tag):
"""Create a new instance of the sampleStreamer class that will connect to Twitter API and send tweets
to rabbitmq queue using pika module.
:param str app_key, app_secret, oauth_token, oauth_token_secret: credentials for Twitter API authentication
:param str tag: a tag that will be added to the tweet body to indicate its collection method
"""
self.rabbit_host = rabbit_host
self.rabbit_port = rabbit_port
self.rabbit_client = self.open_rabbit_connection()
self.tweets_queue = self.open_rabbit_channel()
if PROXY:
client_args = {
'proxies': PROXY
}
else:
client_args = {}
self.do_continue = True
TwythonStreamer.__init__(self, app_key, app_secret, oauth_token,
oauth_token_secret, timeout=100, chunk_size=200, client_args=client_args)
self.tag = tag
    def open_rabbit_connection(self):
        rabbit_client = None  # avoids an unbound name if every attempt fails
        for i in range(10):
            try:
                rabbit_client = pika.BlockingConnection(
                    pika.ConnectionParameters(host=self.rabbit_host, port=self.rabbit_port,
                                              connection_attempts=100, retry_delay=2,
                                              # blocked_connection_timeout=1000,
                                              # socket_timeout=1000,
                                              ssl=False,
                                              credentials=pika.credentials.PlainCredentials(
                                                  username='user',
                                                  password='password')))
                break
            except pika.exceptions.AMQPConnectionError:
                time.sleep(2)
                logging.error("pika AMQPConnectionError, retrying")
            except Exception as error:
                time.sleep(2)
                logging.error("other error, retrying " + str(error))
        if rabbit_client is None:
            raise ConnectionError("could not connect to RabbitMQ after 10 attempts")
        return rabbit_client
def open_rabbit_channel(self):
tweets_queue = self.rabbit_client.channel()
tweets_queue.queue_declare(queue='tweets')
return tweets_queue
def on_success(self, data):
"""
:param data: response from Twitter API
"""
data["tags"] = [self.tag]
data["events"] = [""]
try:
self.tweets_queue.basic_publish(exchange='',
routing_key='tweets',
body=json.dumps(data))
except pika.exceptions.AMQPConnectionError:
logging.error("AMQPConnectionError, trying to reconnect")
self.rabbit_client = self.open_rabbit_connection()
self.tweets_queue = self.open_rabbit_channel()
self.on_success(data)
        if not self.do_continue:
logging.info("disconnect")
self.disconnect()
def on_error(self, status_code, data, logs="logs"):
"""
:param status_code: The status code returned by the Twitter API
:param data: The response from Twitter API
:param logs: this parameter does not match TwythonStreamer implementation but received from Twitter API.
"""
if status_code == 401:
logging.error(
'Error 401: Unauthorized. Check if the Twitter API access token is correct in file config.py.')
raise requests.exceptions.HTTPError
else:
logging.error("Error {}: {}".format(status_code, data))
def sample(self, lang=None):
"""
Wrapper for 'statuses / sample' API call
"""
while self.do_continue:
# Stream in an endless loop until limit is reached. See twython
# issue 288: https://github.com/ryanmcgrath/twython/issues/288
try:
self.statuses.sample(language=lang)
except requests.exceptions.ChunkedEncodingError as e:
if e is not None:
logging.error("Encoding error (stream will continue): {}".format(e))
def filter(self, track='', lang='fr'):
"""
Wrapper for 'statuses / filter' API call
"""
while self.do_continue:
# Stream in an endless loop until limit is reached
try:
self.statuses.filter(track=track, language=lang)
except requests.exceptions.ChunkedEncodingError as e:
if e is not None:
logging.error("Encoding error (stream will continue): {}".format(e))
continue
except requests.exceptions.ConnectionError as error:
logging.error(str(error) + " sleep 5 sec")
time.sleep(5)
continue
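
# Hypothetical wiring (hosts, credentials and the tag are placeholders, not
# values from the source):
# streamer = sampleStreamer('localhost', 5672, APP_KEY, APP_SECRET,
#                           OAUTH_TOKEN, OAUTH_TOKEN_SECRET, tag='filter')
# streamer.filter(track='python', lang='en')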
|
python
|
""" Package for cookie auth modules. """
__author__ = "William Tucker"
__date__ = "2020-02-14"
__copyright__ = "Copyright 2020 United Kingdom Research and Innovation"
__license__ = "BSD - see LICENSE file in top-level package directory"
|
python
|
"""
Tests scikit-learn's KNeighbours Classifier and Regressor converters.
"""
import unittest
from distutils.version import StrictVersion
import numpy
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
import onnxruntime
from onnxruntime import InferenceSession
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType, Int64TensorType
from skl2onnx.common.data_types import onnx_built_with_ml
from test_utils import dump_data_and_model, fit_classification_model
class TestNearestNeighbourConverter(unittest.TestCase):
def _fit_model_binary_classification(self, model):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
y[y == 2] = 1
model.fit(X, y)
return model, X
def _fit_model_multiclass_classification(self, model, use_string=False):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
if use_string:
y = numpy.array(["cl%d" % _ for _ in y])
model.fit(X, y)
return model, X
def _fit_model(self, model, n_targets=1, label_int=False):
X, y = datasets.make_regression(n_features=4,
random_state=0,
n_targets=n_targets)
if label_int:
y = y.astype(numpy.int64)
model.fit(X, y)
return model, X
@unittest.skipIf(
StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
reason="not available")
def test_model_knn_regressor(self):
model, X = self._fit_model(KNeighborsRegressor(n_neighbors=2))
model_onnx = convert_sklearn(model, "KNN regressor",
[("input", FloatTensorType([None, 4]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X.astype(numpy.float32)[:7],
model, model_onnx,
basename="SklearnKNeighborsRegressor")
@unittest.skipIf(
StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
reason="not available")
def test_model_knn_regressor_yint(self):
model, X = self._fit_model(
KNeighborsRegressor(n_neighbors=2), label_int=True)
model_onnx = convert_sklearn(model, "KNN regressor",
[("input", FloatTensorType([None, 4]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X.astype(numpy.float32)[:7],
model, model_onnx,
basename="SklearnKNeighborsRegressorYInt")
@unittest.skipIf(
StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
reason="not available")
def test_model_knn_regressor2_1(self):
model, X = self._fit_model(KNeighborsRegressor(n_neighbors=1),
n_targets=2)
model_onnx = convert_sklearn(model, "KNN regressor",
[("input", FloatTensorType([None, 4]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X.astype(numpy.float32)[:2],
model, model_onnx,
basename="SklearnKNeighborsRegressor2")
@unittest.skipIf(
StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
reason="not available")
def test_model_knn_regressor2_2(self):
model, X = self._fit_model(KNeighborsRegressor(n_neighbors=2),
n_targets=2)
model_onnx = convert_sklearn(model, "KNN regressor",
[("input", FloatTensorType([None, 4]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X.astype(numpy.float32)[:2],
model, model_onnx,
basename="SklearnKNeighborsRegressor2")
@unittest.skipIf(
StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
reason="not available")
def test_model_knn_regressor_weights_distance(self):
model, X = self._fit_model(
KNeighborsRegressor(
weights="distance", algorithm="brute", n_neighbors=1))
model_onnx = convert_sklearn(model, "KNN regressor",
[("input", FloatTensorType([None, 4]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X.astype(numpy.float32)[:3],
model, model_onnx,
basename="SklearnKNeighborsRegressorWeightsDistance-Dec3")
@unittest.skipIf(
StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
reason="not available")
def test_model_knn_regressor_metric_cityblock(self):
model, X = self._fit_model(KNeighborsRegressor(metric="cityblock"))
model_onnx = convert_sklearn(model, "KNN regressor",
[("input", FloatTensorType([None, 4]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X.astype(numpy.float32)[:7],
model, model_onnx,
basename="SklearnKNeighborsRegressorMetricCityblock")
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
@unittest.skipIf(
StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
reason="not available")
def test_model_knn_classifier_binary_class(self):
model, X = self._fit_model_binary_classification(
KNeighborsClassifier())
model_onnx = convert_sklearn(
model,
"KNN classifier binary",
[("input", FloatTensorType([None, X.shape[1]]))],
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X.astype(numpy.float32),
model, model_onnx,
basename="SklearnKNeighborsClassifierBinary")
@unittest.skipIf(True, reason="later")
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
@unittest.skipIf(
StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
reason="not available")
def test_model_knn_classifier_multi_class(self):
model, X = self._fit_model_multiclass_classification(
KNeighborsClassifier())
model_onnx = convert_sklearn(
model,
"KNN classifier multi-class",
[("input", FloatTensorType([None, X.shape[1]]))],
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X.astype(numpy.float32),
model, model_onnx,
basename="SklearnKNeighborsClassifierMulti")
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
@unittest.skipIf(
StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
reason="not available")
def test_model_knn_classifier_multi_class_string(self):
model, X = self._fit_model_multiclass_classification(
KNeighborsClassifier(), use_string=True)
model_onnx = convert_sklearn(
model,
"KNN classifier multi-class",
[("input", FloatTensorType([None, 3]))],
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X.astype(numpy.float32),
model, model_onnx,
basename="SklearnKNeighborsClassifierMulti")
@unittest.skipIf(
StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
reason="not available")
def test_model_knn_classifier_weights_distance(self):
model, X = self._fit_model_multiclass_classification(
KNeighborsClassifier(weights='distance'))
model_onnx = convert_sklearn(
model, 'KNN classifier', [('input', FloatTensorType([None, 3]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X.astype(numpy.float32)[:7], model, model_onnx,
basename="SklearnKNeighborsClassifierWeightsDistance")
@unittest.skipIf(
StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
reason="not available")
def test_model_knn_classifier_metric_cityblock(self):
model, X = self._fit_model_multiclass_classification(
KNeighborsClassifier(metric='cityblock'))
model_onnx = convert_sklearn(
model, 'KNN classifier', [('input', FloatTensorType([None, 3]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X.astype(numpy.float32)[:7], model, model_onnx,
basename="SklearnKNeighborsClassifierMetricCityblock")
@unittest.skipIf(
StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
reason="not available")
def test_model_knn_regressor_int(self):
model, X = self._fit_model(KNeighborsRegressor())
X = X.astype(numpy.int64)
model_onnx = convert_sklearn(
model,
"KNN regressor",
[("input", Int64TensorType([None, X.shape[1]]))],
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnGradientBoostingRegressionInt-Dec4"
)
@unittest.skipIf(
StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
reason="not available")
def test_model_knn_regressor_equal(self):
X, y = datasets.make_regression(
n_samples=1000, n_features=100, random_state=42)
X = X.astype(numpy.int64)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=42)
model = KNeighborsRegressor(
algorithm='brute', metric='manhattan').fit(X_train, y_train)
model_onnx = convert_sklearn(
model, 'knn',
[('input', Int64TensorType([None, X_test.shape[1]]))])
exp = model.predict(X_test)
sess = InferenceSession(model_onnx.SerializeToString())
res = sess.run(None, {'input': numpy.array(X_test)})[0]
        # The conversion has discrepancies when
        # neighbours are at the exact same distance.
maxd = 1000
accb = numpy.abs(exp - res) > maxd
ind = [i for i, a in enumerate(accb) if a == 1]
assert len(ind) == 0
accp = numpy.abs(exp - res) < maxd
acc = numpy.sum(accp)
ratio = acc * 1.0 / res.shape[0]
assert ratio >= 0.7
# assert_almost_equal(exp, res)
@unittest.skipIf(
StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
reason="not available")
def test_model_multi_class_nocl(self):
model, X = fit_classification_model(
KNeighborsClassifier(),
2, label_string=True)
model_onnx = convert_sklearn(
model,
"multi-class nocl",
[("input", FloatTensorType([None, X.shape[1]]))],
options={id(model): {'nocl': True}})
self.assertIsNotNone(model_onnx)
sonx = str(model_onnx)
assert 'classlabels_strings' not in sonx
assert 'cl0' not in sonx
dump_data_and_model(
X, model, model_onnx, classes=model.classes_,
basename="SklearnNaiveMultiNoCl", verbose=False,
allow_failure="StrictVersion(onnx.__version__)"
" < StrictVersion('1.2') or "
"StrictVersion(onnxruntime.__version__)"
" <= StrictVersion('0.2.1')")
@unittest.skipIf(
StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
reason="not available")
    def test_model_knn_regressor2_2_pipe(self):
pipe = make_pipeline(StandardScaler(),
KNeighborsClassifier())
model, X = self._fit_model_binary_classification(pipe)
model_onnx = convert_sklearn(
model, "KNN pipe",
[("input", FloatTensorType([None, X.shape[1]]))])
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X.astype(numpy.float32)[:2],
model, model_onnx,
basename="SklearnKNeighborsRegressorPipe2")
if __name__ == "__main__":
unittest.main()
|
python
|
import sys
# mode is one of 'left', 'right', 'center'
def horizontal_align_print(s, width, mode='left', offsetChar=' ', end='\n',
os=sys.stdout):
p = _print_to_file_func(os)
if mode[0] == 'l': # left
offset = width - len(s)
p(s, end='')
for _ in range(offset):
p(offsetChar, end='')
p('', end=end)
elif mode[0] == 'r': # right
offset = width - len(s)
for _ in range(offset):
p(offsetChar, end='')
p(s, end=end)
else: # center
sIsEven = len(s) % 2 == 0
widthIsEven = width % 2 == 0
if sIsEven != widthIsEven:
width += 1
totalOffset = width - len(s)
for _ in range(int(totalOffset / 2)):
p(offsetChar, end='')
p(s, end='')
for _ in range(int(totalOffset / 2)):
p(offsetChar, end='')
p('', end=end)
def _print_to_file_func(file):
def f(*objects, sep=' ', end='\n', flush=False):
print(*objects, sep=sep, end=end, file=file, flush=flush)
return f
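
# Quick demonstration (expected output in the trailing comments):
# horizontal_align_print('abc', 11, mode='left', offsetChar='.')    # abc........
# horizontal_align_print('abc', 11, mode='right', offsetChar='.')   # ........abc
# horizontal_align_print('abc', 11, mode='center', offsetChar='.')  # ....abc....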
|
python
|
from os.path import join, dirname
import datetime
# import pandas as pd
# from scipy.signal import savgol_filter
# from bokeh.io import curdoc
# from bokeh.layouts import row, column
# from bokeh.models import ColumnDataSource, DataRange1d, Select
# from bokeh.palettes import Blues4
# from bokeh.plotting import figure
import pandas as pd
from bokeh.plotting import figure, ColumnDataSource
from bokeh.io import output_file, show, output_notebook, curdoc
from bokeh.models import HoverTool, Slider, Select, Dropdown, Div, Button, Range1d, Title, NumeralTickFormatter, Circle, Square, Asterisk, Scatter, LassoSelectTool, BoxSelectTool
from bokeh.models.widgets import Panel, Tabs, MultiChoice, Spinner, MultiSelect
from bokeh.layouts import row, column, gridplot, widgetbox, layout
from bokeh.transform import factor_cmap
from bokeh.palettes import Category20, Spectral10, Turbo256, Turbo
# from bokeh.plotting.figure.Figure import sq
from bokeh.application.handlers import FunctionHandler
from bokeh.application import Application
from bokeh.embed import file_html, server_document
from bokeh.resources import CDN
from bokeh.themes import built_in_themes,Theme
cat_columns = ['','country_category','work_station_category','production_line_category','plant_category','division_category','record_day_name','record_month_name','record_year_month','record_year']
idx_columns = ['tenant_id','record_date']
int_format = NumeralTickFormatter(format="#,##0")
global circle1
global circle2
global circle3
def get_cmap(df,fld:str):
cat = sorted(df[fld].unique())
cat_num = len(cat)
if cat_num <= 11:
return factor_cmap(field_name=fld,palette=Turbo[cat_num],factors=cat)
else:
color_step = int(256/len(cat))
palette_colors=[]
for color in range(0,255,color_step):
palette_colors.append(Turbo256[color])
return factor_cmap(field_name=fld,palette=palette_colors,factors=cat) #palette=Turbo256[len(cat)]
def get_source(selected_vars:list):
df_src = pd.read_csv('bokeh-app/data/main_dataframe_head.csv',parse_dates=['record_date'])
df_src['record_year'] = df_src['record_year'].astype(str)
return df_src[selected_vars]
def tab1_list_df_vars(var1,var2,var3,var_cat,var_size):
lst = idx_columns.copy()
if var1 == '':
lst.append(selectable_columns[1])
else:
lst.append(var1)
if var2 == '':
lst.append(selectable_columns[2])
else:
lst.append(var2)
if var3 == '':
lst.append(selectable_columns[3])
else:
lst.append(var3)
if var_cat != '':
lst.append(var_cat)
if var_size != '':
lst.append(var_size)
return lst
def set_selectable_columns():
df = pd.read_csv('bokeh-app/data/main_dataframe_head.csv',parse_dates=['record_date'])
df['record_year'] = df['record_year'].astype(str)
selectable_columns = df.columns.tolist()
selectable_columns = list(set(selectable_columns) - set(idx_columns) - set(cat_columns))
selectable_columns.insert(0,'')
selectable_columns.sort()
return selectable_columns
def set_selectable_tenants():
df = pd.read_csv('bokeh-app/data/main_dataframe_head.csv',parse_dates=['record_date'])
df['record_year'] = df['record_year'].astype(str)
selectable_tenants = sorted(df.tenant_id.unique())
selectable_tenants.insert(0,'')
selectable_tenants.sort()
return selectable_tenants
def build_plot(p, df, var_x, var_y, transparency, var_cat, var_size):
if var_size != '':
temp = ((df[var_size] - df[var_size].min()) / (df[var_size].max() - df[var_size].min())) * 100
df[var_size] = temp.round(0).astype(int)
src = ColumnDataSource(df)
if var_cat == '':
cat_cmap = 'blue'
else:
cat_cmap = get_cmap(df,var_cat)
    p.title.text = '''Variable '{0}' versus variable '{1}' '''.format(var_x,var_y)
p.renderers = []
if hasattr(p.legend,'items'):
p.legend.items = []
if var_cat != '' and var_size != '':
c = p.circle(var_x,var_y,source=src,alpha=transparency,fill_color=cat_cmap,legend_field=var_cat,size=var_size,
hover_fill_color='black',
hover_line_color='black',
hover_alpha=1,
selection_fill_alpha=1,
selection_line_alpha=1,
nonselection_fill_alpha=transparency,
nonselection_line_alpha=transparency)
hover = HoverTool(
tooltips = [
                ('tenant','@tenant_id'),
                ('date','@record_date{%Y-%m-%d}'),
                ('x-> {}'.format(var_x),'@{}'.format(var_x)),
                ('y-> {}'.format(var_y),'@{}'.format(var_y)),
                ('category-> {}'.format(var_cat),'@{}'.format(var_cat)),
                ('size-> {}'.format(var_size),'@{}'.format(var_size))
],
formatters={'@record_date' : 'datetime'},
renderers = [c],
mode = 'mouse'
)
elif var_cat != '' and var_size == '':
c = p.circle(var_x,var_y,source=src,alpha=transparency,fill_color=cat_cmap,legend_field=var_cat,
hover_fill_color='black',
hover_line_color='black',
hover_alpha=1,
selection_fill_alpha=1,
selection_line_alpha=1,
nonselection_fill_alpha=transparency,
nonselection_line_alpha=transparency) #get_cmap(df,var_cat)
hover = HoverTool(
tooltips = [
                ('tenant','@tenant_id'),
                ('date','@record_date{%Y-%m-%d}'),
                ('x-> {}'.format(var_x),'@{}'.format(var_x)),
                ('y-> {}'.format(var_y),'@{}'.format(var_y)),
                ('category-> {}'.format(var_cat),'@{}'.format(var_cat))
],
formatters={'@record_date' : 'datetime'},
renderers = [c],
mode = 'mouse'
)
elif var_cat == '' and var_size != '':
c = p.circle(var_x,var_y,source=src,alpha=transparency,size=var_size,
hover_fill_color='black',
hover_line_color='black',
hover_alpha=1,
selection_fill_alpha=1,
selection_line_alpha=1,
nonselection_fill_alpha=transparency,
nonselection_line_alpha=transparency)
hover = HoverTool(
tooltips = [
                ('tenant','@tenant_id'),
                ('date','@record_date{%Y-%m-%d}'),
                ('x-> {}'.format(var_x),'@{}'.format(var_x)),
                ('y-> {}'.format(var_y),'@{}'.format(var_y)),
                ('size-> {}'.format(var_size),'@{}'.format(var_size))
],
formatters={'@record_date' : 'datetime'},
renderers = [c],
mode = 'mouse'
)
else:
c = p.circle(var_x,var_y,source=src,alpha=transparency,
hover_fill_color='black',
hover_line_color='black',
hover_alpha=1,
selection_fill_alpha=1,
selection_line_alpha=1,
nonselection_fill_alpha=transparency,
nonselection_line_alpha=transparency)
hover = HoverTool(
tooltips = [
                ('tenant','@tenant_id'),
                ('date','@record_date{%Y-%m-%d}'),
                ('x-> {}'.format(var_x),'@{}'.format(var_x)),
                ('y-> {}'.format(var_y),'@{}'.format(var_y))
],
formatters={'@record_date' : 'datetime'},
renderers = [c],
mode = 'mouse'
)
# lasso = LassoSelectTool(renderers = [c])
# box = BoxSelectTool(renderers = [c])
p.add_tools(hover)
# p.add_tools(lasso)
# p.add_tools(box)
p.x_range = Range1d(0, df[var_x].max())
p.y_range = Range1d(0, df[var_y].max())
p.xaxis.axis_label = var_x
p.xaxis[0].formatter = int_format
p.yaxis.axis_label = var_y
p.yaxis[0].formatter = int_format
p.title.align = 'center'
return c
def select_on_change(event):
global circle1
global circle2
global circle3
vars_lst = tab1_list_df_vars(select_val1.value,select_val2.value,select_val3.value,select_cat.value,select_size.value)
df_selected = get_source(vars_lst)
circle1 = build_plot(plot_1, df_selected, select_val1.value, select_val2.value, alpha_slide.value, select_cat.value, select_size.value)
circle2 = build_plot(plot_2, df_selected, select_val3.value, select_val2.value, alpha_slide.value, select_cat.value, select_size.value)
circle3 = build_plot(plot_3, df_selected, select_val1.value, select_val3.value, alpha_slide.value, select_cat.value, select_size.value)
def change_transparency(attr, old, new):
for glyph in [circle1.glyph, circle2.glyph, circle3.glyph]:
glyph.fill_alpha = alpha_slide.value
def build_main_plot(event):
main_plot.renderers = []
src_col = idx_columns + [select_var_tab2.value,select_cat_tab2.value]
df = get_source(src_col)
df = df.loc[df[select_cat_tab2.value] == select_cat_val_tab2.value]
l_list = []
for tenant in select_tenant.options:
df_src = df.loc[df['tenant_id'] == tenant].copy()
src = ColumnDataSource(df_src)
if tenant != select_tenant.value:
l = main_plot.line('record_date',select_var_tab2.value,source=src,line_color='black',alpha=0.4,
hover_line_color='blue', hover_alpha=0.8)
l_list.append(l)
df_src = df.loc[df['tenant_id'] == select_tenant.value].copy()
src = ColumnDataSource(df_src)
main_plot.line('record_date',select_var_tab2.value,source=src,line_color='red',alpha=0.8,line_width=3)
hover = HoverTool(
tooltips = [
            ('tenant','@tenant_id'),
('date','@record_date{%Y-%m-%d}'),
('y-> {}'.format(select_var_tab2.value),'@{}'.format(select_var_tab2.value))
],
formatters={'@record_date' : 'datetime'},
renderers = l_list,
mode = 'mouse'
)
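    # The figure starts with 5 default tools (pan, wheel_zoom, box_zoom, reset,
    # save); anything past that is the hover tool added by a previous call, so
    # replace it instead of stacking a new one on every rebuild.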
if len(main_plot.tools) > 5:
main_plot.tools[-1] = hover
else:
main_plot.add_tools(hover)
main_plot.yaxis.axis_label = select_var_tab2.value
main_plot.xaxis.axis_label = 'Dates'
main_plot.yaxis[0].formatter = int_format
    title1_main.text = ''''{0}' of the '{1}' tenants of the '{2}' categorization '''.format(select_var_tab2.value,select_cat_val_tab2.value,select_cat_tab2.value)
    title2_main.text = '''focused on tenant {0}'''.format(select_tenant.value)
print('>>>')
def get_tenants_in_category(cat,val):
src_col = idx_columns + [cat]
df = get_source(src_col)
tenants = df.loc[df[cat] == val,'tenant_id'].unique().tolist()
tenants.sort()
return tenants
def set_options_select_tenant(attr,old,new):
tenants = get_tenants_in_category(select_cat_tab2.value,select_cat_val_tab2.value)
select_tenant.options = tenants
select_tenant.value = tenants[0]
def set_options_compare_tenants(attr,old,new):
tenants = get_tenants_in_category(select_cat_tab2.value,select_cat_val_tab2.value)
tenants_wo_selected = list(set(tenants) - set([select_tenant.value]))
tenants_wo_selected.sort()
compare_tenants.options = tenants_wo_selected
def set_category_values(attr,old,new):
df = get_source([select_cat_tab2.value])
vals = df[select_cat_tab2.value].unique().tolist()
vals.sort()
select_cat_val_tab2.options = vals
select_cat_val_tab2.value = vals[0]
def get_tab2_line_graph(main_tenant,compare_tenant):
src_col = idx_columns + [select_var_tab2.value,select_cat_tab2.value]
df = get_source(src_col)
df = df.loc[df[select_cat_tab2.value] == select_cat_val_tab2.value]
    fig = figure(title='Compared to {0}'.format(compare_tenant),tools="pan,wheel_zoom,box_zoom,reset",x_axis_type='datetime',height=100,width=200)
df_src = df.loc[df['tenant_id'] == compare_tenant].copy()
src = ColumnDataSource(df_src)
l1 = fig.line('record_date',select_var_tab2.value,source=src,line_color='blue',alpha=0.4)
df_src = df.loc[df['tenant_id'] == main_tenant].copy()
src = ColumnDataSource(df_src)
l2 = fig.line('record_date',select_var_tab2.value,source=src,line_color='red',alpha=0.8,line_width=3)
hover = HoverTool(
tooltips = [
            ('tenant','@tenant_id'),
('date','@record_date{%Y-%m-%d}'),
('y-> {}'.format(select_var_tab2.value),'@{}'.format(select_var_tab2.value))
],
formatters={'@record_date' : 'datetime'},
renderers = [l1, l2],
mode = 'vline'
)
fig.add_tools(hover)
fig.yaxis.axis_label = select_var_tab2.value
fig.yaxis[0].formatter = int_format
fig.xaxis.axis_label = 'Dates'
fig.title.align = 'center'
return fig
def build_tab2_gridplot_graphs(event):
compared_tenants = compare_tenants.value
graphs = []
for t in compared_tenants:
g = get_tab2_line_graph(select_tenant.value,t)
if len(graphs) > 0:
g.x_range = graphs[0].x_range
g.y_range = graphs[0].y_range
graphs.append(g)
layout2.children[-1] = gridplot(graphs,ncols=nb_cols.value,merge_tools=True,sizing_mode='scale_both')
    layout2.children[-2] = Div(text='''<h3>Comparison of the '{0}' of tenant {1}</h3>'''.format(select_var_tab2.value,select_tenant.value),align='center')
selectable_columns = set_selectable_columns()
selectable_tenants = set_selectable_tenants()
# Presentation models
intro_div = Div(text="""
<h1>Dashboard overview</h1>
<h3>Context</h3>
<p>The data shown in this dashboard comes from the Poka application. This application supports manufacturing management, so its data is confidential.</p>
<h3>The 'Data exploration' tab</h3>
<p>This tab gives a first look at the data. Choose 3 numeric variables and they will be plotted against one another in 3 scatter plots. These 3 variables must be set before clicking 'Load the plots'.</p>
<p>On top of these 3 plots, you can optionally add a 4th variable, which is shown through the size of the circles in each plot.</p>
<p>Besides these 4 numeric variables, you can also optionally select a category, which sets the color of the plotted points.</p>
<p>Finally, the transparency of the points can also be changed, for a better view of how the points are spread out.</p>
<h3>The 'Time analysis' tab</h3>
<p>This tab compares the evolution of a tenant over time against the evolution of other tenants of the same category.</p>
<p>For the upper plot, which shows an overall view, first select the categorization, then the desired category within it. Next, select the tenant on which to focus the analysis. Finally, choose a numeric variable as the basis of the evolution to display. Once these 4 choices are made, click 'Show the plot' to see the result.</p>
<p>In the lower part, you can select the tenants to compare one-on-one with the focus tenant. First, select one or more tenants in the list. To pick several tenants, click and drag to select adjacent ones and/or hold the 'control' key while clicking to select non-adjacent ones. Once the tenants are chosen, set how many columns the plots should span and finally click 'Show the plots'.</p>
<h3>Terminology</h3>
<ul>
<li>Tenant: <em>For a client to use the application, space is reserved for it on an instance and a tenant identifier is assigned to it. A client can have one or several tenant identifiers.</em></li>
<li>Numeric variables
<ul>
<li>Prefixes
<ul>
<li>'created_': <em>Number of created contents for the application feature that follows</em></li>
<li>'modified_': <em>Number of modified contents for the application feature that follows</em></li>
</ul>
</li>
<li>active_users: <em>Number of unique users who used the application</em></li>
<li>activities: <em>Number of activities performed in the application</em></li>
<li>connected_once: <em>Number of users who logged in to the application at least once</em></li>
<li>'_forms': <em>Form feature of the application</em></li>
<li>'_news': <em>Publication-type feature of the application</em></li>
<li>'_problems': <em>Problem-reporting feature of the application</em></li>
<li>'_skills': <em>Skills feature of the application; a skill can be associated with a user</em></li>
<li>'_skills_endorsement_requests': <em>Number of user requests to have a skill endorsed</em></li>
<li>skills_endorsements: <em>Number of endorsed skills</em></li>
<li>divisions: <em>Number of plant groupings, defined by the client</em></li>
<li>form_completions: <em>Number of completed forms</em></li>
<li>plants: <em>Number of plants associated with the tenant</em></li>
<li>production_lines: <em>Number of production lines associated with the tenant</em></li>
<li>views: <em>Number of contents viewed by users</em></li>
<li>work_stations: <em>Number of work stations associated with the tenant</em></li>
<li>workinstructions: <em>Work instructions feature of the application</em></li>
</ul>
</li>
<li>Categorical variables
<ul>
<li>Prefix
<ul>
<li>'record_': <em>Categories related to the recording date of the data [record_date]</em></li>
</ul>
</li>
<li>country_category: <em>Categorization by the country or countries of the tenant's plants</em></li>
<li>work_station_category: <em>Categorization by number of work stations</em></li>
<li>production_line_category: <em>Categorization by number of production lines</em></li>
<li>plant_category: <em>Categorization by number of plants</em></li>
<li>division_category: <em>Categorization by number of divisions</em></li>
<li>'_day_name': <em>Grouping by day of the week</em></li>
<li>'_month_name': <em>Grouping by month</em></li>
<li>'_year_month': <em>Grouping by year and month</em></li>
<li>'_year': <em>Grouping by year</em></li>
</ul>
</li>
</ul>
""")
pan0 = Panel(child=intro_div,title='Overview')
# Data exploration
# Models
select_val1 = Select(title="X-axis variable for Plots 1 & 3",options=selectable_columns,value=selectable_columns[1])
select_val2 = Select(title="Y-axis variable for Plots 1 & 2",options=selectable_columns,value=selectable_columns[2])
select_val3 = Select(title="X-axis of Plot 2 & Y-axis of Plot 3",options=selectable_columns,value=selectable_columns[3])
select_size = Select(title='Point size by',options=selectable_columns,value=None)
select_cat = Select(title='Point color by',options=cat_columns,value=None)
load_graph = Button(label='Load the plots',button_type='success')
alpha_slide = Slider(start=0.1,end=1,value=0.3,step=0.05,title='Point transparency')
plot_1 = figure(tools="pan,wheel_zoom,box_zoom,reset,save,lasso_select,box_select") #lasso_select,
plot_2 = figure(tools="pan,wheel_zoom,box_zoom,reset,save,lasso_select,box_select")
plot_3 = figure(tools="pan,wheel_zoom,box_zoom,reset,save,lasso_select,box_select")
# Creation & Dynamics
vars_lst = tab1_list_df_vars(select_val1.value,select_val2.value,select_val3.value,select_cat.value,select_size.value)
df_selected = get_source(vars_lst)
circle1 = build_plot(plot_1, df_selected, select_val1.value, select_val2.value, alpha_slide.value, select_cat.value, select_size.value)
circle2 = build_plot(plot_2, df_selected, select_val3.value, select_val2.value, alpha_slide.value, select_cat.value, select_size.value)
circle3 = build_plot(plot_3, df_selected, select_val1.value, select_val3.value, alpha_slide.value, select_cat.value, select_size.value)
df_selected = None
out_legend = None
plot_1.x_range = plot_3.x_range
plot_1.y_range = plot_2.y_range
plot_2.x_range = plot_3.y_range
load_graph.on_click(select_on_change)
alpha_slide.on_change('value',change_transparency)
# Structure
page_title = Div(text='<h1>Raw data exploration</h1>')
widget_select_val = column(Div(),select_val1,select_val2,select_val3,select_size,select_cat,load_graph,Div(),Div(),alpha_slide)
plot_grid = gridplot([[Div(text='<h3>Plot 1</h3>',align='center'),Div(text='<h3>Plot 2</h3>',align='center')],
                      [plot_1,plot_2],
                      [Div(text='<h3>Plot 3</h3>',align='center'),None],
                      [plot_3,out_legend]],
merge_tools=True) #,ncols=2
row_1 = row(widget_select_val,plot_grid)
layout1 = column(page_title,row_1)
pan1 = Panel(child=layout1,title='Data exploration')
# Time analysis
# Models
select_cat_tab2 = Select(title='Choose a categorization',options=cat_columns,value=None)
select_cat_val_tab2 = Select(title='Choose the category',value=None)
select_tenant = Select(title='Choose which tenant to focus on',options=selectable_tenants,value=None)
select_var_tab2 = Select(title='Choose the variable to display',options=selectable_columns,value=None)
compare_tenants = MultiSelect(title='Choose the tenant(s) to compare with the focus tenant',options=[],value=[],width=500,height=200)
nb_rows = Spinner(title='Number of rows',low=1,high= 20,value=2,step=1,sizing_mode='stretch_width',visible=False) #width=125,align=('start','center')
nb_cols = Spinner(title='Number of columns',low=1,high= 20,value=2,step=1,sizing_mode='stretch_width')
load_main_graph_tab2 = Button(label='Show the plot',button_type='success',align='end')
load_graphs_tab2 = Button(label='Show the plots',button_type='success',align='start',height=80,sizing_mode='stretch_width')
main_plot = figure(tools="pan,wheel_zoom,box_zoom,reset,save",x_axis_type='datetime',sizing_mode='stretch_width') #,width=1200
title1_main = Title(text='',align='center')
title2_main = Title(text='',align='center')
# Creation & Dynamics
main_plot.add_layout(title2_main,'above')
main_plot.add_layout(title1_main,'above')
main_plot.line([0,1],[0,1],alpha=0)
load_main_graph_tab2.on_click(build_main_plot)
select_cat_tab2.on_change('value',set_category_values)
select_cat_val_tab2.on_change('value',set_options_select_tenant)
select_tenant.on_change('value',set_options_compare_tenants)
load_graphs_tab2.on_click(build_tab2_gridplot_graphs)
# Structure
tab2_page_title = Div(text="<h1>Time analysis of a tenant compared to others in the same category</h1>",sizing_mode='stretch_width')
tab2_select_vars_main_graph = row(select_cat_tab2,select_cat_val_tab2,select_tenant,select_var_tab2,load_main_graph_tab2) #,sizing_mode='stretch_both'
tab2_graphs_size = column(nb_rows,nb_cols,load_graphs_tab2,width=200)
tab2_select_vars_graphs = row(compare_tenants,tab2_graphs_size)
layout2 = layout([
[tab2_page_title],
[tab2_select_vars_main_graph],
[main_plot],
[Div()],
[tab2_select_vars_graphs],
[Div()],
[Div()]
])
pan2 = Panel(child=layout2,title='Time analysis')
# city_select = Select(value=city, title='City', options=sorted(cities.keys()))
# distribution_select = Select(value=distribution, title='Distribution', options=['Discrete', 'Smoothed'])
# df = pd.read_csv(join(dirname(__file__), 'data/2015_weather.csv'))
# source = get_dataset(df, cities[city]['airport'], distribution)
# plot = make_plot(source, "Weather data for " + cities[city]['title'])
# city_select.on_change('value', update_plot)
# distribution_select.on_change('value', update_plot)
# controls = column(city_select, distribution_select)
tabs = Tabs(tabs=[pan0,pan1,pan2])
curdoc().add_root(tabs)
curdoc().title = "Poka"
curdoc().theme = 'light_minimal'
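# This script builds a Bokeh server document via curdoc(), so it is meant to be
# launched with the Bokeh server rather than plain Python, e.g. (assuming the
# code lives in an app directory named poka_dashboard -- the name is only an
# illustration):
#   bokeh serve --show poka_dashboard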
|
python
|
#!/usr/bin/env python3
import requests
import os
import json
from requests.auth import HTTPBasicAuth
import requests_unixsocket
MFMODULE_RUNTIME_HOME = os.environ['MFMODULE_RUNTIME_HOME']
ADMIN_USERNAME = "admin"
ADMIN_PASSWORD = os.environ['MFADMIN_GRAFANA_ADMIN_PASSWORD']
GRAFANA_SOCKET = "%s/tmp/grafana.sock" % MFMODULE_RUNTIME_HOME
GRAFANA_HOST = "localhost"
HOME_DASHBOARD_UID = "lCmsjhHik"
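# requests_unixsocket reaches Grafana through its unix socket via an
# http+unix:// URL: the socket path must be percent-encoded ('/' -> '%2F') to
# fit in the host part of the URL, and monkeypatch() below teaches plain
# requests.get/requests.put to handle that scheme.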
BASE_URL = "http+unix://%s" % GRAFANA_SOCKET.replace('/', '%2F')
requests_unixsocket.monkeypatch()
url = "%s/api/dashboards/uid/%s" % (BASE_URL, HOME_DASHBOARD_UID)
print(url)
dashboard = requests.get(url, auth=HTTPBasicAuth(ADMIN_USERNAME,
ADMIN_PASSWORD)).json()
print(json.dumps(dashboard, indent=4))
dashboard_id = dashboard['dashboard']['id']
print(dashboard_id)
url = "%s/api/user/preferences" % (BASE_URL,)
print(url)
preferences = requests.get(url, auth=HTTPBasicAuth(ADMIN_USERNAME,
ADMIN_PASSWORD)).json()
print(json.dumps(preferences, indent=4))
preferences['timezone'] = 'utc'
preferences['homeDashboardId'] = dashboard_id
print(json.dumps(preferences, indent=4))
print(requests.put(url, auth=HTTPBasicAuth(ADMIN_USERNAME, ADMIN_PASSWORD),
json=preferences))
url = "%s/api/org/preferences" % (BASE_URL,)
print(url)
print(requests.put(url, auth=HTTPBasicAuth(ADMIN_USERNAME, ADMIN_PASSWORD),
json=preferences))
|
python
|
import os
from dotenv import load_dotenv
load_dotenv()
AV_API_KEY = os.getenv("AV_API_KEY", "value does not exist")
AV_API_KEY_2 = os.getenv("AV_API_KEY_2", "value does not exist")
BINANCE_KEY = os.getenv("BINANCE_KEY", "Binance key not found")
BINANCE_SECRET = os.getenv("BINANCE_SECRET", "Binance secret not found")
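# The values above are read from a .env file at the project root, e.g.
# (placeholder values, not real credentials):
#   AV_API_KEY=demo
#   AV_API_KEY_2=demo
#   BINANCE_KEY=xxxxxxxx
#   BINANCE_SECRET=yyyyyyyy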
|
python
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Composite Experiment data class.
"""
from typing import Optional, Union, List
from qiskit.result import marginal_counts
from qiskit.exceptions import QiskitError
from qiskit_experiments.framework.experiment_data import ExperimentData
class CompositeExperimentData(ExperimentData):
"""Composite experiment data class"""
def __init__(
self,
experiment,
backend=None,
job_ids=None,
):
"""Initialize experiment data.
Args:
experiment (CompositeExperiment): experiment object that generated the data.
backend (Backend): Optional, Backend the experiment runs on. It can either be a
:class:`~qiskit.providers.Backend` instance or just backend name.
job_ids (list[str]): Optional, IDs of jobs submitted for the experiment.
Raises:
ExperimentError: If an input argument is invalid.
"""
super().__init__(
experiment,
backend=backend,
job_ids=job_ids,
)
# Initialize sub experiments
self._components = [
expr.__experiment_data__(expr, backend, job_ids) for expr in experiment._experiments
]
def __str__(self):
line = 51 * "-"
n_res = len(self._analysis_results)
status = self.status()
ret = line
ret += f"\nExperiment: {self.experiment_type}"
ret += f"\nExperiment ID: {self.experiment_id}"
ret += f"\nStatus: {status}"
if status == "ERROR":
ret += "\n "
ret += "\n ".join(self._errors)
ret += f"\nComponent Experiments: {len(self._components)}"
ret += f"\nCircuits: {len(self._data)}"
ret += f"\nAnalysis Results: {n_res}"
ret += "\n" + line
if n_res:
ret += "\nLast Analysis Result:"
ret += f"\n{str(self._analysis_results.values()[-1])}"
return ret
def component_experiment_data(
self, index: Optional[Union[int, slice]] = None
) -> Union[ExperimentData, List[ExperimentData]]:
"""Return component experiment data"""
if index is None:
return self._components
if isinstance(index, (int, slice)):
return self._components[index]
raise QiskitError(f"Invalid index type {type(index)}.")
def _add_single_data(self, data):
"""Add data to the experiment"""
# TODO: Handle optional marginalizing IQ data
metadata = data.get("metadata", {})
if metadata.get("experiment_type") == self._type:
# Add parallel data
self._data.append(data)
# Add marginalized data to sub experiments
if "composite_clbits" in metadata:
composite_clbits = metadata["composite_clbits"]
else:
composite_clbits = None
for i, index in enumerate(metadata["composite_index"]):
sub_data = {"metadata": metadata["composite_metadata"][i]}
if "counts" in data:
if composite_clbits is not None:
sub_data["counts"] = marginal_counts(data["counts"], composite_clbits[i])
else:
sub_data["counts"] = data["counts"]
self._components[index].add_data(sub_data)
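# Minimal usage sketch (hypothetical experiment objects; assumes a composite
# experiment class such as qiskit-experiments' ParallelExperiment, whose run()
# produces a CompositeExperimentData):
#
#   par_exp = ParallelExperiment([exp0, exp1])
#   exp_data = par_exp.run(backend)
#   sub_data = exp_data.component_experiment_data(0)  # first component's data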
|
python
|
# -*- coding: utf8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import messages
from horizon import tables
from horizon.utils import memoized
from tuskar_ui import api
class DeleteNode(tables.BatchAction):
name = "delete"
action_present = _("Delete")
action_past = _("Deleting")
data_type_singular = _("Node")
data_type_plural = _("Nodes")
classes = ('btn-danger',)
def allowed(self, request, obj=None):
if not obj:
# this is necessary because table actions use this function
# with obj=None
return True
return (getattr(obj, 'instance_uuid', None) is None and
obj.power_state not in api.node.POWER_ON_STATES)
def action(self, request, obj_id):
if obj_id is None:
messages.error(request, _("Select some nodes to delete."))
return
api.node.Node.delete(request, obj_id)
class ActivateNode(tables.BatchAction):
name = "activate"
action_present = _("Activate")
action_past = _("Activated")
data_type_singular = _("Node")
data_type_plural = _("Nodes")
def allowed(self, request, obj=None):
if not obj:
# this is necessary because table actions use this function
# with obj=None
return True
return (obj.cpus and obj.memory_mb and obj.local_gb and
obj.cpu_arch)
def action(self, request, obj_id):
if obj_id is None:
messages.error(request, _("Select some nodes to activate."))
return
api.node.Node.set_maintenance(request, obj_id, False)
api.node.Node.set_power_state(request, obj_id, 'off')
class SetPowerStateOn(tables.BatchAction):
name = "set_power_state_on"
action_present = _("Power On")
action_past = _("Powering On")
data_type_singular = _("Node")
data_type_plural = _("Nodes")
def allowed(self, request, obj=None):
if not obj:
# this is necessary because table actions use this function
# with obj=None
return True
return obj.power_state not in api.node.POWER_ON_STATES
def action(self, request, obj_id):
if obj_id is None:
messages.error(request, _("Select some nodes to power on."))
return
api.node.Node.set_power_state(request, obj_id, 'on')
class SetPowerStateOff(tables.BatchAction):
name = "set_power_state_off"
action_present = _("Power Off")
action_past = _("Powering Off")
data_type_singular = _("Node")
data_type_plural = _("Nodes")
def allowed(self, request, obj=None):
if not obj:
# this is necessary because table actions use this function
# with obj=None
return True
return (
obj.power_state in api.node.POWER_ON_STATES and
getattr(obj, 'instance_uuid', None) is None
)
def action(self, request, obj_id):
if obj_id is None:
messages.error(request, _("Select some nodes to power off."))
return
api.node.Node.set_power_state(request, obj_id, 'off')
class NodeFilterAction(tables.FilterAction):
def filter(self, table, nodes, filter_string):
"""Really naive case-insensitive search."""
q = filter_string.lower()
def comp(node):
return any(q in unicode(value).lower() for value in (
node.ip_address,
node.cpus,
node.memory_mb,
node.local_gb,
))
return filter(comp, nodes)
@memoized.memoized
def _get_role_link(role_id):
if role_id:
return reverse('horizon:infrastructure:roles:detail',
kwargs={'role_id': role_id})
def get_role_link(datum):
return _get_role_link(getattr(datum, 'role_id', None))
def get_power_state_with_transition(node):
if node.target_power_state and (
node.power_state != node.target_power_state):
return "{0} -> {1}".format(
node.power_state, node.target_power_state)
return node.power_state
def get_state_string(node):
state_dict = {
api.node.DISCOVERING_STATE: _('Discovering'),
api.node.DISCOVERED_STATE: _('Discovered'),
api.node.PROVISIONED_STATE: _('Provisioned'),
api.node.PROVISIONING_FAILED_STATE: _('Provisioning Failed'),
api.node.PROVISIONING_STATE: _('Provisioning'),
api.node.FREE_STATE: _('Free'),
}
node_state = node.state
return state_dict.get(node_state, node_state)
class BaseNodesTable(tables.DataTable):
node = tables.Column('uuid',
link="horizon:infrastructure:nodes:node_detail",
verbose_name=_("Node Name"))
role_name = tables.Column('role_name',
link=get_role_link,
verbose_name=_("Deployment Role"))
cpus = tables.Column('cpus',
verbose_name=_("CPU (cores)"))
memory_mb = tables.Column('memory_mb',
verbose_name=_("Memory (MB)"))
local_gb = tables.Column('local_gb',
verbose_name=_("Disk (GB)"))
power_status = tables.Column(get_power_state_with_transition,
verbose_name=_("Power Status"))
state = tables.Column(get_state_string,
verbose_name=_("Status"))
class Meta(object):
name = "nodes_table"
verbose_name = _("Nodes")
table_actions = (NodeFilterAction, SetPowerStateOn, SetPowerStateOff,
DeleteNode)
row_actions = (SetPowerStateOn, SetPowerStateOff, DeleteNode)
template = "horizon/common/_enhanced_data_table.html"
def get_object_id(self, datum):
return datum.uuid
def get_object_display(self, datum):
return datum.uuid
class AllNodesTable(BaseNodesTable):
class Meta(object):
name = "all_nodes_table"
verbose_name = _("All")
hidden_title = False
columns = ('node', 'cpus', 'memory_mb', 'local_gb', 'power_status',
'state')
table_actions = (NodeFilterAction, SetPowerStateOn, SetPowerStateOff,
DeleteNode)
row_actions = (SetPowerStateOn, SetPowerStateOff, DeleteNode)
template = "horizon/common/_enhanced_data_table.html"
class ProvisionedNodesTable(BaseNodesTable):
class Meta(object):
name = "provisioned_nodes_table"
verbose_name = _("Provisioned")
hidden_title = False
table_actions = (NodeFilterAction, SetPowerStateOn, SetPowerStateOff,
DeleteNode)
row_actions = (SetPowerStateOn, SetPowerStateOff, DeleteNode)
template = "horizon/common/_enhanced_data_table.html"
class FreeNodesTable(BaseNodesTable):
class Meta(object):
name = "free_nodes_table"
verbose_name = _("Free")
hidden_title = False
columns = ('node', 'cpus', 'memory_mb', 'local_gb', 'power_status')
table_actions = (NodeFilterAction, SetPowerStateOn, SetPowerStateOff,
DeleteNode)
row_actions = (SetPowerStateOn, SetPowerStateOff, DeleteNode,)
template = "horizon/common/_enhanced_data_table.html"
class MaintenanceNodesTable(BaseNodesTable):
class Meta(object):
name = "maintenance_nodes_table"
verbose_name = _("Maintenance")
hidden_title = False
columns = ('node', 'cpus', 'memory_mb', 'local_gb', 'power_status',
'state')
table_actions = (NodeFilterAction, ActivateNode, SetPowerStateOn,
SetPowerStateOff, DeleteNode)
row_actions = (ActivateNode, SetPowerStateOn, SetPowerStateOff,
DeleteNode)
template = "horizon/common/_enhanced_data_table.html"
|
python
|
"""
Lab jack GUI
"""
import datetime
import lab_jack_lib as lj
import PySimpleGUI as sg
def logprint(message=''):
"""
printing ='on'
print and return None
"""
form = '[{}, {}]'.format(datetime.datetime.now(), message)
print(form)
def now_datetime(type=1):
"""
type1:"%Y-%m-%d %H:%M:%S"
type2:"%Y%m%d%H%M%S"
type3:"%Y%m%d_%H%M%S"
type4:"%Y%m%d%H%M"
elae: "%Y%m%d"
:return: string date
"""
now = datetime.datetime.now()
if type == 1:
now_string = now.strftime("%Y-%m-%d %H:%M:%S")
elif type == 2:
now_string = now.strftime("%Y%m%d%H%M%S")
elif type == 3:
now_string = now.strftime("%Y%m%d_%H%M%S")
elif type == 4:
now_string = now.strftime("%Y%m%d%H%M")
elif type == 5:
now_string = now.strftime("%m%d_%H:%M:%S")
elif type == 6:
now_string = now.strftime("%Y%m%d")
else:
now_string = now
return now_string
def create_window():
"""create PySimpleGUI window
"""
# sg.theme('Light Blue 1')
sg.theme('Dark Blue 3')
# sg.theme('Black')
layout = [
[sg.Text('Current Position [mm]', size=(20, 1)), sg.Text('',
font=('Helvetica', 20), size=(10, 1), key='-cpA-')],
[sg.Text('Current Position [dp]', size=(20, 1)), sg.Text('',
font=('Helvetica', 20), size=(10, 1), key='-cpD-')],
        [sg.Button(button_text='Current Position',size=(7,3),key='-cp-')],
[sg.Button(button_text='Move Abs', key='-absmove-')],
[sg.Text('Abs Position [mm]', size=(20, 1)), sg.InputText('3', size=(5, 1), key='-abP-')],
[sg.Button(button_text='Move Shift', key='-shiftmove-')],
[sg.Text('Shift position [mm]', size=(20, 1)), sg.InputText('1', size=(5, 1), key='-abS-')],
[sg.Button(button_text='Move Home', key='-homemove-')],
# UP : Current position: 6.886823333333333 [mm], 8264188 [device units]
# down : Current position: 3.2741558333333334 [mm], 3928987 [device units]
[sg.Button(button_text='Move Up', key='-upmove-')],
[sg.Text('Abs set upper Position [mm]', size=(22, 1)), sg.InputText('6.88', size=(5, 1), key='-up-')],
[sg.Button(button_text='Move down', key='-downmove-')],
[sg.Text('Abs set lower Position [mm]', size=(22, 1)), sg.InputText('3.27', size=(5, 1), key='-low-')],
[sg.Text('--Exit Close--',font=('Helvetica', 14))],
[sg.Button(button_text='Exit',key='-cancel-')],
[sg.Output(size=(50, 10))],
]
    # location=(horizontal, vertical) LT:(0,0), LB:(0,1079), RT:(1919,0), RB:(1919,1079)
    return sg.Window('Lab jack Thorlabs', layout, location=(900, 50))
def main():
window = create_window()
while True:
event, values = window.read(timeout=100, timeout_key='-timeout-')
if event in (None, '-cancel-',):
logprint('Exit')
break
        elif event == '-cp-':
            dp,ap = lj.jack_status()
            window['-cpA-'].update(ap)
            window['-cpD-'].update(dp)
            logprint('{} {}'.format(ap, dp))
        elif event == '-absmove-':
abs_pos = float(values['-abP-'])
lj.jack_move(abs_pos)
dp,ap = lj.jack_status()
window['-cpA-'].update(ap)
window['-cpD-'].update(dp)
        elif event == '-shiftmove-':
abs_shift = float(values['-abS-'])
lj.jack_relative_move(abs_shift)
dp,ap = lj.jack_status()
window['-cpA-'].update(ap)
window['-cpD-'].update(dp)
        elif event == '-homemove-':
lj.jack_home()
dp,ap = lj.jack_status()
window['-cpA-'].update(ap)
window['-cpD-'].update(dp)
        elif event == '-upmove-':
abs_pos_up = float(values['-up-'])
lj.jack_move(abs_pos_up)
dp,ap = lj.jack_status()
window['-cpA-'].update(ap)
window['-cpD-'].update(dp)
        elif event == '-downmove-':
abs_pos_low = float(values['-low-'])
lj.jack_move(abs_pos_low)
dp,ap = lj.jack_status()
window['-cpA-'].update(ap)
window['-cpD-'].update(dp)
        elif event == '-timeout-':
pass
# dp,ap = lj.jack_status()
# window['-cpA-'].update(ap)
# window['-cpD-'].update(dp)
window.close()
if __name__ == '__main__':
main()
|
python
|
print("Hello Open Source")
|
python
|
def extractLightNovelsWorld(item):
"""
Light Novels World
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # This comes first, because it occasionally includes non-numbered chapters.
if 'Tsuki ga Michibiku Isekai Douchuu (POV)' in item['tags']:
if not postfix and '-' in item['title']:
postfix = item['title'].split("-")[-1].strip()
        return buildReleaseMessageWithType(item, 'Tsuki ga Michibiku Isekai Douchuu', vol, chp, frag=frag, postfix=postfix)
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if 'Announcements' in item['tags']:
return None
if 'Amaku Yasashii Sekai de Ikiru ni wa' in item['tags']:
return buildReleaseMessageWithType(item, 'Amaku Yasashii Sekai de Ikiru ni wa', vol, chp, frag=frag, postfix=postfix)
if 'Omae Mitai na Hiroin ga Ite Tamaruka!' in item['tags']:
return buildReleaseMessageWithType(item, 'Omae Mitai na Hiroin ga Ite Tamaruka!', vol, chp, frag=frag, postfix=postfix)
if 'the nine godheads' in item['tags']:
return buildReleaseMessageWithType(item, 'The Nine Godheads', vol, chp, frag=frag, postfix=postfix)
if 'World Seed' in item['tags']:
return buildReleaseMessageWithType(item, 'World Seed', vol, chp, frag=frag, postfix=postfix)
if 'Asura' in item['tags']:
return buildReleaseMessageWithType(item, 'Asura', vol, chp, frag=frag, postfix=postfix)
if 'Infinity Armament' in item['tags']:
return buildReleaseMessageWithType(item, 'Infinity Armament', vol, chp, frag=frag, postfix=postfix)
if 'Peerless Demonic Lord' in item['tags']:
return buildReleaseMessageWithType(item, 'Peerless Demonic Lord', vol, chp, frag=frag, postfix=postfix)
if 'The Throne Under the Starry Sky' in item['tags']:
return buildReleaseMessageWithType(item, 'The Throne Under the Starry Sky', vol, chp, frag=frag, postfix=postfix)
if 'Twin Sword' in item['tags']:
return buildReleaseMessageWithType(item, 'Twin Sword', vol, chp, frag=frag, postfix=postfix)
if 'Sayonara Ryuusei Konnichiwa Jinsei' in item['tags']:
return buildReleaseMessageWithType(item, 'Sayonara Ryuusei Konnichiwa Jinsei', vol, chp, frag=frag, postfix=postfix)
if 'Online Game: Evil Dragon Against The Heaven' in item['tags']:
return buildReleaseMessageWithType(item, 'Online Game: Evil Dragon Against The Heaven', vol, chp, frag=frag, postfix=postfix)
if 'Hakushaku Reijo ha Chito Tensei Mono' in item['tags']:
return buildReleaseMessageWithType(item, 'Hakushaku Reijo ha Chito Tensei Mono', vol, chp, frag=frag, postfix=postfix)
if 'Ore to Kawazu-san no Isekai Houriki' in item['tags'] or 'Ore to Kawazu-san no Isekai Hourouki' in item['tags']:
return buildReleaseMessageWithType(item, 'Ore to Kawazu-san no Isekai Houriki', vol, chp, frag=frag, postfix=postfix)
if 'Dragon Blood Warrior' in item['tags']:
return buildReleaseMessageWithType(item, 'Dragon Blood Warrior', vol, chp, frag=frag, postfix=postfix)
if 'Evil-like Duke Household' in item['tags']:
return buildReleaseMessageWithType(item, 'Evil-like Duke Household', vol, chp, frag=frag, postfix=postfix)
if 'Great Dao Commander' in item['tags']:
return buildReleaseMessageWithType(item, 'Great Dao Commander', vol, chp, frag=frag, postfix=postfix)
if 'It’s Impossible that My Evil Overlord is So Cute' in item['tags']:
return buildReleaseMessageWithType(item, 'It’s Impossible that My Evil Overlord is So Cute', vol, chp, frag=frag, postfix=postfix)
if 'I’m OP, but I Began an Inn' in item['tags']:
return buildReleaseMessageWithType(item, 'I’m OP, but I Began an Inn', vol, chp, frag=frag, postfix=postfix)
if 'The Lame Daoist Priest' in item['tags']:
return buildReleaseMessageWithType(item, 'The Lame Daoist Priest', vol, chp, frag=frag, postfix=postfix)
if 'The Last Apostle' in item['tags']:
return buildReleaseMessageWithType(item, 'The Last Apostle', vol, chp, frag=frag, postfix=postfix)
if 'Isekai Teni Jobumasuta e no Michi' in item['tags']:
return buildReleaseMessageWithType(item, 'Isekai Teni Jobumasuta e no Michi', vol, chp, frag=frag, postfix=postfix)
if 'Against the Fate' in item['tags']:
return buildReleaseMessageWithType(item, 'Against the Fate', vol, chp, frag=frag, postfix=postfix)
if 'Hone no aru Yatsu' in item['tags']:
return buildReleaseMessageWithType(item, 'Hone no aru Yatsu', vol, chp, frag=frag, postfix=postfix)
if 'LV999 Villager' in item['tags']:
return buildReleaseMessageWithType(item, 'LV999 Villager', vol, chp, frag=frag, postfix=postfix)
if "Immortal's Farmland" in item['tags']:
return buildReleaseMessageWithType(item, "Immortal's Farmland", vol, chp, frag=frag, postfix=postfix)
if 'Returning from the Immortal World' in item['tags']:
return buildReleaseMessageWithType(item, 'Returning from the Immortal World', vol, chp, frag=frag, postfix=postfix)
if 'Starchild Escapes Arranged Marriage' in item['tags']:
return buildReleaseMessageWithType(item, 'Starchild Escapes Arranged Marriage', vol, chp, frag=frag, postfix=postfix)
if '9 Coffins of the Immortals' in item['tags']:
return buildReleaseMessageWithType(item, '9 Coffins of the Immortals', vol, chp, frag=frag, postfix=postfix)
if 'Fantastic Creatures’ Travelogue' in item['tags']:
return buildReleaseMessageWithType(item, 'Fantastic Creatures’ Travelogue', vol, chp, frag=frag, postfix=postfix)
if "Hell's Cinema" in item['tags']:
return buildReleaseMessageWithType(item, "Hell's Cinema", vol, chp, frag=frag, postfix=postfix)
if 'The Great Conqueror' in item['tags']:
return buildReleaseMessageWithType(item, 'The Great Conqueror', vol, chp, frag=frag, postfix=postfix)
if 'Almighty Student' in item['tags']:
return buildReleaseMessageWithType(item, 'Almighty Student', vol, chp, frag=frag, postfix=postfix)
if 'Godly Student' in item['tags']:
return buildReleaseMessageWithType(item, 'Godly Student', vol, chp, frag=frag, postfix=postfix)
if 'Legend of the Cultivation God' in item['tags']:
return buildReleaseMessageWithType(item, 'Legend of the Cultivation God', vol, chp, frag=frag, postfix=postfix)
if 'Supreme Arrow God' in item['tags']:
return buildReleaseMessageWithType(item, 'Supreme Arrow God', vol, chp, frag=frag, postfix=postfix)
if 'Blade Online' in item['tags']:
return buildReleaseMessageWithType(item, 'Blade Online', vol, chp, frag=frag, postfix=postfix)
if 'The Crimson Dragon' in item['tags']:
return buildReleaseMessageWithType(item, 'The Crimson Dragon', vol, chp, frag=frag, postfix=postfix)
if 'Sky Prince' in item['tags']:
return buildReleaseMessageWithType(item, 'Sky Prince', vol, chp, frag=frag, postfix=postfix, tl_type='oel')
if 'Aenthar' in item['tags']:
return buildReleaseMessageWithType(item, 'Aenthar', vol, chp, frag=frag, postfix=postfix, tl_type='oel')
if 'How to Survive a Summoning 101' in item['tags']:
return buildReleaseMessageWithType(item, 'How to Survive a Summoning 101', vol, chp, frag=frag, postfix=postfix, tl_type='oel')
return False
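# Usage sketch: 'item' is a feed entry dict with 'title' and 'tags' keys; the
# helpers extractVolChapterFragmentPostfix and buildReleaseMessageWithType are
# assumed to be provided by the surrounding scraper framework, e.g.
#   extractLightNovelsWorld({'title': 'World Seed - Chapter 12 part 2',
#                            'tags': ['World Seed']})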
|
python
|
from torchvision import datasets, transforms
from core.data.data_loaders.base import BaseDataLoader
class CIFAR100Loader(BaseDataLoader):
""" CIFAR100 data loading + transformations """
def __init__(self, data_dir, batch_size,
shuffle=True, validation_split=0.0,
training=True,
transformations="DefaultTransformations",
**kwargs):
print("[INFO][DATA] \t Preparing the CIFAR100 dataset ...")
_transf = BaseDataLoader.get_transformations(
self, name=transformations)
self.trans = _transf.get_train_trans() if training is True \
else _transf.get_test_trans()
self.data_dir = data_dir
self.dataset = datasets.CIFAR100(
self.data_dir, train=training, download=True, transform=self.trans)
super().__init__(self.dataset, batch_size, shuffle,
validation_split,
**kwargs)
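if __name__ == '__main__':
    # Smoke test sketch: downloads CIFAR100 into ./data and pulls one batch.
    # Assumes BaseDataLoader wraps torch.utils.data.DataLoader and that a
    # "DefaultTransformations" transformation set is registered.
    loader = CIFAR100Loader(data_dir='./data', batch_size=64)
    images, labels = next(iter(loader))
    print(images.shape, labels.shape)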
|
python
|
from pint import UnitRegistry
ureg = UnitRegistry()
ureg.define('kn_cm2 = kilonewton / centimeter ** 2 = kn_cm2')
ureg.define('kNcm = kilonewton * centimeter = kncm')
ureg.define('kNm = kilonewton * meter = knm')
_Q = ureg.Quantity
e = 0.00001
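if __name__ == '__main__':
    # Quick check of the custom units: 1 kn_cm2 equals 10 MPa, and 1 kNm
    # equals 100 kNcm.
    stress = _Q(21.0, 'kn_cm2')
    moment = _Q(1.5, 'kNm')
    print(stress.to('megapascal'))  # -> 210.0 megapascal
    print(moment.to('kNcm'))        # -> 150.0 kNcm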
|
python
|
'''
Created on Jan 23, 2018
@author: kyao
'''
import numpy as np
import typing
from d3m.metadata import hyperparams, params
from d3m import container
from d3m.exceptions import InvalidArgumentValueError
import d3m.metadata.base as mbase
from sklearn.random_projection import johnson_lindenstrauss_min_dim, GaussianRandomProjection
# from d3m.primitive_interfaces.featurization import FeaturizationLearnerPrimitiveBase
# changed primitive class to fit in devel branch of d3m (2019-1-17)
from d3m.primitive_interfaces.unsupervised_learning import UnsupervisedLearnerPrimitiveBase
from d3m.primitive_interfaces.base import CallResult
import pandas as pd
from . import config
Inputs = container.List#[container.DataFrame] # this format is for old version of d3m
Outputs = container.DataFrame
class Params(params.Params):
x_dim: int
y_dim: int
value_dimension: int
projection_param: typing.Dict
components_: typing.Optional[np.ndarray]
value_found: bool
class Hyperparams(hyperparams.Hyperparams):
'''
eps : Maximum distortion rate as defined by the Johnson-Lindenstrauss lemma.
'''
eps = hyperparams.Uniform(
lower=0.1,
upper=0.5,
default=0.2,
semantic_types=["http://schema.org/Float", "https://metadata.datadrivendiscovery.org/types/TuningParameter"]
)
generate_metadata = hyperparams.UniformBool(
default = True,
description="A control parameter to set whether to generate metada after the feature extraction. It will be very slow if the columns length is very large. For the default condition, it will turn off to accelerate the program running.",
semantic_types=["http://schema.org/Boolean", "https://metadata.datadrivendiscovery.org/types/ControlParameter"]
)
class RandomProjectionTimeSeriesFeaturization(UnsupervisedLearnerPrimitiveBase[Inputs, Outputs, Params, Hyperparams]):
'''
classdocs
'''
metadata = hyperparams.base.PrimitiveMetadata({
"id": "dsbox.timeseries_featurization.random_projection",
"version": config.VERSION,
"name": "DSBox random projection timeseries featurization ",
"description": "A simple timeseries featurization using random projection",
"python_path": "d3m.primitives.feature_extraction.RandomProjectionTimeSeriesFeaturization.DSBOX",
"primitive_family": "FEATURE_EXTRACTION",
"algorithm_types": [ "RANDOM_PROJECTION" ],
"source": {
"name": config.D3M_PERFORMER_TEAM,
"contact": config.D3M_CONTACT,
"uris": [ config.REPOSITORY ]
},
### Automatically generated
# "primitive_code"
# "original_python_path"
# "schema"
# "structural_type"
### Optional
"keywords": [ "feature_extraction", "timeseries"],
"installation": [ config.INSTALLATION ],
#"location_uris": [],
"precondition": ["NO_MISSING_VALUES", "NO_CATEGORICAL_VALUES"],
"effects": ["NO_JAGGED_VALUES"],
#"hyperparms_to_tune": []
})
def __init__(self, *, hyperparams: Hyperparams) -> None:
super().__init__(hyperparams=hyperparams)
self.hyperparams = hyperparams
self._model = None
self._training_data = None
self._value_found = False
self._x_dim = 0 # x_dim : the amount of timeseries dataset
self._y_dim = 0 # y_dim : the length of each timeseries dataset
self._value_dimension = 0 # value_dimension : used to determine which dimension data is the values we want
self._fitted = False
def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:
# if self._training_data is None or self._y_dim==0:
inputs_timeseries = inputs[1]
inputs_d3mIndex = inputs[0]
if not self._fitted:
return CallResult(None, True, 0)
if isinstance(inputs_timeseries, np.ndarray):
X = np.zeros((inputs_timeseries.shape[0], self._y_dim))
else:
X = np.zeros((len(inputs_timeseries), self._y_dim))
for i, series in enumerate(inputs_timeseries):
if series.shape[1] > 1 and not self._value_found:
series_output = pd.DataFrame()
for j in range(series.shape[1]):
series_output = pd.concat([series_output,series.iloc[:, j]])
else:
series_output = series
if (series_output.shape[0] < self._y_dim):
# pad with zeros
X[i,:series_output.shape[0]] = series_output.iloc[:series_output.shape[0], self._value_dimension]
else:
# Truncate or just fit in
X[i,:] = series_output.iloc[:self._y_dim, self._value_dimension]
# save the result to DataFrame format
output_ndarray = self._model.transform(X)
output_dataFrame = container.DataFrame(output_ndarray)
# update the original index to be d3mIndex
output_dataFrame = container.DataFrame(pd.concat([pd.DataFrame(inputs_d3mIndex, columns=['d3mIndex']), pd.DataFrame(output_dataFrame)], axis=1))
# add d3mIndex metadata
index_metadata_selector = (mbase.ALL_ELEMENTS, 0)
index_metadata = {'semantic_types': ('https://metadata.datadrivendiscovery.org/types/TabularColumn', 'https://metadata.datadrivendiscovery.org/types/PrimaryKey')}
output_dataFrame.metadata = output_dataFrame.metadata.update(metadata=index_metadata, selector=index_metadata_selector)
# add other metadata
if self.hyperparams["generate_metadata"]:
for each_column in range(1, output_dataFrame.shape[1]):
metadata_selector = (mbase.ALL_ELEMENTS, each_column)
metadata_each_column = {'semantic_types': ('https://metadata.datadrivendiscovery.org/types/TabularColumn', 'https://metadata.datadrivendiscovery.org/types/Attribute')}
output_dataFrame.metadata = output_dataFrame.metadata.update(metadata=metadata_each_column, selector=metadata_selector)
return CallResult(output_dataFrame, True, None)
def set_training_data(self, *, inputs: Inputs) -> None:
if len(inputs) != 2:
raise InvalidArgumentValueError('Expecting two inputs')
inputs_timeseries = inputs[1]
inputs_d3mIndex = inputs[0]
if len(inputs_timeseries) == 0:
print("Warning: Inputs timeseries data to timeseries_featurization primitive's length is 0.")
return
# update: now we need to get the whole shape of inputs to process
lengths = [x.shape[0] for x in inputs_timeseries]
widths = [x.shape[1] for x in inputs_timeseries]
# here just take first timeseries dataset to search
column_name = list(inputs_timeseries[0].columns.values)
        '''
        Note: the previous version always loaded a fixed column, which could
        pick up the wrong data (e.g. on dataset 66 it read the "time" column
        instead of "value"). The check below inspects each column name to make
        sure the correct data is read.
        '''
for i in range(len(column_name)):
if 'value' in column_name[i]:
self._value_found = True
self._value_dimension = i
is_same_length = len(set(lengths)) == 1
is_same_width = len(set(widths)) == 1
if not is_same_width:
print("Warning: some csv file have different dimensions!")
if self._value_found :
if is_same_length:
self._y_dim = lengths[0]
else:
# Truncate all time series to the shortest time series
self._y_dim = min(lengths)
else:
if is_same_length:
self._y_dim = lengths[0] * widths[0]
else:
# Truncate all time series to the shortest time series
self._y_dim = min(lengths) * min(widths)
self._x_dim = len(inputs_timeseries)
self._training_data = np.zeros((self._x_dim, self._y_dim))
for i, series in enumerate(inputs_timeseries):
if series.shape[1] > 1 and not self._value_found :
series_output = pd.DataFrame()
for each_dimension in range(series.shape[1]):
series_output = pd.concat([series_output,series.iloc[:, each_dimension]])
else:
series_output = series
self._training_data[i, :] = series_output.iloc[:self._y_dim, self._value_dimension]
def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:
eps = self.hyperparams['eps']
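        # johnson_lindenstrauss_min_dim returns the minimum number of
        # random-projection components needed to preserve pairwise distances
        # within a (1 +/- eps) factor for self._x_dim samples, per the
        # Johnson-Lindenstrauss lemma.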
n_components = johnson_lindenstrauss_min_dim(n_samples=self._x_dim, eps=eps)
print("[INFO] n_components is", n_components)
if n_components > self._y_dim:
# Default n_components == 'auto' fails. Need to explicitly assign n_components
self._model = GaussianRandomProjection(n_components=self._y_dim, random_state=self.random_seed)
else:
try:
self._model = GaussianRandomProjection(eps=eps, random_state=self.random_seed)
self._model.fit(self._training_data)
            except Exception:
                print("[Warning] Fitting with the given eps value failed; falling back to default parameters.")
self._model = GaussianRandomProjection()
self._model.fit(self._training_data)
self._fitted = True
return CallResult(None, has_finished=True)
def get_params(self) -> Params:
if self._model:
return Params(y_dim = self._y_dim,
x_dim = self._x_dim,
value_found = self._value_found,
value_dimension = self._value_dimension,
projection_param = self._model.get_params(),
components_ = getattr(self._model, 'components_', None)
)
else:
return Params({'y_dim': 0, 'projection_param': {}})
def set_params(self, *, params: Params) -> None:
self._y_dim = params['y_dim']
self._x_dim = params['x_dim']
self._value_found = params['value_found']
self._value_dimension = params['value_dimension']
self._model = None
if params['projection_param']:
self._model = GaussianRandomProjection()
self._model.set_params(**params['projection_param'])
self._model.components_ = params['components_']
self._fitted = True
else:
self._fitted = False
|
python
|
"""IPs domain API."""
from ..base import ApiDomainResource
class IPs(ApiDomainResource):
"""
IPs domain resource.
"""
api_endpoint = "ips"
DOMAIN_NAMESPACE = True
def list(self):
"""
List the existing IPs on the domain.
"""
return self.request("GET")
def create(self, ip): # pylint: disable=invalid-name
"""
Assign a dedicated IP to the domain.
:param ip: the new IP address to assign
"""
return self.request("POST", data={"ip": ip})
def delete(self, ip): # pylint: disable=invalid-name
"""
Delete an existing IP on a domain.
"""
return self.request("DELETE", ip)
|
python
|
"""
pcolor: for plotting pcolor using matplotlib
"""
import matplotlib.pyplot as plt
import numpy as np
import os
import time
def is_linux():
import platform
s = platform.system()
return {
'Linux': True,
'Darwin': False,
'Windows': False,
}[s]
def is_mac():
import platform
s = platform.system()
return {
'Linux': False,
'Darwin': True,
'Windows': False,
}[s]
def linux_plot_issue():
if is_linux():
import matplotlib
matplotlib.use('TkAgg')
# matplotlib.use('agg')
print('backend:', matplotlib.get_backend())
# matplotlib.hold(true) # deprecated
output_directory = './generated'
os.makedirs(output_directory, exist_ok=True)
class PColor:
""" Show and save pcolor (w,h,3) in range float [0,1] """
@staticmethod
def plot_show_image(G_paintings2d, file_id, sleep_sec, more_info):
plt.clf()
import matplotlib
matplotlib.rc('axes', edgecolor='white')
matplotlib.rc('axes', facecolor='black')
ax = plt.gca()
ax.set_facecolor((0.0, 0.0, 0.0))
#print(dir(ax))
#exit()
#ax.set_edgecolor((1.0, 1.0, 1.0))
#print(np.max(np.max(G_paintings2d,axis=2), axis=0))
#print(np.min(np.min(G_paintings2d,axis=2), axis=1))
#print(G_paintings2d.shape)
#plt.imshow(G_paintings2d)
#plt.imshow((G_paintings2d * 0.2 + 0.5)*0.2)
#img_pix_rescale = (G_paintings2d * 0.05 + 0.5)
#img_pix_rescale = (G_paintings2d)
#plt.imshow(img_pix_rescale, vmin=-100, vmax=100)
#img_pix_rescale = ((G_paintings2d) / 80.0 *40 ) +0.5
#img_pix_rescale = ((G_paintings2d) / 2.0 ) +0.5
img_pix_rescale = G_paintings2d
# print('img_pix_rescale.shape', img_pix_rescale.shape)
RGB3D = 3
assert len(img_pix_rescale.shape) == RGB3D
if img_pix_rescale.shape[2] < RGB3D:
img_pix_rescale = np.max(img_pix_rescale, axis=2)
img_pix_rescale = img_pix_rescale[:,:,None]
img_pix_rescale = np.repeat(img_pix_rescale, RGB3D, axis=2)
if img_pix_rescale.shape[2] > RGB3D:
img_pix_rescale = img_pix_rescale[:,:,:RGB3D]
#scaled_back_to_255 = img_pix_rescale * 128
#scaled_back_to_255 = ((img_pix_rescale / 2.0)+0.5) * 128
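        # Map float pixels in roughly [-1, 1] to [1, 255] (x * 127 + 128),
        # then clamp anything above 255 before casting to uint8 below.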
scaled_back_to_255 = img_pix_rescale * 127.0 + 128
scaled_back_to_255[scaled_back_to_255 > 255] = 255
plt.imshow(scaled_back_to_255.astype(np.uint8))
print('min max:', np.min(img_pix_rescale.ravel()), np.max(img_pix_rescale.ravel()))
#plt.pcolor(np.mean(G_paintings2d, axis=2))
acc, score = more_info
plt.text(-.5, 0, 'D accuracy=%.2f (0.5 for D to converge)' % acc, fontdict={'size': 15})
plt.text(-.5, G_paintings2d.shape[1]*0.5, 'D score= %.2f (-1.38 for G to converge)' % score, fontdict={'size': 15})
# plt.colorbar()
PColor.next_plot(sleep_sec)
if(file_id is not None):
PColor.save( os.path.join(output_directory, file_id + '.png') )
@staticmethod
def save(filename):
plt.draw()
plt.savefig( filename )
print("saved")
if is_mac():
wait_time_sec = 0.1
time.sleep(wait_time_sec)
""" Next plot. Platform-independent """
@staticmethod
def next_plot(sleep_sec):
if is_mac():
print('draw')
import sys
sys.stdout.flush()
plt.draw()
time.sleep(sleep_sec)
elif is_linux():
# """ "Modal" """
# plt.show()
#plt.draw()
#plt.show(block=False)
#time.sleep(0.5)
#plt.draw()
"""
# futile:
plt.ion()
plt.draw()
plt.show()
plt.ioff()
time.sleep(sleep_sec)
time.sleep(2.0)
plt.close()
plt.ioff()
"""
        else:
            raise NotImplementedError('unsupported platform for plotting')
@staticmethod
def init():
linux_plot_issue()
plt.cla()
#plt.imshow(main_artworks[0])
if is_linux():
# plt.ioff() # not necessary
# plt.show()
#plt.ion()
plt.draw()
plt.show(block=False)
plt.draw()
time.sleep(0.5)
return
elif is_mac():
plt.draw()
plt.ion()
plt.show()
time.sleep(0.1)
plt.ion() # something about continuous plotting
return
        else:
            raise NotImplementedError('unsupported platform for plotting')
@staticmethod
    def last():
if is_mac():
plt.ioff()
plt.show()
elif is_linux():
pass
        else:
            raise NotImplementedError('unsupported platform for plotting')
|
python
|
from empire.python.typings import *
from empire.enums.base_enum import BaseEnum
class TimeUnits(BaseEnum):
NANOS: Final[int] = 0
MICROS: Final[int] = 1
MILLIS: Final[int] = 2
SECONDS: Final[int] = 3
MINUTES: Final[int] = 4
HOURS: Final[int] = 5
DAYS: Final[int] = 6
class TimeUtil:
@staticmethod
def get_readable_time_value(time_value: float, source_unit: int, precision: int = 2) -> str:
current_unit = source_unit
if current_unit == TimeUnits.DAYS:
return '{} {}'.format(round(time_value, precision), TimeUtil.unit_to_string(current_unit))
while current_unit <= 6:
if current_unit in [TimeUnits.NANOS, TimeUnits.MICROS, TimeUnits.MILLIS] and time_value > 1000:
current_unit += 1
time_value /= 1000
continue
elif current_unit in [TimeUnits.SECONDS, TimeUnits.MINUTES] and time_value > 60:
current_unit += 1
time_value /= 60
continue
elif current_unit == TimeUnits.HOURS and time_value > 24:
current_unit += 1
time_value /= 24
continue
else:
break
return '{} {}'.format(round(time_value, precision), TimeUtil.unit_to_string(current_unit))
@staticmethod
def unit_to_string(unit: int) -> str:
if unit == TimeUnits.NANOS:
return 'ns'
elif unit == TimeUnits.MICROS:
return 'µs'
elif unit == TimeUnits.MILLIS:
return 'ms'
elif unit == TimeUnits.SECONDS:
return 's'
elif unit == TimeUnits.MINUTES:
return 'min'
elif unit == TimeUnits.HOURS:
return 'hours'
elif unit == TimeUnits.DAYS:
return 'days'
else:
return '~'
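if __name__ == '__main__':
    # Examples: both values roll over into minutes (90 s and 90,000 ms).
    print(TimeUtil.get_readable_time_value(90, TimeUnits.SECONDS))    # 1.5 min
    print(TimeUtil.get_readable_time_value(90000, TimeUnits.MILLIS))  # 1.5 min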
|
python
|
from __future__ import division
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
from numpy.random import rand
#import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import cPickle as pickle
import pylab as plb
import os, sys
def run():
    '''
    Read results of clash score survey
    pdb_clash_scores = list([score_with_hydrogen,score_without_hydrogen]...)
    pdb_clash_score_and_name = list([score_with_hydrogen,score_without_hydrogen,experiment_type,file_name]...)
    pdb_clash_score_dict[file_name] = [score_with_hydrogen,score_without_hydrogen,experiment_type]
    '''
# locate the directory containing the log files
osType = sys.platform
if osType.startswith('win'):
        directory_path = r'c:\Phenix\Dev\Work\work\Clashes'
else:
directory_path = '/net/cci-filer2/raid1/home/youval/Work/work/Clashes'
# convert the path to python format
directory_path = os.path.realpath(directory_path)
os.chdir(directory_path)
    pdb_clash_scores = pickle.load(open('pdb_clash_scores','rb'))
    pdb_clash_score_and_name = pickle.load(open('pdb_clash_score_and_name','rb'))
    pdb_clash_score_dict = pickle.load(open('pdb_clash_score_dict','rb'))
# in the original run ELECTRON MICROSCOPE was not an option - fix that
for i,x in enumerate(pdb_clash_score_and_name):
if x[2]=='':
pdb_clash_score_and_name[i][2] = 'ELECTRON MICROSCOPE'
pdb_clash_scores.sort()
pdb_clash_score_and_name.sort()
print 'Total number of clash score records is: {}'.format(len(pdb_clash_score_and_name))
print '*'*60
#print_list(pdb_clash_score_and_name[-6:], 2)
#print_list(pdb_clash_score_and_name[:50], 5)
return pdb_clash_score_and_name,pdb_clash_score_dict
def print_list(l,n):
    '''print list l with n items per row'''
    x = len(l) % n
    if x:
        # pad so the length is a multiple of n
        l.extend(['',]*(n-x))
    for i in range(len(l)//n):
        s = i*n
        e = s + n
        print l[s:e]
def plot_data(pdb_clash_score_and_name,by_type_dict):
for k in by_type_dict:
# create a list with the same color for all points with the same experiment type
data = by_type_dict[k]
#c = np.ones(len(data))*0.647933889333
# build data with size and color
#datalist = [[i,d[0],(d[0]-d[1])] for i,d in enumerate(data)]
x = range(1,len(data)+1)
#x = [d[0] for d in datalist]
y = [d[1] for d in data] # use clash score without pdb hydrogens as y (keep_hydrogens=False)
y2 = [d[0] for d in data] # use clash score with pdb hydrogens as y
# make the size of the points on the plot relative to the difference in the clash scores
s = [50 + 5*abs(d[1]-d[0]) for d in data]
# The color of points where both clash scores are the same
c = ['y',]*len(data)
# Color the data points in a different colors
for i in range(len(data)):
if data[i][0]>data[i][1]: c[i] = 'b'
elif data[i][0]<data[i][1]: c[i] = 'r'
#c = rand(len(data))
plot_experiment(x,y,s,c,k,data)
hist_both_clash_scores(y,y2,k)
def plot_experiment(x,y,s,c,k,data):
'''
plot a sub plot for an experiment type
x: enumerating data points
y: clash score with hydrogen
s: size the data point, related to the difference between with/without hydrogen clash scores
c: data point color
k: pdb file experiment type
'''
def onpick3(event):
ind = event.ind
i = ind[0]
print '*'*50
print 'PDB file {0} Experiment type: {1}'.format(data[i][2],k)
print 'Clash score with hydrogen kept: {0:.4f} without hydrogen: {1:.4f}'.format(data[i][0],data[i][1])
print c[i]
# set figure look
gr = 1.61803398875
    h = 10 # figure height
    w = gr*h # figure width
    d = 0.05 # distance between plot region and figure edge
fig = plt.figure(figsize=(w,h))
plt.subplots_adjust(left=d, right=1-d, top=1-d, bottom=d)
ax1 = fig.add_subplot(111)
    # set scatter plot and allow interactive selection of points on plot
col = ax1.scatter(x,y,s,c=c, picker=True)
fig.canvas.mpl_connect('pick_event',onpick3)
fig.set_size_inches(w,h)
#
maxy = max(y)
maxs = max(s)/100
ax1.set_ylim([-maxy*.01,maxy+maxs])
ax1.set_xlim([-x[-1]*0.01,x[-1]*1.01+maxs])
#
plt.title(k)
delta_score = [abs(i[0]-i[1]) for i in data]
minscore = min(delta_score)
maxscore = max(delta_score)
text1 = 'Number of data points: {0}\nMin score difference: {1}\nMax score difference: {2}\n\n'.format(x[-1],minscore,maxscore)
text2 = 'Blue: Score excluding H is lower\nRed: Score including PDB H is lower\nYellow: The same '
plt.text(x[-1]*0.1,maxy*.65, text1+text2,fontsize=16)
plt.ylabel('Clash score - Excluding hydrogens in input file')
fig.savefig('pscoll.eps')
plt.show()
def hist_both_clash_scores(x,y,k):
'''
x: clash score without pdb hydrogens as y (keep_hydrogens=False)
y: clash score with pdb hydrogens as y
k: Experiment type
'''
# set figure look
    h = 11 # figure height
    w = 11 # figure width
    d = 0.05 # distance between plot region and figure edge
fig, axScatter = plt.subplots(figsize=(w,h))
plt.subplots_adjust(left=d, right=1-d, top=1-d, bottom=d)
# the scatter plot:
axScatter.scatter(x, y)
axScatter.set_aspect(1.)
# create new axes on the right and on the top of the current axes
# The first argument of the new_vertical(new_horizontal) method is
# the height (width) of the axes to be created in inches.
divider = make_axes_locatable(axScatter)
binlim = 200
axHistx = divider.append_axes("top", 3, pad=1, sharex=axScatter, xlabel='Clash score without PDB Hydrogen', xlim=[0,binlim])
axHisty = divider.append_axes("right", 3, pad=1, sharey=axScatter, ylabel='Clash score with PDB Hydrogen', ylim=[0,binlim])
#bins = np.arange(-lim, lim + binwidth, binwidth)
bins = 40
axHistx.hist(x, bins=bins)
axHisty.hist(y, bins=bins, orientation='horizontal')
plt.figtext(0.4,0.97,k, fontsize=16)
plt.draw()
plt.show()
def zero_scores(clash_data):
'''
    In ELECTRON MICROSCOPE files there is no EXPERIMENT TYPE and the clash scores are zero.
    Remove those records.
'''
zero_score = [x for x in clash_data if x[0:2] == [0,0]]
zero_scores_dict = initial_dict()
for x in zero_score:
keys = x[2].split(',')
for k in keys:
zero_scores_dict[k].append(x[3])
    print('Number of records with 0.0 clash scores: {}'.format(len(zero_score)))
    print('='*60)
    for x in zero_scores_dict:
        print('{0:30} : {1:4}'.format(x,len(zero_scores_dict[x])))
    print('*'*60)
def create_by_type_dict(pdb_clash_score_and_name):
    '''(list) -> dictionary
    Sort clash scores by experiment type.
    '''
by_type_dict = initial_dict()
for x in pdb_clash_score_and_name:
keys = x[2].split(',')
for k in keys:
by_type_dict[k].append([x[0],x[1],x[3]])
    print('Experimental type breakdown')
    print('='*60)
    for x in by_type_dict:
        print(' {0:30} : {1:4}'.format(x,len(by_type_dict[x])))
    print('*'*60)
return by_type_dict
def initial_dict():
init_dict = dict([('X-RAY DIFFRACTION',[]),
('NMR',[]),
('NEUTRON DIFFRACTION',[]),
('ELECTRON MICROSCOPE',[]),
('Other',[]),
('SMALL ANGLE X-RAY SCATTERING',[])])
return init_dict
if __name__=='__main__':
pdb_clash_score_and_name,pdb_clash_score_dict = run()
# Look at records with 0.0 scores
zero_scores(pdb_clash_score_and_name)
by_type_dict = create_by_type_dict(pdb_clash_score_and_name)
plot_data(pdb_clash_score_and_name,by_type_dict)
    print('done')
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard.api import neutron
from openstack_dashboard.api import nova
from sahara_dashboard.api import sahara as saharaclient
from sahara_dashboard.content.data_processing \
import tabs as sahara_tabs
from sahara_dashboard.content. \
data_processing.utils import workflow_helpers as helpers
from sahara_dashboard.content.data_processing.clusters.nodegroup_templates \
import tables as node_group_template_tables
LOG = logging.getLogger(__name__)
class NodeGroupTemplatesTab(sahara_tabs.SaharaTableTab):
table_classes = (node_group_template_tables.NodegroupTemplatesTable, )
name = _("Node Group Templates")
slug = "node_group_templates_tab"
template_name = "horizon/common/_detail_table.html"
def get_nodegroup_templates_data(self):
try:
table = self._tables['nodegroup_templates']
search_opts = {}
filter = self.get_server_filter_info(table.request, table)
if filter['value'] and filter['field']:
search_opts = {filter['field']: filter['value']}
node_group_templates = saharaclient.nodegroup_template_list(
self.request, search_opts)
except Exception:
node_group_templates = []
exceptions.handle(self.request,
_("Unable to fetch node group template list"))
return node_group_templates
class GeneralTab(tabs.Tab):
name = _("General Info")
slug = "nodegroup_template_details_tab"
template_name = "nodegroup_templates/_details.html"
def get_context_data(self, request):
template_id = self.tab_group.kwargs['template_id']
try:
template = saharaclient.nodegroup_template_get(
request, template_id)
except Exception as e:
template = {}
LOG.error(
"Unable to fetch node group template details: %s" % str(e))
return {"template": template}
try:
flavor = nova.flavor_get(request, template.flavor_id)
except Exception:
flavor = {}
exceptions.handle(request,
_("Unable to fetch flavor for template."))
floating_ip_pool_name = None
if template.floating_ip_pool:
try:
floating_ip_pool_name = self._get_floating_ip_pool_name(
request, template.floating_ip_pool)
except Exception:
exceptions.handle(request,
_("Unable to fetch floating ip pools."))
base_image_name = None
if template.image_id:
try:
base_image_name = saharaclient.image_get(
request, template.image_id).name
except Exception:
exceptions.handle(request,
_("Unable to fetch Base Image with id: %s.")
% template.image_id)
security_groups = helpers.get_security_groups(
request, template.security_groups)
if getattr(template, 'boot_from_volume', None) is None:
show_bfv = False
else:
show_bfv = True
return {"template": template, "flavor": flavor,
"floating_ip_pool_name": floating_ip_pool_name,
"base_image_name": base_image_name,
"security_groups": security_groups,
"show_bfv": show_bfv}
def _get_floating_ip_pool_name(self, request, pool_id):
pools = [pool for pool in neutron.floating_ip_pools_list(
request) if pool.id == pool_id]
return pools[0].name if pools else pool_id
class ConfigsTab(tabs.Tab):
name = _("Service Configurations")
slug = "nodegroup_template_service_configs_tab"
template_name = "nodegroup_templates/_service_confs.html"
def get_context_data(self, request):
template_id = self.tab_group.kwargs['template_id']
try:
template = saharaclient.nodegroup_template_get(
request, template_id)
except Exception as e:
template = {}
LOG.error(
"Unable to fetch node group template details: %s" % str(e))
return {"template": template}
class NodegroupTemplateDetailsTabs(tabs.TabGroup):
slug = "nodegroup_template_details"
tabs = (GeneralTab, ConfigsTab, )
sticky = True
|
python
|
from utils.data_reader import prepare_data_for_feature, generate_vocab, read_data
from utils.features import get_feature
from utils.utils import getMetrics
from utils import constant
from baseline.baseline_classifier import get_classifier
from baseline.baseline_features import get_features_for_prediction
import numpy as np
import csv
import pandas as pd
import os
'''
Before running this file, please set the save path, e.g.:
python predict_classifier.py --save_path 'save/LR_final/' --classifier 'LR' --C 0.01 --pred_score --include_test
'''
if not os.path.exists(constant.save_path):
os.makedirs(constant.save_path)
label2emotion = ["others","happy", "sad","angry"]
## define parameters for getting feature
features = constant.features
## define parameters for building model
classifier_list = ["LR","SVM","XGB"]
## LR: c
## SVM: c
## XGB: n_estimators, max_depth
parameter_list = [constant.C,constant.n_estimators,constant.max_depth]
classifier = constant.classifier
print('features: ', features)
print('Classifier: ', classifier)
print('Parameters: ', parameter_list)
txt_file = classifier+"_baseline.txt"
microF1s = 0
## define parameters for checkpoint
if classifier=="XGB":
params = str(parameter_list[1])+"-"+str(parameter_list[2])
pass
else:
params = str(parameter_list[0])
pass
record_file = classifier+"_"+params+".csv"
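## each row of record_file has the form "<split_index>,<microF1>" (see the csv.writer call below)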
checkpoint = False
currentSplit = 0
## check checkpoint
if os.path.exists(constant.save_path+record_file):
checkpoint = True
    ## read checkpoint
    with open(constant.save_path+record_file, newline='') as csvfile:
        mLines = csvfile.readlines()
    ## get current split (first column of the last row)
    targetLine = mLines[-1]
    currentSplit = int(targetLine.split(',')[0])
    ## read F1 score records
    rLines = mLines[-currentSplit-1:]
    for line in rLines:
        microF1s += float(line.split(',')[1])
    currentSplit += 1
model = get_classifier(ty=classifier, c=parameter_list[0], n_estimators=parameter_list[1], max_depth=parameter_list[2])
for i in range(constant.num_split):
## confirm checkpoint
if checkpoint==True and i<currentSplit:
print("Split {} is skipped because it has been run!".format(i))
continue
## prepare feature for model
X_train, y_train, X_val, y_val, X_test, ind, X_text = get_features_for_prediction(features, i, use_pca=False)
print('shape of X_train',X_train.shape)
print('shape of X_test',X_test.shape)
print("###### Running folder %d ######" % (i+1))
    if i==0:
        y_pred = []
    ## train and predict
model.fit(X_train.reshape(X_train.shape[0], -1), y_train) ## [29010,3,emb_size] --> [29010, 3 * emb_size]
    ## predict on the test set
y_pred = model.predict(X_test.reshape(X_test.shape[0], -1))
print("###### Writing result of folder %d to file ######" % (i+1))
    ## generate files with 3 turns and labels
    file = constant.save_path+"test_{}.txt".format(i)
    if not os.path.exists(file):
        preds_dict = {}
        indices = []
        for idx, text, pred in zip(ind,X_text,y_pred):
            preds_dict[idx] = "{}\t{}\t{}\t{}\t{}\n".format(idx,text[0],text[1],text[2],label2emotion[pred])
            indices.append(idx)
        with open(file, 'w') as the_file:
            the_file.write("id\tturn1\tturn2\tturn3\tlabel\n")
            ## write predictions in ascending id order (the original range-based lookup assumed ids 0..N-1)
            for idx in sorted(indices):
                the_file.write(preds_dict[idx])
## run validation set to get the F1 score
if constant.pred_score:
if i==0:
txtfile = open(txt_file,'a')
txtfile.write("\n--------------------\n")
txtfile.write("Classifier %s, Parameters: %f, %f, %f" %(classifier, parameter_list[0], parameter_list[1], parameter_list[2]))
txtfile.close()
y_pred_val = model.predict(X_val.reshape(X_val.shape[0], -1))
        ## convert output to one-hot
one_hot = np.zeros((y_pred_val.shape[0], 4))
one_hot[np.arange(y_pred_val.shape[0]), y_pred_val] = 1
## call the scorer
acc, microPrecision, microRecall, microF1 = getMetrics(one_hot,y_val,verbose=True)
txtfile = open(txt_file,'a')
txtfile.write("(EXPERIMENT %d) microF1 score %f" % ((i+1), microF1))
txtfile.write("\n--------------------\n")
txtfile.close()
result = [i,microF1]
with open(constant.save_path+record_file, 'a') as f:
writer = csv.writer(f)
writer.writerow(result)
microF1s = microF1s + microF1
microF1s = microF1s/constant.num_split
txtfile = open(txt_file,'a')
txtfile.write("\nAVERAGE F1 VAL: %3.5f\n\n" % microF1s)
txtfile.close()
|
python
|
"""Users models."""
from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
"""Custom user.
This inherits all the fields from Django's basic user,
but also has an avatar.
"""
def __str__(self) -> str:
"""Represent the user by their full name, or email, or ID."""
return self.get_full_name() or self.email or str(self.pk)
|
python
|
n1 = int(input('Enter the first term: '))
razao = int(input('Enter the common difference: '))
contador = 10
while contador != 0:
    n1 += razao
    print(n1)
    contador -= 1
termos = int(input('To show more terms, enter how many; otherwise enter 0: '))
contador += termos
while termos > 0:
    while contador > 0:
        n1 += razao
        print(n1)
        contador -= 1
    termos = int(input('To show more terms, enter how many; otherwise enter 0: '))
    contador += termos
if termos <= 0:
    print('End of program')
|
python
|
import magicbot
import wpilib
import ctre
import wpilib.drive
from robotpy_ext.common_drivers import navx
class MyRobot(magicbot.MagicRobot):
def createObjects(self):
self.init_drive_train()
def init_drive_train(self):
        # fl, bl, fr, br = (30, 40, 50, 10)  # practice bot (overridden below)
        br, fr, bl, fl = (1, 7, 2, 5)  # on competition robot
self.br_motor = ctre.wpi_talonsrx.WPI_TalonSRX(br)
self.bl_motor = ctre.wpi_talonsrx.WPI_TalonSRX(bl)
self.fl_motor = ctre.wpi_talonsrx.WPI_TalonSRX(fl)
self.fr_motor = ctre.wpi_talonsrx.WPI_TalonSRX(fr)
self.fr_motor.setInverted(True)
self.br_motor.setInverted(True)
self.gyro = navx.AHRS.create_spi()
self.joystick = wpilib.Joystick(0)
self.joystick2 = wpilib.Joystick(1)
        self.robot_drive = wpilib.RobotDrive(self.fl_motor, self.bl_motor, self.fr_motor, self.br_motor)
def teleopInit(self):
pass
def teleopPeriodic(self):
self.robot_drive.arcadeDrive(self.joystick.getX(), self.joystick.getY())
if __name__ == '__main__':
wpilib.run(MyRobot)
|
python
|
from notipy_me import Notipy
from repairing_genomic_gaps import cae_200, build_synthetic_dataset_cae, train_model
if __name__ == "__main__":
with Notipy():
model = cae_200()
train, test = build_synthetic_dataset_cae(200)
model = train_model(model, train, test, path="single_gap")
|
python
|
"""Tornado handlers for security logging."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from tornado import web
from . import csp_report_uri
from ...base.handlers import APIHandler
class CSPReportHandler(APIHandler):
"""Accepts a content security policy violation report"""
_track_activity = False
def skip_check_origin(self):
"""Don't check origin when reporting origin-check violations!"""
return True
def check_xsrf_cookie(self):
# don't check XSRF for CSP reports
return
@web.authenticated
def post(self):
"""Log a content security policy violation report"""
self.log.warning(
"Content security violation: %s", self.request.body.decode("utf8", "replace")
)
default_handlers = [(csp_report_uri, CSPReportHandler)]
|
python
|
"""exercism bob module."""
def response(hey_bob):
"""
Model responses for input text.
:param hey_bob string - The input provided.
    :return string - The response.
"""
answer = 'Whatever.'
hey_bob = hey_bob.strip()
yelling = hey_bob.isupper()
asking_question = len(hey_bob) > 0 and hey_bob[-1] == '?'
if asking_question and yelling:
answer = "Calm down, I know what I'm doing!"
elif asking_question:
answer = 'Sure.'
elif yelling:
answer = 'Whoa, chill out!'
elif hey_bob == '':
answer = 'Fine. Be that way!'
return answer
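# Illustrative examples (not part of the original module):
#   response("WHAT?")  -> "Calm down, I know what I'm doing!"
#   response("Hello.") -> "Whatever."
#   response("  ")     -> "Fine. Be that way!"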
|
python
|
import os
"""
Guide: how to read your graph
Description:
    Several methods are provided for reading graph/network data, but other formats are not excluded; please implement your own format as needed
### Graph Kinds & Data Structures
    UNDIRECTED-GRAPH <SYMMETRIC-MATRIX, UPPER-MATRIX>
    DIRECTED-GRAPH <ASYMMETRIC-MATRIX>
    TREE <TREE-HIERARCHY>
    MULTI-GRAPH
### Data IO Structures
    SYMMETRIC-MATRIX
        Square matrix N * N
        Numpy array or list of lists
        The upper triangle must mirror the lower triangle
    ASYMMETRIC-MATRIX
        Square matrix N * N
        Numpy array or list of lists
    UPPER-MATRIX
        Either a list of upper-triangle rows (each row one element shorter than the previous) or a numpy array whose lower-triangle elements are all zero
TREE-HIERARCHY
A list of NODE_TREE objects or a multi-level nested dictionary
"""
class GraphReader():
def __init__(self):
pass
def _prior_action_(self):
pass
def _post_action_(self):
pass
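# A minimal sketch (not part of the original module) of a concrete reader for
# the SYMMETRIC-MATRIX format described above. The method name and validation
# logic are assumptions for illustration, not an existing API.
import numpy as np
class SymmetricMatrixReader(GraphReader):
    def read(self, data):
        mat = np.asarray(data, dtype=float)
        # must be a square N * N matrix
        if mat.ndim != 2 or mat.shape[0] != mat.shape[1]:
            raise ValueError("SYMMETRIC-MATRIX must be square (N * N)")
        # the upper triangle must mirror the lower triangle
        if not np.allclose(mat, mat.T):
            raise ValueError("matrix is not symmetric")
        return mat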
|
python
|
## Fake Binary
## 8 kyu
## https://www.codewars.com/kata/57eae65a4321032ce000002d
def fake_bin(x):
num = ''
for char in x:
if int(char) < 5:
num += '0'
else:
num += '1'
return num
|
python
|
#!/usr/bin/env python3
import datetime
import argparse
from pathlib import Path
import importlib
target = ''
technique_info = {
'blackbot_id': 'T1530',
'external_id': '',
'controller': 'lightsail_download_ssh_keys',
'services': ['Lightsail'],
'prerequisite_modules': [],
'arguments_to_autocomplete': [],
'version': '1',
'aws_namespaces': [],
'last_updated_by': 'Blackbot, Inc. Sun Sep 20 04:13:33 UTC 2020' ,
'ttp_exec': '',
'ttp_mitigation': '',
'ttp_detection': '',
'intent': 'Downloads Lightsails default SSH key pairs.',
'name': 'ADD_NAME_HERE',
}
parser = argparse.ArgumentParser(add_help=False, description=technique_info['name'])
def main(args, awsattack_main):
args = parser.parse_args(args)
import_path = 'ttp.src.lightsail_download_ssh_keys_src'
src_code = __import__(import_path, globals(), locals(), ['technique_info'], 0)
importlib.reload(src_code)
awsattack_main.chain = True
return src_code.main(args, awsattack_main, data=technique_info)
def summary(data, awsattack_main):
out = ' Keys downloaded to:\n'
out += ' ' + data['dl_path'] + '\n'
out += ' Downloaded Key Pairs for the following regions: \n'
for region in sorted(data['region_key_pairs']):
out += ' {}\n'.format(region)
return out
|
python
|
import abc
import argparse
import functools
import os
import pathlib
import shutil
import numpy as np
import pandas as pd
def parse_args():
parser = argparse.ArgumentParser(description="Client allocation")
parser.add_argument('-c', '--train-clients', default=100, type=int)
parser.add_argument('-t', '--test-clients', default=12, type=int)
parser.add_argument('-s', '--seed', default=42, type=int)
parser.add_argument('-d', '--data-root', default='./data')
parser.add_argument('--train-clients-subdir', default='train_clients')
parser.add_argument('--test-clients-subdir', default='test_clients')
return parser.parse_args()
def split_dataframe(df, split):
assert split in ['train', 'test']
imgs = df[df['Dataset_type'] == split.upper()].drop('Dataset_type', 1)
return imgs
def make_client_ids(num_clients):
return ["client-{:02d}".format(i) for i in range(num_clients)]
def split_dataframe_for_clients(df, client_ids):
splits = np.array_split(
df.loc[np.random.permutation(df.index)], len(client_ids))
return dict(zip(client_ids, splits))
def allocate_samples_on_disk(
client_samples,
data_root: pathlib.Path,
split_subdir: str,
clients_subdir: str,
):
split_root = data_root / split_subdir # e.g. 4P/data/train
clients_root = data_root / clients_subdir # e.g. 4P/data/train_clients
for client_id, sample in client_samples.items():
# e.g. 4P/data/train_clients/03/
client_dir_name = "{:2d}".format(client_id)
client_path = clients_root / client_dir_name
for label in ["0", "1", "2"]:
(client_path / label).mkdir(parents=True, exist_ok=True)
for imname, label in zip(sample.X_ray_image_name, sample.Numeric_Label):
shutil.copy(split_root / imname, client_path / str(label) / imname)
def main(args):
np.random.seed(args.seed)
data_root = pathlib.Path(args.data_root)
df = pd.read_csv(data_root.joinpath("Labels.csv"))
train_df = split_dataframe(df, 'train')
test_df = split_dataframe(df, 'test')
train_ids = make_client_ids(args.train_clients)
test_ids = make_client_ids(args.test_clients)
train_splits = split_dataframe_for_clients(train_df, train_ids)
test_splits = split_dataframe_for_clients(test_df, test_ids)
allocate_samples_on_disk(
train_splits, data_root, 'train', args.train_clients_subdir)
allocate_samples_on_disk(
test_splits, data_root, 'test', args.test_clients_subdir)
if __name__ == '__main__':
args = parse_args()
main(args)
|
python
|
# coding=utf-8
import argparse
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch import nn
import pandas as pd
from Source import utils
import time
import logging
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
def get_params():
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", default='Data/', type=str)
parser.add_argument("--dataset", default='knowit', type=str, help='knowit or tvqa')
parser.add_argument("--bert_model", default='bert-base-uncased', type=str)
parser.add_argument("--do_lower_case", default=True)
parser.add_argument('--seed', type=int, default=181)
parser.add_argument("--lr", default=5e-5, type=float)
parser.add_argument("--workers", default=8)
parser.add_argument("--device", default='cuda', type=str, help="cuda, cpu")
parser.add_argument("--batch_size", default=32, type=int)
parser.add_argument('--momentum', default=0.9)
parser.add_argument('--nepochs', default=100, help='Number of epochs', type=int)
parser.add_argument('--patience', default=15, type=int)
parser.add_argument('--no_cuda', action='store_true')
parser.add_argument('--weight_loss_read', default=0.06, type=float)
parser.add_argument('--weight_loss_observe', default=0.06, type=float)
parser.add_argument('--weight_loss_recall', default=0.08, type=float)
parser.add_argument('--weight_loss_final', default=0.80, type=float)
parser.add_argument('--use_read', action='store_true')
parser.add_argument('--use_observe', action='store_true')
parser.add_argument('--use_recall', action='store_true')
parser.add_argument("--train_name", default='FusionMW', type=str)
args, unknown = parser.parse_known_args()
return args
class FusionMW(nn.Module):
def __init__(self):
super(FusionMW, self).__init__()
self.fc_read = nn.Sequential(nn.Linear(768, 1))
self.fc_obs = nn.Sequential(nn.Linear(768, 1))
self.fc_recall = nn.Sequential(nn.Linear(768, 1))
self.dropout = nn.Dropout(0.5)
self.classifier = nn.Sequential(nn.Linear(3, 1))
def forward(self, in_read_feat, in_obs_feat, in_recall_feat):
num_choices = in_read_feat.shape[1]
# R, O, LL features
flat_in_read_feat = in_read_feat.view(-1, in_read_feat.size(-1))
flat_in_obs_feat = in_obs_feat.view(-1, in_obs_feat.size(-1))
flat_in_recall_feat = in_recall_feat.view(-1, in_recall_feat.size(-1))
flat_in_read_feat = self.dropout(flat_in_read_feat)
flat_in_obs_feat = self.dropout(flat_in_obs_feat)
flat_in_recall_feat = self.dropout(flat_in_recall_feat)
# R, O, LL scores
read_scores = self.fc_read(flat_in_read_feat)
obs_scores = self.fc_obs(flat_in_obs_feat)
recall_scores = self.fc_recall(flat_in_recall_feat)
reshaped_read_scores = read_scores.view(-1, num_choices)
reshaped_obs_scores = obs_scores.view(-1, num_choices)
reshaped_recall_scores = recall_scores.view(-1, num_choices)
# Final score
all_feat = torch.squeeze(torch.cat([read_scores, obs_scores, recall_scores], 1), 1)
final_scores = self.classifier(all_feat)
reshaped_final_scores = final_scores.view(-1, num_choices)
return [reshaped_read_scores, reshaped_obs_scores, reshaped_recall_scores, reshaped_final_scores]
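# Shape walkthrough (explanatory note, not original to the file): each *_feat
# input is [batch, num_choices, 768]; flattening to [batch*num_choices, 768]
# lets a single Linear(768, 1) score every answer choice, and
# .view(-1, num_choices) restores one row of scores per question.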
class LanguageData(object):
def __init__(self, id_q, question, subtitles, answer1, answer2, answer3, answer4, kg, label, vision = None):
self.id_q = id_q
self.question = question
self.subtitles = subtitles
self.kg = kg
self.label = label
self.vision = vision
self.answers = [
answer1,
answer2,
answer3,
answer4,
]
def trainEpoch(args, train_loader, model, criterion, optimizer, epoch):
read_losses, obs_losses, recall_losses = utils.AverageMeter(), utils.AverageMeter(), utils.AverageMeter()
final_losses = utils.AverageMeter()
losses = utils.AverageMeter()
model.train()
for batch_idx, (input, target) in enumerate(train_loader):
# Inputs to Variable type
input_var = list()
for j in range(len(input)):
input_var.append(torch.autograd.Variable(input[j]).cuda())
# Targets to Variable type
target_var = list()
for j in range(len(target)):
            target[j] = target[j].cuda(non_blocking=True)
target_var.append(torch.autograd.Variable(target[j]))
# Output of the model
output = model(*input_var)
# Compute loss
read_loss = criterion(output[0], target_var[0])
obs_loss = criterion(output[1], target_var[0])
recall_loss = criterion(output[2], target_var[0])
final_loss = criterion(output[3], target_var[0])
train_loss = args.weight_loss_read * read_loss + \
args.weight_loss_observe * obs_loss + \
args.weight_loss_recall * recall_loss + \
args.weight_loss_final * final_loss
# Track loss
read_losses.update(read_loss.data.cpu().numpy(), input[0].size(0))
obs_losses.update(obs_loss.data.cpu().numpy(), input[0].size(0))
recall_losses.update(recall_loss.data.cpu().numpy(), input[0].size(0))
final_losses.update(final_loss.data.cpu().numpy(), input[0].size(0))
losses.update(train_loss.data.cpu().numpy(), input[0].size(0))
# Backpropagate loss and update weights
optimizer.zero_grad()
train_loss.backward()
optimizer.step()
# Print info
logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
epoch, batch_idx, len(train_loader), 100. * batch_idx / len(train_loader), loss=losses))
# Plot loss after all mini-batches have finished
plotter.plot('loss', 'train', 'Class Loss', epoch, losses.avg)
def valEpoch(args, val_loader, model, criterion, epoch):
losses = utils.AverageMeter()
model.eval()
for batch_idx, (input, target) in enumerate(val_loader):
# Inputs to Variable type
input_var = list()
for j in range(len(input)):
input_var.append(torch.autograd.Variable(input[j]).cuda())
# Targets to Variable type
target_var = list()
for j in range(len(target)):
            target[j] = target[j].cuda(non_blocking=True)
target_var.append(torch.autograd.Variable(target[j]))
# Output of the model
with torch.no_grad():
output = model(*input_var)
# Compute loss
_, predicted = torch.max(output[3], 1)
_, p_read = torch.max(output[0], 1)
_, p_obs = torch.max(output[1], 1)
_, p_recall = torch.max(output[2], 1)
read_loss = criterion(output[0], target_var[0])
obs_loss = criterion(output[1], target_var[0])
recall_loss = criterion(output[2], target_var[0])
final_loss = criterion(output[3], target_var[0])
train_loss = args.weight_loss_read * read_loss + \
args.weight_loss_observe * obs_loss + \
args.weight_loss_recall * recall_loss + \
args.weight_loss_final * final_loss
losses.update(train_loss.data.cpu().numpy(), input[0].size(0))
# Save predictions to compute accuracy
if batch_idx == 0:
out = predicted.data.cpu().numpy()
out_r = p_read.data.cpu().numpy()
out_o = p_obs.data.cpu().numpy()
out_ll = p_recall.data.cpu().numpy()
label = target[0].cpu().numpy()
else:
out = np.concatenate((out,predicted.data.cpu().numpy()),axis=0)
out_r = np.concatenate((out_r, p_read.data.cpu().numpy()), axis=0)
out_o = np.concatenate((out_o, p_obs.data.cpu().numpy()), axis=0)
out_ll = np.concatenate((out_ll, p_recall.data.cpu().numpy()), axis=0)
label = np.concatenate((label,target[0].cpu().numpy()),axis=0)
# Accuracy
acc = np.sum(out == label) / len(out)
logger.info('Validation set: Average loss: {:.4f}\t'
'Accuracy {acc}'.format(losses.avg, acc=acc))
plotter.plot('loss', 'val', 'Class Loss', epoch, losses.avg)
plotter.plot('acc', 'val', 'Class Accuracy', epoch, acc)
    acc_read = np.sum(out_r == label) / len(out)
    acc_obs = np.sum(out_o == label) / len(out)
    acc_recall = np.sum(out_ll == label) / len(out)
    plotter.plot('readacc', 'val', 'Read Accuracy', epoch, acc_read)
    plotter.plot('obsacc', 'val', 'Obs Accuracy', epoch, acc_obs)
    plotter.plot('recallacc', 'val', 'Recall Accuracy', epoch, acc_recall)
return acc
def train(args, modeldir):
# Set GPU
n_gpu = torch.cuda.device_count()
logger.info("device: {} n_gpu: {}".format(args.device, n_gpu))
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
# Create training directory
if not os.path.exists(modeldir):
os.makedirs(modeldir)
# Model, optimizer and loss
model = FusionMW()
if args.device == "cuda":
model.cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
class_loss = nn.CrossEntropyLoss().cuda()
# Data
trainDataObject = FusionDataloader(args, split='train')
valDataObject = FusionDataloader(args, split='val')
train_loader = torch.utils.data.DataLoader(trainDataObject, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=args.workers)
val_loader = torch.utils.data.DataLoader(valDataObject, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=args.workers)
    num_batches = len(train_loader)
    # Now, let's start the training process!
    logger.info('Training loader with %d batches' % num_batches)
    logger.info('Validation loader with %d batches' % len(val_loader))
logger.info('Training...')
pattrack = 0
best_val = 0
for epoch in range(0, args.nepochs):
# Epoch
trainEpoch(args, train_loader, model, class_loss, optimizer, epoch)
current_val = valEpoch(args, val_loader, model, class_loss, epoch)
# Check patience
is_best = current_val > best_val
best_val = max(current_val, best_val)
if not is_best:
pattrack += 1
else:
pattrack = 0
if pattrack >= args.patience:
break
logger.info('** Validation information: %f (this accuracy) - %f (best accuracy) - %d (patience valtrack)' % (current_val, best_val, pattrack))
# Save
state = {'state_dict': model.state_dict(),
'best_val': best_val,
'optimizer': optimizer.state_dict(),
'pattrack': pattrack,
'curr_val': current_val}
filename = os.path.join(modeldir, 'model_latest.pth.tar')
torch.save(state, filename)
if is_best:
filename = os.path.join(modeldir, 'model_best.pth.tar')
torch.save(state, filename)
def evaluate(args, modeldir):
# Model
model = FusionMW()
if args.device == "cuda":
model.cuda()
class_loss = nn.CrossEntropyLoss().cuda()
logger.info("=> loading checkpoint from '{}'".format(modeldir))
checkpoint = torch.load(os.path.join(modeldir, 'model_best.pth.tar'))
model.load_state_dict(checkpoint['state_dict'])
# Data
evalDataObject = FusionDataloader(args, split='test')
test_loader = torch.utils.data.DataLoader(evalDataObject, batch_size=args.batch_size, shuffle=False, pin_memory=(not args.no_cuda), num_workers=args.workers)
    logger.info('Evaluation loader with %d batches' % len(test_loader))
# Switch to evaluation mode & compute test samples embeddings
batch_time = utils.AverageMeter()
end = time.time()
model.eval()
for i, (input, target) in enumerate(test_loader):
# Inputs to Variable type
input_var = list()
for j in range(len(input)):
input_var.append(torch.autograd.Variable(input[j]).cuda())
# Targets to Variable type
target_var = list()
for j in range(len(target)):
            target[j] = target[j].cuda(non_blocking=True)
target_var.append(torch.autograd.Variable(target[j]))
# Output of the model
with torch.no_grad():
output = model(*input_var)
_, predicted = torch.max(output[3], 1)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
        # Store outputs
if i==0:
out = predicted.data.cpu().numpy()
label = target[0].cpu().numpy()
index = target[1].cpu().numpy()
scores_read = output[0].data.cpu().numpy()
scores_observe = output[1].data.cpu().numpy()
scores_recall = output[2].data.cpu().numpy()
scores_final = output[3].data.cpu().numpy()
else:
out = np.concatenate((out,predicted.data.cpu().numpy()),axis=0)
label = np.concatenate((label,target[0].cpu().numpy()),axis=0)
index = np.concatenate((index, target[1].cpu().numpy()), axis=0)
scores_read = np.concatenate((scores_read, output[0].cpu().numpy()), axis=0)
scores_observe = np.concatenate((scores_observe, output[1].cpu().numpy()), axis=0)
scores_recall = np.concatenate((scores_recall, output[2].cpu().numpy()), axis=0)
scores_final = np.concatenate((scores_final, output[3].cpu().numpy()), axis=0)
# Print accuracy
df = pd.read_csv(os.path.join(args.data_dir, 'knowit_data/knowit_data_test.csv'), delimiter='\t')
utils.accuracy(df, out, label, index)
if __name__ == "__main__":
args = get_params()
assert args.dataset in ['knowit', 'tvqa']
if args.dataset == 'knowit':
from Source.dataloader_knowit import FusionDataloader
args.descriptions_file = 'Data/knowit_observe/scenes_descriptions.csv'
elif args.dataset == 'tvqa':
# from Source.dataloader_tvqa import FusionDataloader
logger.error('Sorry, TVQA+ dataset not implemented yet.')
import sys
sys.exit(0)
# Create training and data directories
modeldir = os.path.join('Training', args.train_name)
if not os.path.exists(modeldir):
os.makedirs(modeldir)
outdatadir = os.path.join(args.data_dir, args.dataset)
if not os.path.exists(outdatadir):
os.makedirs(outdatadir)
# Train if model does not exist
if not os.path.isfile(os.path.join(modeldir, 'model_best.pth.tar')):
global plotter
plotter = utils.VisdomLinePlotter(env_name=args.train_name)
train(args, modeldir)
# Evaluation
evaluate(args, modeldir)
|
python
|
"""
Module: mercadopago/__init__.py
"""
from .sdk import SDK
|
python
|
import cv2 as cv
from utilities import show_in_matplotlib
def get_channel(img, channel):
b = img[:, :, channel]
# g = img[:,:,1]
# r = img[:,:,2]
return b
def remove_channel(img, channel):
imgCopy = img.copy()
imgCopy[:, :, channel] = 0
return imgCopy
def remove_channel_v0(img, channel):
b = img[:, :, 0]
g = img[:, :, 1]
r = img[:, :, 2]
if channel == 0:
b[:] = 0
elif channel == 1:
g[:] = 0
else:
r[:] = 0
img_merged = cv.merge((b, g, r))
return img_merged
if __name__ == "__main__":
import cv2 as cv
img = cv.imread('color_img.png')
show_in_matplotlib(img, title='original')
ch = 1
b = get_channel(img, ch)
show_in_matplotlib(b, title=f"Channel {ch} only")
img_merged = remove_channel(img, ch)
show_in_matplotlib(img_merged, title=f"Channel {ch} removed")
|
python
|
map = [0 for i in range(8*2*4)]
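# Bitmap of 8*2*4 = 64 sectors: 8 cylinders x 2 tracks/cylinder x 4 sectors/track.
# Linear (logic) address = 8*cylinder + 4*track + sector.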
while(True):
x = int(input("Please input the operation number:\n1:get\n2:free\n3:show\n0:quit\n"))
if (x==0):
break
if (x==1):
map_index = []
file_size = int(input("Please input the file size\n"))
for i in range(8*2*4):
if (map[i]==0):
map_index.append(i)
if (len(map_index)>=file_size):
for i in map_index[0:file_size]:
map[i] = 1
print("------")
print("logic address "+str(i)+" allocated")
print("cylinder number: "+str(i // 8))
print("track number: "+str((i % 8) // 4))
print("sector number: "+str(i % 4))
print("-------")
else:
print("Fail")
if (x==2):
cn = int(input("cylinder number: \n"))
tn = int(input("track number: \n"))
sn = int(input("sector number: \n"))
print("The logic address is : "+str(8*cn+tn*4+sn))
if (map[8*cn+tn*4+sn]==1):
print("Success!")
map[8*cn+tn*4+sn]=0
else:
print("Error!")
if (x==3):
ans=""
for i in range(8):
for j in range(4*2):
ans+=str(map[i*8+j])
ans+="\n"
print(ans)
|
python
|
"""Unit tests for powercycle_sentinel.py."""
# pylint: disable=missing-docstring
import unittest
from datetime import datetime, timezone, timedelta
from unittest.mock import Mock
from evergreen import EvergreenApi, Task
from buildscripts.powercycle_sentinel import watch_tasks, POWERCYCLE_TASK_EXEC_TIMEOUT_SECS
def make_task_mock(evg_api, task_id, start_time, finish_time):
return Task({
"task_id": task_id,
"start_time": start_time,
"finish_time": finish_time,
}, evg_api)
class TestWatchTasks(unittest.TestCase):
"""Test watch_tasks."""
def test_no_long_running_tasks(self):
evg_api = EvergreenApi()
task_ids = ["1", "2"]
now = datetime.now(timezone.utc).isoformat()
task_1 = make_task_mock(evg_api, task_ids[0], now, now)
task_2 = make_task_mock(evg_api, task_ids[1], now, now)
evg_api.task_by_id = Mock(
side_effect=(lambda task_id: {
"1": task_1,
"2": task_2,
}[task_id]))
long_running_task_ids = watch_tasks(task_ids, evg_api, 0)
self.assertEqual([], long_running_task_ids)
def test_found_long_running_tasks(self):
evg_api = EvergreenApi()
task_ids = ["1", "2"]
exec_timeout_seconds_ago = (datetime.now(timezone.utc) -
timedelta(hours=POWERCYCLE_TASK_EXEC_TIMEOUT_SECS)).isoformat()
now = datetime.now(timezone.utc).isoformat()
task_1 = make_task_mock(evg_api, task_ids[0], exec_timeout_seconds_ago, now)
task_2 = make_task_mock(evg_api, task_ids[1], exec_timeout_seconds_ago, None)
evg_api.task_by_id = Mock(
side_effect=(lambda task_id: {
"1": task_1,
"2": task_2,
}[task_id]))
long_running_task_ids = watch_tasks(task_ids, evg_api, 0)
self.assertEqual([task_2.task_id], long_running_task_ids)
|
python
|
import functools
class Codec:
db = []
def encode(self, longUrl):
"""Encodes a URL to a shortened URL.
:type longUrl: str
:rtype: str
"""
length = len(self.db)
self.db.append(longUrl)
return self.conversionA(length)
def decode(self, shortUrl):
"""Decodes a shortened URL to its original URL.
:type shortUrl: str
:rtype: str
"""
return self.conversionC(shortUrl)
    def conversionA(self, s):
        # encode the integer index as a fixed-width, 6-character base-62 string
        digits = []
        for _ in range(6):
            digits.append(self.conversionB(s % 62))
            s //= 62
        return ''.join(reversed(digits))
def conversionB(self, b):
if b < 10:
return chr(48 + b)
if b < 36:
return chr(65 + b - 10)
return chr(97 + b - 36)
    def conversionC(self, c):
        # map each base-62 character back to its digit value
        digits = [self.conversionD(ch) for ch in c]
        # fold the digits back into the integer index
        index = functools.reduce(lambda x, y: x * 62 + y, digits, 0)
        return self.db[index]
def conversionD(self, d):
c = ord(d)
if c < 58:
return c - 48
if c <= ord('Z'):
return c + 10 - 65
return c + 36 - 97
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.decode(codec.encode(url))
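# A minimal round-trip sketch (assumed usage, not part of the original file):
if __name__ == "__main__":
    codec = Codec()
    url = "https://example.com/some/long/path"
    short = codec.encode(url)  # first entry maps to the 6-char base-62 key "000000"
    assert codec.decode(short) == url
    print(short)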
|
python
|
# https://deeplearningcourses.com/c/data-science-natural-language-processing-in-python
# https://www.udemy.com/data-science-natural-language-processing-in-python
# Author: http://lazyprogrammer.me
import numpy as np
import matplotlib.pyplot as plt
import string
import random
import re
import requests
import os
import textwrap
### create substitution cipher
# one will act as the key, other as the value
letters1 = list(string.ascii_lowercase)
letters2 = list(string.ascii_lowercase)
true_mapping = {}
# shuffle second set of letters
random.shuffle(letters2)
# populate map
for k, v in zip(letters1, letters2):
true_mapping[k] = v
### the language model
# initialize Markov matrix
M = np.ones((26, 26))
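# (initializing with ones is add-one smoothing, so unseen bigrams keep a nonzero count)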
# initial state distribution
pi = np.zeros(26)
# a function to update the Markov matrix
def update_transition(ch1, ch2):
# ord('a') = 97, ord('b') = 98, ...
i = ord(ch1) - 97
j = ord(ch2) - 97
M[i,j] += 1
# a function to update the initial state distribution
def update_pi(ch):
i = ord(ch) - 97
pi[i] += 1
# get the log-probability of a word / token
def get_word_prob(word):
# print("word:", word)
i = ord(word[0]) - 97
logp = np.log(pi[i])
for ch in word[1:]:
j = ord(ch) - 97
        logp += np.log(M[i, j]) # add the transition log-probability
        i = j # advance the state
return logp
# get the probability of a sequence of words
def get_sequence_prob(words):
# if input is a string, split into an array of tokens
if type(words) == str:
words = words.split()
logp = 0
for word in words:
logp += get_word_prob(word)
return logp
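# Explanatory note (not original to the script): for a word w with letter
# indices x_0..x_T, the functions above compute
#   log P(w) = log pi[x_0] + sum_{t=1..T} log M[x_{t-1}, x_t]
# and a sequence's score is the sum of its words' log-probabilities.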
### create a markov model based on an English dataset
# is an edit of https://www.gutenberg.org/ebooks/2701
# (I removed the front and back matter)
# download the file
if not os.path.exists('moby_dick.txt'):
print("Downloading moby dick...")
r = requests.get('https://lazyprogrammer.me/course_files/moby_dick.txt')
with open('moby_dick.txt', 'w') as f:
f.write(r.content.decode())
# for replacing non-alpha characters
regex = re.compile('[^a-zA-Z]')
# load in words
for line in open('moby_dick.txt'):
line = line.rstrip()
# there are blank lines in the file
if line:
line = regex.sub(' ', line) # replace all non-alpha characters with space
# split the tokens in the line and lowercase
tokens = line.lower().split()
for token in tokens:
# update the model
# first letter
ch0 = token[0]
update_pi(ch0)
# other letters
for ch1 in token[1:]:
update_transition(ch0, ch1)
ch0 = ch1
# normalize the probabilities
pi /= pi.sum()
M /= M.sum(axis=1, keepdims=True)
### encode a message
# this is a random excerpt from Project Gutenberg's
# The Adventures of Sherlock Holmes, by Arthur Conan Doyle
# https://www.gutenberg.org/ebooks/1661
original_message = '''I then lounged down the street and found,
as I expected, that there was a mews in a lane which runs down
by one wall of the garden. I lent the ostlers a hand in rubbing
down their horses, and received in exchange twopence, a glass of
half-and-half, two fills of shag tobacco, and as much information
as I could desire about Miss Adler, to say nothing of half a dozen
other people in the neighbourhood in whom I was not in the least
interested, but whose biographies I was compelled to listen to.
'''
# Away they went, and I was just wondering whether I should not do well
# to follow them when up the lane came a neat little landau, the coachman
# with his coat only half-buttoned, and his tie under his ear, while all
# the tags of his harness were sticking out of the buckles. It hadn't
# pulled up before she shot out of the hall door and into it. I only
# caught a glimpse of her at the moment, but she was a lovely woman, with
# a face that a man might die for.
# My cabby drove fast. I don't think I ever drove faster, but the others
# were there before us. The cab and the landau with their steaming horses
# were in front of the door when I arrived. I paid the man and hurried
# into the church. There was not a soul there save the two whom I had
# followed and a surpliced clergyman, who seemed to be expostulating with
# them. They were all three standing in a knot in front of the altar. I
# lounged up the side aisle like any other idler who has dropped into a
# church. Suddenly, to my surprise, the three at the altar faced round to
# me, and Godfrey Norton came running as hard as he could towards me.
# a function to encode a message
def encode_message(msg):
# downcase
msg = msg.lower()
# replace non-alpha characters
msg = regex.sub(' ', msg)
# make the encoded message
coded_msg = []
for ch in msg:
coded_ch = ch # could just be a space
if ch in true_mapping:
coded_ch = true_mapping[ch]
coded_msg.append(coded_ch)
return ''.join(coded_msg)
encoded_message = encode_message(original_message)
# a function to decode a message
def decode_message(msg, word_map):
decoded_msg = []
for ch in msg:
decoded_ch = ch # could just be a space
if ch in word_map:
decoded_ch = word_map[ch]
decoded_msg.append(decoded_ch)
return ''.join(decoded_msg)
### run an evolutionary algorithm to decode the message
# this is our initialization point
dna_pool = []
for _ in range(20):
dna = list(string.ascii_lowercase)
random.shuffle(dna)
dna_pool.append(dna)
def evolve_offspring(dna_pool, n_children):
    # make n_children mutated copies of each parent
offspring = []
for dna in dna_pool:
for _ in range(n_children):
copy = dna.copy()
j = np.random.randint(len(copy))
k = np.random.randint(len(copy))
            # swap two positions
            copy[j], copy[k] = copy[k], copy[j]
offspring.append(copy)
return offspring + dna_pool
num_iters = 1000
scores = np.zeros(num_iters)
best_dna = None
best_map = None
best_score = float('-inf')
for i in range(num_iters):
if i > 0:
# get offspring from the current dna pool
dna_pool = evolve_offspring(dna_pool, 3)
# calculate score for each dna
dna2score = {}
for dna in dna_pool:
# populate map
current_map = {}
for k, v in zip(letters1, dna):
current_map[k] = v
decoded_message = decode_message(encoded_message, current_map)
score = get_sequence_prob(decoded_message)
# store it
# needs to be a string to be a dict key
dna2score[''.join(dna)] = score
# record the best so far
if score > best_score:
best_dna = dna
best_map = current_map
best_score = score
# average score for this generation
scores[i] = np.mean(list(dna2score.values()))
# keep the best 5 dna
# also turn them back into list of single chars
sorted_dna = sorted(dna2score.items(), key=lambda x: x[1], reverse=True)
dna_pool = [list(k) for k, v in sorted_dna[:5]]
if i % 200 == 0:
print("iter:", i, "score:", scores[i], "best so far:", best_score)
# use best score
decoded_message = decode_message(encoded_message, best_map)
print("LL of decoded message:", get_sequence_prob(decoded_message))
print("LL of true message:", get_sequence_prob(regex.sub(' ', original_message.lower())))
# which letters are wrong?
for true, v in true_mapping.items():
pred = best_map[v]
if true != pred:
print("true: %s, pred: %s" % (true, pred))
# print the final decoded message
print("Decoded message:\n", textwrap.fill(decoded_message))
print("\nTrue message:\n", original_message)
|
python
|
from collections import defaultdict
"""
students = 10
leads = 9
clues = [[1, 2], [3, 4], [5, 2], [4, 6], [2, 6], [8, 7], [9, 7], [1, 6], [2, 4]]
"""
class Unionfind():
def __init__(self, students, leads, clues):
self.students = students
# Set up parent for each node.
self.parent = {item:item for item in range(1, self.students + 1)}
# Create a dictionary to store the group result.
self.group = defaultdict(list)
def find(self, u):
if self.parent[u] == u:
return u
else:
self.parent[u] = self.find(self.parent[u])
return self.parent[u]
def union(self, u, v):
t1 = self.find(u)
t2 = self.find(v)
if t1 != t2:
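            # Relabel every node whose parent pointer is t2, keeping all
            # pointers direct; the commented one-liner below is the usual
            # O(1) union (find() already applies path compression).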
for i in range(1, self.students + 1):
if self.parent[i] == t2:
self.parent[i] = t1
#self.parent[t2] = t1
def find_group(self, clues):
for u, v in clues:
self.union(u, v)
for i in range(1, self.students + 1):
self.group[self.parent[i]].append(i)
return len(self.group)
def main():
students = 10
leads = 9
clues = [[1, 2], [3, 4], [5, 2], [4, 6], [2, 6], [8, 7], [9, 7], [1, 6], [2, 4]]
#clues = [[1, 2], [3, 4], [4, 5]]
uf = Unionfind(students, leads, clues)
print(uf.find_group(clues))
if __name__ == '__main__':
main()
|
python
|
# Set up configuration variables
__all__ = ['custom_viewer', 'qglue', 'test']
import os
import sys
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution('glue-core').version
except DistributionNotFound:
__version__ = 'undefined'
from ._mpl_backend import MatplotlibBackendSetter
sys.meta_path.append(MatplotlibBackendSetter())
from glue.viewers.custom.helper import custom_viewer
# Load user's configuration file
from .config import load_configuration
env = load_configuration()
from .qglue import qglue
from .main import load_plugins # noqa
def test(no_optional_skip=False):
from pytest import main
root = os.path.abspath(os.path.dirname(__file__))
args = [root, '-x']
if no_optional_skip:
args.append('--no-optional-skip')
return main(args=args)
from glue._settings_helpers import load_settings
load_settings()
# In PyQt 5.5+, PyQt overrides the default exception catching and fatally
# crashes the Qt application without printing out any details about the error.
# Below we revert the exception hook to the original Python one. Note that we
# can't just do sys.excepthook = sys.__excepthook__ otherwise PyQt will detect
# the default excepthook is in place and override it.
def handle_exception(exc_type, exc_value, exc_traceback):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
sys.excepthook = handle_exception
|
python
|
def create_user(base_cls):
class User_info(base_cls):
__tablename__ = 'user_info'
__table_args__ = {'autoload': True}
return User_info
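# Usage sketch (illustrative, assumes a SQLAlchemy declarative base with
# reflection enabled):
#   User_info = create_user(Base)
# maps the existing 'user_info' table onto the generated class via autoload.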
|
python
|
import os
from enum import Enum
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
class Verbosity(Enum):
error = 1
warning = 2
info = 3
debug = 4
BASE_FOLDER = "output/"
class Logger:
current_run = None
def __init__(self, verbosity=Verbosity.info, markdown=False, output_file=None, img_format='png') -> None:
self.verbosity = Verbosity(verbosity) if isinstance(verbosity, int) else verbosity
self.markdown_mode = markdown
self.output_file = output_file
self.img_format = img_format
def df(self, frame):
if not isinstance(frame, pd.DataFrame):
frame = pd.DataFrame(frame)
self.info("\n")
self.info(frame.to_markdown(), "\n\n")
def csv(self, frame: pd.DataFrame, name: str, extra="csv/"):
file_path = self.get_file_path(name, extra)
frame.to_csv(file_path)
def list(self, data):
self.__print("[")
for item in data:
self.__print(item, ",")
self.__print("]")
def log_img(self, name: str, file_format=None):
if file_format is None:
file_format = self.img_format
print(f"saving: {name} as {file_format}")
file_path = self.get_file_path(f"{name}.{file_format}")
plt.savefig(file_path, bbox_inches='tight', format=file_format)
# self.debug(f"Saving: {file_path}")
plt.clf()
plt.close()
self.info(f"")
def get_file_path(self, name, extra=None):
if Logger.current_run is not None:
subfolder = Logger.current_run
else:
subfolder = None
folder = BASE_FOLDER + subfolder if subfolder is not None else BASE_FOLDER
if extra is not None:
folder = folder + extra
Path(folder).mkdir(parents=True, exist_ok=True)
file_path = Path(folder + name).resolve()
return file_path
def set_verbosity(self, v: Verbosity):
self.verbosity = v
def debug(self, *args):
if self.verbosity.value >= 4:
self.__print(*args)
def info(self, *args):
if self.verbosity.value >= 3:
self.__print(*args)
def warning(self, *args):
if self.verbosity.value >= 2:
self.__print(*args)
def error(self, *args):
if self.verbosity.value >= 1:
self.__print(*args)
def is_info(self):
return self.verbosity.value >= 3
def __print(self, *args):
if self.output_file is not None:
file_path = self.get_file_path(self.output_file)
if os.path.exists(file_path):
write_type = "a"
else:
write_type = "w"
            with open(file_path, write_type) as file:
                if self.markdown_mode:
                    print(*args, file=file, end=' \n')
                else:
                    print(*args, file=file)
else:
if not self.markdown_mode:
print(*args)
else:
print(*args, " ")
|
python
|
"""
Attributes are arbitrary data stored on objects. Attributes supports
both pure-string values and pickled arbitrary data.
Attributes are also used to implement Nicks. This module also contains
the Attribute- and NickHandlers as well as the `NAttributeHandler`,
which is a non-db version of Attributes.
"""
import re
import weakref
from django.db import models
from django.conf import settings
from django.utils.encoding import smart_str
from evennia.locks.lockhandler import LockHandler
from evennia.utils.idmapper.models import SharedMemoryModel
from evennia.utils.dbserialize import to_pickle, from_pickle
from evennia.utils.picklefield import PickledObjectField
from evennia.utils.utils import lazy_property, to_str, make_iter
_TYPECLASS_AGGRESSIVE_CACHE = settings.TYPECLASS_AGGRESSIVE_CACHE
#------------------------------------------------------------
#
# Attributes
#
#------------------------------------------------------------
class Attribute(SharedMemoryModel):
"""
Attributes are things that are specific to different types of objects. For
example, a drink container needs to store its fill level, whereas an exit
needs to store its open/closed/locked/unlocked state. These are done via
attributes, rather than making different classes for each object type and
storing them directly. The added benefit is that we can add/remove
attributes on the fly as we like.
The Attribute class defines the following properties:
key - primary identifier.
lock_storage - perm strings.
obj - which object the attribute is defined on.
date_created - when the attribute was created.
value - the data stored in the attribute, in pickled form
using wrappers to be able to store/retrieve models.
strvalue - string-only data. This data is not pickled and is
thus faster to search for in the database.
category - optional character string for grouping the Attribute.
"""
#
# Attribute Database Model setup
#
# These database fields are all set using their corresponding properties,
    # named same as the field, but without the db_* prefix.
db_key = models.CharField('key', max_length=255, db_index=True)
db_value = PickledObjectField(
'value', null=True,
help_text="The data returned when the attribute is accessed. Must be "
"written as a Python literal if editing through the admin "
"interface. Attribute values which are not Python literals "
"cannot be edited through the admin interface.")
db_strvalue = models.TextField(
'strvalue', null=True, blank=True,
help_text="String-specific storage for quick look-up")
db_category = models.CharField(
'category', max_length=128, db_index=True, blank=True, null=True,
help_text="Optional categorization of attribute.")
# Lock storage
db_lock_storage = models.TextField(
'locks', blank=True,
help_text="Lockstrings for this object are stored here.")
db_model = models.CharField(
'model', max_length=32, db_index=True, blank=True, null=True,
help_text="Which model of object this attribute is attached to (A "
"natural key like 'objects.dbobject'). You should not change "
"this value unless you know what you are doing.")
# subclass of Attribute (None or nick)
db_attrtype = models.CharField(
'attrtype', max_length=16, db_index=True, blank=True, null=True,
help_text="Subclass of Attribute (None or nick)")
# time stamp
db_date_created = models.DateTimeField(
'date_created', editable=False, auto_now_add=True)
# Database manager
#objects = managers.AttributeManager()
@lazy_property
def locks(self):
return LockHandler(self)
class Meta:
"Define Django meta options"
verbose_name = "Evennia Attribute"
# read-only wrappers
key = property(lambda self: self.db_key)
strvalue = property(lambda self: self.db_strvalue)
category = property(lambda self: self.db_category)
model = property(lambda self: self.db_model)
attrtype = property(lambda self: self.db_attrtype)
date_created = property(lambda self: self.db_date_created)
def __lock_storage_get(self):
return self.db_lock_storage
def __lock_storage_set(self, value):
self.db_lock_storage = value
self.save(update_fields=["db_lock_storage"])
def __lock_storage_del(self):
self.db_lock_storage = ""
self.save(update_fields=["db_lock_storage"])
lock_storage = property(__lock_storage_get, __lock_storage_set, __lock_storage_del)
# Wrapper properties to easily set database fields. These are
# @property decorators that allows to access these fields using
# normal python operations (without having to remember to save()
# etc). So e.g. a property 'attr' has a get/set/del decorator
# defined that allows the user to do self.attr = value,
# value = self.attr and del self.attr respectively (where self
# is the object in question).
# value property (wraps db_value)
#@property
def __value_get(self):
"""
Getter. Allows for `value = self.value`.
We cannot cache here since it makes certain cases (such
as storing a dbobj which is then deleted elsewhere) out-of-sync.
The overhead of unpickling seems hard to avoid.
"""
return from_pickle(self.db_value, db_obj=self)
#@value.setter
def __value_set(self, new_value):
"""
Setter. Allows for self.value = value. We cannot cache here,
see self.__value_get.
"""
self.db_value = to_pickle(new_value)
self.save(update_fields=["db_value"])
#@value.deleter
def __value_del(self):
"Deleter. Allows for del attr.value. This removes the entire attribute."
self.delete()
value = property(__value_get, __value_set, __value_del)
#
#
# Attribute methods
#
#
def __str__(self):
return smart_str("%s(%s)" % (self.db_key, self.id))
def __unicode__(self):
return u"%s(%s)" % (self.db_key,self.id)
def access(self, accessing_obj, access_type='read', default=False, **kwargs):
"""
Determines if another object has permission to access.
Args:
accessing_obj (object): object trying to access this one.
access_type (optional): type of access sought.
default (optional): what to return if no lock of access_type was found
Kwargs:
**kwargs: passed to `at_access` hook along with `result`.
        Returns:
            result (bool): the outcome of the lock check.
"""
result = self.locks.check(accessing_obj, access_type=access_type, default=default)
#self.at_access(result, **kwargs)
return result
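# Example (illustrative, not part of the original module): values round-trip
# through the pickle wrappers, so arbitrary Python data can be stored:
#
#     obj.attributes.add("fill_level", {"water": 0.5})  # pickled via to_pickle
#     obj.attributes.get("fill_level")                  # -> {"water": 0.5}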
#
# Handlers making use of the Attribute model
#
class AttributeHandler(object):
"""
Handler for adding Attributes to the object.
"""
_m2m_fieldname = "db_attributes"
_attrcreate = "attrcreate"
_attredit = "attredit"
_attrread = "attrread"
_attrtype = None
def __init__(self, obj):
"Initialize handler"
self.obj = obj
self._objid = obj.id
self._model = to_str(obj.__dbclass__.__name__.lower())
self._cache = None
def _recache(self):
"Cache all attributes of this object"
query = {"%s__id" % self._model : self._objid,
"attribute__db_attrtype" : self._attrtype}
attrs = [conn.attribute for conn in getattr(self.obj, self._m2m_fieldname).through.objects.filter(**query)]
self._cache = dict(("%s-%s" % (to_str(attr.db_key).lower(),
attr.db_category.lower() if attr.db_category else None),
attr) for attr in attrs)
def has(self, key, category=None):
"""
Checks if the given Attribute (or list of Attributes) exists on
the object.
If an iterable is given, returns list of booleans.
"""
if self._cache is None or not _TYPECLASS_AGGRESSIVE_CACHE:
self._recache()
key = [k.strip().lower() for k in make_iter(key) if k]
category = category.strip().lower() if category is not None else None
searchkeys = ["%s-%s" % (k, category) for k in make_iter(key)]
ret = [self._cache.get(skey) for skey in searchkeys if skey in self._cache]
return ret[0] if len(ret) == 1 else ret
def get(self, key=None, category=None, default=None, return_obj=False,
strattr=False, raise_exception=False, accessing_obj=None,
default_access=True, not_found_none=False):
"""
Returns the value of the given Attribute or list of Attributes.
`strattr` will cause the string-only value field instead of the normal
pickled field data. Use to get back values from Attributes added with
the `strattr` keyword.
If `return_obj=True`, return the matching Attribute object
instead. Returns `default` if no matches (or [ ] if `key` was a list
with no matches). If `raise_exception=True`, failure to find a
match will raise `AttributeError` instead.
If `accessing_obj` is given, its `attrread` permission lock will be
checked before displaying each looked-after Attribute. If no
`accessing_obj` is given, no check will be done.
"""
class RetDefault(object):
"Holds default values"
def __init__(self):
self.value = default
self.strvalue = str(default) if default is not None else None
if self._cache is None or not _TYPECLASS_AGGRESSIVE_CACHE:
self._recache()
ret = []
key = [k.strip().lower() for k in make_iter(key) if k]
category = category.strip().lower() if category is not None else None
#print "cache:", self._cache.keys(), key
        if not key:
            # return all with matching category (or no category)
            catkey = "-%s" % category
            ret = [attr for key, attr in self._cache.items() if key and key.endswith(catkey)]
else:
for searchkey in ("%s-%s" % (k, category) for k in key):
attr_obj = self._cache.get(searchkey)
if attr_obj:
ret.append(attr_obj)
else:
if raise_exception:
raise AttributeError
else:
ret.append(RetDefault())
if accessing_obj:
# check 'attrread' locks
ret = [attr for attr in ret if attr.access(accessing_obj, self._attrread, default=default_access)]
if strattr:
ret = ret if return_obj else [attr.strvalue for attr in ret if attr]
else:
ret = ret if return_obj else [attr.value for attr in ret if attr]
if not ret:
return ret if len(key) > 1 else default
return ret[0] if len(ret)==1 else ret
def add(self, key, value, category=None, lockstring="",
strattr=False, accessing_obj=None, default_access=True):
"""
Add attribute to object, with optional `lockstring`.
If `strattr` is set, the `db_strvalue` field will be used (no pickling).
Use the `get()` method with the `strattr` keyword to get it back.
If `accessing_obj` is given, `self.obj`'s `attrcreate` lock access
will be checked against it. If no `accessing_obj` is given, no check
will be done.
"""
if accessing_obj and not self.obj.access(accessing_obj,
self._attrcreate, default=default_access):
# check create access
return
if self._cache is None:
self._recache()
if not key:
return
category = category.strip().lower() if category is not None else None
keystr = key.strip().lower()
cachekey = "%s-%s" % (keystr, category)
attr_obj = self._cache.get(cachekey)
if attr_obj:
# update an existing attribute object
if strattr:
# store as a simple string (will not notify OOB handlers)
attr_obj.db_strvalue = value
attr_obj.save(update_fields=["db_strvalue"])
else:
# store normally (this will also notify OOB handlers)
attr_obj.value = value
else:
# create a new Attribute (no OOB handlers can be notified)
kwargs = {"db_key" : keystr, "db_category" : category,
"db_model" : self._model, "db_attrtype" : self._attrtype,
"db_value" : None if strattr else to_pickle(value),
"db_strvalue" : value if strattr else None}
new_attr = Attribute(**kwargs)
new_attr.save()
getattr(self.obj, self._m2m_fieldname).add(new_attr)
self._cache[cachekey] = new_attr
def batch_add(self, key, value, category=None, lockstring="",
strattr=False, accessing_obj=None, default_access=True):
"""
Batch-version of `add()`. This is more efficient than
repeat-calling add.
`key` and `value` must be sequences of the same length, each
representing a key-value pair.
"""
if accessing_obj and not self.obj.access(accessing_obj,
self._attrcreate, default=default_access):
# check create access
return
if self._cache is None:
self._recache()
if not key:
return
        keys, values = make_iter(key), make_iter(value)
if len(keys) != len(values):
            raise RuntimeError("AttributeHandler.batch_add(): key and value of different length: %s vs %s" % (key, value))
category = category.strip().lower() if category is not None else None
new_attrobjs = []
for ikey, keystr in enumerate(keys):
keystr = keystr.strip().lower()
new_value = values[ikey]
cachekey = "%s-%s" % (keystr, category)
attr_obj = self._cache.get(cachekey)
if attr_obj:
# update an existing attribute object
if strattr:
# store as a simple string (will not notify OOB handlers)
attr_obj.db_strvalue = new_value
attr_obj.save(update_fields=["db_strvalue"])
else:
# store normally (this will also notify OOB handlers)
attr_obj.value = new_value
else:
# create a new Attribute (no OOB handlers can be notified)
kwargs = {"db_key" : keystr, "db_category" : category,
"db_attrtype" : self._attrtype,
"db_value" : None if strattr else to_pickle(new_value),
"db_strvalue" : value if strattr else None}
new_attr = Attribute(**kwargs)
new_attr.save()
new_attrobjs.append(new_attr)
if new_attrobjs:
# Add new objects to m2m field all at once
getattr(self.obj, self._m2m_fieldname).add(*new_attrobjs)
self._recache()
def remove(self, key, raise_exception=False, category=None,
accessing_obj=None, default_access=True):
"""
Remove attribute or a list of attributes from object.
If `accessing_obj` is given, will check against the `attredit` lock.
If not given, this check is skipped.
"""
if self._cache is None or not _TYPECLASS_AGGRESSIVE_CACHE:
self._recache()
key = [k.strip().lower() for k in make_iter(key) if k]
category = category.strip().lower() if category is not None else None
for searchstr in ("%s-%s" % (k, category) for k in key):
attr_obj = self._cache.get(searchstr)
if attr_obj:
if not (accessing_obj and not attr_obj.access(accessing_obj,
self._attredit, default=default_access)):
attr_obj.delete()
elif not attr_obj and raise_exception:
raise AttributeError
self._recache()
def clear(self, category=None, accessing_obj=None, default_access=True):
"""
Remove all Attributes on this object. If `accessing_obj` is
given, check the `attredit` lock on each Attribute before
continuing. If not given, skip check.
"""
if self._cache is None or not _TYPECLASS_AGGRESSIVE_CACHE:
self._recache()
if accessing_obj:
[attr.delete() for attr in self._cache.values()
if attr.access(accessing_obj, self._attredit, default=default_access)]
else:
[attr.delete() for attr in self._cache.values()]
self._recache()
def all(self, accessing_obj=None, default_access=True):
"""
Return all Attribute objects on this object.
If `accessing_obj` is given, check the `attrread` lock on
each attribute before returning them. If not given, this
check is skipped.
"""
if self._cache is None or not _TYPECLASS_AGGRESSIVE_CACHE:
self._recache()
attrs = sorted(self._cache.values(), key=lambda o: o.id)
if accessing_obj:
return [attr for attr in attrs
if attr.access(accessing_obj, self._attredit, default=default_access)]
else:
return attrs
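# Usage sketch (hypothetical; `obj` stands for any entity whose `attributes`
# property wraps an AttributeHandler -- the names below are illustrative only):
#
#     obj.attributes.add("strength", 10)
#     obj.attributes.get("strength")                # -> 10
#     obj.attributes.add("title", "the Bold", strattr=True)
#     obj.attributes.get("title", strattr=True)     # -> "the Bold"
#     obj.attributes.remove("strength")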
class NickHandler(AttributeHandler):
"""
Handles the addition and removal of Nicks
(uses Attributes' `strvalue` and `category` fields)
Nicks are stored as Attributes
with categories `nick_<nicktype>`
"""
_attrtype = "nick"
def has(self, key, category="inputline"):
return super(NickHandler, self).has(key, category=category)
def get(self, key=None, category="inputline", **kwargs):
"Get the replacement value matching the given key and category"
return super(NickHandler, self).get(key=key, category=category, strattr=True, **kwargs)
def add(self, key, replacement, category="inputline", **kwargs):
"Add a new nick"
super(NickHandler, self).add(key, replacement, category=category, strattr=True, **kwargs)
def remove(self, key, category="inputline", **kwargs):
"Remove Nick with matching category"
super(NickHandler, self).remove(key, category=category, **kwargs)
def nickreplace(self, raw_string, categories=("inputline", "channel"), include_player=True):
"Replace entries in raw_string with nick replacement"
obj_nicks, player_nicks = [], []
for category in make_iter(categories):
obj_nicks.extend([n for n in make_iter(self.get(category=category, return_obj=True)) if n])
if include_player and self.obj.has_player:
for category in make_iter(categories):
player_nicks.extend([n for n in make_iter(self.obj.player.nicks.get(category=category, return_obj=True)) if n])
for nick in obj_nicks + player_nicks:
# make a case-insensitive match here
match = re.match(re.escape(nick.db_key), raw_string, re.IGNORECASE)
if match:
raw_string = raw_string.replace(match.group(), nick.db_strvalue, 1)
break
return raw_string
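# Usage sketch (hypothetical): alias the input "n" to the "north" command.
#
#     obj.nicks.add("n", "north")            # category defaults to "inputline"
#     obj.nicks.nickreplace("n")             # -> "north"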
class NAttributeHandler(object):
"""
    This stand-alone handler manages non-database attributes (NAttributes), which are never saved.
It is similar to `AttributeHandler` and is used
by the `.ndb` handler in the same way as `.db` does
for the `AttributeHandler`.
"""
def __init__(self, obj):
"initialized on the object"
self._store = {}
self.obj = weakref.proxy(obj)
def has(self, key):
"Check if object has this attribute or not"
return key in self._store
def get(self, key):
"Returns named key value"
return self._store.get(key, None)
def add(self, key, value):
"Add new key and value"
self._store[key] = value
self.obj.set_recache_protection()
def remove(self, key):
"Remove key from storage"
if key in self._store:
del self._store[key]
self.obj.set_recache_protection(self._store)
def clear(self):
"Remove all nattributes from handler"
self._store = {}
def all(self, return_tuples=False):
"List all keys or (keys, values) stored, except _keys"
if return_tuples:
return [(key, value) for (key, value) in self._store.items() if not key.startswith("_")]
return [key for key in self._store if not key.startswith("_")]
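# Usage sketch (hypothetical; `obj.nattributes` is assumed to be an
# NAttributeHandler -- values live in memory only and are never persisted):
#
#     obj.nattributes.add("combat_target", enemy)
#     obj.nattributes.get("combat_target")   # -> enemy, or None if unset
#     obj.nattributes.clear()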
|
python
|
import asyncio
import logging
from .util import testing_exception_handler
loop = asyncio.get_event_loop()
loop.set_exception_handler(testing_exception_handler)
logging.getLogger('asynqp').setLevel(100) # mute the logger
|
python
|
# Generated by Django 2.2.13 on 2020-09-04 06:26
import enumfields.fields
from django.db import migrations
import leasing.enums
class Migration(migrations.Migration):
dependencies = [
("leasing", "0014_add_lease_identifier_field"),
]
operations = [
migrations.AddField(
model_name="planunit",
name="plan_unit_status",
field=enumfields.fields.EnumField(
default="present",
enum=leasing.enums.PlanUnitStatus,
max_length=30,
verbose_name="Plan unit status",
),
),
]
|
python
|
import os
import time
import libtorrent as lt
from Downloader.Utils.tasks import shutdown
from Downloader.configuration import TORRENT_PATH
from Downloader.Utils.file_operations import create_folder
def download_magnetic_link(_link, _path=TORRENT_PATH):
ses = lt.session()
ses.listen_on(6881, 6891)
if not os.path.exists(_path):
os.makedirs(_path)
params = {
'save_path': _path,
'storage_mode': lt.storage_mode_t(2)}
handle = lt.add_magnet_uri(ses, _link, params)
ses.start_dht()
print('downloading metadata...')
while not handle.has_metadata():
time.sleep(1)
print('got metadata, starting torrent download...')
while handle.status().state != lt.torrent_status.seeding:
s = handle.status()
state_str = ['queued', 'checking', 'downloading metadata', \
'downloading', 'finished', 'seeding', 'allocating']
        print('%.2f%% complete (down: %.1f kB/s up: %.1f kB/s peers: %d) %s' % \
(s.progress * 100, s.download_rate / 1000, s.upload_rate / 1000, \
s.num_peers, state_str[s.state]))
time.sleep(5)
def download_torrents(links, _shutdown='no', _path=TORRENT_PATH):
create_folder(_path)
for link in links:
download_magnetic_link(link, _path)
shutdown(_shutdown)
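# Usage sketch (hypothetical magnet URI; assumes the older libtorrent API
# (session.listen_on / lt.add_magnet_uri) used above is available):
#
#     download_torrents(['magnet:?xt=urn:btih:<info-hash>'], _shutdown='no')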
|
python
|
log_level = "INFO"
max_task_count = 1000
poll_db_interval = 100
config_max_downloading = 10000
mq_queue = "download_retrier_queue"
mq_routing_key = "download_retrier_routing_key"
mq_exchange = "download_retrier_exchange"
max_file_size = 52428800
config_domains = ['youku.com', 'ykimg.com', 'tudou.com',
'tudouui.com', 'tdimg.com', 'le.com',
'letv.com', 'letvcdn.com', 'iqiyi.com',
'qiyi.com', 'sohu.com', 'qq.com',
'qzoneapp.com', 'gtimg.com']
config_fetch_day = -10
# config_dates = "AND url_date>='20160827' AND url_date<='20160829'"
config_dates = ""
|
python
|
import oemof.solph as solph
from .component import Component
class Supply(Component):
    """A generic supply component (usually for grid-supplied electricity,
    heat, etc.) is created through this class."""
def __init__(self, params):
# Call the init function of the mother class.
Component.__init__(self)
# ------------------- PARAMETERS -------------------
self.name = 'Grid_default_name'
# Maximum output per timestep of commodity:
# e.g. for the electricity grid [Wh], thermal grid [Wh], CH4 grid [kg/h]
self.output_max = 8000000
self.bus_out = None
        # ------------- PARAMETERS: ARTIFICIAL COSTS VIA FOREIGN STATE --------------
        # The artificial costs for supplying electricity can be dependent on a
        # foreign state, like a storage SoC. Therefore the name and the state
        # name of that foreign entity have to be defined, as well as the threshold
        # level under which the low-level costs are used. Above the threshold,
        # the high-level artificial costs are used.
        # Name and attribute of the foreign entity (referenced in
        # prepare_simulation(); None disables the feature).
        self.fs_component_name = None
        self.fs_attribute_name = None
        # Define the threshold value for the artificial costs.
        self.fs_threshold = None
        # Define the low and the high art. cost value e.g. [EUR/Wh], [EUR/kg]
        self.fs_low_art_cost = None
        self.fs_high_art_cost = None
# ------------------- UPDATE PARAMETER DEFAULT VALUES -------------------
self.set_parameters(params)
# ------------------- INTERNAL VALUES -------------------
# The current artificial cost value e.g. [EUR/Wh], [EUR/kg].
self.current_ac = 0
def prepare_simulation(self, components):
        # Update the artificial costs for this time step (dependent on foreign states).
if self.fs_component_name is not None:
foreign_state_value = self.get_foreign_state_value(components)
if foreign_state_value < self.fs_threshold:
self.artificial_costs = self.fs_low_art_cost
else:
self.artificial_costs = self.fs_high_art_cost
# Set the total costs for the commodity this time step
# (costs + art. costs) e.g. [EUR/Wh], [EUR/kg].
self.current_ac = self.get_costs_and_art_costs()
def create_oemof_model(self, busses, _):
from_grid = solph.Source(
label=self.name,
outputs={busses[self.bus_out]: solph.Flow(
nominal_value=self.output_max,
variable_costs=self.current_ac
)})
return from_grid
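# Usage sketch (hypothetical parameter values; the keys mirror the attributes
# that set_parameters() is expected to override in __init__):
#
#     grid = Supply({
#         'name': 'electricity_grid',
#         'output_max': 5000000,
#         'bus_out': 'bel',
#         'fs_threshold': 0.5,
#         'fs_low_art_cost': -0.001,
#         'fs_high_art_cost': 0.001,
#     })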
|
python
|
from dicom_parser.utils.sequence_detector.sequences.mr.dwi.derived import \
DWI_DERIVED_RULES
from dicom_parser.utils.sequence_detector.sequences.mr.dwi.diffusion import \
DWI_RULES
from dicom_parser.utils.sequence_detector.sequences.mr.dwi.fieldmap import \
DWI_FIELDMAP
from dicom_parser.utils.sequence_detector.sequences.mr.dwi.sbref import \
DWI_SBREF_RULES
MR_DIFFUSION_SEQUENCES = {
"dwi": DWI_RULES,
"dwi_derived": DWI_DERIVED_RULES,
"dwi_fieldmap": DWI_FIELDMAP,
"dwi_sbref": DWI_SBREF_RULES,
}
|
python
|
# -*- coding: utf-8 -*-
from typing import Union
import urllib3
from indico.config import IndicoConfig
from indico.http.client import HTTPClient
from indico.client.request import HTTPRequest, RequestChain, PagedRequest
class IndicoClient:
"""
The Indico GraphQL Client.
IndicoClient is the primary way to interact with the Indico Platform.
    Args:
        config (IndicoConfig, optional): IndicoConfig object with environment configuration
    Returns:
        IndicoClient object
    Raises:
        RuntimeError: If api_token_path does not exist.
    """
def __init__(self, config: IndicoConfig = None):
if not config:
config = IndicoConfig()
if not config.verify_ssl:
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
self.config = config
self._http = HTTPClient(config)
def _handle_request_chain(self, chain: RequestChain):
response = None
for request in chain.requests():
if isinstance(request, HTTPRequest):
response = self._http.execute_request(request)
chain.previous = response
elif isinstance(request, RequestChain):
response = self._handle_request_chain(request)
chain.previous = response
if chain.result:
return chain.result
return response
def call(self, request: Union[HTTPRequest, RequestChain]):
"""
Make a call to the Indico IPA Platform
Args:
request (GraphQLRequest or RequestChain): GraphQL request to send to the Indico Platform
Returns:
Response appropriate to the class of the provided request parameter. Often JSON but not always.
Raises:
IndicoRequestError: With errors in processing the request
"""
if isinstance(request, RequestChain):
return self._handle_request_chain(request)
elif request and isinstance(request, HTTPRequest):
return self._http.execute_request(request)
def paginate(self, request: PagedRequest):
"""
Provides a generator that continues paging through responses
Available with List<> Requests that offer pagination
Example:
for s in client.paginate(ListSubmissions()):
print("Submission", s)
"""
while request.has_next_page:
r = self._http.execute_request(request)
yield r
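# Usage sketch (ListSubmissions is the paged request named in the docstring
# above; host and token path are illustrative IndicoConfig settings):
#
#     config = IndicoConfig(host="app.indico.io")
#     client = IndicoClient(config=config)
#     for submission in client.paginate(ListSubmissions()):
#         print("Submission", submission)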
|
python
|
import os
#import requests
import sys, urllib2, urllib
comp_err_file = open("compile.e", 'r')
comp_err_str = comp_err_file.read()
comp_out_file = open("compile.o", 'r')
comp_out_str = comp_out_file.read()
fileName = str(sys.argv[1])
print 'something'
data = urllib.urlencode({'fileName':fileName,'compileO':comp_out_str, 'compileE':comp_err_str})
req = urllib.urlopen("http://10.201.136.134:8000/COL380/API/Compile/", data)
#response = urllib.urlopen(req)
if len(comp_err_str) == 0:
    # compilation success (the file contents were already read into comp_err_str above)
os.system("/opt/pbs/default/bin/qsub -P cse -N Test1 -lselect=1:ncpus=21:mem=24gb -l walltime=00:15:00 run1.sh")
os.system("/opt/pbs/default/bin/qsub -P cse -N Test2 -lselect=1:ncpus=21:mem=24gb -l walltime=00:15:00 run2.sh")
os.system("/opt/pbs/default/bin/qsub -P cse -N Test3 -lselect=1:ncpus=21:mem=24gb -l walltime=00:15:00 run3.sh")
os.system("/opt/pbs/default/bin/qsub -P cse -N Test4 -lselect=1:ncpus=21:mem=24gb -l walltime=00:15:00 run4.sh")
os.system("/opt/pbs/default/bin/qsub -P cse -N Test5 -lselect=1:ncpus=21:mem=24gb -l walltime=00:15:00 run5.sh")
os.system("/opt/pbs/default/bin/qsub -P cse -N sendStatus -lselect=1:ncpus=21:mem=24gb -l walltime=00:15:00 SendStatus.sh")
|
python
|
""" Keras Retinanet from https://github.com/fizyr/keras-retinanet
Some slight refactoring are done to improve reusability of codebase
"""
import keras
from .. import initializers
from .. import layers
from .. import losses
from ._retinanet_config import make_config
from ._retinanet import (
default_classification_model,
default_regression_model,
create_pyramid_features,
apply_model_to_features,
compute_anchors
)
from ._load_backbone import (
load_backbone,
load_backbone_preprocessing,
load_backbone_custom_objects
)
def compile_retinanet(
training_model,
huber_sigma=3.0,
focal_alpha=0.25,
focal_gamma=2.0,
optimizer=None
):
if optimizer is None:
optimizer=keras.optimizers.adam(lr=1e-5, clipnorm=0.001)
training_model.compile(
loss={
'regression' : losses.make_detection_huber_loss(sigma=huber_sigma),
'classification': losses.make_detection_focal_loss(alpha=focal_alpha, gamma=focal_gamma)
},
optimizer=optimizer
)
def RetinaNetLoad(filepath, backbone='resnet50'):
""" Loads a retinanet model from a file
Args
filepath: one of the following:
- string, path to the saved model, or
- h5py.File object from which to load the model
backbone : Backbone with which the model was trained.
"""
custom_objects = {
'PriorProbability' : initializers.PriorProbability,
'ResizeTo' : layers.ResizeTo,
'Anchors' : layers.Anchors,
'ClipBoxes' : layers.ClipBoxes,
'RegressBoxes' : layers.RegressBoxes,
'FilterDetections' : layers.FilterDetections,
'detection_focal_loss' : losses.make_detection_focal_loss(),
'detection_huber_loss' : losses.make_detection_huber_loss()
}
custom_objects.update(load_backbone_custom_objects(backbone))
return keras.models.load_model(filepath, custom_objects=custom_objects)
def RetinaNetTrain(num_classes, **kwargs):
""" Construct a RetinaNet model for training
Args
Refer to keras_collections.models._retinanet_config.py
Returns
training_model : RetinaNet training model (a keras.models.Model object)
- Outputs of this model are [anchor_regressions, anchor_classifications]
- Shapes would be [(batch_size, num_anchors, 4), (batch_size, num_anchors, num_classes)]
config : The network configs (used to convert into a prediction model)
"""
kwargs['num_classes'] = num_classes
config = make_config(**kwargs)
# Make all submodels
backbone_model = load_backbone(
config.backbone,
freeze_backbone=config.freeze_backbone
)
regression_model = default_regression_model(
config.num_anchors,
pyramid_feature_size=config.pyramid_feature_size,
regression_feature_size=config.regression_feature_size
)
classification_model = default_classification_model(
config.num_classes, config.num_anchors,
pyramid_feature_size=config.pyramid_feature_size,
classification_feature_size=config.classification_feature_size
)
# Create inputs and apply preprocessing
model_inputs = keras.Input(shape=(None, None, 3))
preprocessing_layer = load_backbone_preprocessing(config.backbone)
preprocessed_inputs = preprocessing_layer(model_inputs)
# Create feature pyramid
C3, C4, C5 = backbone_model(preprocessed_inputs)[-3:]
features = create_pyramid_features(C3, C4, C5, feature_size=config.pyramid_feature_size)
# Compute outputs
regression_outputs = apply_model_to_features('regression' , regression_model , features)
classification_outputs = apply_model_to_features('classification', classification_model, features)
return keras.models.Model(
inputs=model_inputs,
outputs=(regression_outputs, classification_outputs),
name=config.name + '_train'
), config
def RetinaNetFromTrain(
training_model,
config,
nms=True,
class_specific_filter=True,
):
""" Converts a RetinaNet model for training from a prediction model
Args
training_model : The RetinaNetTrain mode
config : The configs returned by the training model
nms : Flag to trigger if nms is to be applied
class_specific_filter : Flag to trigger if nms is to be applied to each class
Returns
RetinaNet prediction model (a keras.models.Model object)
- Outputs of this model are [boxes, scores, labels]
- Shapes would be [(batch_size, max_detection, 4), (batch_size, max_detection), (batch_size, max_detection)]
"""
# Compute anchors
features = [training_model.get_layer(p_name).output for p_name in ['P3', 'P4', 'P5', 'P6', 'P7']]
anchors = compute_anchors(
features,
sizes=config.anchor_sizes,
strides=config.anchor_strides,
ratios=config.anchor_ratios,
scales=config.anchor_scales,
)
# Get training_model outputs
regression = training_model.outputs[0]
classification = training_model.outputs[1]
# Apply predicted regression to anchors
boxes = layers.RegressBoxes(name='boxes')([anchors, regression])
boxes = layers.ClipBoxes(name='clipped_boxes')([training_model.inputs[0], boxes])
# Filter detections
detections = layers.FilterDetections(
nms=nms,
class_specific_filter=class_specific_filter,
name='filtered_detections'
)([boxes, classification])
return keras.models.Model(inputs=training_model.inputs, outputs=detections, name=config.name)
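# Usage sketch of the intended train -> predict round trip (illustrative;
# assumes make_config accepts a 'backbone' key, as suggested by config.backbone):
#
#     training_model, config = RetinaNetTrain(num_classes=80, backbone='resnet50')
#     compile_retinanet(training_model)
#     # ... fit training_model ...
#     prediction_model = RetinaNetFromTrain(training_model, config)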
|
python
|
from wtforms import fields, validators as va, Form
from receipt_split.models import MAX_MESSAGE_LENGTH
from . import UserSummaryForm
class PaymentForm(Form):
message = fields.StringField("Message", [va.length(min=1,
max=MAX_MESSAGE_LENGTH
)])
amount = fields.DecimalField("Decimal", [va.NumberRange(min=0.01)])
to_user = fields.FormField(UserSummaryForm)
|
python
|
from .startapp import StartApplication
|
python
|
import numpy as np
from seedbank._keys import make_key, make_seed
class SeedState:
"""
Manage a root seed and facilities to derive seeds.
"""
_seed: np.random.SeedSequence
def __init__(self, seed=None):
if seed is None:
seed = np.random.SeedSequence()
self._seed = seed
@property
def seed(self) -> np.random.SeedSequence:
"Get the seed sequence for this seed state."
return self._seed
@property
def int_seed(self):
"Get this seed as an integer."
return self.entropy(1)[0]
def entropy(self, words):
"""
Get *n* words of entropy as a NumPy array.
Args:
words(int): the number of words to return.
Returns:
numpy.ndarray: the entropy.
"""
return self._seed.generate_state(words)
def initialize(self, seed, keys):
seed = make_seed(seed)
if keys:
seed = self.derive(seed, keys).seed
self._seed = seed
return seed
def derive(self, base, keys=None):
"""
Derive a new seed state.
Args:
base(seed-like):
The base seed. If ``None``, use this seed state.
keys(list of seed-like):
Additional keys for deriving the seed. If no keys are
provided, calls :meth:`numpy.random.SeedSequence.spawn` to
obtain a new RNG.
Returns:
SeedState: the derived seed state.
"""
if base is None:
base = self.seed
else:
base = make_seed(base)
if keys:
k2 = tuple(make_key(k) for k in keys)
seed = np.random.SeedSequence(base.entropy, spawn_key=base.spawn_key + k2)
else:
seed = base.spawn(1)[0]
return SeedState(seed)
def rng(self, seed=None):
if seed is None:
seed, = self.seed.spawn(1)
elif not isinstance(seed, np.random.SeedSequence):
            seed = make_seed(seed)
return np.random.default_rng(seed)
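# Usage sketch (illustrative): derive reproducible per-task RNGs from one root.
#
#     root = SeedState(np.random.SeedSequence(42))
#     child = root.derive(None, keys=['worker-0'])   # keyed derivation
#     rng = child.rng()                              # numpy Generator
#     rng.integers(0, 10)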
|
python
|
# By Nick Cortale
# 2017-06-28
#
# Extends the functionality of faker to a more data scientist-esque approach.
# Implements some of the functions from numpy to create some fake data. This is
# also useful for creating data sets with a certain dimensionality and integer
# fields.
import faker
import pandas as pd
FIELD_LIST = ['am_pm', 'boolean', 'bothify', 'bs', 'building_number', 'catch_phrase',
'century', 'city', 'city_prefix', 'city_suffix', 'color_name', 'company',
'company_email', 'company_suffix', 'country', 'country_code', 'credit_card_expire',
'credit_card_number', 'credit_card_provider', 'credit_card_security_code',
'currency_code', 'date', 'domain_name', 'domain_word', 'ean', 'ean13', 'ean8',
'email', 'file_extension', 'file_name', 'file_path', 'first_name',
'first_name_female', 'first_name_male', 'free_email', 'free_email_domain',
'geo_coordinate', 'hex_color', 'image_url', 'internet_explorer', 'ipv4', 'ipv6',
'isbn10', 'isbn13', 'iso8601', 'job', 'language_code', 'last_name',
'last_name_female', 'last_name_male', 'latitude', 'lexify',
'linux_platform_token', 'linux_processor', 'locale', 'longitude', 'mac_address',
'mac_platform_token', 'mac_processor', 'md5', 'military_apo', 'military_dpo',
'military_ship', 'military_state', 'mime_type', 'month', 'month_name', 'name',
'name_female', 'name_male', 'null_boolean', 'numerify',
'password', 'phone_number', 'postalcode', 'postalcode_plus4', 'postcode',
'prefix', 'prefix_female', 'prefix_male', 'pybool', 'pydecimal', 'pyfloat',
'pyint', 'pystr', 'random_digit', 'random_digit_not_null',
'random_digit_not_null_or_empty', 'random_digit_or_empty', 'random_element',
'random_int', 'random_letter', 'random_number', 'randomize_nb_elements',
'safe_color_name', 'safe_email', 'safe_hex_color', 'secondary_address', 'seed',
'sentence', 'sha1', 'sha256', 'slug', 'ssn', 'state',
'state_abbr', 'street_address', 'street_name', 'street_suffix', 'suffix',
'suffix_female', 'suffix_male', 'text', 'time',
'timezone', 'tld', 'unix_time', 'uri', 'uri_extension', 'uri_page', 'uri_path',
'url', 'user_agent', 'user_name', 'uuid4', 'windows_platform_token', 'word',
'year', 'zipcode', 'zipcode_plus4']
QUICK_LIST = ['random_element', 'random_digit', 'random_digit_not_null',
'uri_page', 'safe_color_name', 'free_email_domain', 'military_state',
'random_int', 'uri_extension', 'state_abbr', 'state', 'pybool', 'military_ship',
'pyint', 'tld', 'zipcode', 'random_letter', 'null_boolean', 'mac_processor',
'randomize_nb_elements', 'city_prefix', 'linux_processor', 'company_suffix',
'postalcode', 'city_suffix', 'unix_time', 'windows_platform_token', 'boolean',
'century', 'linux_platform_token', 'word', 'street_suffix',
'random_digit_not_null_or_empty', 'currency_code', 'hex_color', 'sha1',
'credit_card_provider', 'sha256', 'md5', 'country_code',
'random_digit_or_empty', 'country', 'safe_hex_color', 'timezone', 'uuid4',
'geo_coordinate', 'random_number', 'language_code', 'longitude',
'zipcode_plus4', 'latitude', 'postalcode_plus4', 'mime_type', 'file_extension',
'prefix_male', 'job', 'mac_platform_token', 'prefix_female', 'uri_path',
'ipv4', 'suffix_female', 'iso8601', 'locale', 'color_name', 'image_url',
'internet_explorer', 'file_name', 'ssn', 'bs', 'time', 'numerify',
'catch_phrase', 'prefix', 'suffix_male', 'lexify', 'suffix',
'secondary_address', 'date', 'month_name', 'month', 'year', 'file_path',
'pyfloat', 'credit_card_security_code', 'pydecimal', 'mac_address', 'am_pm',
'ipv6', 'building_number', 'bothify', 'slug', 'ean8', 'military_apo',
'military_dpo']
BASIC_LIST = ['name', 'free_email_domain', 'city', 'state_abbr', 'job',
'random_digit', 'random_digit_or_empty']
class PandasFaker(object):
"""Create fake data for data analysis or database testing purposes.
fields : list or None
If fields is none, will use the basic list.
"""
def __init__(self, fields=None):
if not fields:
fields = BASIC_LIST
self.fields = fields
self.faker_obj = faker.Faker()
def _gen_fake(self):
"""Create a fake dictionary of attributes as defined in fields.
fields : list
Fields to grab to generate some fake data.
"""
#fake = faker.Faker()
data = {}
for field in self.fields:
try:
x = getattr(self.faker_obj, field)
data[field] = x()
            except AttributeError:
                print("{} is not currently implemented".format(field))
return data
def make_fakes(self, num_fakes):
"""Create multiple fake records that will be output as a pandas
dataframe.
num_fakes : int
Number of fakes to create
"""
data_list = []
for i in range(num_fakes):
data = self._gen_fake()
data_list.append(data)
return pd.DataFrame(data_list)
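# Usage sketch: build a small fake dataset from a custom subset of FIELD_LIST
# (falls back to BASIC_LIST when no fields are given).
#
#     pf = PandasFaker(fields=['name', 'job', 'city'])
#     df = pf.make_fakes(100)   # -> 100-row pandas DataFrame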
|
python
|
import os
import numpy as np
import pandas as pd
#from stat_perform import *
from utilities import *
'''
This program will calculate the IoU per image per class
Inputs:
- .tflite: a segmentation model
- .jpg: a picture from pascal
- pascal_segmented_classes_per_image.csv file
Output:
- CSV file contains iou_score (float),
iou_per_class_array (float array size 1x20 an entry per class),
time_milisecs (time in miliseconds)
'''
#mdl_name = 'modelDeepLabV3_Mila'
#mdl_name = 'lite-model_deeplabv3-mobilenetv2_dm05_1_default_2'
#mdl_name = 'lite-model_deeplabv3-xception65_1_default_2'
#mdl_name = 'lite-model_mobilenetv2-coco_dr_1'
#mdl_name = 'lite-model_mobilenetv2-coco_dr_1'
tst_path = './datasets/pascal/'
#mdl_path = './models/deep_lab_v3_plus/'
#mdl_path = './models/TensorFlowHub/'
mdl_path = './models/frozen_graph/'
csv_in = 'pascal_segmented_classes_per_image.csv'
mdls = os.listdir(mdl_path)
labels = pd.read_csv(tst_path + csv_in, index_col=1).drop('Unnamed: 0', axis = 1)
label_array = labels.to_numpy()
#col_head = labels.columns
val_path = tst_path + 'Segmentation_input/validation/'
seg_path = tst_path + 'Segmentation_output/validation/'
#val_path = 'C:/Users/adi/Downloads/VOC2012/SegmentationClass/'
image_list = os.listdir(val_path)
# Calculate IoU per class
nimg = len(image_list)
for mdl in mdls:
iou_out = []
i=1
# Loop to compute the IOU
for img in image_list:
print('Processing image:' + img + ', image no ' + str(i) + ' of ' + str(nimg))
#iou_score, ipclass, time_milisecs = iou_per_class(mdl_path + mdl_name + '.tflite', val_path + img, labels)
#iou_out.append(np.hstack((iou_score, np.squeeze(ipclass), time_milisecs)))
label = image2segmap(seg_path + os.path.splitext(img)[0] + '.png')
#time_milisecs, iou_score = meanIou(mdl_path + os.path.splitext(mdl)[0] + '.tflite', val_path + img, seg_path + os.path.splitext(img)[0] + '.png')
#_, iou_score, ioupclass ,time_milisecs = iou_per_pixelclass1(mdl_path + os.path.splitext(mdl)[0] + '.tflite', val_path + img, seg_path + os.path.splitext(img)[0] + '.png')
iou_score, ioupclass ,time_milisecs = meanIougraph_2(mdl_path + os.path.splitext(mdl)[0] + '.pb', val_path + img, seg_path + os.path.splitext(img)[0] + '.png')
iou_out.append(np.hstack((iou_score, time_milisecs, ioupclass)))
i=i+1
iou_out = np.array(iou_out)
# Create header for CSV
_, label_names = get_pascal_labels()
label_names = label_names[:-1]
header = np.hstack(('mIOU', 'Speed (ms)', label_names))
#header = np.hstack(('mIOU', 'Speed (ms)'))
    rst = pd.DataFrame(iou_out, columns = header, index = image_list[:iou_out.shape[0]])
print(rst.head())
    # Take the mean to evaluate the model performance
iouave = iou_out.mean(axis=0)
maiou = iouave[0]
#mspd = iouave[-1]
mspd = iouave[1]
prtout = 'MAIOU: ' + str(maiou) + ', mean speed: ' + str(mspd)
print(prtout)
print(prtout, file=open(mdl + '_maiou.txt', "a"))
# Create the csv file
rst.to_csv(mdl + '_miou.csv')
|
python
|
"""Microsoft Teams destination."""
import logging
import pymsteams
def build_notification_text(text_parameters) -> str:
"""Create and format the contents of the notification."""
nr_changed = len(text_parameters["metrics"])
plural_s = "s" if nr_changed > 1 else ""
report_link = f'[{text_parameters["report_title"]}]({text_parameters["url"]})'
result = f'{report_link} has {nr_changed} metric{plural_s} that changed status:\n\n'
for metric in text_parameters["metrics"]:
name = metric["metric_name"]
unit = metric["metric_unit"]
unit = unit if unit.startswith("%") else f" {unit}"
result += f'* {name} status is {metric["new_metric_status"]}, was {metric["old_metric_status"]}. ' \
f'Value is {metric["new_metric_value"]}{unit}, was {metric["old_metric_value"]}{unit}.\n'
return result
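# Shape sketch of the expected `text_parameters` argument (inferred from the
# lookups above; the values are illustrative only):
#
#     text_parameters = {
#         "report_title": "Quality report",
#         "url": "https://example.org/report",
#         "metrics": [
#             {"metric_name": "Complex units", "metric_unit": "units",
#              "old_metric_status": "target met", "new_metric_status": "target not met",
#              "old_metric_value": 10, "new_metric_value": 12},
#         ],
#     }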
def send_notification_to_teams(destination: str, text: str) -> None:
"""Send notification to Microsoft Teams using a Webhook."""
logging.info("Sending notification to configured webhook")
my_teams_message = pymsteams.connectorcard(destination)
my_teams_message.text(text)
try:
my_teams_message.send()
except Exception as reason: # pylint: disable=broad-except
logging.error("Could not deliver notification: %s", reason)
|
python
|
from BorutaShap import BorutaShap
def test_class_constructs():
BorutaShap()
|
python
|
#! /usr/bin/env python
import numpy as np
import math
import time
import rospy
import roslib
from geometry_msgs.msg import Twist
from std_msgs.msg import String, Float32, Int32, Bool, Int32MultiArray, Float32MultiArray
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
frame_w = rospy.get_param("/usb_cam/image_width")
frame_h = rospy.get_param("/usb_cam/image_height")
ball_x = -1
ball_y = -1
ball_size = -1
def ball_pos_callback(pos_msg):
global ball_x, ball_y
ball_x = pos_msg.data[0]
ball_y = pos_msg.data[1]
button = [0, 0]
def button_callback(pos_msg):
global button
button[0] = pos_msg.data[0]
button[1] = pos_msg.data[1]
magneto = 0
def compass_callback(cmps_msg):
global magneto
magneto = cmps_msg.data
count_ball_loss = 0
def ball_lost(threshold):
global count_ball_loss
if ball_x == -1 and ball_y == -1:
count_ball_loss += 1
if count_ball_loss >= threshold :
return True
else :
count_ball_loss = 0
return False
def head_move(head_pan, head_tilt):
global pos_pan, pos_tilt
pos_pan = head_pan
pos_tilt = head_tilt
head_pos = Float32MultiArray()
head_pos.data = [pos_pan, pos_tilt]
head_pub.publish(head_pos)
def walk(x, y, a):
velocity = Twist()
velocity.linear.x = x
velocity.linear.y = y
velocity.linear.z = a
motion_vel_pub.publish(velocity)
pos_pan = 0.0
pos_tilt = 0.0
pan_min = -1.5
pan_max = 1.5
tilt_min = -1.3
tilt_max = 0.0
def head_limit(pos_pan, pos_tilt):
if pos_pan <= pan_min :
pos_pan = pan_min
elif pos_pan >= pan_max :
pos_pan = pan_max
if pos_tilt <= tilt_min :
pos_tilt = tilt_min
elif pos_tilt >= tilt_max :
pos_tilt = tilt_max
head_pos = Float32MultiArray()
head_pos.data = [pos_pan, pos_tilt]
return head_pos
pan_step = 0.0
tilt_step = 0.0
move_pan = True
def scan_ball(mode):
global pos_pan, pos_tilt, pan_step, tilt_step, move_pan
if pan_step > 0:
pan_step = rospy.get_param("/united_soccer_params/Pan_Step")
else:
pan_step = rospy.get_param("/united_soccer_params/Pan_Step") * -1
if tilt_step > 0:
tilt_step = rospy.get_param("/united_soccer_params/Tilt_Step")
else:
tilt_step = rospy.get_param("/united_soccer_params/Tilt_Step") * -1
if mode == 0: # normal
pos_pan += pan_step
if pos_pan >= pan_max or pos_pan <= pan_min:
pan_step *= -1
pos_tilt += tilt_step
if pos_tilt >= tilt_max or pos_tilt <= tilt_min:
tilt_step *= -1
elif mode == 1: # only tilt
pos_pan = 0.0
pos_tilt += tilt_step
if pos_tilt >= tilt_max or pos_tilt <= tilt_min:
tilt_step *= -1
elif mode == 2: # rectangle
if move_pan:
pos_pan += pan_step
if pos_pan >= pan_max or pos_pan <= pan_min:
pan_step *= -1
move_pan = False
else:
pos_tilt += tilt_step
if pos_tilt >= tilt_max or pos_tilt <= tilt_min:
tilt_step *= -1
move_pan = True
head_pos = head_limit(pos_pan, round(pos_tilt, 3))
pos_pan, pos_tilt = head_pos.data
head_pub.publish(head_pos)
sum_err_pan = 0
sum_err_tilt = 0
last_error_x = 0
last_error_y = 0
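# head_track_ball() below runs a discrete PID controller on the angular error
# between the image center and the detected ball, independently for pan and
# tilt:  output = KP*e + KI*sum(e*dt) + KD*(e - e_prev)/dt
# (descriptive note added for clarity; the gains come from the ROS parameter server)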
def head_track_ball():
global pos_pan, pos_tilt, sum_err_pan, sum_err_tilt, last_error_x, last_error_y
global freq
dt = 1.0 / float(freq)
KP_pan = rospy.get_param("/united_soccer_params/Pan_KP")
KI_pan = rospy.get_param("/united_soccer_params/Pan_KI")
KD_pan = rospy.get_param("/united_soccer_params/Pan_KD")
KP_tilt = rospy.get_param("/united_soccer_params/Tilt_KP")
KI_tilt = rospy.get_param("/united_soccer_params/Tilt_KI")
KD_tilt = rospy.get_param("/united_soccer_params/Tilt_KD")
if ball_x != -1 and ball_y != -1:
error_x = (frame_w/2) - ball_x
error_x *= 77.32 / frame_w
error_x = (error_x * math.pi)/ 180
error_x_diff = error_x - last_error_x
        P_pan = error_x * KP_pan  # proportional term uses the current error
sum_err_pan += error_x * dt
I_pan = sum_err_pan * KI_pan
deriv_err_pan = error_x_diff / dt
D_pan = deriv_err_pan * KD_pan
last_error_x = error_x
pos_pan += (P_pan + I_pan + D_pan)
error_y = (frame_h/2) - ball_y
error_y *= -1
error_y *= 61.93 / frame_h
error_y = (error_y * math.pi) /180
error_y_diff = error_y - last_error_y
        P_tilt = error_y * KP_tilt  # proportional term uses the current error
sum_err_tilt += error_y * dt
I_tilt = sum_err_tilt * KI_tilt
        deriv_err_tilt = error_y_diff / dt
D_tilt = deriv_err_tilt * KD_tilt
last_error_y = error_y
pos_tilt += (P_tilt + I_tilt + D_tilt)
head_pos = head_limit(pos_pan, round(pos_tilt, 2))
pos_pan, pos_tilt = head_pos.data
head_pub.publish(head_pos)
def body_track_ball():
global pos_pan, pos_tilt
KP_body = rospy.get_param("/united_soccer_params/Body_KP")
if ball_x != -1 and ball_y != -1:
error_body_a = pos_pan - 0
else:
error_body_a = 0
max_walk_a = 0.4
body_a = error_body_a * KP_body
if body_a >= max_walk_a:
body_a = max_walk_a
elif body_a <= -max_walk_a:
body_a = -max_walk_a
body_a = round(body_a, 2)
return body_a
ball_pos = False
px_ball_pos = 0.00
py_ball_pos = 0.00
def ball_positioning(setPoint_X, setPoint_Y, speed=0.10):
global pos_pan, pos_tilt, ball_pos, px_ball_pos, py_ball_pos
errorPos_X = pos_pan - setPoint_X
errorPos_Y = pos_tilt - setPoint_Y
KP_ball_positioning_y = rospy.get_param("/united_soccer_params/KP_Ball_Pos_Y")
# print("KP_BALL_POS", KP_ball_positioning_y)
if (errorPos_X > -0.10 and errorPos_X < 0.10) and (errorPos_Y > -0.10):
px_ball_pos = 0.00
py_ball_pos = 0.00
ball_pos = True
else:
ball_pos = False
    if (pos_pan >= 1.0 and pos_tilt >= -1.2) or (pos_pan <= -1.0 and pos_tilt >= -1.2):  # ball off to the side (pan/tilt near the limits)
px_ball_pos = -0.03
py_ball_pos = errorPos_X * KP_ball_positioning_y
else:
# X Move
if pos_tilt > setPoint_Y:
px_ball_pos = -0.03
elif pos_tilt >= (setPoint_Y - 0.1) and pos_tilt <= setPoint_Y:
px_ball_pos = 0.00
elif pos_tilt >= (setPoint_Y - 0.3) and pos_tilt < (setPoint_Y - 0.1):
px_ball_pos = errorPos_Y * -speed
if px_ball_pos >= 0.02:
px_ball_pos = 0.02
elif px_ball_pos <= 0.00:
px_ball_pos = 0.00
        else:  # ball still far away
px_ball_pos = pos_tilt * (0.08 / -1.6)
# Y Move
if pos_pan >= (setPoint_X - 0.1) and pos_pan <= (setPoint_X + 0.1):
py_ball_pos = 0.00
        else:  # not yet within range
py_ball_pos = errorPos_X * KP_ball_positioning_y
walk(round(px_ball_pos, 3), round(py_ball_pos,3), 0.0)
# walk(0.00,0.00,0.00)
def ball_positioning2(setPoint_X, setPoint_Y, speed=0.10):
global pos_pan, pos_tilt, ball_pos, px_ball_pos, py_ball_pos
errorPos_X = pos_pan - setPoint_X
errorPos_Y = pos_tilt - setPoint_Y
# print("error", errorPos_X, errorPos_Y)
KP_ball_positioning_x = rospy.get_param("/united_soccer_params/KP_Ball_Pos_X")
KP_ball_positioning_y = rospy.get_param("/united_soccer_params/KP_Ball_Pos_Y")
if (errorPos_X > -0.08 and errorPos_X < 0.08 and errorPos_Y > -0.10):
ball_pos = True
    elif pos_pan > 1.35 or pos_pan < -1.35:  # pan at its limit (ball far to the side)
py_ball_pos = errorPos_X * KP_ball_positioning_x
px_ball_pos = -errorPos_Y * KP_ball_positioning_y
if py_ball_pos >= 0.03:
py_ball_pos = 0.03
if py_ball_pos <= -0.03:
py_ball_pos = -0.03
walk(-0.015, round(py_ball_pos,3), 0.0)
ball_pos = False
else:
py_ball_pos = errorPos_X * KP_ball_positioning_x
px_ball_pos = -errorPos_Y * KP_ball_positioning_y
if px_ball_pos >= 0.04:
px_ball_pos = 0.04
if py_ball_pos >= 0.03:
py_ball_pos = 0.03
if py_ball_pos <= -0.03:
py_ball_pos = -0.03
walk(round(px_ball_pos, 3), round(py_ball_pos,3), 0.0)
ball_pos = False
def kick():
global pos_pan, pos_tilt, ball_pos
pPan_kick = rospy.get_param("/united_soccer_params/Pan_Kick")
pTilt_kick = rospy.get_param("/united_soccer_params/Tilt_Kick")
if pos_pan >= 0 :#and right_kick == False and left_kick == False: # left_kick
left_kick = True
right_kick = False
elif pos_pan <= 0 :#and right_kick == False and left_kick == False: # right_kick
right_kick = True
left_kick = False
if left_kick:
if ball_pos:
motion_state_pub.publish("stop")
time.sleep(1)
motion_state_pub.publish("action 1")
# return True
else:
ball_positioning(-pPan_kick, pTilt_kick, 0.10)
if right_kick:
if ball_pos:
motion_state_pub.publish("stop")
time.sleep(1)
motion_state_pub.publish("action 2")
# return True
else:
ball_positioning(pPan_kick, pTilt_kick, 0.10)
count_ready_kick = 0
def followBall(mode):  # 0: normal, 1: while turning
head_track_ball()
global pos_pan, pos_tilt, count_ready_kick
set_point_pan = 0.0
set_point_tilt = 0.0
if pos_tilt >= set_point_tilt:
pos_tilt = set_point_tilt
elif pos_tilt < -2.0:
pos_tilt = -2.0
error_fPan = pos_pan - set_point_pan
error_fTilt = pos_tilt - set_point_tilt
    if pos_tilt >= set_point_tilt and pos_pan < 0.4 and pos_pan > -0.4 and ball_x != -1 and ball_y != -1:  # stop (ball is already close)
        count_ready_kick += 1
    else:  # chase the ball (still far away)
        count_ready_kick = 0
    if count_ready_kick >= 5:
        px_move = 0.0  # walk in place
        py_move = error_fPan * 0.040  # 0.045
        pa_move = error_fPan * 0.20  # 0.30 0.045
else:
if pos_tilt < -1.5:
px_move = 0.05
elif pos_tilt >= -1.5 and pos_tilt < -1.3:
px_move = 0.04
elif pos_tilt > -1.0:
px_move = 0.03
else:
px_move = 0.02
py_move = error_fPan * 0.045 # 0.045
pa_move = error_fPan * 0.25 # 0.35 #0.045
if mode == 0: # Mode differential walking
if error_fPan > -0.4 and error_fPan < 0.4:
# print("AA\n")
walk(round(px_move, 3), 0.0, round(pa_move,3))
else:
# print("BB\n")
walk(0.0, 0.0, round(pa_move, 3))
elif mode == 1: # Mode omnidirectional walking
if error_fPan > -0.4 and error_fPan < 0.4:
# print("CC\n")
walk(round(px_move, 3), round(py_move,3), round(pa_move,3))
else:
#printf("DD\n");
walk(0.0, 0.0, round(pa_move,3))
def compass_goal_found(compass_goal, compass_minmax=40):
    compass_min = compass_goal - compass_minmax
    if compass_min < 0:
        compass_min += 360
    if compass_min > 360:
        compass_min -= 360
    compass_max = compass_goal + compass_minmax
    if compass_max < 0:
        compass_max += 360
    if compass_max > 360:
        compass_max -= 360
if magneto > compass_min and magneto < compass_max:
print("True")
return True
else:
print("False")
return False
def kill_node():
rospy.signal_shutdown("shutdown time.")
def main():
print("United Soccer Player - Running")
rospy.init_node("united_soccer_player")
rospy.wait_for_service("/srv_controller")
global head_pub, motion_vel_pub, motion_state_pub
motion_vel_pub = rospy.Publisher("/motion/cmd_vel", Twist, queue_size=1)
motion_state_pub = rospy.Publisher("/motion/state", String, queue_size=1)
head_pub = rospy.Publisher("/head/pos", Float32MultiArray, queue_size=1)
ball_pos_sub = rospy.Subscriber("/united_soccer/ball/position", Int32MultiArray, ball_pos_callback)
button_sub = rospy.Subscriber("/button/state", Int32MultiArray, button_callback)
compass_sub = rospy.Subscriber("/compass/value", Int32, compass_callback)
print("United Soccer Player - Running")
time.sleep(0.3)
motion_state_pub.publish("stand")
global freq
freq = 50
rate = rospy.Rate(freq)
state = "initial"
play = False
button_pressed = [0, 0]
conf_stop = 0
foot = "left"
found_ball = 0
compass_goal = 175
count_goal_found = 0
while not rospy.is_shutdown():
if button[0] == 1:
button_pressed[0] = 1
else:
if button_pressed[0] == 1:
if play:
motion_state_pub.publish("sit")
print("Sit")
play = False
else:
motion_state_pub.publish("stand")
print("Stand")
play = True
state = "initial"
button_pressed[0] = 0
#///////////////////////////////////////////////////////////////////////
#//////////////.............Role of execution............///////////////
#///////////////////////////////////////////////////////////////////////
if play :
# print("pospan %f postilt %f", pos_pan, pos_tilt)
print(state)
if state == "initial":
if ball_lost(20):
scan_ball(0)
motion_state_pub.publish("stop")
else:
if ball_x != -1 and ball_y != -1 :
found_ball += 1
if found_ball >= 50:
state = "follow_ball"
found_ball = 0
elif state == "follow_ball":
if ball_lost(20):
scan_ball(0)
walk(0.0,0.0,0.0)
# motion_state_pub.publish("stop")
# head_move(0.0, -1.3)
else:
head_track_ball()
followBall(1)
if pos_tilt >= -0.6 and ball_x != -1 and ball_y != -1:
state = "positioning"
# if button[1] == 1:
# state = "kick"
# motion_state_pub.publish("start")
# state = "forward"
elif state == "positioning":
if ball_lost(20):
state = "follow_ball"
# scan_ball(0)
# walk(0.0,0.0,0.0)
else:
head_track_ball()
set_point_x = rospy.get_param("/united_soccer_params/Pan_Kick")
set_point_y = rospy.get_param("/united_soccer_params/Tilt_Kick")
# if pos_pan > 0:
ball_positioning2(-set_point_x, set_point_y)
# if ball_pos == True and compass_goal_found(compass_goal) == True:
# count_goal_found += 1
# else:
# count_goal_found = 0
if ball_pos == True :
count_goal_found += 1
else:
count_goal_found = 0
if count_goal_found > 5:
state = "kick_left"
count_goal_found = 0
# else:
# state = "goto_goal_heading"
# else:
# ball_positioning2(-set_point_x, set_point_y)
# if ball_pos == True:
# state = "kick_left"
elif state == "goto_goal_heading":
if ball_lost(20):
# scan_ball(0)
# walk(0.0,0.0,0.0)
state = "follow_ball"
else:
head_track_ball()
if compass_goal_found(compass_goal) == True:
count_goal_found += 1
else:
count_goal_found = 0
if count_goal_found > 5:
state = "positioning"
count_goal_found = 0
else:
rotate_alpha = pos_pan * rospy.get_param("/united_soccer_params/KP_Compass_A")
rotate_y = 0.3
if pos_tilt > -0.3:
walk(0.00, round(rotate_y, 3), round(rotate_alpha,3))
else:
error_tilt = pos_tilt - rospy.get_param("/united_soccer_params/Tilt_Kick")
rotate_x = -error_tilt * rospy.get_param("/united_soccer_params/KP_Compass_X")
walk(round(rotate_x, 3), round(rotate_y, 3), round(rotate_alpha,3))
elif state == "kick_right":
motion_state_pub.publish("stop")
time.sleep(1)
motion_state_pub.publish("action 2")
state = "initial"
elif state == "kick_left":
motion_state_pub.publish("stop")
time.sleep(1)
motion_state_pub.publish("action 1")
state = "initial"
elif state == "tune_head":
if ball_lost(20):
scan_ball(0)
else:
head_track_ball()
# print("%d, %d", ball_x, ball_y)
elif state == "tune_body":
if ball_lost(20):
motion_state_pub.publish("stop")
scan_ball(0)
else:
head_track_ball()
shift = body_track_ball()
walk(0.0, 0.0, shift)
# print("%d, %d", ball_x, ball_y)
elif state == "test_kick":
if button[1] == 1:
if foot == "left":
motion_state_pub.publish("action 1") # left_kick
foot = "right"
elif foot == "right":
motion_state_pub.publish("action 2") # right_kick
foot = "left"
rate.sleep()
print("United Soccer Player - Shut Down")
rospy.on_shutdown(kill_node)
if __name__ == "__main__":
main()
|
python
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from builtins import str
from builtins import range
from collections import defaultdict
from itertools import chain
from lxml import etree
from lxml.html import fromstring
import numpy as np
from fonduer.models import Phrase
from fonduer.snorkel.candidates import Ngrams
from fonduer.snorkel.models.context import TemporarySpan
from fonduer.snorkel.utils import tokens_to_ngrams
from fonduer.utils_table import (min_row_diff, min_col_diff, is_row_aligned,
is_col_aligned, is_axis_aligned)
from fonduer.utils_visual import (
bbox_from_span, bbox_from_phrase, bbox_horz_aligned, bbox_vert_aligned,
bbox_vert_aligned_left, bbox_vert_aligned_right, bbox_vert_aligned_center)
from bs4 import BeautifulSoup
from fuzzywuzzy import fuzz
import os
# Default dimensions for 8.5" x 11"
DEFAULT_WIDTH = 612
DEFAULT_HEIGHT = 792
def get_between_ngrams(c, attrib='words', n_min=1, n_max=1, lower=True):
"""Return the ngrams _between_ two unary Spans of a binary-Span Candidate.
Get the ngrams _between_ two unary Spans of a binary-Span Candidate, where
both share the same sentence Context.
:param c: The binary-Span Candidate to evaluate.
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If 'True', all ngrams will be returned in lower case
:rtype: a _generator_ of ngrams
"""
if len(c) != 2:
raise ValueError("Only applicable to binary Candidates")
span0 = c[0]
span1 = c[1]
if span0.sentence != span1.sentence:
raise ValueError("Only applicable to Candidates where both spans are \
from the same immediate Context.")
distance = abs(span0.get_word_start() - span1.get_word_start())
if span0.get_word_start() < span1.get_word_start():
for ngram in get_right_ngrams(
span0,
window=distance - 1,
attrib=attrib,
n_min=n_min,
n_max=n_max,
lower=lower):
yield ngram
else: # span0.get_word_start() > span1.get_word_start()
for ngram in get_left_ngrams(
span1,
window=distance - 1,
attrib=attrib,
n_min=n_min,
n_max=n_max,
lower=lower):
yield ngram
def get_left_ngrams(span,
window=3,
attrib='words',
n_min=1,
n_max=1,
lower=True):
"""Get the ngrams within a window to the _left_ of the Candidate from its sentence Context.
For higher-arity Candidates, defaults to the _first_ argument.
:param span: The Span to evaluate. If a candidate is given, default to its first Span.
:param window: The number of tokens to the left of the first argument to return
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
:rtype: a _generator_ of ngrams
"""
span = span if isinstance(span,
TemporarySpan) else span[0] # get first Span
i = span.get_word_start()
for ngram in tokens_to_ngrams(
getattr(span.sentence, attrib)[max(0, i - window):i],
n_min=n_min,
n_max=n_max,
lower=lower):
yield ngram
def get_right_ngrams(span,
window=3,
attrib='words',
n_min=1,
n_max=1,
lower=True):
"""Get the ngrams within a window to the _right_ of the Candidate from its sentence Context.
For higher-arity Candidates, defaults to the _last_ argument.
:param span: The Span to evaluate. If a candidate is given, default to its last Span.
:param window: The number of tokens to the left of the first argument to return
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
:rtype: a _generator_ of ngrams
"""
span = span if isinstance(span,
TemporarySpan) else span[-1] # get last Span
i = span.get_word_end()
for ngram in tokens_to_ngrams(
getattr(span.sentence, attrib)[i + 1:i + 1 + window],
n_min=n_min,
n_max=n_max,
lower=lower):
yield ngram
def get_matches(lf, candidate_set, match_values=[1, -1]):
"""Return a list of candidates that are matched by a particular LF.
A simple helper function to see how many matches (non-zero by default) an LF gets.
Returns the matched candidates, which can then be directly put into the Viewer.
:param lf: The labeling function to apply to the candidate_set
:param candidate_set: The set of candidates to evaluate
:param match_values: An option list of the values to consider as matched. [1, -1] by default.
:rtype: a list of candidates
"""
matches = []
for c in candidate_set:
label = lf(c)
if label in match_values:
matches.append(c)
print(("%s matches") % len(matches))
return matches
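# Usage sketch (hypothetical labeling function and candidate set):
#
#     def LF_same_row(c):
#         return 1 if same_row(c) else 0
#     matched = get_matches(LF_same_row, candidates)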
# TABLE LF HELPERS ##########################################################
def same_document(c):
"""Return True if all Spans in the given candidate are from the same Document.
:param c: The candidate whose Spans are being compared
:rtype: boolean
"""
return (all(c[i].sentence.document is not None
and c[i].sentence.document == c[0].sentence.document
for i in range(len(c))))
def same_table(c):
"""Return True if all Spans in the given candidate are from the same Table.
:param c: The candidate whose Spans are being compared
:rtype: boolean
"""
return (all(c[i].sentence.is_tabular()
and c[i].sentence.table == c[0].sentence.table
for i in range(len(c))))
# Added by Wei Li
def same_file(organic, figure):
"""Return True if all candidate are from the same file.
:rtype: boolean
"""
return organic.sentence.document == figure.figure.document
def mentionsFig(organic, figure):
text = organic.sentence.text.replace(' ', '').lower()
fig_name = figure.figure.name.replace(' ', '').lower()
return text.find(fig_name) != -1
def mentionsOrg(figure, organic):
fig_text = figure.figure.description
if figure.figure.text and len(figure.figure.text) != 0:
fig_text += figure.figure.text
    fig_text = fig_text.replace(' ', '').lower()
    organic_name = organic.text.replace(' ', '').lower()
return fig_text.find(organic_name) != -1
def same_row(c):
"""Return True if all Spans in the given candidate are from the same Row.
:param c: The candidate whose Spans are being compared
:rtype: boolean
"""
return (same_table(c) and all(
is_row_aligned(c[i].sentence, c[0].sentence) for i in range(len(c))))
def same_col(c):
"""Return True if all Spans in the given candidate are from the same Col.
:param c: The candidate whose Spans are being compared
:rtype: boolean
"""
return (same_table(c) and all(
is_col_aligned(c[i].sentence, c[0].sentence) for i in range(len(c))))
def is_tabular_aligned(c):
"""Return True if all Spans in the given candidate are from the same Row or Col.
:param c: The candidate whose Spans are being compared
:rtype: boolean
"""
    return (same_table(c) and all(
        is_col_aligned(c[i].sentence, c[0].sentence)
        or is_row_aligned(c[i].sentence, c[0].sentence)
        for i in range(len(c))))
def same_cell(c):
"""Return True if all Spans in the given candidate are from the same Cell.
:param c: The candidate whose Spans are being compared
:rtype: boolean
"""
return (all(c[i].sentence.cell is not None
and c[i].sentence.cell == c[0].sentence.cell
for i in range(len(c))))
def same_phrase(c):
"""Return True if all Spans in the given candidate are from the same Phrase.
:param c: The candidate whose Spans are being compared
:rtype: boolean
"""
return (all(c[i].sentence is not None and c[i].sentence == c[0].sentence
for i in range(len(c))))
def get_max_col_num(span):
"""Return the largest column number that a Span occupies.
:param span: The Span to evaluate. If a candidate is given, default to its last Span.
:rtype: integer or None
"""
span = span if isinstance(span, TemporarySpan) else span[-1]
if span.sentence.is_tabular():
return span.sentence.cell.col_end
else:
return None
def get_min_col_num(span):
"""Return the lowest column number that a Span occupies.
:param span: The Span to evaluate. If a candidate is given, default to its first Span.
:rtype: integer or None
"""
span = span if isinstance(span, TemporarySpan) else span[0]
if span.sentence.is_tabular():
return span.sentence.cell.col_start
else:
return None
def get_phrase_ngrams(span, attrib='words', n_min=1, n_max=1, lower=True):
"""Get the ngrams that are in the Phrase of the given span, not including itself.
Note that if a candidate is passed in, all of its Spans will be searched.
:param span: The Span whose Phrase is being searched
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
:rtype: a _generator_ of ngrams
"""
spans = [span] if isinstance(span, TemporarySpan) else span.get_contexts()
for span in spans:
for ngram in get_left_ngrams(
span,
window=100,
attrib=attrib,
n_min=n_min,
n_max=n_max,
lower=lower):
yield ngram
for ngram in get_right_ngrams(
span,
window=100,
attrib=attrib,
n_min=n_min,
n_max=n_max,
lower=lower):
yield ngram
def get_neighbor_phrase_ngrams(span,
d=1,
attrib='words',
n_min=1,
n_max=1,
lower=True):
"""Get the ngrams that are in the neighoring Phrases of the given Span.
Note that if a candidate is passed in, all of its Spans will be searched.
:param span: The span whose neighbor Phrases are being searched
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
:rtype: a _generator_ of ngrams
"""
spans = [span] if isinstance(span, TemporarySpan) else span.get_contexts()
for span in spans:
for ngram in chain.from_iterable([
tokens_to_ngrams(
getattr(phrase, attrib),
n_min=n_min,
n_max=n_max,
lower=lower) for phrase in span.sentence.document.phrases
if abs(phrase.phrase_num - span.sentence.phrase_num) <= d
and phrase != span.sentence
]):
yield ngram
def get_cell_ngrams(span, attrib='words', n_min=1, n_max=1, lower=True):
"""Get the ngrams that are in the Cell of the given span, not including itself.
Note that if a candidate is passed in, all of its Spans will be searched.
:param span: The span whose Cell is being searched
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
:rtype: a _generator_ of ngrams
"""
spans = [span] if isinstance(span, TemporarySpan) else span.get_contexts()
for span in spans:
for ngram in get_phrase_ngrams(
span, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower):
yield ngram
if isinstance(span.sentence,
Phrase) and span.sentence.cell is not None:
for ngram in chain.from_iterable([
tokens_to_ngrams(
getattr(phrase, attrib),
n_min=n_min,
n_max=n_max,
lower=lower) for phrase in span.sentence.cell.phrases
if phrase != span.sentence
]):
yield ngram
def get_neighbor_cell_ngrams(span,
dist=1,
directions=False,
attrib='words',
n_min=1,
n_max=1,
lower=True):
"""Get the ngrams from all Cells that are within a given Cell distance in one direction from the given Span.
Note that if a candidate is passed in, all of its Spans will be searched.
    If ``directions=True``, each ngram will be returned with a direction in {'UP', 'DOWN', 'LEFT', 'RIGHT'}.
:param span: The span whose neighbor Cells are being searched
:param dist: The Cell distance within which a neighbor Cell must be to be considered
:param directions: A Boolean expressing whether or not to return the direction of each ngram
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
:rtype: a _generator_ of ngrams (or (ngram, direction) tuples if directions=True)
"""
# TODO: Fix this to be more efficient (optimize with SQL query)
spans = [span] if isinstance(span, TemporarySpan) else span.get_contexts()
for span in spans:
for ngram in get_phrase_ngrams(
span, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower):
yield ngram
if isinstance(span.sentence,
Phrase) and span.sentence.cell is not None:
root_cell = span.sentence.cell
for phrase in chain.from_iterable([
_get_aligned_phrases(root_cell, 'row'),
_get_aligned_phrases(root_cell, 'col')
]):
row_diff = min_row_diff(phrase, root_cell, absolute=False)
col_diff = min_col_diff(phrase, root_cell, absolute=False)
if (row_diff or col_diff) and not (
row_diff and
col_diff) and abs(row_diff) + abs(col_diff) <= dist:
if directions:
direction = ''
                        if col_diff == 0:
                            if 0 < row_diff <= dist:
                                direction = "UP"
                            elif -dist <= row_diff < 0:
                                direction = "DOWN"
                        elif row_diff == 0:
                            if 0 < col_diff <= dist:
                                direction = "RIGHT"
                            elif -dist <= col_diff < 0:
                                direction = "LEFT"
for ngram in tokens_to_ngrams(
getattr(phrase, attrib),
n_min=n_min,
n_max=n_max,
lower=lower):
yield (ngram, direction)
else:
for ngram in tokens_to_ngrams(
getattr(phrase, attrib),
n_min=n_min,
n_max=n_max,
lower=lower):
yield ngram
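# Usage sketch (illustrative, not part of the library): a labeling function
# that fires when the cell directly above a candidate's first Span mentions
# "voltage". The keyword and the candidate layout are assumptions.
def _lf_example_cell_above(c):
    for item in get_neighbor_cell_ngrams(
            c[0], dist=1, directions=True, lower=True):
        # The Span's own Phrase ngrams are yielded as plain strings;
        # neighbor-cell ngrams arrive as (ngram, direction) pairs.
        if isinstance(item, tuple) and item == ('voltage', 'UP'):
            return 1
    return 0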
def get_row_ngrams(span,
attrib='words',
n_min=1,
n_max=1,
spread=[0, 0],
lower=True):
"""Get the ngrams from all Cells that are in the same row as the given Span.
Note that if a candidate is passed in, all of its Spans will be searched.
:param span: The span whose row Cells are being searched
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
    :param spread: The [before, after] number of extra rows to also treat as aligned
    :param lower: If True, all ngrams will be returned in lower case
:rtype: a _generator_ of ngrams
"""
spans = [span] if isinstance(span, TemporarySpan) else span.get_contexts()
for span in spans:
for ngram in _get_axis_ngrams(
span,
axis='row',
attrib=attrib,
n_min=n_min,
n_max=n_max,
spread=spread,
lower=lower):
yield ngram
def get_col_ngrams(span,
attrib='words',
n_min=1,
n_max=1,
spread=[0, 0],
lower=True):
"""Get the ngrams from all Cells that are in the same column as the given Span.
Note that if a candidate is passed in, all of its Spans will be searched.
:param span: The span whose column Cells are being searched
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
    :param spread: The [before, after] number of extra columns to also treat as aligned
    :param lower: If True, all ngrams will be returned in lower case
:rtype: a _generator_ of ngrams
"""
spans = [span] if isinstance(span, TemporarySpan) else span.get_contexts()
for span in spans:
for ngram in _get_axis_ngrams(
span,
axis='col',
attrib=attrib,
n_min=n_min,
n_max=n_max,
spread=spread,
lower=lower):
yield ngram
def get_aligned_ngrams(span,
attrib='words',
n_min=1,
n_max=1,
spread=[0, 0],
lower=True):
"""Get the ngrams from all Cells that are in the same row or column as the given Span.
Note that if a candidate is passed in, all of its Spans will be searched.
:param span: The span whose row and column Cells are being searched
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
    :param spread: The [before, after] number of extra rows/columns to also treat as aligned
    :param lower: If True, all ngrams will be returned in lower case
:rtype: a _generator_ of ngrams
"""
spans = [span] if isinstance(span, TemporarySpan) else span.get_contexts()
for span in spans:
for ngram in get_row_ngrams(
span,
attrib=attrib,
n_min=n_min,
n_max=n_max,
spread=spread,
lower=lower):
yield ngram
for ngram in get_col_ngrams(
span,
attrib=attrib,
n_min=n_min,
n_max=n_max,
spread=spread,
lower=lower):
yield ngram
def get_head_ngrams(span,
axis=None,
attrib='words',
n_min=1,
n_max=1,
lower=True):
"""Get the ngrams from the cell in the head of the row or column.
More specifically, this returns the ngrams in the leftmost cell in a row and/or the
ngrams in the topmost cell in the column, depending on the axis parameter.
Note that if a candidate is passed in, all of its Spans will be searched.
:param span: The span whose head Cells are being returned
:param axis: Which axis {'row', 'col'} to search. If None, then both row and col are searched.
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
:rtype: a _generator_ of ngrams
"""
spans = [span] if isinstance(span, TemporarySpan) else span.get_contexts()
axes = [axis] if axis else ['row', 'col']
for span in spans:
if not span.sentence.cell:
return
else:
for axis in axes:
if getattr(span.sentence, _other_axis(axis) + '_start') == 0:
return
for phrase in getattr(
_get_head_cell(span.sentence.cell, axis), 'phrases',
[]):
for ngram in tokens_to_ngrams(
getattr(phrase, attrib),
n_min=n_min,
n_max=n_max,
lower=lower):
yield ngram
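# Usage sketch (illustrative keyword): check whether the row header of a
# candidate's first Span mentions a unit keyword.
def _lf_example_row_header(c):
    header_ngrams = set(get_head_ngrams(c[0], axis='row', lower=True))
    return 1 if 'volts' in header_ngrams else 0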
def _get_head_cell(root_cell, axis):
other_axis = 'row' if axis == 'col' else 'col'
aligned_cells = _get_aligned_cells(root_cell, axis)
    if not aligned_cells:
        return []
    return sorted(aligned_cells,
                  key=lambda x: getattr(x, other_axis + '_start'))[0]
def _get_axis_ngrams(span,
axis,
attrib='words',
n_min=1,
n_max=1,
spread=[0, 0],
lower=True):
for ngram in get_phrase_ngrams(
span, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower):
yield ngram
if (span.sentence.cell is not None):
for phrase in _get_aligned_phrases(span.sentence, axis, spread=spread):
for ngram in tokens_to_ngrams(
getattr(phrase, attrib), n_min=n_min, n_max=n_max,
lower=lower):
yield ngram
def _get_aligned_cells(root_cell, axis):
aligned_cells = [
cell for cell in root_cell.table.cells
if is_axis_aligned(root_cell, cell, axis=axis) and cell != root_cell
]
return aligned_cells
def _get_aligned_phrases(root_phrase, axis, spread=[0, 0]):
return [
phrase for cell in root_phrase.table.cells
if is_axis_aligned(root_phrase, cell, axis=axis, spread=spread)
for phrase in cell.phrases if phrase != root_phrase
]
def _other_axis(axis):
return 'row' if axis == 'col' else 'col'
def is_superset(a, b):
"""Check if a is a superset of b.
This is typically used to check if ALL of a list of phrases is in the ngrams returned by an lf_helper.
:param a: A collection of items
:param b: A collection of items
:rtype: boolean
"""
return set(a).issuperset(b)
def overlap(a, b):
"""Check if a overlaps b.
This is typically used to check if ANY of a list of phrases is in the ngrams returned by an lf_helper.
:param a: A collection of items
:param b: A collection of items
:rtype: boolean
"""
return not set(a).isdisjoint(b)
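# Usage sketch (illustrative): combining the ngram helpers with the set
# predicates above inside a labeling function. The keyword list is made up.
def _lf_example_row_keywords(c):
    keywords = ['min', 'max', 'typ']
    row_ngrams = set(get_row_ngrams(c[0], lower=True))
    if is_superset(row_ngrams, keywords):
        return 1   # every keyword appears somewhere in the row
    if overlap(keywords, row_ngrams):
        return 0   # only some of the keywords appear
    return -1      # no keyword appears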
############################
# Visual feature helpers
############################
def is_same_org_fig_page(org, fig):
return fig.page in org.page
def is_same_sent_fig_page(org, fig):
return fig.page in org.sentence.page
def is_nearby_org_fig_page(org, fig, num_pages):
for i in range(1, num_pages+1):
if (fig.page-i) in org.page or (fig.page+i) in org.page:
return True
return False
def fig_on_prev_page(org, fig):
return fig.page < min(org.page)
def org_on_prev_page(org, fig):
return fig.page > max(org.page)
def within_distance(org, fig, ratio, page_width=DEFAULT_WIDTH, page_height=DEFAULT_HEIGHT):
fig_vert_pos = (fig.top+fig.bottom)/2.0
fig_horz_pos = (fig.left+fig.right)/2.0
org_vert_pos = (org.top[0]+org.bottom[0])/2.0
org_horz_pos = (org.left[0]+org.right[0])/2.0
org_vert_pos += page_height * (org.page[0] - fig.page)
if abs(fig_vert_pos - org_vert_pos) <= ratio * page_height:
yield "WITHIN_{}_VERT_PAGE".format(ratio)
if abs(fig_horz_pos - org_horz_pos) <= ratio * page_width:
yield "WITHIN_{}_HORZ_PAGE".format(ratio)
def org_pos_near_fig(org, fig, page_width=DEFAULT_WIDTH, page_height=DEFAULT_HEIGHT):
fig_vert_pos = (fig.top + fig.bottom) / 2.0
fig_horz_pos = (fig.left + fig.right) / 2.0
org_vert_pos = (org.top[0] + org.bottom[0]) / 2.0
org_horz_pos = (org.left[0] + org.right[0]) / 2.0
org_vert_pos += page_height * (org.page[0] - fig.page)
return abs(fig_vert_pos - org_vert_pos) <= 0.5 * page_height and \
abs(fig_horz_pos - org_horz_pos) <= 0.5 * page_width
def ahead_feature(org, fig, page_height=DEFAULT_HEIGHT):
fig_vert_pos = (fig.top + fig.bottom) / 2.0
fig_horz_pos = (fig.left + fig.right) / 2.0
org_vert_pos = (org.top[0] + org.bottom[0]) / 2.0
org_horz_pos = (org.left[0] + org.right[0]) / 2.0
org_vert_pos += page_height * (org.page[0] - fig.page)
if org_vert_pos < fig_vert_pos:
yield "ORG_AHEAD_VERT_PDF"
if org_horz_pos < fig_horz_pos:
yield "ORG_AHEAD_HORZ_PDF"
def fig_contains_org(organic, figure, scores=[75]):
fig_text = figure.description
if figure.text:
text = ' '.join(figure.text.strip().replace('\n', ' ').split())
fig_text += ' ' + text
for score in scores:
if fuzz.partial_ratio(organic.text, fig_text) >= score:
yield "FIG_HAS_ORG_{}_SCORE".format(score)
def org_contains_fig_name(organic, figure, scores=[75]):
fig_text = figure.name
organic_text = organic.sentence.text
for score in scores:
if fuzz.partial_ratio(organic_text, fig_text) >= score:
yield "ORG_HAS_FIG_{}_SCORE".format(score)
if organic.text.find(fig_text) != -1:
yield "ORG_FIG_EXACT_MATCH"
def fig_text_matches_org_text(organic, figure, scores=[75]):
fig_text = figure.description
organic_text = organic.sentence.text
for score in scores:
if fuzz.token_set_ratio(organic_text, fig_text) >= score:
yield "ORG_FIG_TEXT_{}_SCORE".format(score)
def both_contain_keywords(organic, figure, keywords):
fig_text = figure.description
organic_text = organic.sentence.text
img_contains = False
for word in keywords:
if fuzz.token_set_ratio(word, fig_text) > 90:
img_contains = True
break
if img_contains:
for word in keywords:
if fuzz.token_set_ratio(word, organic_text) > 90:
return True
return False
def search_fig_first_apprearance(organic, figure):
doc = organic.sentence.document
for i in range(len(doc.phrases)):
text = doc.phrases[i].text
if i < len(doc.phrases) - 1:
text += doc.phrases[i+1].text
text = text.strip().replace(' ', '')
if len(text) < 4:
continue
fig_name = figure.name.strip().replace(' ','')
if text.find(fig_name) != -1:
dist = i - organic.sentence.phrase_num
if fuzz.partial_ratio(organic.text.strip().replace(' ',''), text) > 85:
yield "ORG_IN_FIG_FIRST_MENTION"
pg_dist = sum(doc.phrases[i].page)/len(doc.phrases[i].page) - \
sum(organic.sentence.page)/len(organic.sentence.page)
if pg_dist < -2:
yield "ORG_MENTION_FAR_AHEAD"
elif pg_dist < 0:
yield "ORG_MENTION_NEAR_AHEAD"
elif pg_dist == 0:
yield "ORG_MENTION_SAME_PAGE"
elif pg_dist < 3:
yield "ORG_MENTION_NEAR_BEHIND"
else:
yield "ORG_MENTION_FAR_BEHIND"
            if dist < -300:
                yield "FIG_FAR_AHEAD"
            elif dist < -100:
                yield "FIG_NEAR_AHEAD"
            elif dist < 0:
                yield "FIG_CLOSE_AHEAD"
            elif dist == 0:
                yield "FIG_EXACT_MATCH"
            elif dist < 100:
                yield "FIG_CLOSE_AFTER"
            elif dist <= 300:
                yield "FIG_NEAR_BEHIND"
            else:
                yield "FIG_FAR_BEHIND"
    # A generator discards any return value, so a bare return suffices here.
    return
def get_page(span):
"""Return the page number of the given span.
If a candidate is passed in, this returns the page of its first Span.
:param span: The Span to get the page number of.
:rtype: integer
"""
span = span if isinstance(span, TemporarySpan) else span[0]
return span.get_attrib_tokens('page')[0]
def is_horz_aligned(c):
"""Return True if all the components of c are horizontally aligned.
    Horizontal alignment means that the bounding box of each Span of c shares
a similar y-axis value in the visual rendering of the document.
:param c: The candidate to evaluate
:rtype: boolean
"""
return (all([
c[i].sentence.is_visual()
and bbox_horz_aligned(bbox_from_span(c[i]), bbox_from_span(c[0]))
for i in range(len(c))
]))
def is_vert_aligned(c):
"""Return true if all the components of c are vertically aligned.
    Vertical alignment means that the bounding box of each Span of c shares
a similar x-axis value in the visual rendering of the document.
:param c: The candidate to evaluate
:rtype: boolean
"""
return (all([
c[i].sentence.is_visual()
and bbox_vert_aligned(bbox_from_span(c[i]), bbox_from_span(c[0]))
for i in range(len(c))
]))
def is_vert_aligned_left(c):
"""Return true if all the components of c are vertically aligned based on their left border.
    Vertical alignment means that the bounding box of each Span of c shares
a similar x-axis value in the visual rendering of the document. In this function
the similarity of the x-axis value is based on the left border of their bounding boxes.
:param c: The candidate to evaluate
:rtype: boolean
"""
return (all([
c[i].sentence.is_visual()
and bbox_vert_aligned_left(bbox_from_span(c[i]), bbox_from_span(c[0]))
for i in range(len(c))
]))
def is_vert_aligned_right(c):
"""Return true if all the components of c are vertically aligned based on their right border.
    Vertical alignment means that the bounding box of each Span of c shares
a similar x-axis value in the visual rendering of the document. In this function
the similarity of the x-axis value is based on the right border of their bounding boxes.
:param c: The candidate to evaluate
:rtype: boolean
"""
return (all([
c[i].sentence.is_visual() and bbox_vert_aligned_right(
bbox_from_span(c[i]), bbox_from_span(c[0])) for i in range(len(c))
]))
def is_vert_aligned_center(c):
"""Return true if all the components of c are vertically aligned based on their left border.
Vertical alignment means that the bounding boxes of each Span of c shares
a similar x-axis value in the visual rendering of the document. In this function
the similarity of the x-axis value is based on the center of their bounding boxes.
:param c: The candidate to evaluate
:rtype: boolean
"""
return (all([
c[i].sentence.is_visual() and bbox_vert_aligned_center(
bbox_from_span(c[i]), bbox_from_span(c[0])) for i in range(len(c))
]))
def same_page(c):
"""Return true if all the components of c are on the same page of the document.
Page numbers are based on the PDF rendering of the document. If a PDF file is
provided, it is used. Otherwise, if only a HTML/XML document is provided, a
PDF is created and then used to determine the page number of a Span.
:param c: The candidate to evaluate
:rtype: boolean
"""
return (all([
c[i].sentence.is_visual()
and bbox_from_span(c[i]).page == bbox_from_span(c[0]).page
for i in range(len(c))
]))
def get_horz_ngrams(span,
attrib='words',
n_min=1,
n_max=1,
lower=True,
from_phrase=True):
"""Return all ngrams which are visually horizontally aligned with the Span.
Note that if a candidate is passed in, all of its Spans will be searched.
:param span: The Span to evaluate
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
:param from_phrase: If True, returns ngrams from any horizontally aligned Phrases,
rather than just horizontally aligned ngrams themselves.
:rtype: a _generator_ of ngrams
"""
spans = [span] if isinstance(span, TemporarySpan) else span.get_contexts()
for span in spans:
for ngram in _get_direction_ngrams('horz', span, attrib, n_min, n_max,
lower, from_phrase):
yield ngram
def get_vert_ngrams(span,
attrib='words',
n_min=1,
n_max=1,
lower=True,
from_phrase=True):
"""Return all ngrams which are visually vertivally aligned with the Span.
Note that if a candidate is passed in, all of its Spans will be searched.
:param span: The Span to evaluate
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
    :param from_phrase: If True, returns ngrams from any vertically aligned Phrases,
        rather than just vertically aligned ngrams themselves.
:rtype: a _generator_ of ngrams
"""
spans = [span] if isinstance(span, TemporarySpan) else span.get_contexts()
for span in spans:
for ngram in _get_direction_ngrams('vert', span, attrib, n_min, n_max,
lower, from_phrase):
yield ngram
def _get_direction_ngrams(direction, c, attrib, n_min, n_max, lower,
from_phrase):
# TODO: this currently looks only in current table;
# precompute over the whole document/page instead
bbox_direction_aligned = bbox_vert_aligned if direction == 'vert' else bbox_horz_aligned
ngrams_space = Ngrams(n_max=n_max, split_tokens=[])
f = (lambda w: w.lower()) if lower else (lambda w: w)
spans = [c] if isinstance(c, TemporarySpan) else c.get_contexts()
for span in spans:
if not span.sentence.is_tabular() or not span.sentence.is_visual():
continue
for phrase in span.sentence.table.phrases:
if (from_phrase):
if (bbox_direction_aligned(
bbox_from_phrase(phrase), bbox_from_span(span))
and phrase is not span.sentence):
for ngram in tokens_to_ngrams(
getattr(phrase, attrib),
n_min=n_min,
n_max=n_max,
lower=lower):
yield ngram
else:
for ts in ngrams_space.apply(phrase):
if (bbox_direction_aligned(
bbox_from_span(ts), bbox_from_span(span))
and not (phrase == span.sentence
and ts.get_span() in span.get_span())):
yield f(ts.get_span())
def get_vert_ngrams_left(c):
"""Not implemented."""
# TODO
return
def get_vert_ngrams_right(c):
"""Not implemented."""
# TODO
return
def get_vert_ngrams_center(c):
"""Not implemented."""
# TODO
return
def get_visual_header_ngrams(c, axis=None):
"""Not implemented."""
# TODO
return
def get_visual_distance(c, axis=None):
"""Not implemented."""
# TODO
return
def get_page_vert_percentile(span,
page_width=DEFAULT_WIDTH,
page_height=DEFAULT_HEIGHT):
"""Return which percentile from the TOP in the page Span candidate is located in.
Percentile is calculated where the top of the page is 0.0, and the bottom of
the page is 1.0. For example, a Span in at the top 1/4 of the page will have
a percentil of 0.25.
Page width and height are based on pt values:
Letter 612x792
Tabloid 792x1224
Ledger 1224x792
Legal 612x1008
Statement 396x612
Executive 540x720
A0 2384x3371
A1 1685x2384
A2 1190x1684
A3 842x1190
A4 595x842
A4Small 595x842
A5 420x595
B4 729x1032
B5 516x729
Folio 612x936
Quarto 610x780
10x14 720x1008
and should match the source documents. Letter size is used by default.
    Note that if a candidate is passed in, only the vertical percentile of its
    first Span is returned.
:param span: The Span to evaluate
    :param page_width: The width of the page. Defaults to Letter paper width.
    :param page_height: The height of the page. Defaults to Letter paper height.
:rtype: float in [0.0, 1.0]
"""
span = span if isinstance(span, TemporarySpan) else span[0]
return bbox_from_span(span).top / page_height
def get_page_horz_percentile(span,
page_width=DEFAULT_WIDTH,
page_height=DEFAULT_HEIGHT):
"""Return which percentile from the LEFT in the page the Span is located in.
Percentile is calculated where the left of the page is 0.0, and the right of
the page is 1.0.
Page width and height are based on pt values:
Letter 612x792
Tabloid 792x1224
Ledger 1224x792
Legal 612x1008
Statement 396x612
Executive 540x720
A0 2384x3371
A1 1685x2384
A2 1190x1684
A3 842x1190
A4 595x842
A4Small 595x842
A5 420x595
B4 729x1032
B5 516x729
Folio 612x936
Quarto 610x780
10x14 720x1008
and should match the source documents. Letter size is used by default.
    Note that if a candidate is passed in, only the horizontal percentile of its
    first Span is returned.
    :param span: The Span to evaluate
    :param page_width: The width of the page. Defaults to Letter paper width.
    :param page_height: The height of the page. Defaults to Letter paper height.
:rtype: float in [0.0, 1.0]
"""
span = span if isinstance(span, TemporarySpan) else span[0]
    return bbox_from_span(span).left / page_width
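# Worked example of the percentile arithmetic above (illustrative numbers,
# Letter-sized 612x792 pt pages): a bbox top of 198 pt sits a quarter of the
# way down the page, and a left edge of 306 pt is exactly centered.
def _percentile_example():
    assert abs(198 / 792.0 - 0.25) < 1e-9
    assert abs(306 / 612.0 - 0.5) < 1e-9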
def _assign_alignment_features(phrases_by_key, align_type):
for key, phrases in phrases_by_key.items():
if len(phrases) == 1:
continue
context_lemmas = set()
for p in phrases:
p._aligned_lemmas.update(context_lemmas)
# update lemma context for upcoming phrases in the group
if len(p.lemmas) < 7:
new_lemmas = [
lemma.lower() for lemma in p.lemmas if lemma.isalpha()
]
# if new_lemmas: print '++Lemmas for\t', p, context_lemmas
context_lemmas.update(new_lemmas)
context_lemmas.update(
align_type + lemma for lemma in new_lemmas)
def _preprocess_visual_features(doc):
if hasattr(doc, '_visual_features'):
return
# cache flag
doc._visual_features = True
phrase_by_page = defaultdict(list)
for phrase in doc.phrases:
phrase_by_page[phrase.page[0]].append(phrase)
phrase._aligned_lemmas = set()
for page, phrases in phrase_by_page.items():
# process per page alignments
yc_aligned = defaultdict(list)
x0_aligned = defaultdict(list)
xc_aligned = defaultdict(list)
x1_aligned = defaultdict(list)
for phrase in phrases:
phrase.bbox = bbox_from_phrase(phrase)
phrase.yc = (phrase.bbox.top + phrase.bbox.bottom) / 2
phrase.x0 = phrase.bbox.left
phrase.x1 = phrase.bbox.right
phrase.xc = (phrase.x0 + phrase.x1) / 2
# index current phrase by different alignment keys
yc_aligned[phrase.yc].append(phrase)
x0_aligned[phrase.x0].append(phrase)
x1_aligned[phrase.x1].append(phrase)
xc_aligned[phrase.xc].append(phrase)
for l in yc_aligned.values():
l.sort(key=lambda p: p.xc)
for l in x0_aligned.values():
l.sort(key=lambda p: p.yc)
for l in x1_aligned.values():
l.sort(key=lambda p: p.yc)
for l in xc_aligned.values():
l.sort(key=lambda p: p.yc)
_assign_alignment_features(yc_aligned, 'Y_')
_assign_alignment_features(x0_aligned, 'LEFT_')
_assign_alignment_features(x1_aligned, 'RIGHT_')
_assign_alignment_features(xc_aligned, 'CENTER_')
def get_visual_aligned_lemmas(span):
"""Return a generator of the lemmas aligned visually with the Span.
Note that if a candidate is passed in, all of its Spans will be searched.
:param span: The Span to evaluate.
:rtype: a _generator_ of lemmas
"""
spans = [span] if isinstance(span, TemporarySpan) else span.get_contexts()
for span in spans:
phrase = span.sentence
doc = phrase.document
# cache features for the entire document
_preprocess_visual_features(doc)
for aligned_lemma in phrase._aligned_lemmas:
yield aligned_lemma
def get_aligned_lemmas(span):
"""Return a set of the lemmas aligned visually with the Span.
Note that if a candidate is passed in, all of its Spans will be searched.
:param span: The Span to evaluate.
:rtype: a set of lemmas
"""
return set(get_visual_aligned_lemmas(span))
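# Usage sketch (illustrative keyword): visually aligned lemmas can be
# queried like any other ngram set.
def _lf_example_aligned_lemma(c):
    return 1 if 'current' in get_aligned_lemmas(c[0]) else 0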
############################
# Structural feature helpers
############################
def get_tag(span):
"""Return the HTML tag of the Span.
If a candidate is passed in, only the tag of its first Span is returned.
These may be tags such as 'p', 'h2', 'table', 'div', etc.
:param span: The Span to evaluate
:rtype: string
"""
span = span if isinstance(span, TemporarySpan) else span[0]
return str(span.sentence.html_tag)
def get_attributes(span):
"""Return the HTML attributes of the Span.
If a candidate is passed in, only the tag of its first Span is returned.
    A sample output of this function on a Span in a paragraph tag is
[u'style=padding-top: 8pt;padding-left: 20pt;text-indent: 0pt;text-align: left;']
:param span: The Span to evaluate
:rtype: list of strings representing HTML attributes
"""
span = span if isinstance(span, TemporarySpan) else span[0]
return span.sentence.html_attrs
# TODO: Too slow
def _get_node(phrase):
return (etree.ElementTree(fromstring(phrase.document.text)).xpath(
phrase.xpath))[0]
def get_parent_tag(span):
"""Return the HTML tag of the Span's parent.
These may be tags such as 'p', 'h2', 'table', 'div', etc.
If a candidate is passed in, only the tag of its first Span is returned.
:param span: The Span to evaluate
:rtype: string
"""
span = span if isinstance(span, TemporarySpan) else span[0]
i = _get_node(span.sentence)
return str(i.getparent().tag) if i.getparent() is not None else None
def get_prev_sibling_tags(span):
"""Return the HTML tag of the Span's previous siblings.
Previous siblings are Spans which are at the same level in the HTML tree as
the given span, but are declared before the given span.
If a candidate is passed in, only the previous siblings of its first Span
are considered in the calculation.
:param span: The Span to evaluate
:rtype: list of strings
"""
span = span if isinstance(span, TemporarySpan) else span[0]
prev_sibling_tags = []
i = _get_node(span.sentence)
while i.getprevious() is not None:
prev_sibling_tags.insert(0, str(i.getprevious().tag))
i = i.getprevious()
return prev_sibling_tags
def get_next_sibling_tags(span):
"""Return the HTML tag of the Span's next siblings.
Next siblings are Spans which are at the same level in the HTML tree as
the given span, but are declared after the given span.
If a candidate is passed in, only the next siblings of its last Span
are considered in the calculation.
:param span: The Span to evaluate
:rtype: list of strings
"""
span = span if isinstance(span, TemporarySpan) else span[-1]
next_sibling_tags = []
i = _get_node(span.sentence)
while i.getnext() is not None:
next_sibling_tags.append(str(i.getnext().tag))
i = i.getnext()
return next_sibling_tags
def get_ancestor_class_names(span):
"""Return the HTML classes of the Span's ancestors.
If a candidate is passed in, only the ancestors of its first Span are returned.
:param span: The Span to evaluate
:rtype: list of strings
"""
span = span if isinstance(span, TemporarySpan) else span[0]
class_names = []
i = _get_node(span.sentence)
while i is not None:
class_names.insert(0, str(i.get('class')))
i = i.getparent()
return class_names
def get_ancestor_tag_names(span):
"""Return the HTML tag of the Span's ancestors.
For example, ['html', 'body', 'p'].
If a candidate is passed in, only the ancestors of its first Span are returned.
:param span: The Span to evaluate
:rtype: list of strings
"""
span = span if isinstance(span, TemporarySpan) else span[0]
tag_names = []
i = _get_node(span.sentence)
while i is not None:
tag_names.insert(0, str(i.tag))
i = i.getparent()
return tag_names
def get_ancestor_id_names(span):
"""Return the HTML id's of the Span's ancestors.
If a candidate is passed in, only the ancestors of its first Span are returned.
:param span: The Span to evaluate
:rtype: list of strings
"""
span = span if isinstance(span, TemporarySpan) else span[0]
id_names = []
i = _get_node(span.sentence)
while i is not None:
id_names.insert(0, str(i.get('id')))
i = i.getparent()
return id_names
def common_ancestor(c):
"""Return the common path to the root that is shared between a binary-Span Candidate.
In particular, this is the common path of HTML tags.
:param c: The binary-Span Candidate to evaluate
:rtype: list of strings
"""
ancestor1 = np.array(c[0].sentence.xpath.split('/'))
ancestor2 = np.array(c[1].sentence.xpath.split('/'))
min_len = min(ancestor1.size, ancestor2.size)
return list(
ancestor1[:np.argmin(ancestor1[:min_len] == ancestor2[:min_len])])
def lowest_common_ancestor_depth(c):
"""Return the minimum distance between a binary-Span Candidate to their lowest common ancestor.
For example, if the tree looked like this:
html
|----<div> span 1 </div>
|----table
| |----tr
| | |-----<th> span 2 </th>
    we return 1, the distance from span 1 to the lowest common ancestor (html). Smaller values indicate
that two Spans are close structurally, while larger values indicate that two
Spans are spread far apart structurally in the document.
:param c: The binary-Span Candidate to evaluate
:rtype: integer
"""
ancestor1 = np.array(c[0].sentence.xpath.split('/'))
ancestor2 = np.array(c[1].sentence.xpath.split('/'))
min_len = min(ancestor1.size, ancestor2.size)
return min_len - np.argmin(ancestor1[:min_len] == ancestor2[:min_len])
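# Sketch of the xpath arithmetic used above, on plain strings (hypothetical
# paths, no library objects needed): the shared prefix of the two xpaths is
# the lowest common ancestor, and the depth is how far the shorter path
# continues past it.
def _lca_depth_example():
    parts1 = '/html/body/div[1]/p'.split('/')
    parts2 = '/html/body/table/tr/th'.split('/')
    min_len = min(len(parts1), len(parts2))
    common = 0
    while common < min_len and parts1[common] == parts2[common]:
        common += 1
    return min_len - common  # 2: the <p> sits two levels below <body>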
def find_image(span):
html_content = span.document.text
soup = BeautifulSoup(html_content, "html.parser")
img_list = soup.find_all('div', class_ = 'image_table')
for i in img_list:
if i.img.get('src') == span.url:
return i
def cut_string(prev_content, name):
    """Split prev_content after each occurrence of name.
    Returns the list of segments (each ending with name, plus any trailing
    remainder) and the number of occurrences found.
    """
    subs_list = []
    name = name.strip()
    count = 0
    while True:
        start = prev_content.find(name)
        if start != -1:
            end = start + len(name)
            subs_list.append(prev_content[:end])
            prev_content = prev_content[end:]
            count += 1
        else:
            if prev_content != '':
                subs_list.append(prev_content)
            break
    return subs_list, count
def get_near_string(prev_content, name):
    """Return up to 100 characters before the match and the match itself plus
    up to 100 characters after it, or (None, None) on empty input / no match.
    """
    if prev_content == '' or name == '':
        return None, None
    name = name.strip()
    start = prev_content.find(name)
    if start != -1:
        pre = prev_content[max(0, start - 100):start]
        if len(prev_content) <= start + len(name) + 100:
            post = prev_content[start:]
        else:
            post = prev_content[start:start + len(name) + 100]
        return pre, post
    return None, None
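# Usage sketch (hypothetical text): pull up to 100 characters of context on
# either side of a figure name inside raw document text.
def _near_string_example():
    text = 'as shown in Figure 3, the yield increases with temperature'
    pre, post = get_near_string(text, 'Figure 3')
    # pre == 'as shown in '; post starts with 'Figure 3, the yield ...'
    return pre, post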
# get image_table xpath
def lxml_find_image_xpath(figure, root, tree):
imgs = root.findall(".//div[@class='image_table']")
# imgs = root.findall(".//div[@class='image_table']//img[@src]")
for i in imgs:
isrc = i.find(".//img[@src]").attrib.get('src')
if figure.url == isrc:
return tree.getpath(i)
def get_text_img_horizontal_distance(organic, figure):
root = fromstring(figure.document.text) # lxml.html.fromstring()
tree = etree.ElementTree(root)
# ancestor1 = np.array(organic.sentence.xpath.split('/'))
img_path = lxml_find_image_xpath(figure, root, tree)
org_path = organic.sentence.xpath
# common_path = os.path.commonprefix([organic_path, img_path])
ancestor1 = org_path.split('/')
ancestor2 = img_path.split('/')
# min_len = min(ancestor1.size, ancestor2.size)
# l = list(
# ancestor1[:min_len - np.argmin(ancestor1[:min_len] == ancestor2[:min_len])])
min_len = min(len(ancestor1), len(ancestor2))
common_path = ''
for i, e in enumerate(ancestor1[:min_len]):
if ancestor1[i] == ancestor2[i]:
common_path += e + '/'
else:
            break
if common_path == img_path:
return 0, 1
if common_path == org_path:
return 0, -1
    try:
        common_parent = root.xpath(common_path[:-1])[0]
    except Exception:
        print(common_path)
        return None, None
img_path_sub = common_path + img_path[len(common_path):].split('/')[0]
org_path_sub = common_path + org_path[len(common_path):].split('/')[0]
distance = 0
found = 0
direction = 0
for e in common_parent:
e_path = tree.getpath(e)
# e_img_prefix = os.path.commonprefix([e_path, img_path])
# e_org_prefix = os.path.commonprefix([e_path, organic_path])
if found != 0:
distance += 1
if e_path == img_path_sub or e_path == org_path_sub:
if found == 0:
# image above organic
if e_path == img_path_sub:
direction = 1
else:
direction = -1
found += 1
if found == 2:
return distance, direction
return None, None
def get_text_img_dfs_distance(organic, figure):
root = fromstring(figure.document.text) # lxml.html.fromstring()
tree = etree.ElementTree(root)
# ancestor1 = np.array(organic.sentence.xpath.split('/'))
img_path = lxml_find_image_xpath(figure, root, tree)
org_path = organic.sentence.xpath
# common_path = os.path.commonprefix([org_path, img_path])
ancestor1 = org_path.split('/')
ancestor2 = img_path.split('/')
# min_len = min(ancestor1.size, ancestor2.size)
# l = list(
# ancestor1[:min_len - np.argmin(ancestor1[:min_len] == ancestor2[:min_len])])
min_len = min(len(ancestor1), len(ancestor2))
common_path = ''
for i, e in enumerate(ancestor1[:min_len]):
if ancestor1[i] == ancestor2[i]:
common_path += e + '/'
else:
            break
    if common_path == img_path or common_path == org_path:
        # one node contains the other, so the DFS distance is zero
        return 0
    try:
        common_parent = root.xpath(common_path[:-1])[0]
    except Exception:
        print(common_path)
        return None
distance = 0
found = 0
for e in common_parent.iter():
e_path = tree.getpath(e)
# e_img_prefix = os.path.commonprefix([e_path, img_path])
# e_org_prefix = os.path.commonprefix([e_path, organic_path])
if found != 0:
distance += 1
if e_path == img_path or e_path == org_path:
found += 1
if found == 2:
return distance
return None
def text_fig_common_ancestor(c):
organic = c[0]
figure = c[1]
root = fromstring(figure.document.text) # lxml.html.fromstring()
tree = etree.ElementTree(root)
ancestor1 = np.array(organic.sentence.xpath.split('/'))
img_path = lxml_find_image_xpath(figure, root, tree)
ancestor2 = np.array(img_path.split('/'))
min_len = min(ancestor1.size, ancestor2.size)
return list(
ancestor1[:np.argmin(ancestor1[:min_len] == ancestor2[:min_len])])
def text_fig_lowest_common_ancestor_depth(c):
organic = c[0]
figure = c[1]
root = fromstring(figure.document.text) # lxml.html.fromstring()
tree = etree.ElementTree(root)
ancestor1 = np.array(organic.sentence.xpath.split('/'))
img_path = lxml_find_image_xpath(figure, root, tree)
ancestor2 = np.array(img_path.split('/'))
min_len = min(ancestor1.size, ancestor2.size)
return min_len - np.argmin(ancestor1[:min_len] == ancestor2[:min_len])
|
python
|
# coding=utf-8
#
# pylint: disable = wildcard-import, unused-wildcard-import, unused-import
# pylint: disable = missing-docstring, invalid-name, wrong-import-order
# pylint: disable = no-member, attribute-defined-outside-init
"""
Copyright (c) 2019, Alexander Magola. All rights reserved.
license: BSD 3-Clause License, see LICENSE for more details.
"""
import sys
import os
from copy import deepcopy
import pytest
import tests.common as cmn
from zm.constants import APPNAME, CAP_APPNAME, CWD
from zm import cli
joinpath = os.path.join
class TestSuite(object):
@pytest.fixture(autouse = True)
def setup(self):
self.defaults = { 'buildtype': 'somedebug' }
self.parser = cli.CmdLineParser('test', self.defaults)
def _parseHelpArgs(self, args, capsys):
# CLI prints help and does exit
with pytest.raises(SystemExit) as cm:
self.parser.parse(args)
captured = capsys.readouterr()
return cm.value.code, captured.out, captured.err
def _testMainHelpMsg(self, args, capsys):
ecode, out, err = self._parseHelpArgs(args, capsys)
assert not err
assert ecode == 0
assert CAP_APPNAME in out
assert 'based on the Waf build system' in out
assert self.parser.command is not None
assert self.parser.command.name == 'help'
assert self.parser.command.args == {'topic': 'overview'}
assert self.parser.wafCmdLine == []
def _assertAllsForCmd(self, cmdname, checks, baseExpectedArgs):
expectedArgs = None
for check in checks:
expectedArgs = deepcopy(baseExpectedArgs)
expectedArgs.update(check['expectedArgsUpdate'])
def assertAll(cmd, parsercmd, wafcmdline):
assert cmd is not None
assert parsercmd is not None
assert parsercmd == cmd
assert cmd.name == cmdname
assert cmd.args == expectedArgs
# pylint: disable = cell-var-from-loop
if 'wafArgs' in check:
assert sorted(check['wafArgs']) == sorted(wafcmdline)
# parser with explicit args
cmd = self.parser.parse(check['args'])
assertAll(cmd, self.parser.command, self.parser.wafCmdLine)
# parser with args from sys.argv
            oldargv = sys.argv
            sys.argv = [APPNAME] + check['args']
            try:
                cmd = self.parser.parse()
            finally:
                # restore sys.argv even if parsing or an assertion fails
                sys.argv = oldargv
assertAll(cmd, self.parser.command, self.parser.wafCmdLine)
def testEmpty(self, capsys):
self._testMainHelpMsg([], capsys)
def testHelp(self, capsys):
self._testMainHelpMsg(['help'], capsys)
def testHelpWrongTopic(self, capsys):
args = ['help', 'qwerty']
ecode, out, err = self._parseHelpArgs(args, capsys)
assert not out
assert 'Unknown command/topic' in err
assert ecode != 0
def testHelpForCmds(self, capsys):
for cmd in cli.config.commands:
args = ['help', cmd.name]
ecode, out, err = self._parseHelpArgs(args, capsys)
assert ecode == 0
assert not err
if cmd.name == 'help':
assert 'show help' in out
else:
assert cmd.description.capitalize() in out
def testCmdBuild(self):
baseExpectedArgs = {
'buildtype' : self.defaults['buildtype'],
'jobs' : None,
'configure': False,
'color': 'auto',
'clean': False,
'progress': False,
'cleanAll': False,
'distclean': False,
'tasks': [],
'verbose': 0,
'verboseConfigure' : None,
'verboseBuild' : None,
'withTests': 'no',
'runTests': 'none',
'bindir' : None,
'libdir' : None,
'prefix' : cli.DEFAULT_PREFIX,
'buildroot' : None,
'forceExternalDeps' : False,
'cacheCfgActionResults' : False,
}
CMDNAME = 'build'
CMNOPTS = ['--color=auto', '--prefix=' + cli.DEFAULT_PREFIX]
checks = [
dict(
args = [CMDNAME],
expectedArgsUpdate = {},
wafArgs = [CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '-b', 'release'],
expectedArgsUpdate = {'buildtype': 'release'},
wafArgs = [CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--jobs', '22'],
expectedArgsUpdate = {'jobs': 22},
wafArgs = [CMDNAME, '--jobs=22'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--verbose'],
expectedArgsUpdate = {'verbose': 1},
wafArgs = [CMDNAME, '-v'] + CMNOPTS,
),
dict(
args = [CMDNAME, '-vvv'],
expectedArgsUpdate = {'verbose': 3},
wafArgs = [CMDNAME, '-vvv'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--configure'],
expectedArgsUpdate = {'configure': True},
wafArgs = ['configure', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--clean'],
expectedArgsUpdate = {'clean': True},
wafArgs = ['clean', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--clean-all'],
expectedArgsUpdate = {'cleanAll': True},
wafArgs = ['cleanall', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--distclean'],
expectedArgsUpdate = {'distclean': True},
wafArgs = ['distclean', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--with-tests', 'yes'],
expectedArgsUpdate = {'withTests': 'yes'},
wafArgs = [CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--run-tests', 'all'],
expectedArgsUpdate = {'runTests': 'all'},
wafArgs = [CMDNAME, 'test'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--run-tests', 'on-changes'],
expectedArgsUpdate = {'runTests': 'on-changes'},
wafArgs = [CMDNAME, 'test'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--progress'],
expectedArgsUpdate = {'progress': True},
wafArgs = [CMDNAME, '--progress'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--color', 'no'],
expectedArgsUpdate = {'color': 'no'},
wafArgs = [CMDNAME, '--color=no'] + CMNOPTS[1:],
),
dict(
args = [CMDNAME, 'sometask'],
expectedArgsUpdate = {'tasks': ['sometask']},
wafArgs = [CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, 'sometask', 'anothertask'],
expectedArgsUpdate = {'tasks': ['sometask', 'anothertask']},
wafArgs = [CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--buildroot', 'somedir'],
expectedArgsUpdate = {'buildroot' : 'somedir'},
wafArgs = [CMDNAME] + CMNOPTS,
),
]
self._assertAllsForCmd(CMDNAME, checks, baseExpectedArgs)
def testCmdTest(self):
baseExpectedArgs = {
'buildtype' : self.defaults['buildtype'],
'jobs' : None,
'configure': False,
'color': 'auto',
'clean': False,
'progress': False,
'cleanAll': False,
'distclean': False,
'tasks': [],
'verbose': 0,
'verboseConfigure' : None,
'verboseBuild' : None,
'withTests': 'yes',
'runTests': 'all',
'buildroot' : None,
'forceExternalDeps' : False,
'cacheCfgActionResults' : False,
}
CMDNAME = 'test'
CMNOPTS = ['--color=auto',]
checks = [
dict(
args = [CMDNAME],
expectedArgsUpdate = {},
wafArgs = ['build', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '-b', 'release'],
expectedArgsUpdate = {'buildtype': 'release'},
wafArgs = ['build', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--jobs', '22'],
expectedArgsUpdate = {'jobs': 22},
wafArgs = ['build', CMDNAME, '--jobs=22'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--verbose'],
expectedArgsUpdate = {'verbose': 1},
wafArgs = ['build', CMDNAME, '-v'] + CMNOPTS,
),
dict(
args = [CMDNAME, '-vvv'],
expectedArgsUpdate = {'verbose': 3},
wafArgs = ['build', CMDNAME, '-vvv'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--configure'],
expectedArgsUpdate = {'configure': True},
wafArgs = ['configure', 'build', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--clean'],
expectedArgsUpdate = {'clean': True},
wafArgs = ['clean', 'build', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--clean-all'],
expectedArgsUpdate = {'cleanAll': True},
wafArgs = ['cleanall', 'build', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--distclean'],
expectedArgsUpdate = {'distclean': True},
wafArgs = ['distclean', 'build', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--with-tests', 'no'],
expectedArgsUpdate = {'withTests': 'no'},
wafArgs = ['build', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--run-tests', 'none'],
expectedArgsUpdate = {'runTests': 'none'},
wafArgs = ['build', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--run-tests', 'on-changes'],
expectedArgsUpdate = {'runTests': 'on-changes'},
wafArgs = ['build', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--progress'],
expectedArgsUpdate = {'progress': True},
wafArgs = ['build', CMDNAME, '--progress'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--color', 'no'],
expectedArgsUpdate = {'color': 'no'},
wafArgs = ['build', CMDNAME, '--color=no'],
),
dict(
args = [CMDNAME, 'sometask'],
expectedArgsUpdate = {'tasks': ['sometask']},
wafArgs = ['build', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, 'sometask', 'anothertask'],
expectedArgsUpdate = {'tasks': ['sometask', 'anothertask']},
wafArgs = ['build', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--buildroot', os.getcwd()],
expectedArgsUpdate = {'buildroot' : os.getcwd()},
wafArgs = ['build', CMDNAME] + CMNOPTS,
),
]
self._assertAllsForCmd(CMDNAME, checks, baseExpectedArgs)
def testCmdConfigure(self):
baseExpectedArgs = {
'buildtype' : self.defaults['buildtype'],
'color': 'auto',
'cleanAll': False,
'distclean': False,
'verbose': 0,
'verboseConfigure' : None,
'withTests': 'no',
'bindir' : None,
'libdir' : None,
'prefix' : cli.DEFAULT_PREFIX,
'buildroot' : None,
'forceExternalDeps' : False,
'cacheCfgActionResults' : False,
'force' : False,
}
CMDNAME = 'configure'
CMNOPTS = ['--color=auto', '--prefix=' + cli.DEFAULT_PREFIX]
checks = [
dict(
args = [CMDNAME],
expectedArgsUpdate = {},
wafArgs = [CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '-b', 'release'],
expectedArgsUpdate = {'buildtype': 'release'},
wafArgs = [CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--clean-all'],
expectedArgsUpdate = {'cleanAll': True},
wafArgs = ['cleanall', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--distclean'],
expectedArgsUpdate = {'distclean': True},
wafArgs = ['distclean', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--verbose'],
expectedArgsUpdate = {'verbose': 1},
wafArgs = [CMDNAME, '-v'] + CMNOPTS,
),
dict(
args = [CMDNAME, '-vvv'],
expectedArgsUpdate = {'verbose': 3},
wafArgs = [CMDNAME, '-vvv'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--color', 'no'],
expectedArgsUpdate = {'color': 'no'},
wafArgs = [CMDNAME, '--color=no'] + CMNOPTS[1:],
),
dict(
args = [CMDNAME, '--buildroot', os.getcwd()],
expectedArgsUpdate = {'buildroot' : os.getcwd()},
wafArgs = [CMDNAME] + CMNOPTS,
),
]
self._assertAllsForCmd(CMDNAME, checks, baseExpectedArgs)
def testCmdClean(self):
baseExpectedArgs = {
'buildtype' : self.defaults['buildtype'],
'color': 'auto',
'verbose': 0,
'buildroot' : None,
'forceExternalDeps' : False,
}
CMDNAME = 'clean'
CMNOPTS = ['--color=auto',]
checks = [
dict(
args = [CMDNAME],
expectedArgsUpdate = {},
wafArgs = [CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '-b', 'release'],
expectedArgsUpdate = {'buildtype': 'release'},
wafArgs = [CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--verbose'],
expectedArgsUpdate = {'verbose': 1},
wafArgs = [CMDNAME, '-v'] + CMNOPTS,
),
dict(
args = [CMDNAME, '-vvv'],
expectedArgsUpdate = {'verbose': 3},
wafArgs = [CMDNAME, '-vvv'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--color', 'no'],
expectedArgsUpdate = {'color': 'no'},
wafArgs = [CMDNAME, '--color=no'],
),
dict(
args = [CMDNAME, '--buildroot', os.getcwd()],
expectedArgsUpdate = {'buildroot' : os.getcwd()},
wafArgs = [CMDNAME] + CMNOPTS,
),
]
self._assertAllsForCmd(CMDNAME, checks, baseExpectedArgs)
def testCmdDistclean(self):
baseExpectedArgs = {
'color': 'auto',
'verbose': 0,
'buildroot' : None,
}
CMDNAME = 'distclean'
CMNOPTS = ['--color=auto',]
checks = [
dict(
args = [CMDNAME],
expectedArgsUpdate = {},
wafArgs = [CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--verbose'],
expectedArgsUpdate = {'verbose': 1},
wafArgs = [CMDNAME, '-v'] + CMNOPTS,
),
dict(
args = [CMDNAME, '-vvv'],
expectedArgsUpdate = {'verbose': 3},
wafArgs = [CMDNAME, '-vvv'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--color', 'no'],
expectedArgsUpdate = {'color': 'no'},
wafArgs = [CMDNAME, '--color=no'],
),
dict(
args = [CMDNAME, '--buildroot', os.getcwd()],
expectedArgsUpdate = {'buildroot' : os.getcwd()},
wafArgs = [CMDNAME] + CMNOPTS,
),
]
self._assertAllsForCmd(CMDNAME, checks, baseExpectedArgs)
def testCmdZipApp(self):
baseExpectedArgs = {
'destdir' : '.',
'color': 'auto',
'verbose': 0,
}
CMDNAME = 'zipapp'
checks = [
dict(
args = [CMDNAME],
expectedArgsUpdate = { 'destdir' : CWD },
),
dict(
args = [CMDNAME, '--destdir', 'somedir'],
expectedArgsUpdate = {'destdir' : joinpath(CWD, 'somedir') },
),
dict(
args = [CMDNAME, '--verbose'],
expectedArgsUpdate = {'verbose': 1, 'destdir' : CWD},
),
dict(
args = [CMDNAME, '--color', 'no'],
expectedArgsUpdate = {'color': 'no', 'destdir' : CWD},
),
]
self._assertAllsForCmd(CMDNAME, checks, baseExpectedArgs)
def checkCmdInstall(self, cmd):
baseExpectedArgs = {
'buildtype' : self.defaults['buildtype'],
'jobs' : None,
'color': 'auto',
'configure': False,
'clean': False,
'progress': False,
'cleanAll': False,
'distclean': False,
'verbose': 0,
'verboseConfigure' : None,
'verboseBuild' : None,
'destdir' : '',
'bindir' : None,
'libdir' : None,
'prefix' : cli.DEFAULT_PREFIX,
'buildroot' : None,
'forceExternalDeps' : False,
'cacheCfgActionResults' : False,
}
if cmd == 'uninstall':
for name in ('configure', 'jobs', 'clean', 'cleanAll', 'distclean'):
baseExpectedArgs.pop(name)
CMDNAME = cmd
CMNOPTS = ['--color=auto', '--prefix=' + cli.DEFAULT_PREFIX]
checks = [
dict(
args = [CMDNAME],
expectedArgsUpdate = {},
wafArgs = [CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--destdir', 'somedir'],
expectedArgsUpdate = {'destdir' : joinpath(CWD, 'somedir') },
wafArgs = [CMDNAME, '--destdir=' + joinpath(CWD, 'somedir')] + CMNOPTS,
),
dict(
args = [CMDNAME, '--bindir', 'somedir'],
expectedArgsUpdate = {'bindir' : 'somedir' },
wafArgs = [CMDNAME, '--bindir=' + 'somedir'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--libdir', 'somedir'],
expectedArgsUpdate = {'libdir' : 'somedir' },
wafArgs = [CMDNAME, '--libdir=' + 'somedir'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--verbose'],
expectedArgsUpdate = {'verbose': 1},
wafArgs = [CMDNAME, '-v'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--color', 'no'],
expectedArgsUpdate = {'color': 'no'},
wafArgs = [CMDNAME, '--color=no'] + CMNOPTS[1:],
),
dict(
args = [CMDNAME, '--buildroot', os.getcwd()],
expectedArgsUpdate = {'buildroot' : os.getcwd()},
wafArgs = [CMDNAME] + CMNOPTS,
),
]
self._assertAllsForCmd(CMDNAME, checks, baseExpectedArgs)
def testCmdInstall(self):
self.checkCmdInstall('install')
def testCmdUninstall(self):
self.checkCmdInstall('uninstall')
def testCmdVersion(self):
baseExpectedArgs = {
'verbose': 0,
}
CMDNAME = 'version'
checks = [
dict(
args = [CMDNAME],
expectedArgsUpdate = {},
wafArgs = [CMDNAME],
),
dict(
args = [CMDNAME, '--verbose'],
expectedArgsUpdate = {'verbose': 1},
wafArgs = [CMDNAME, '-v'],
),
]
self._assertAllsForCmd(CMDNAME, checks, baseExpectedArgs)
def testCmdSysInfo(self):
baseExpectedArgs = {
'verbose': 0,
}
CMDNAME = 'sysinfo'
checks = [
dict(
args = [CMDNAME],
expectedArgsUpdate = {},
wafArgs = [CMDNAME],
),
dict(
args = [CMDNAME, '--verbose'],
expectedArgsUpdate = {'verbose': 1},
wafArgs = [CMDNAME, '-v'],
),
]
self._assertAllsForCmd(CMDNAME, checks, baseExpectedArgs)
|
python
|
from Tkinter import *
root = Tk()
var = StringVar()
var.set("Site View")
names = ('C-cex','Bittrex')
def ffet(param):
var.set(param)
print(param)
# Appends names to names list and updates OptionMenu
#def createName(n):
# names.append(n)
# personName.delete(0, "end")
# menu = nameMenu['menu']
# menu.delete(0, "end")
# for name in names:
# menu.add_command(label=name, command=lambda: ffet(name))
# what to run when a name is selected
#def selection(name):
# var.set(name)
# print "Running" # For testing purposes to see when/if selection runs
# print name
# Option Menu for names
nameMenu = OptionMenu(root, var, ())
nameMenu.grid(row=0, column=0, columnspan=2)
nameMenu.config(width=20)
menu = nameMenu.children['menu']
menu.delete(0, "end")
#mlabel = nameMenu.children['label']
#for name in names:
# menu.add_command(label=name, command=lambda v=name: nameMenu.choice.set(v))
for name in names:
    menu.add_command(label=name, command=lambda v=name: ffet(v))
# Entry for user to submit name
#Label(root, text="Name").grid(row=1, column=0)
#personName = Entry(root, width=17)
#personName.grid(row=1, column=1)
# Add person Button
#Button(root, text="Add Person", width= 20, command=lambda: createName(personName.get())).grid(row=5, column=0, columnspan=2)
mainloop()
|
python
|
import random
import config
def shuffle_characters():
characters = list(config.BASE_STRING)
random.shuffle(characters)
rearranged_string = ''.join(characters)
return rearranged_string
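# Usage sketch: config.BASE_STRING is assumed to be a plain string defined
# in the local config module; the function returns a random permutation of
# its characters.
if __name__ == '__main__':
    print(shuffle_characters())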
|
python
|
from . import common, core3
|
python
|
#!/usr/bin/env python
#
# euclid graphics maths module
#
# Copyright (c) 2006 Alex Holkner
# [email protected]
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2.1 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
'''euclid graphics maths module
Documentation and tests are included in the file "euclid.txt", or online
at http://code.google.com/p/pyeuclid
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
__revision__ = '$Revision$'
import math
import operator
import types
# Some magic here. If _use_slots is True, the classes will derive from
# object and will define a __slots__ class variable. If _use_slots is
# False, classes will be old-style and will not define __slots__.
#
# _use_slots = True: Memory efficient, probably faster in future versions
# of Python, "better".
# _use_slots = False: Ordinary classes, much faster than slots in current
# versions of Python (2.4 and 2.5).
_use_slots = True
# If True, allows components of Vector2 and Vector3 to be set via swizzling;
# e.g. v.xyz = (1, 2, 3). This is much, much slower than the more verbose
# v.x = 1; v.y = 2; v.z = 3, and slows down ordinary element setting as
# well. Recommended setting is False.
_enable_swizzle_set = False
# Requires class to derive from object.
if _enable_swizzle_set:
_use_slots = True
# Implement _use_slots magic.
class _EuclidMetaclass(type):
def __new__(cls, name, bases, dct):
if '__slots__' in dct:
dct['__getstate__'] = cls._create_getstate(dct['__slots__'])
dct['__setstate__'] = cls._create_setstate(dct['__slots__'])
if _use_slots:
return type.__new__(cls, name, bases + (object,), dct)
else:
if '__slots__' in dct:
del dct['__slots__']
return types.ClassType.__new__(types.ClassType, name, bases, dct)
@classmethod
def _create_getstate(cls, slots):
def __getstate__(self):
d = {}
for slot in slots:
d[slot] = getattr(self, slot)
return d
return __getstate__
@classmethod
def _create_setstate(cls, slots):
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
return __setstate__
__metaclass__ = _EuclidMetaclass
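# Minimal sketch (illustrative) of what the generated __getstate__ and
# __setstate__ buy us: slotted classes have no __dict__, so without them
# instances such as Vector2 below could not round-trip through pickle with
# the default protocol.
def _pickle_example():
    import pickle
    v = pickle.loads(pickle.dumps(Vector2(1, 2)))
    assert v == Vector2(1, 2)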
class Vector2:
__slots__ = ['x', 'y']
__hash__ = None
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __copy__(self):
return self.__class__(self.x, self.y)
copy = __copy__
def __repr__(self):
return 'Vector2(%.2f, %.2f)' % (self.x, self.y)
def __eq__(self, other):
if isinstance(other, Vector2):
return self.x == other.x and \
self.y == other.y
else:
assert hasattr(other, '__len__') and len(other) == 2
return self.x == other[0] and \
self.y == other[1]
def __ne__(self, other):
return not self.__eq__(other)
def __nonzero__(self):
return self.x != 0 or self.y != 0
def __len__(self):
return 2
def __getitem__(self, key):
return (self.x, self.y)[key]
def __setitem__(self, key, value):
l = [self.x, self.y]
l[key] = value
self.x, self.y = l
def __iter__(self):
return iter((self.x, self.y))
def __getattr__(self, name):
try:
return tuple([(self.x, self.y)['xy'.index(c)] \
for c in name])
except ValueError:
raise AttributeError, name
if _enable_swizzle_set:
# This has detrimental performance on ordinary setattr as well
# if enabled
def __setattr__(self, name, value):
if len(name) == 1:
object.__setattr__(self, name, value)
else:
try:
l = [self.x, self.y]
for c, v in map(None, name, value):
l['xy'.index(c)] = v
self.x, self.y = l
except ValueError:
raise AttributeError, name
def __add__(self, other):
if isinstance(other, Vector2):
# Vector + Vector -> Vector
# Vector + Point -> Point
# Point + Point -> Vector
if self.__class__ is other.__class__:
_class = Vector2
else:
_class = Point2
return _class(self.x + other.x,
self.y + other.y)
else:
assert hasattr(other, '__len__') and len(other) == 2
return Vector2(self.x + other[0],
self.y + other[1])
__radd__ = __add__
def __iadd__(self, other):
if isinstance(other, Vector2):
self.x += other.x
self.y += other.y
else:
self.x += other[0]
self.y += other[1]
return self
def __sub__(self, other):
if isinstance(other, Vector2):
# Vector - Vector -> Vector
# Vector - Point -> Point
# Point - Point -> Vector
if self.__class__ is other.__class__:
_class = Vector2
else:
_class = Point2
return _class(self.x - other.x,
self.y - other.y)
else:
assert hasattr(other, '__len__') and len(other) == 2
return Vector2(self.x - other[0],
self.y - other[1])
def __rsub__(self, other):
if isinstance(other, Vector2):
return Vector2(other.x - self.x,
other.y - self.y)
else:
assert hasattr(other, '__len__') and len(other) == 2
            return Vector2(other[0] - self.x,
                           other[1] - self.y)
def __mul__(self, other):
assert type(other) in (int, long, float)
return Vector2(self.x * other,
self.y * other)
__rmul__ = __mul__
def __imul__(self, other):
assert type(other) in (int, long, float)
self.x *= other
self.y *= other
return self
def __div__(self, other):
assert type(other) in (int, long, float)
return Vector2(operator.div(self.x, other),
operator.div(self.y, other))
def __rdiv__(self, other):
assert type(other) in (int, long, float)
return Vector2(operator.div(other, self.x),
operator.div(other, self.y))
def __floordiv__(self, other):
assert type(other) in (int, long, float)
return Vector2(operator.floordiv(self.x, other),
operator.floordiv(self.y, other))
def __rfloordiv__(self, other):
assert type(other) in (int, long, float)
return Vector2(operator.floordiv(other, self.x),
operator.floordiv(other, self.y))
def __truediv__(self, other):
assert type(other) in (int, long, float)
return Vector2(operator.truediv(self.x, other),
operator.truediv(self.y, other))
def __rtruediv__(self, other):
assert type(other) in (int, long, float)
return Vector2(operator.truediv(other, self.x),
operator.truediv(other, self.y))
def __neg__(self):
return Vector2(-self.x,
-self.y)
__pos__ = __copy__
def __abs__(self):
return math.sqrt(self.x ** 2 + \
self.y ** 2)
magnitude = __abs__
def magnitude_squared(self):
return self.x ** 2 + \
self.y ** 2
def normalize(self):
d = self.magnitude()
if d:
self.x /= d
self.y /= d
return self
def normalized(self):
d = self.magnitude()
if d:
return Vector2(self.x / d,
self.y / d)
return self.copy()
def dot(self, other):
assert isinstance(other, Vector2)
return self.x * other.x + \
self.y * other.y
def cross(self):
return Vector2(self.y, -self.x)
def reflect(self, normal):
# assume normal is normalized
assert isinstance(normal, Vector2)
d = 2 * (self.x * normal.x + self.y * normal.y)
return Vector2(self.x - d * normal.x,
self.y - d * normal.y)
def angle(self, other):
"""Return the angle to the vector other"""
return math.acos(self.dot(other) / (self.magnitude()*other.magnitude()))
def project(self, other):
"""Return one vector projected on the vector other"""
n = other.normalized()
return self.dot(n)*n
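# Worked example (illustrative): reflecting (1, -1) off a surface with unit
# normal (0, 1) flips the y component, and projecting it onto the x axis
# drops the y component entirely.
def _vector2_example():
    v = Vector2(1, -1)
    assert v.reflect(Vector2(0, 1)) == Vector2(1, 1)
    assert v.project(Vector2(1, 0)) == Vector2(1, 0)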
class Vector3:
__slots__ = ['x', 'y', 'z']
__hash__ = None
def __init__(self, x=0, y=0, z=0):
self.x = x
self.y = y
self.z = z
def __copy__(self):
return self.__class__(self.x, self.y, self.z)
copy = __copy__
def __repr__(self):
return 'Vector3(%.2f, %.2f, %.2f)' % (self.x,
self.y,
self.z)
def __eq__(self, other):
if isinstance(other, Vector3):
return self.x == other.x and \
self.y == other.y and \
self.z == other.z
else:
assert hasattr(other, '__len__') and len(other) == 3
return self.x == other[0] and \
self.y == other[1] and \
self.z == other[2]
def __ne__(self, other):
return not self.__eq__(other)
    def __bool__(self):
        return self.x != 0 or self.y != 0 or self.z != 0
    __nonzero__ = __bool__
def __len__(self):
return 3
def __getitem__(self, key):
return (self.x, self.y, self.z)[key]
def __setitem__(self, key, value):
l = [self.x, self.y, self.z]
l[key] = value
self.x, self.y, self.z = l
def __iter__(self):
return iter((self.x, self.y, self.z))
def __getattr__(self, name):
try:
return tuple([(self.x, self.y, self.z)['xyz'.index(c)] \
for c in name])
except ValueError:
            raise AttributeError(name)
if _enable_swizzle_set:
# This has detrimental performance on ordinary setattr as well
# if enabled
def __setattr__(self, name, value):
if len(name) == 1:
object.__setattr__(self, name, value)
else:
try:
l = [self.x, self.y, self.z]
                for c, v in zip(name, value):
                    l['xyz'.index(c)] = v
                self.x, self.y, self.z = l
            except ValueError:
                raise AttributeError(name)
def __add__(self, other):
if isinstance(other, Vector3):
# Vector + Vector -> Vector
# Vector + Point -> Point
# Point + Point -> Vector
if self.__class__ is other.__class__:
_class = Vector3
else:
_class = Point3
return _class(self.x + other.x,
self.y + other.y,
self.z + other.z)
else:
assert hasattr(other, '__len__') and len(other) == 3
return Vector3(self.x + other[0],
self.y + other[1],
self.z + other[2])
__radd__ = __add__
def __iadd__(self, other):
if isinstance(other, Vector3):
self.x += other.x
self.y += other.y
self.z += other.z
else:
self.x += other[0]
self.y += other[1]
self.z += other[2]
return self
def __sub__(self, other):
if isinstance(other, Vector3):
# Vector - Vector -> Vector
# Vector - Point -> Point
# Point - Point -> Vector
if self.__class__ is other.__class__:
_class = Vector3
else:
_class = Point3
            return _class(self.x - other.x,
                          self.y - other.y,
                          self.z - other.z)
else:
assert hasattr(other, '__len__') and len(other) == 3
return Vector3(self.x - other[0],
self.y - other[1],
self.z - other[2])
def __rsub__(self, other):
if isinstance(other, Vector3):
return Vector3(other.x - self.x,
other.y - self.y,
other.z - self.z)
else:
assert hasattr(other, '__len__') and len(other) == 3
            return Vector3(other[0] - self.x,
                           other[1] - self.y,
                           other[2] - self.z)
def __mul__(self, other):
if isinstance(other, Vector3):
# TODO component-wise mul/div in-place and on Vector2; docs.
if self.__class__ is Point3 or other.__class__ is Point3:
_class = Point3
else:
_class = Vector3
return _class(self.x * other.x,
self.y * other.y,
self.z * other.z)
else:
            assert type(other) in (int, float)
return Vector3(self.x * other,
self.y * other,
self.z * other)
__rmul__ = __mul__
def __imul__(self, other):
        assert type(other) in (int, float)
self.x *= other
self.y *= other
self.z *= other
return self
    def __div__(self, other):
        assert type(other) in (int, float)
        return Vector3(operator.truediv(self.x, other),
                       operator.truediv(self.y, other),
                       operator.truediv(self.z, other))
    def __rdiv__(self, other):
        assert type(other) in (int, float)
        return Vector3(operator.truediv(other, self.x),
                       operator.truediv(other, self.y),
                       operator.truediv(other, self.z))
def __floordiv__(self, other):
        assert type(other) in (int, float)
return Vector3(operator.floordiv(self.x, other),
operator.floordiv(self.y, other),
operator.floordiv(self.z, other))
def __rfloordiv__(self, other):
        assert type(other) in (int, float)
return Vector3(operator.floordiv(other, self.x),
operator.floordiv(other, self.y),
operator.floordiv(other, self.z))
def __truediv__(self, other):
        assert type(other) in (int, float)
return Vector3(operator.truediv(self.x, other),
operator.truediv(self.y, other),
operator.truediv(self.z, other))
def __rtruediv__(self, other):
        assert type(other) in (int, float)
return Vector3(operator.truediv(other, self.x),
operator.truediv(other, self.y),
operator.truediv(other, self.z))
def __neg__(self):
return Vector3(-self.x,
-self.y,
-self.z)
__pos__ = __copy__
def __abs__(self):
return math.sqrt(self.x ** 2 + \
self.y ** 2 + \
self.z ** 2)
magnitude = __abs__
def magnitude_squared(self):
return self.x ** 2 + \
self.y ** 2 + \
self.z ** 2
def normalize(self):
d = self.magnitude()
if d:
self.x /= d
self.y /= d
self.z /= d
return self
def normalized(self):
d = self.magnitude()
if d:
return Vector3(self.x / d,
self.y / d,
self.z / d)
return self.copy()
def dot(self, other):
assert isinstance(other, Vector3)
return self.x * other.x + \
self.y * other.y + \
self.z * other.z
def cross(self, other):
assert isinstance(other, Vector3)
return Vector3(self.y * other.z - self.z * other.y,
-self.x * other.z + self.z * other.x,
self.x * other.y - self.y * other.x)
def reflect(self, normal):
# assume normal is normalized
assert isinstance(normal, Vector3)
d = 2 * (self.x * normal.x + self.y * normal.y + self.z * normal.z)
return Vector3(self.x - d * normal.x,
self.y - d * normal.y,
self.z - d * normal.z)
def rotate_around(self, axis, theta):
"""Return the vector rotated around axis through angle theta. Right hand rule applies"""
# Adapted from equations published by Glenn Murray.
# http://inside.mines.edu/~gmurray/ArbitraryAxisRotation/ArbitraryAxisRotation.html
        x, y, z = self.x, self.y, self.z
u, v, w = axis.x, axis.y, axis.z
# Extracted common factors for simplicity and efficiency
r2 = u**2 + v**2 + w**2
r = math.sqrt(r2)
ct = math.cos(theta)
st = math.sin(theta) / r
dt = (u*x + v*y + w*z) * (1 - ct) / r2
return Vector3((u * dt + x * ct + (-w * y + v * z) * st),
(v * dt + y * ct + ( w * x - u * z) * st),
(w * dt + z * ct + (-v * x + u * y) * st))
def angle(self, other):
"""Return the angle to the vector other"""
return math.acos(self.dot(other) / (self.magnitude()*other.magnitude()))
def project(self, other):
"""Return one vector projected on the vector other"""
n = other.normalized()
return self.dot(n)*n
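# A minimal usage sketch for Vector3 (the values below are illustrative only):
#
#   v = Vector3(1.0, 0.0, 0.0)
#   axis = Vector3(0.0, 0.0, 1.0)
#   v.rotate_around(axis, math.pi / 2)   # ~Vector3(0.00, 1.00, 0.00)
#   v.cross(Vector3(0.0, 1.0, 0.0))      # Vector3(0.00, 0.00, 1.00)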
# a b c
# e f g
# i j k
class Matrix3:
__slots__ = list('abcefgijk')
def __init__(self):
self.identity()
def __copy__(self):
M = Matrix3()
M.a = self.a
M.b = self.b
M.c = self.c
M.e = self.e
M.f = self.f
M.g = self.g
M.i = self.i
M.j = self.j
M.k = self.k
return M
copy = __copy__
def __repr__(self):
return ('Matrix3([% 8.2f % 8.2f % 8.2f\n' \
' % 8.2f % 8.2f % 8.2f\n' \
' % 8.2f % 8.2f % 8.2f])') \
% (self.a, self.b, self.c,
self.e, self.f, self.g,
self.i, self.j, self.k)
def __getitem__(self, key):
return [self.a, self.e, self.i,
self.b, self.f, self.j,
self.c, self.g, self.k][key]
def __setitem__(self, key, value):
L = self[:]
L[key] = value
(self.a, self.e, self.i,
self.b, self.f, self.j,
self.c, self.g, self.k) = L
def __mul__(self, other):
if isinstance(other, Matrix3):
# Caching repeatedly accessed attributes in local variables
# apparently increases performance by 20%. Attrib: Will McGugan.
Aa = self.a
Ab = self.b
Ac = self.c
Ae = self.e
Af = self.f
Ag = self.g
Ai = self.i
Aj = self.j
Ak = self.k
Ba = other.a
Bb = other.b
Bc = other.c
Be = other.e
Bf = other.f
Bg = other.g
Bi = other.i
Bj = other.j
Bk = other.k
C = Matrix3()
C.a = Aa * Ba + Ab * Be + Ac * Bi
C.b = Aa * Bb + Ab * Bf + Ac * Bj
C.c = Aa * Bc + Ab * Bg + Ac * Bk
C.e = Ae * Ba + Af * Be + Ag * Bi
C.f = Ae * Bb + Af * Bf + Ag * Bj
C.g = Ae * Bc + Af * Bg + Ag * Bk
C.i = Ai * Ba + Aj * Be + Ak * Bi
C.j = Ai * Bb + Aj * Bf + Ak * Bj
C.k = Ai * Bc + Aj * Bg + Ak * Bk
return C
elif isinstance(other, Point2):
A = self
B = other
P = Point2(0, 0)
P.x = A.a * B.x + A.b * B.y + A.c
P.y = A.e * B.x + A.f * B.y + A.g
return P
elif isinstance(other, Vector2):
A = self
B = other
V = Vector2(0, 0)
V.x = A.a * B.x + A.b * B.y
V.y = A.e * B.x + A.f * B.y
return V
else:
other = other.copy()
other._apply_transform(self)
return other
def __imul__(self, other):
assert isinstance(other, Matrix3)
# Cache attributes in local vars (see Matrix3.__mul__).
Aa = self.a
Ab = self.b
Ac = self.c
Ae = self.e
Af = self.f
Ag = self.g
Ai = self.i
Aj = self.j
Ak = self.k
Ba = other.a
Bb = other.b
Bc = other.c
Be = other.e
Bf = other.f
Bg = other.g
Bi = other.i
Bj = other.j
Bk = other.k
self.a = Aa * Ba + Ab * Be + Ac * Bi
self.b = Aa * Bb + Ab * Bf + Ac * Bj
self.c = Aa * Bc + Ab * Bg + Ac * Bk
self.e = Ae * Ba + Af * Be + Ag * Bi
self.f = Ae * Bb + Af * Bf + Ag * Bj
self.g = Ae * Bc + Af * Bg + Ag * Bk
self.i = Ai * Ba + Aj * Be + Ak * Bi
self.j = Ai * Bb + Aj * Bf + Ak * Bj
self.k = Ai * Bc + Aj * Bg + Ak * Bk
return self
def identity(self):
self.a = self.f = self.k = 1.
self.b = self.c = self.e = self.g = self.i = self.j = 0
return self
def scale(self, x, y):
self *= Matrix3.new_scale(x, y)
return self
def translate(self, x, y):
self *= Matrix3.new_translate(x, y)
return self
def rotate(self, angle):
self *= Matrix3.new_rotate(angle)
return self
# Static constructors
def new_identity(cls):
self = cls()
return self
new_identity = classmethod(new_identity)
def new_scale(cls, x, y):
self = cls()
self.a = x
self.f = y
return self
new_scale = classmethod(new_scale)
def new_translate(cls, x, y):
self = cls()
self.c = x
self.g = y
return self
new_translate = classmethod(new_translate)
def new_rotate(cls, angle):
self = cls()
s = math.sin(angle)
c = math.cos(angle)
self.a = self.f = c
self.b = -s
self.e = s
return self
new_rotate = classmethod(new_rotate)
def determinant(self):
return (self.a*self.f*self.k
+ self.b*self.g*self.i
+ self.c*self.e*self.j
- self.a*self.g*self.j
- self.b*self.e*self.k
- self.c*self.f*self.i)
def inverse(self):
tmp = Matrix3()
d = self.determinant()
if abs(d) < 0.001:
# No inverse, return identity
return tmp
else:
d = 1.0 / d
tmp.a = d * (self.f*self.k - self.g*self.j)
tmp.b = d * (self.c*self.j - self.b*self.k)
tmp.c = d * (self.b*self.g - self.c*self.f)
tmp.e = d * (self.g*self.i - self.e*self.k)
tmp.f = d * (self.a*self.k - self.c*self.i)
tmp.g = d * (self.c*self.e - self.a*self.g)
tmp.i = d * (self.e*self.j - self.f*self.i)
tmp.j = d * (self.b*self.i - self.a*self.j)
tmp.k = d * (self.a*self.f - self.b*self.e)
return tmp
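# A minimal usage sketch for Matrix3 as a 2D affine transform (illustrative
# values only). With column vectors, T * R applies the rotation first, then
# the translation:
#
#   m = Matrix3.new_translate(1.0, 2.0) * Matrix3.new_rotate(math.pi)
#   p = m * Point2(1.0, 0.0)   # Point2(0.00, 2.00)
#   m.inverse() * p            # recovers ~Point2(1.00, 0.00)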
# a b c d
# e f g h
# i j k l
# m n o p
class Matrix4:
__slots__ = list('abcdefghijklmnop')
def __init__(self):
self.identity()
def __copy__(self):
M = Matrix4()
M.a = self.a
M.b = self.b
M.c = self.c
M.d = self.d
M.e = self.e
M.f = self.f
M.g = self.g
M.h = self.h
M.i = self.i
M.j = self.j
M.k = self.k
M.l = self.l
M.m = self.m
M.n = self.n
M.o = self.o
M.p = self.p
return M
copy = __copy__
def __repr__(self):
return ('Matrix4([% 8.2f % 8.2f % 8.2f % 8.2f\n' \
' % 8.2f % 8.2f % 8.2f % 8.2f\n' \
' % 8.2f % 8.2f % 8.2f % 8.2f\n' \
' % 8.2f % 8.2f % 8.2f % 8.2f])') \
% (self.a, self.b, self.c, self.d,
self.e, self.f, self.g, self.h,
self.i, self.j, self.k, self.l,
self.m, self.n, self.o, self.p)
def __getitem__(self, key):
return [self.a, self.e, self.i, self.m,
self.b, self.f, self.j, self.n,
self.c, self.g, self.k, self.o,
self.d, self.h, self.l, self.p][key]
def __setitem__(self, key, value):
L = self[:]
L[key] = value
(self.a, self.e, self.i, self.m,
self.b, self.f, self.j, self.n,
self.c, self.g, self.k, self.o,
self.d, self.h, self.l, self.p) = L
def __mul__(self, other):
if isinstance(other, Matrix4):
# Cache attributes in local vars (see Matrix3.__mul__).
Aa = self.a
Ab = self.b
Ac = self.c
Ad = self.d
Ae = self.e
Af = self.f
Ag = self.g
Ah = self.h
Ai = self.i
Aj = self.j
Ak = self.k
Al = self.l
Am = self.m
An = self.n
Ao = self.o
Ap = self.p
Ba = other.a
Bb = other.b
Bc = other.c
Bd = other.d
Be = other.e
Bf = other.f
Bg = other.g
Bh = other.h
Bi = other.i
Bj = other.j
Bk = other.k
Bl = other.l
Bm = other.m
Bn = other.n
Bo = other.o
Bp = other.p
C = Matrix4()
C.a = Aa * Ba + Ab * Be + Ac * Bi + Ad * Bm
C.b = Aa * Bb + Ab * Bf + Ac * Bj + Ad * Bn
C.c = Aa * Bc + Ab * Bg + Ac * Bk + Ad * Bo
C.d = Aa * Bd + Ab * Bh + Ac * Bl + Ad * Bp
C.e = Ae * Ba + Af * Be + Ag * Bi + Ah * Bm
C.f = Ae * Bb + Af * Bf + Ag * Bj + Ah * Bn
C.g = Ae * Bc + Af * Bg + Ag * Bk + Ah * Bo
C.h = Ae * Bd + Af * Bh + Ag * Bl + Ah * Bp
C.i = Ai * Ba + Aj * Be + Ak * Bi + Al * Bm
C.j = Ai * Bb + Aj * Bf + Ak * Bj + Al * Bn
C.k = Ai * Bc + Aj * Bg + Ak * Bk + Al * Bo
C.l = Ai * Bd + Aj * Bh + Ak * Bl + Al * Bp
C.m = Am * Ba + An * Be + Ao * Bi + Ap * Bm
C.n = Am * Bb + An * Bf + Ao * Bj + Ap * Bn
C.o = Am * Bc + An * Bg + Ao * Bk + Ap * Bo
C.p = Am * Bd + An * Bh + Ao * Bl + Ap * Bp
return C
elif isinstance(other, Point3):
A = self
B = other
P = Point3(0, 0, 0)
P.x = A.a * B.x + A.b * B.y + A.c * B.z + A.d
P.y = A.e * B.x + A.f * B.y + A.g * B.z + A.h
P.z = A.i * B.x + A.j * B.y + A.k * B.z + A.l
return P
elif isinstance(other, Vector3):
A = self
B = other
V = Vector3(0, 0, 0)
V.x = A.a * B.x + A.b * B.y + A.c * B.z
V.y = A.e * B.x + A.f * B.y + A.g * B.z
V.z = A.i * B.x + A.j * B.y + A.k * B.z
return V
else:
other = other.copy()
other._apply_transform(self)
return other
def __imul__(self, other):
assert isinstance(other, Matrix4)
# Cache attributes in local vars (see Matrix3.__mul__).
Aa = self.a
Ab = self.b
Ac = self.c
Ad = self.d
Ae = self.e
Af = self.f
Ag = self.g
Ah = self.h
Ai = self.i
Aj = self.j
Ak = self.k
Al = self.l
Am = self.m
An = self.n
Ao = self.o
Ap = self.p
Ba = other.a
Bb = other.b
Bc = other.c
Bd = other.d
Be = other.e
Bf = other.f
Bg = other.g
Bh = other.h
Bi = other.i
Bj = other.j
Bk = other.k
Bl = other.l
Bm = other.m
Bn = other.n
Bo = other.o
Bp = other.p
self.a = Aa * Ba + Ab * Be + Ac * Bi + Ad * Bm
self.b = Aa * Bb + Ab * Bf + Ac * Bj + Ad * Bn
self.c = Aa * Bc + Ab * Bg + Ac * Bk + Ad * Bo
self.d = Aa * Bd + Ab * Bh + Ac * Bl + Ad * Bp
self.e = Ae * Ba + Af * Be + Ag * Bi + Ah * Bm
self.f = Ae * Bb + Af * Bf + Ag * Bj + Ah * Bn
self.g = Ae * Bc + Af * Bg + Ag * Bk + Ah * Bo
self.h = Ae * Bd + Af * Bh + Ag * Bl + Ah * Bp
self.i = Ai * Ba + Aj * Be + Ak * Bi + Al * Bm
self.j = Ai * Bb + Aj * Bf + Ak * Bj + Al * Bn
self.k = Ai * Bc + Aj * Bg + Ak * Bk + Al * Bo
self.l = Ai * Bd + Aj * Bh + Ak * Bl + Al * Bp
self.m = Am * Ba + An * Be + Ao * Bi + Ap * Bm
self.n = Am * Bb + An * Bf + Ao * Bj + Ap * Bn
self.o = Am * Bc + An * Bg + Ao * Bk + Ap * Bo
self.p = Am * Bd + An * Bh + Ao * Bl + Ap * Bp
return self
def transform(self, other):
A = self
B = other
P = Point3(0, 0, 0)
P.x = A.a * B.x + A.b * B.y + A.c * B.z + A.d
P.y = A.e * B.x + A.f * B.y + A.g * B.z + A.h
P.z = A.i * B.x + A.j * B.y + A.k * B.z + A.l
w = A.m * B.x + A.n * B.y + A.o * B.z + A.p
if w != 0:
P.x /= w
P.y /= w
P.z /= w
return P
def identity(self):
self.a = self.f = self.k = self.p = 1.
self.b = self.c = self.d = self.e = self.g = self.h = \
self.i = self.j = self.l = self.m = self.n = self.o = 0
return self
def scale(self, x, y, z):
self *= Matrix4.new_scale(x, y, z)
return self
def translate(self, x, y, z):
self *= Matrix4.new_translate(x, y, z)
return self
def rotatex(self, angle):
self *= Matrix4.new_rotatex(angle)
return self
def rotatey(self, angle):
self *= Matrix4.new_rotatey(angle)
return self
def rotatez(self, angle):
self *= Matrix4.new_rotatez(angle)
return self
def rotate_axis(self, angle, axis):
self *= Matrix4.new_rotate_axis(angle, axis)
return self
def rotate_euler(self, heading, attitude, bank):
self *= Matrix4.new_rotate_euler(heading, attitude, bank)
return self
def rotate_triple_axis(self, x, y, z):
self *= Matrix4.new_rotate_triple_axis(x, y, z)
return self
def transpose(self):
(self.a, self.e, self.i, self.m,
self.b, self.f, self.j, self.n,
self.c, self.g, self.k, self.o,
self.d, self.h, self.l, self.p) = \
(self.a, self.b, self.c, self.d,
self.e, self.f, self.g, self.h,
self.i, self.j, self.k, self.l,
self.m, self.n, self.o, self.p)
def transposed(self):
M = self.copy()
M.transpose()
return M
# Static constructors
def new(cls, *values):
M = cls()
M[:] = values
return M
new = classmethod(new)
def new_identity(cls):
self = cls()
return self
new_identity = classmethod(new_identity)
def new_scale(cls, x, y, z):
self = cls()
self.a = x
self.f = y
self.k = z
return self
new_scale = classmethod(new_scale)
def new_translate(cls, x, y, z):
self = cls()
self.d = x
self.h = y
self.l = z
return self
new_translate = classmethod(new_translate)
def new_rotatex(cls, angle):
self = cls()
s = math.sin(angle)
c = math.cos(angle)
self.f = self.k = c
self.g = -s
self.j = s
return self
new_rotatex = classmethod(new_rotatex)
def new_rotatey(cls, angle):
self = cls()
s = math.sin(angle)
c = math.cos(angle)
self.a = self.k = c
self.c = s
self.i = -s
return self
new_rotatey = classmethod(new_rotatey)
def new_rotatez(cls, angle):
self = cls()
s = math.sin(angle)
c = math.cos(angle)
self.a = self.f = c
self.b = -s
self.e = s
return self
new_rotatez = classmethod(new_rotatez)
def new_rotate_axis(cls, angle, axis):
assert(isinstance(axis, Vector3))
vector = axis.normalized()
x = vector.x
y = vector.y
z = vector.z
self = cls()
s = math.sin(angle)
c = math.cos(angle)
c1 = 1. - c
# from the glRotate man page
self.a = x * x * c1 + c
self.b = x * y * c1 - z * s
self.c = x * z * c1 + y * s
self.e = y * x * c1 + z * s
self.f = y * y * c1 + c
self.g = y * z * c1 - x * s
self.i = x * z * c1 - y * s
self.j = y * z * c1 + x * s
self.k = z * z * c1 + c
return self
new_rotate_axis = classmethod(new_rotate_axis)
def new_rotate_euler(cls, heading, attitude, bank):
# from http://www.euclideanspace.com/
ch = math.cos(heading)
sh = math.sin(heading)
ca = math.cos(attitude)
sa = math.sin(attitude)
cb = math.cos(bank)
sb = math.sin(bank)
self = cls()
self.a = ch * ca
self.b = sh * sb - ch * sa * cb
self.c = ch * sa * sb + sh * cb
self.e = sa
self.f = ca * cb
self.g = -ca * sb
self.i = -sh * ca
self.j = sh * sa * cb + ch * sb
self.k = -sh * sa * sb + ch * cb
return self
new_rotate_euler = classmethod(new_rotate_euler)
def new_rotate_triple_axis(cls, x, y, z):
m = cls()
m.a, m.b, m.c = x.x, y.x, z.x
m.e, m.f, m.g = x.y, y.y, z.y
m.i, m.j, m.k = x.z, y.z, z.z
return m
new_rotate_triple_axis = classmethod(new_rotate_triple_axis)
def new_look_at(cls, eye, at, up):
z = (eye - at).normalized()
x = up.cross(z).normalized()
y = z.cross(x)
m = cls.new_rotate_triple_axis(x, y, z)
m.d, m.h, m.l = eye.x, eye.y, eye.z
return m
new_look_at = classmethod(new_look_at)
def new_perspective(cls, fov_y, aspect, near, far):
# from the gluPerspective man page
f = 1 / math.tan(fov_y / 2)
self = cls()
assert near != 0.0 and near != far
self.a = f / aspect
self.f = f
self.k = (far + near) / (near - far)
self.l = 2 * far * near / (near - far)
self.o = -1
self.p = 0
return self
new_perspective = classmethod(new_perspective)
def determinant(self):
return ((self.a * self.f - self.e * self.b)
* (self.k * self.p - self.o * self.l)
- (self.a * self.j - self.i * self.b)
* (self.g * self.p - self.o * self.h)
+ (self.a * self.n - self.m * self.b)
* (self.g * self.l - self.k * self.h)
+ (self.e * self.j - self.i * self.f)
* (self.c * self.p - self.o * self.d)
- (self.e * self.n - self.m * self.f)
* (self.c * self.l - self.k * self.d)
+ (self.i * self.n - self.m * self.j)
* (self.c * self.h - self.g * self.d))
    def inverse(self):
        tmp = Matrix4()
        d = self.determinant()
        if abs(d) < 0.001:
            # No inverse, return identity
            return tmp
        else:
            d = 1.0 / d
            tmp.a = d * (self.f * (self.k * self.p - self.o * self.l) + self.j * (self.o * self.h - self.g * self.p) + self.n * (self.g * self.l - self.k * self.h))
            tmp.e = d * (self.g * (self.i * self.p - self.m * self.l) + self.k * (self.m * self.h - self.e * self.p) + self.o * (self.e * self.l - self.i * self.h))
            tmp.i = d * (self.h * (self.i * self.n - self.m * self.j) + self.l * (self.m * self.f - self.e * self.n) + self.p * (self.e * self.j - self.i * self.f))
            tmp.m = d * (self.e * (self.n * self.k - self.j * self.o) + self.i * (self.f * self.o - self.n * self.g) + self.m * (self.j * self.g - self.f * self.k))
            tmp.b = d * (self.j * (self.c * self.p - self.o * self.d) + self.n * (self.k * self.d - self.c * self.l) + self.b * (self.o * self.l - self.k * self.p))
            tmp.f = d * (self.k * (self.a * self.p - self.m * self.d) + self.o * (self.i * self.d - self.a * self.l) + self.c * (self.m * self.l - self.i * self.p))
            tmp.j = d * (self.l * (self.a * self.n - self.m * self.b) + self.p * (self.i * self.b - self.a * self.j) + self.d * (self.m * self.j - self.i * self.n))
            tmp.n = d * (self.i * (self.n * self.c - self.b * self.o) + self.m * (self.b * self.k - self.j * self.c) + self.a * (self.j * self.o - self.n * self.k))
            tmp.c = d * (self.n * (self.c * self.h - self.g * self.d) + self.b * (self.g * self.p - self.o * self.h) + self.f * (self.o * self.d - self.c * self.p))
            tmp.g = d * (self.o * (self.a * self.h - self.e * self.d) + self.c * (self.e * self.p - self.m * self.h) + self.g * (self.m * self.d - self.a * self.p))
            tmp.k = d * (self.p * (self.a * self.f - self.e * self.b) + self.d * (self.e * self.n - self.m * self.f) + self.h * (self.m * self.b - self.a * self.n))
            tmp.o = d * (self.m * (self.f * self.c - self.b * self.g) + self.a * (self.n * self.g - self.f * self.o) + self.e * (self.b * self.o - self.n * self.c))
            tmp.d = d * (self.b * (self.k * self.h - self.g * self.l) + self.f * (self.c * self.l - self.k * self.d) + self.j * (self.g * self.d - self.c * self.h))
            tmp.h = d * (self.c * (self.i * self.h - self.e * self.l) + self.g * (self.a * self.l - self.i * self.d) + self.k * (self.e * self.d - self.a * self.h))
            tmp.l = d * (self.d * (self.i * self.f - self.e * self.j) + self.h * (self.a * self.j - self.i * self.b) + self.l * (self.e * self.b - self.a * self.f))
            tmp.p = d * (self.a * (self.f * self.k - self.j * self.g) + self.e * (self.j * self.c - self.b * self.k) + self.i * (self.b * self.g - self.f * self.c))
            return tmp
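# A minimal usage sketch for Matrix4 (illustrative values only). Note that
# new_look_at below stores the camera basis in its columns and the eye in the
# translation column, i.e. it is a camera-to-world transform, so a
# conventional view matrix is its inverse:
#
#   cam = Matrix4.new_look_at(Point3(0., 0., 5.),    # eye
#                             Point3(0., 0., 0.),    # target
#                             Vector3(0., 1., 0.))   # up
#   proj = Matrix4.new_perspective(math.pi / 3, 16. / 9., 0.1, 100.)
#   mvp = proj * cam.inverse()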
class Quaternion:
# All methods and naming conventions based off
# http://www.euclideanspace.com/maths/algebra/realNormedAlgebra/quaternions
# w is the real part, (x, y, z) are the imaginary parts
__slots__ = ['w', 'x', 'y', 'z']
def __init__(self, w=1, x=0, y=0, z=0):
self.w = w
self.x = x
self.y = y
self.z = z
def __copy__(self):
Q = Quaternion()
Q.w = self.w
Q.x = self.x
Q.y = self.y
Q.z = self.z
return Q
copy = __copy__
def __repr__(self):
return 'Quaternion(real=%.2f, imag=<%.2f, %.2f, %.2f>)' % \
(self.w, self.x, self.y, self.z)
def __mul__(self, other):
if isinstance(other, Quaternion):
Ax = self.x
Ay = self.y
Az = self.z
Aw = self.w
Bx = other.x
By = other.y
Bz = other.z
Bw = other.w
Q = Quaternion()
Q.x = Ax * Bw + Ay * Bz - Az * By + Aw * Bx
Q.y = -Ax * Bz + Ay * Bw + Az * Bx + Aw * By
Q.z = Ax * By - Ay * Bx + Az * Bw + Aw * Bz
Q.w = -Ax * Bx - Ay * By - Az * Bz + Aw * Bw
return Q
elif isinstance(other, Vector3):
w = self.w
x = self.x
y = self.y
z = self.z
Vx = other.x
Vy = other.y
Vz = other.z
ww = w * w
w2 = w * 2
wx2 = w2 * x
wy2 = w2 * y
wz2 = w2 * z
xx = x * x
x2 = x * 2
xy2 = x2 * y
xz2 = x2 * z
yy = y * y
yz2 = 2 * y * z
zz = z * z
return other.__class__(\
ww * Vx + wy2 * Vz - wz2 * Vy + \
xx * Vx + xy2 * Vy + xz2 * Vz - \
zz * Vx - yy * Vx,
xy2 * Vx + yy * Vy + yz2 * Vz + \
wz2 * Vx - zz * Vy + ww * Vy - \
wx2 * Vz - xx * Vy,
xz2 * Vx + yz2 * Vy + \
zz * Vz - wy2 * Vx - yy * Vz + \
wx2 * Vy - xx * Vz + ww * Vz)
else:
other = other.copy()
other._apply_transform(self)
return other
def __imul__(self, other):
assert isinstance(other, Quaternion)
Ax = self.x
Ay = self.y
Az = self.z
Aw = self.w
Bx = other.x
By = other.y
Bz = other.z
Bw = other.w
self.x = Ax * Bw + Ay * Bz - Az * By + Aw * Bx
self.y = -Ax * Bz + Ay * Bw + Az * Bx + Aw * By
self.z = Ax * By - Ay * Bx + Az * Bw + Aw * Bz
self.w = -Ax * Bx - Ay * By - Az * Bz + Aw * Bw
return self
def __abs__(self):
return math.sqrt(self.w ** 2 + \
self.x ** 2 + \
self.y ** 2 + \
self.z ** 2)
magnitude = __abs__
def magnitude_squared(self):
return self.w ** 2 + \
self.x ** 2 + \
self.y ** 2 + \
self.z ** 2
def identity(self):
self.w = 1
self.x = 0
self.y = 0
self.z = 0
return self
def rotate_axis(self, angle, axis):
self *= Quaternion.new_rotate_axis(angle, axis)
return self
def rotate_euler(self, heading, attitude, bank):
self *= Quaternion.new_rotate_euler(heading, attitude, bank)
return self
def rotate_matrix(self, m):
self *= Quaternion.new_rotate_matrix(m)
return self
def conjugated(self):
Q = Quaternion()
Q.w = self.w
Q.x = -self.x
Q.y = -self.y
Q.z = -self.z
return Q
def normalize(self):
d = self.magnitude()
if d != 0:
self.w /= d
self.x /= d
self.y /= d
self.z /= d
return self
def normalized(self):
d = self.magnitude()
if d != 0:
Q = Quaternion()
Q.w = self.w / d
Q.x = self.x / d
Q.y = self.y / d
Q.z = self.z / d
return Q
else:
return self.copy()
def get_angle_axis(self):
if self.w > 1:
self = self.normalized()
angle = 2 * math.acos(self.w)
s = math.sqrt(1 - self.w ** 2)
if s < 0.001:
return angle, Vector3(1, 0, 0)
else:
return angle, Vector3(self.x / s, self.y / s, self.z / s)
def get_euler(self):
t = self.x * self.y + self.z * self.w
if t > 0.4999:
heading = 2 * math.atan2(self.x, self.w)
attitude = math.pi / 2
bank = 0
elif t < -0.4999:
heading = -2 * math.atan2(self.x, self.w)
attitude = -math.pi / 2
bank = 0
else:
sqx = self.x ** 2
sqy = self.y ** 2
sqz = self.z ** 2
heading = math.atan2(2 * self.y * self.w - 2 * self.x * self.z,
1 - 2 * sqy - 2 * sqz)
attitude = math.asin(2 * t)
bank = math.atan2(2 * self.x * self.w - 2 * self.y * self.z,
1 - 2 * sqx - 2 * sqz)
return heading, attitude, bank
def get_matrix(self):
xx = self.x ** 2
xy = self.x * self.y
xz = self.x * self.z
xw = self.x * self.w
yy = self.y ** 2
yz = self.y * self.z
yw = self.y * self.w
zz = self.z ** 2
zw = self.z * self.w
M = Matrix4()
M.a = 1 - 2 * (yy + zz)
M.b = 2 * (xy - zw)
M.c = 2 * (xz + yw)
M.e = 2 * (xy + zw)
M.f = 1 - 2 * (xx + zz)
M.g = 2 * (yz - xw)
M.i = 2 * (xz - yw)
M.j = 2 * (yz + xw)
M.k = 1 - 2 * (xx + yy)
return M
# Static constructors
def new_identity(cls):
return cls()
new_identity = classmethod(new_identity)
def new_rotate_axis(cls, angle, axis):
assert(isinstance(axis, Vector3))
axis = axis.normalized()
s = math.sin(angle / 2)
Q = cls()
Q.w = math.cos(angle / 2)
Q.x = axis.x * s
Q.y = axis.y * s
Q.z = axis.z * s
return Q
new_rotate_axis = classmethod(new_rotate_axis)
def new_rotate_euler(cls, heading, attitude, bank):
Q = cls()
c1 = math.cos(heading / 2)
s1 = math.sin(heading / 2)
c2 = math.cos(attitude / 2)
s2 = math.sin(attitude / 2)
c3 = math.cos(bank / 2)
s3 = math.sin(bank / 2)
Q.w = c1 * c2 * c3 - s1 * s2 * s3
Q.x = s1 * s2 * c3 + c1 * c2 * s3
Q.y = s1 * c2 * c3 + c1 * s2 * s3
Q.z = c1 * s2 * c3 - s1 * c2 * s3
return Q
new_rotate_euler = classmethod(new_rotate_euler)
def new_rotate_matrix(cls, m):
if m[0*4 + 0] + m[1*4 + 1] + m[2*4 + 2] > 0.00000001:
t = m[0*4 + 0] + m[1*4 + 1] + m[2*4 + 2] + 1.0
s = 0.5/math.sqrt(t)
return cls(
s*t,
(m[1*4 + 2] - m[2*4 + 1])*s,
(m[2*4 + 0] - m[0*4 + 2])*s,
(m[0*4 + 1] - m[1*4 + 0])*s
)
elif m[0*4 + 0] > m[1*4 + 1] and m[0*4 + 0] > m[2*4 + 2]:
t = m[0*4 + 0] - m[1*4 + 1] - m[2*4 + 2] + 1.0
s = 0.5/math.sqrt(t)
return cls(
(m[1*4 + 2] - m[2*4 + 1])*s,
s*t,
(m[0*4 + 1] + m[1*4 + 0])*s,
(m[2*4 + 0] + m[0*4 + 2])*s
)
elif m[1*4 + 1] > m[2*4 + 2]:
t = -m[0*4 + 0] + m[1*4 + 1] - m[2*4 + 2] + 1.0
s = 0.5/math.sqrt(t)
return cls(
(m[2*4 + 0] - m[0*4 + 2])*s,
(m[0*4 + 1] + m[1*4 + 0])*s,
s*t,
(m[1*4 + 2] + m[2*4 + 1])*s
)
else:
t = -m[0*4 + 0] - m[1*4 + 1] + m[2*4 + 2] + 1.0
s = 0.5/math.sqrt(t)
return cls(
(m[0*4 + 1] - m[1*4 + 0])*s,
(m[2*4 + 0] + m[0*4 + 2])*s,
(m[1*4 + 2] + m[2*4 + 1])*s,
s*t
)
new_rotate_matrix = classmethod(new_rotate_matrix)
def new_interpolate(cls, q1, q2, t):
assert isinstance(q1, Quaternion) and isinstance(q2, Quaternion)
Q = cls()
costheta = q1.w * q2.w + q1.x * q2.x + q1.y * q2.y + q1.z * q2.z
if costheta < 0.:
costheta = -costheta
            # Negate q1 entirely (not just its imaginary part) so the
            # interpolation takes the shorter arc.
            q1 = Quaternion(-q1.w, -q1.x, -q1.y, -q1.z)
elif costheta > 1:
costheta = 1
theta = math.acos(costheta)
if abs(theta) < 0.01:
Q.w = q2.w
Q.x = q2.x
Q.y = q2.y
Q.z = q2.z
return Q
sintheta = math.sqrt(1.0 - costheta * costheta)
if abs(sintheta) < 0.01:
Q.w = (q1.w + q2.w) * 0.5
Q.x = (q1.x + q2.x) * 0.5
Q.y = (q1.y + q2.y) * 0.5
Q.z = (q1.z + q2.z) * 0.5
return Q
ratio1 = math.sin((1 - t) * theta) / sintheta
ratio2 = math.sin(t * theta) / sintheta
Q.w = q1.w * ratio1 + q2.w * ratio2
Q.x = q1.x * ratio1 + q2.x * ratio2
Q.y = q1.y * ratio1 + q2.y * ratio2
Q.z = q1.z * ratio1 + q2.z * ratio2
return Q
new_interpolate = classmethod(new_interpolate)
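# A minimal usage sketch for Quaternion (illustrative values only):
#
#   q1 = Quaternion.new_rotate_axis(0.0, Vector3(0., 1., 0.))
#   q2 = Quaternion.new_rotate_axis(math.pi, Vector3(0., 1., 0.))
#   half = Quaternion.new_interpolate(q1, q2, 0.5)  # slerp halfway: 90 degrees
#   half * Vector3(1., 0., 0.)                      # ~Vector3(0.00, 0.00, -1.00)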
# Geometry
# Much maths thanks to Paul Bourke, http://astronomy.swin.edu.au/~pbourke
# ---------------------------------------------------------------------------
class Geometry:
    def _connect_unimplemented(self, other):
        raise AttributeError('Cannot connect %s to %s' %
                             (self.__class__, other.__class__))
    def _intersect_unimplemented(self, other):
        raise AttributeError('Cannot intersect %s and %s' %
                             (self.__class__, other.__class__))
_intersect_point2 = _intersect_unimplemented
_intersect_line2 = _intersect_unimplemented
_intersect_circle = _intersect_unimplemented
_connect_point2 = _connect_unimplemented
_connect_line2 = _connect_unimplemented
_connect_circle = _connect_unimplemented
_intersect_point3 = _intersect_unimplemented
_intersect_line3 = _intersect_unimplemented
_intersect_sphere = _intersect_unimplemented
_intersect_plane = _intersect_unimplemented
_connect_point3 = _connect_unimplemented
_connect_line3 = _connect_unimplemented
_connect_sphere = _connect_unimplemented
_connect_plane = _connect_unimplemented
def intersect(self, other):
raise NotImplementedError
def connect(self, other):
raise NotImplementedError
def distance(self, other):
c = self.connect(other)
if c:
return c.length
return 0.0
def _intersect_point2_circle(P, C):
return abs(P - C.c) <= C.r
def _intersect_line2_line2(A, B):
d = B.v.y * A.v.x - B.v.x * A.v.y
if d == 0:
return None
dy = A.p.y - B.p.y
dx = A.p.x - B.p.x
ua = (B.v.x * dy - B.v.y * dx) / d
if not A._u_in(ua):
return None
ub = (A.v.x * dy - A.v.y * dx) / d
if not B._u_in(ub):
return None
return Point2(A.p.x + ua * A.v.x,
A.p.y + ua * A.v.y)
def _intersect_line2_circle(L, C):
a = L.v.magnitude_squared()
b = 2 * (L.v.x * (L.p.x - C.c.x) + \
L.v.y * (L.p.y - C.c.y))
c = C.c.magnitude_squared() + \
L.p.magnitude_squared() - \
2 * C.c.dot(L.p) - \
C.r ** 2
det = b ** 2 - 4 * a * c
if det < 0:
return None
sq = math.sqrt(det)
u1 = (-b + sq) / (2 * a)
u2 = (-b - sq) / (2 * a)
if not L._u_in(u1):
u1 = max(min(u1, 1.0), 0.0)
if not L._u_in(u2):
u2 = max(min(u2, 1.0), 0.0)
# Tangent
if u1 == u2:
return Point2(L.p.x + u1 * L.v.x,
L.p.y + u1 * L.v.y)
return LineSegment2(Point2(L.p.x + u1 * L.v.x,
L.p.y + u1 * L.v.y),
Point2(L.p.x + u2 * L.v.x,
L.p.y + u2 * L.v.y))
def _connect_point2_line2(P, L):
d = L.v.magnitude_squared()
assert d != 0
u = ((P.x - L.p.x) * L.v.x + \
(P.y - L.p.y) * L.v.y) / d
if not L._u_in(u):
u = max(min(u, 1.0), 0.0)
return LineSegment2(P,
Point2(L.p.x + u * L.v.x,
L.p.y + u * L.v.y))
def _connect_point2_circle(P, C):
v = P - C.c
v.normalize()
v *= C.r
return LineSegment2(P, Point2(C.c.x + v.x, C.c.y + v.y))
def _connect_line2_line2(A, B):
d = B.v.y * A.v.x - B.v.x * A.v.y
if d == 0:
# Parallel, connect an endpoint with a line
        if isinstance(B, Ray2) or isinstance(B, LineSegment2):
            return _connect_point2_line2(B.p, A)._swap()
# No endpoint (or endpoint is on A), possibly choose arbitrary point
# on line.
return _connect_point2_line2(A.p, B)
dy = A.p.y - B.p.y
dx = A.p.x - B.p.x
ua = (B.v.x * dy - B.v.y * dx) / d
if not A._u_in(ua):
ua = max(min(ua, 1.0), 0.0)
ub = (A.v.x * dy - A.v.y * dx) / d
if not B._u_in(ub):
ub = max(min(ub, 1.0), 0.0)
return LineSegment2(Point2(A.p.x + ua * A.v.x, A.p.y + ua * A.v.y),
Point2(B.p.x + ub * B.v.x, B.p.y + ub * B.v.y))
def _connect_circle_line2(C, L):
d = L.v.magnitude_squared()
assert d != 0
u = ((C.c.x - L.p.x) * L.v.x + (C.c.y - L.p.y) * L.v.y) / d
if not L._u_in(u):
u = max(min(u, 1.0), 0.0)
point = Point2(L.p.x + u * L.v.x, L.p.y + u * L.v.y)
v = (point - C.c)
v.normalize()
v *= C.r
return LineSegment2(Point2(C.c.x + v.x, C.c.y + v.y), point)
def _connect_circle_circle(A, B):
v = B.c - A.c
d = v.magnitude()
if A.r >= B.r and d < A.r:
#centre B inside A
s1,s2 = +1, +1
elif B.r > A.r and d < B.r:
#centre A inside B
s1,s2 = -1, -1
elif d >= A.r and d >= B.r:
s1,s2 = +1, -1
v.normalize()
return LineSegment2(Point2(A.c.x + s1 * v.x * A.r, A.c.y + s1 * v.y * A.r),
Point2(B.c.x + s2 * v.x * B.r, B.c.y + s2 * v.y * B.r))
class Point2(Vector2, Geometry):
def __repr__(self):
return 'Point2(%.2f, %.2f)' % (self.x, self.y)
def intersect(self, other):
return other._intersect_point2(self)
def _intersect_circle(self, other):
return _intersect_point2_circle(self, other)
def connect(self, other):
return other._connect_point2(self)
def _connect_point2(self, other):
return LineSegment2(other, self)
def _connect_line2(self, other):
c = _connect_point2_line2(self, other)
if c:
return c._swap()
def _connect_circle(self, other):
c = _connect_point2_circle(self, other)
if c:
return c._swap()
class Line2(Geometry):
__slots__ = ['p', 'v']
def __init__(self, *args):
if len(args) == 3:
assert isinstance(args[0], Point2) and \
isinstance(args[1], Vector2) and \
type(args[2]) == float
self.p = args[0].copy()
self.v = args[1] * args[2] / abs(args[1])
elif len(args) == 2:
if isinstance(args[0], Point2) and isinstance(args[1], Point2):
self.p = args[0].copy()
self.v = args[1] - args[0]
elif isinstance(args[0], Point2) and isinstance(args[1], Vector2):
self.p = args[0].copy()
self.v = args[1].copy()
            else:
                raise AttributeError('%r' % (args,))
        elif len(args) == 1:
            if isinstance(args[0], Line2):
                self.p = args[0].p.copy()
                self.v = args[0].v.copy()
            else:
                raise AttributeError('%r' % (args,))
        else:
            raise AttributeError('%r' % (args,))
        if not self.v.magnitude_squared():
            raise AttributeError('Line has zero-length vector')
def __copy__(self):
return self.__class__(self.p, self.v)
copy = __copy__
def __repr__(self):
return 'Line2(<%.2f, %.2f> + u<%.2f, %.2f>)' % \
(self.p.x, self.p.y, self.v.x, self.v.y)
p1 = property(lambda self: self.p)
p2 = property(lambda self: Point2(self.p.x + self.v.x,
self.p.y + self.v.y))
def _apply_transform(self, t):
self.p = t * self.p
self.v = t * self.v
def _u_in(self, u):
return True
def intersect(self, other):
return other._intersect_line2(self)
def _intersect_line2(self, other):
return _intersect_line2_line2(self, other)
def _intersect_circle(self, other):
return _intersect_line2_circle(self, other)
def connect(self, other):
return other._connect_line2(self)
def _connect_point2(self, other):
return _connect_point2_line2(other, self)
def _connect_line2(self, other):
return _connect_line2_line2(other, self)
def _connect_circle(self, other):
return _connect_circle_line2(other, self)
class Ray2(Line2):
def __repr__(self):
return 'Ray2(<%.2f, %.2f> + u<%.2f, %.2f>)' % \
(self.p.x, self.p.y, self.v.x, self.v.y)
def _u_in(self, u):
return u >= 0.0
class LineSegment2(Line2):
def __repr__(self):
return 'LineSegment2(<%.2f, %.2f> to <%.2f, %.2f>)' % \
(self.p.x, self.p.y, self.p.x + self.v.x, self.p.y + self.v.y)
def _u_in(self, u):
return u >= 0.0 and u <= 1.0
def __abs__(self):
return abs(self.v)
def magnitude_squared(self):
return self.v.magnitude_squared()
def _swap(self):
# used by connect methods to switch order of points
self.p = self.p2
self.v *= -1
return self
length = property(lambda self: abs(self.v))
class Circle(Geometry):
__slots__ = ['c', 'r']
def __init__(self, center, radius):
assert isinstance(center, Vector2) and type(radius) == float
self.c = center.copy()
self.r = radius
def __copy__(self):
return self.__class__(self.c, self.r)
copy = __copy__
def __repr__(self):
return 'Circle(<%.2f, %.2f>, radius=%.2f)' % \
(self.c.x, self.c.y, self.r)
def _apply_transform(self, t):
self.c = t * self.c
def intersect(self, other):
return other._intersect_circle(self)
def _intersect_point2(self, other):
return _intersect_point2_circle(other, self)
def _intersect_line2(self, other):
return _intersect_line2_circle(other, self)
def connect(self, other):
return other._connect_circle(self)
def _connect_point2(self, other):
return _connect_point2_circle(other, self)
def _connect_line2(self, other):
c = _connect_circle_line2(self, other)
if c:
return c._swap()
def _connect_circle(self, other):
return _connect_circle_circle(other, self)
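# A minimal usage sketch of the 2D geometry API (illustrative values only):
#
#   seg = LineSegment2(Point2(0.0, 0.0), Point2(4.0, 0.0))
#   c = Circle(Point2(2.0, 1.0), 1.0)
#   seg.intersect(c)                 # Point2(2.00, 0.00), the tangent point
#   seg.distance(Point2(2.0, 3.0))   # 3.0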
# 3D Geometry
# -------------------------------------------------------------------------
def _connect_point3_line3(P, L):
d = L.v.magnitude_squared()
assert d != 0
u = ((P.x - L.p.x) * L.v.x + \
(P.y - L.p.y) * L.v.y + \
(P.z - L.p.z) * L.v.z) / d
if not L._u_in(u):
u = max(min(u, 1.0), 0.0)
return LineSegment3(P, Point3(L.p.x + u * L.v.x,
L.p.y + u * L.v.y,
L.p.z + u * L.v.z))
def _connect_point3_sphere(P, S):
v = P - S.c
v.normalize()
v *= S.r
return LineSegment3(P, Point3(S.c.x + v.x, S.c.y + v.y, S.c.z + v.z))
def _connect_point3_plane(p, plane):
n = plane.n.normalized()
d = p.dot(plane.n) - plane.k
return LineSegment3(p, Point3(p.x - n.x * d, p.y - n.y * d, p.z - n.z * d))
def _connect_line3_line3(A, B):
assert A.v and B.v
p13 = A.p - B.p
d1343 = p13.dot(B.v)
d4321 = B.v.dot(A.v)
d1321 = p13.dot(A.v)
d4343 = B.v.magnitude_squared()
denom = A.v.magnitude_squared() * d4343 - d4321 ** 2
if denom == 0:
# Parallel, connect an endpoint with a line
if isinstance(B, Ray3) or isinstance(B, LineSegment3):
return _connect_point3_line3(B.p, A)._swap()
# No endpoint (or endpoint is on A), possibly choose arbitrary
# point on line.
return _connect_point3_line3(A.p, B)
ua = (d1343 * d4321 - d1321 * d4343) / denom
if not A._u_in(ua):
ua = max(min(ua, 1.0), 0.0)
ub = (d1343 + d4321 * ua) / d4343
if not B._u_in(ub):
ub = max(min(ub, 1.0), 0.0)
return LineSegment3(Point3(A.p.x + ua * A.v.x,
A.p.y + ua * A.v.y,
A.p.z + ua * A.v.z),
Point3(B.p.x + ub * B.v.x,
B.p.y + ub * B.v.y,
B.p.z + ub * B.v.z))
def _connect_line3_plane(L, P):
d = P.n.dot(L.v)
if not d:
# Parallel, choose an endpoint
return _connect_point3_plane(L.p, P)
u = (P.k - P.n.dot(L.p)) / d
if not L._u_in(u):
# intersects out of range, choose nearest endpoint
u = max(min(u, 1.0), 0.0)
return _connect_point3_plane(Point3(L.p.x + u * L.v.x,
L.p.y + u * L.v.y,
L.p.z + u * L.v.z), P)
# Intersection
return None
def _connect_sphere_line3(S, L):
d = L.v.magnitude_squared()
assert d != 0
u = ((S.c.x - L.p.x) * L.v.x + \
(S.c.y - L.p.y) * L.v.y + \
(S.c.z - L.p.z) * L.v.z) / d
if not L._u_in(u):
u = max(min(u, 1.0), 0.0)
point = Point3(L.p.x + u * L.v.x, L.p.y + u * L.v.y, L.p.z + u * L.v.z)
v = (point - S.c)
v.normalize()
v *= S.r
return LineSegment3(Point3(S.c.x + v.x, S.c.y + v.y, S.c.z + v.z),
point)
def _connect_sphere_sphere(A, B):
v = B.c - A.c
d = v.magnitude()
if A.r >= B.r and d < A.r:
#centre B inside A
s1,s2 = +1, +1
elif B.r > A.r and d < B.r:
#centre A inside B
s1,s2 = -1, -1
elif d >= A.r and d >= B.r:
s1,s2 = +1, -1
v.normalize()
return LineSegment3(Point3(A.c.x + s1* v.x * A.r,
A.c.y + s1* v.y * A.r,
A.c.z + s1* v.z * A.r),
Point3(B.c.x + s2* v.x * B.r,
B.c.y + s2* v.y * B.r,
B.c.z + s2* v.z * B.r))
def _connect_sphere_plane(S, P):
c = _connect_point3_plane(S.c, P)
if not c:
return None
p2 = c.p2
v = p2 - S.c
v.normalize()
v *= S.r
return LineSegment3(Point3(S.c.x + v.x, S.c.y + v.y, S.c.z + v.z),
p2)
def _connect_plane_plane(A, B):
if A.n.cross(B.n):
# Planes intersect
return None
else:
# Planes are parallel, connect to arbitrary point
return _connect_point3_plane(A._get_point(), B)
def _intersect_point3_sphere(P, S):
return abs(P - S.c) <= S.r
def _intersect_line3_sphere(L, S):
a = L.v.magnitude_squared()
b = 2 * (L.v.x * (L.p.x - S.c.x) + \
L.v.y * (L.p.y - S.c.y) + \
L.v.z * (L.p.z - S.c.z))
c = S.c.magnitude_squared() + \
L.p.magnitude_squared() - \
2 * S.c.dot(L.p) - \
S.r ** 2
det = b ** 2 - 4 * a * c
if det < 0:
return None
sq = math.sqrt(det)
u1 = (-b + sq) / (2 * a)
u2 = (-b - sq) / (2 * a)
if not L._u_in(u1):
u1 = max(min(u1, 1.0), 0.0)
if not L._u_in(u2):
u2 = max(min(u2, 1.0), 0.0)
return LineSegment3(Point3(L.p.x + u1 * L.v.x,
L.p.y + u1 * L.v.y,
L.p.z + u1 * L.v.z),
Point3(L.p.x + u2 * L.v.x,
L.p.y + u2 * L.v.y,
L.p.z + u2 * L.v.z))
def _intersect_line3_plane(L, P):
d = P.n.dot(L.v)
if not d:
# Parallel
return None
u = (P.k - P.n.dot(L.p)) / d
if not L._u_in(u):
return None
return Point3(L.p.x + u * L.v.x,
L.p.y + u * L.v.y,
L.p.z + u * L.v.z)
def _intersect_plane_plane(A, B):
n1_m = A.n.magnitude_squared()
n2_m = B.n.magnitude_squared()
n1d2 = A.n.dot(B.n)
det = n1_m * n2_m - n1d2 ** 2
if det == 0:
# Parallel
return None
c1 = (A.k * n2_m - B.k * n1d2) / det
c2 = (B.k * n1_m - A.k * n1d2) / det
return Line3(Point3(c1 * A.n.x + c2 * B.n.x,
c1 * A.n.y + c2 * B.n.y,
c1 * A.n.z + c2 * B.n.z),
A.n.cross(B.n))
class Point3(Vector3, Geometry):
def __repr__(self):
return 'Point3(%.2f, %.2f, %.2f)' % (self.x, self.y, self.z)
def intersect(self, other):
return other._intersect_point3(self)
def _intersect_sphere(self, other):
return _intersect_point3_sphere(self, other)
def connect(self, other):
return other._connect_point3(self)
def _connect_point3(self, other):
if self != other:
return LineSegment3(other, self)
return None
def _connect_line3(self, other):
c = _connect_point3_line3(self, other)
if c:
return c._swap()
def _connect_sphere(self, other):
c = _connect_point3_sphere(self, other)
if c:
return c._swap()
def _connect_plane(self, other):
c = _connect_point3_plane(self, other)
if c:
return c._swap()
class Line3(Geometry):
__slots__ = ['p', 'v']
def __init__(self, *args):
if len(args) == 3:
assert isinstance(args[0], Point3) and \
isinstance(args[1], Vector3) and \
type(args[2]) == float
self.p = args[0].copy()
self.v = args[1] * args[2] / abs(args[1])
elif len(args) == 2:
if isinstance(args[0], Point3) and isinstance(args[1], Point3):
self.p = args[0].copy()
self.v = args[1] - args[0]
elif isinstance(args[0], Point3) and isinstance(args[1], Vector3):
self.p = args[0].copy()
self.v = args[1].copy()
            else:
                raise AttributeError('%r' % (args,))
        elif len(args) == 1:
            if isinstance(args[0], Line3):
                self.p = args[0].p.copy()
                self.v = args[0].v.copy()
            else:
                raise AttributeError('%r' % (args,))
        else:
            raise AttributeError('%r' % (args,))
# XXX This is annoying.
#if not self.v:
# raise AttributeError, 'Line has zero-length vector'
def __copy__(self):
return self.__class__(self.p, self.v)
copy = __copy__
def __repr__(self):
return 'Line3(<%.2f, %.2f, %.2f> + u<%.2f, %.2f, %.2f>)' % \
(self.p.x, self.p.y, self.p.z, self.v.x, self.v.y, self.v.z)
p1 = property(lambda self: self.p)
p2 = property(lambda self: Point3(self.p.x + self.v.x,
self.p.y + self.v.y,
self.p.z + self.v.z))
def _apply_transform(self, t):
self.p = t * self.p
self.v = t * self.v
def _u_in(self, u):
return True
def intersect(self, other):
return other._intersect_line3(self)
def _intersect_sphere(self, other):
return _intersect_line3_sphere(self, other)
def _intersect_plane(self, other):
return _intersect_line3_plane(self, other)
def connect(self, other):
return other._connect_line3(self)
def _connect_point3(self, other):
return _connect_point3_line3(other, self)
def _connect_line3(self, other):
return _connect_line3_line3(other, self)
def _connect_sphere(self, other):
return _connect_sphere_line3(other, self)
def _connect_plane(self, other):
c = _connect_line3_plane(self, other)
if c:
return c
class Ray3(Line3):
def __repr__(self):
return 'Ray3(<%.2f, %.2f, %.2f> + u<%.2f, %.2f, %.2f>)' % \
(self.p.x, self.p.y, self.p.z, self.v.x, self.v.y, self.v.z)
def _u_in(self, u):
return u >= 0.0
class LineSegment3(Line3):
def __repr__(self):
return 'LineSegment3(<%.2f, %.2f, %.2f> to <%.2f, %.2f, %.2f>)' % \
(self.p.x, self.p.y, self.p.z,
self.p.x + self.v.x, self.p.y + self.v.y, self.p.z + self.v.z)
def _u_in(self, u):
return u >= 0.0 and u <= 1.0
def __abs__(self):
return abs(self.v)
def magnitude_squared(self):
return self.v.magnitude_squared()
def _swap(self):
# used by connect methods to switch order of points
self.p = self.p2
self.v *= -1
return self
length = property(lambda self: abs(self.v))
class Sphere(Geometry):
__slots__ = ['c', 'r']
def __init__(self, center, radius):
assert isinstance(center, Vector3) and type(radius) == float
self.c = center.copy()
self.r = radius
def __copy__(self):
return self.__class__(self.c, self.r)
copy = __copy__
def __repr__(self):
return 'Sphere(<%.2f, %.2f, %.2f>, radius=%.2f)' % \
(self.c.x, self.c.y, self.c.z, self.r)
def _apply_transform(self, t):
self.c = t * self.c
def intersect(self, other):
return other._intersect_sphere(self)
def _intersect_point3(self, other):
return _intersect_point3_sphere(other, self)
def _intersect_line3(self, other):
return _intersect_line3_sphere(other, self)
def connect(self, other):
return other._connect_sphere(self)
def _connect_point3(self, other):
return _connect_point3_sphere(other, self)
def _connect_line3(self, other):
c = _connect_sphere_line3(self, other)
if c:
return c._swap()
def _connect_sphere(self, other):
return _connect_sphere_sphere(other, self)
def _connect_plane(self, other):
c = _connect_sphere_plane(self, other)
if c:
return c
class Plane(Geometry):
# n.p = k, where n is normal, p is point on plane, k is constant scalar
__slots__ = ['n', 'k']
def __init__(self, *args):
if len(args) == 3:
assert isinstance(args[0], Point3) and \
isinstance(args[1], Point3) and \
isinstance(args[2], Point3)
self.n = (args[1] - args[0]).cross(args[2] - args[0])
self.n.normalize()
self.k = self.n.dot(args[0])
elif len(args) == 2:
if isinstance(args[0], Point3) and isinstance(args[1], Vector3):
self.n = args[1].normalized()
self.k = self.n.dot(args[0])
elif isinstance(args[0], Vector3) and type(args[1]) == float:
self.n = args[0].normalized()
self.k = args[1]
            else:
                raise AttributeError('%r' % (args,))
        else:
            raise AttributeError('%r' % (args,))
        if not self.n:
            raise AttributeError('Points on plane are collinear')
def __copy__(self):
return self.__class__(self.n, self.k)
copy = __copy__
def __repr__(self):
return 'Plane(<%.2f, %.2f, %.2f>.p = %.2f)' % \
(self.n.x, self.n.y, self.n.z, self.k)
def _get_point(self):
# Return an arbitrary point on the plane
if self.n.z:
return Point3(0., 0., self.k / self.n.z)
elif self.n.y:
return Point3(0., self.k / self.n.y, 0.)
else:
return Point3(self.k / self.n.x, 0., 0.)
def _apply_transform(self, t):
p = t * self._get_point()
self.n = t * self.n
self.k = self.n.dot(p)
def intersect(self, other):
return other._intersect_plane(self)
def _intersect_line3(self, other):
return _intersect_line3_plane(other, self)
def _intersect_plane(self, other):
return _intersect_plane_plane(self, other)
def connect(self, other):
return other._connect_plane(self)
def _connect_point3(self, other):
return _connect_point3_plane(other, self)
def _connect_line3(self, other):
return _connect_line3_plane(other, self)
def _connect_sphere(self, other):
return _connect_sphere_plane(other, self)
def _connect_plane(self, other):
return _connect_plane_plane(other, self)
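# A small self-check when run directly (a sketch; the shapes below are
# illustrative and not part of the original module):
if __name__ == '__main__':
    plane = Plane(Point3(0., 0., 1.), Vector3(0., 0., 1.))  # the z = 1 plane
    ray = Ray3(Point3(0., 0., 0.), Vector3(0., 0., 1.))
    print(ray.intersect(plane))                # Point3(0.00, 0.00, 1.00)
    print(plane.distance(Point3(0., 0., 3.)))  # 2.0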
|
python
|
from django.conf.urls import (
include,
url,
)
from .views import (
calls,
notifications,
reviews,
submissions,
)
app_name = 'submitify'
notification_urls = [
url(r'^(?P<notification_id>\d+)/$', notifications.view_notification,
name='view_notification'),
url(r'^(?P<notification_type>(basic|accept|reject))/$',
notifications.send_notification,
name='send_notification'),
]
review_urls = [
url(r'^create/$', reviews.create_review,
name='create_review'),
    url(r'^(?P<review_id>\d+)/$', reviews.view_review,
name='view_review'),
    url(r'^(?P<review_id>\d+)/edit/$', reviews.edit_review,
name='edit_review'),
]
submission_urls = [
url(r'^$', submissions.view_submission,
name='view_submission'),
url(r'^text/$', submissions.view_submission_text,
name='view_submission_text'),
url(r'^pdf/$', submissions.view_submission_file,
name='view_submission_file'),
url(r'^original/$', submissions.view_original_file,
name='view_original_file'),
url(r'^reviews/', include(review_urls)),
url(r'^resolve/(?P<resolution_type>(accept|reject))/$',
submissions.resolve_submission,
name='resolve_submission'),
]
call_urls = [
url(r'^$', calls.view_call,
name='view_call'),
url(r'^submit/$', submissions.create_submission,
name='create_submission'),
url(r'^edit/$', calls.edit_call,
name='edit_call'),
url(r'^next/$', calls.next_step,
name='next_step'),
url(r'^notifications/', include(notification_urls)),
url(r'^(?P<submission_id>\d+)/', include(submission_urls))
]
urlpatterns = [
url(r'^$', calls.list_calls,
name='list_calls'),
url(r'^create/$', calls.create_call,
name='create_call'),
url(r'^invite/reader/$', calls.invite_reader,
name='invite_reader'),
url(r'^invite/writer/$', calls.invite_writer,
name='invite_writer'),
url(r'^(?P<call_id>\d+)/', include(call_urls)),
url(r'^(?P<call_id>\d+)-(?P<call_slug>[-\w]+)/', include(call_urls)),
]
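# Reversing nested names from this config (a sketch; the IDs are hypothetical):
#   reverse('submitify:view_call', kwargs={'call_id': 1})
#   reverse('submitify:view_submission', kwargs={'call_id': 1, 'submission_id': 2})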
|
python
|
from flask import Flask, request, jsonify
from service import Service
app = Flask(__name__, static_url_path='/static')
service = Service()
@app.route("/")
def hello():
return '', 200
@app.route("fullinsert")
def fullinsert():
service.init()
return '', 200
# /insert expects a JSON body (user, interest, longitude, latitude, imgurl);
# URL query parameters (?key=value) are read by /query below
@app.route('/insert', methods=['POST'])
def insert():
data = request.get_json()
user = data['user']
interest = data['interest']
longitude = data['longitude']
latitude = data['latitude']
imgurl = data['imgurl']
service.insert(user, interest, longitude, latitude, imgurl)
return '', 200
@app.route('/query')
def query():
user = request.args.get('user')
interest = request.args.get('interest')
longitude = request.args.get('longitude')
latitude = request.args.get('latitude')
return jsonify(service.query(user, interest, longitude, latitude))
@app.route('/fullquery')
def fullquery():
return jsonify(service.fullquery())
# Return a hardcoded sample user/interest payload
@app.route('/interest/')
def interest():
person = [
{'user': 'Declan',
'interest': ['reading, yikes','gaming','writing']
}
]
return jsonify(person)
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=False, port=5000)
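# Example request against /insert (a sketch; host and port follow the app.run
# call above, and the field values are illustrative):
#   curl -X POST http://localhost:5000/insert \
#        -H 'Content-Type: application/json' \
#        -d '{"user": "alice", "interest": "gaming", "longitude": 0.0,
#             "latitude": 0.0, "imgurl": "http://example.com/a.png"}'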
|
python
|
# Python Exercise 73: Create a tuple filled with the top 20 teams of the
# Brazilian Football Championship table, in order of placement. Then show:
# a) The first 5 teams.
# b) The last 4 teams.
# c) The teams in alphabetical order.
# d) The position of the Chapecoense team.
tabela_brasileirao = (
    "Flamengo", "Internacional", "Atlético - MG", "São Paulo", "Fluminense", "Grêmio", "Palmeiras", "Santos", "Athlético Paranaense", "Bragantino - Red Bull", "Ceará", "Corinthians", "Atlético - GO", "Bahia", "Sport", "Fortaleza", "Vasco da Gama", "Goiás", "Coritiba", "Botafogo")
print(f"\n(A) The top five teams in the championship were: {tabela_brasileirao[:5]}\n")
print(f"(B) The bottom four teams in the championship were: {tabela_brasileirao[-4:]}\n")
print(f"(C) The teams in alphabetical order are: {sorted(tabela_brasileirao)}\n")
# Approach 1 - look up an element's index (index() is 0-based, so position = index + 1)
print(f"Bahia is in position {tabela_brasileirao.index('Bahia') + 1}")
# Approach 2 - search for an element using a for loop
for cont in range(0, len(tabela_brasileirao)):
    if tabela_brasileirao[cont] == "Bahia":
        print(f"(D) {tabela_brasileirao[cont]} is in position {cont + 1}.\n")
    elif cont == len(tabela_brasileirao) - 1 and tabela_brasileirao[cont] != "Chapecoense":
        print("(D) Chapecoense is not in this year's championship!\n")
|
python
|
from argparse import ArgumentParser
import flom
from trainer import train
def make_parser():
parser = ArgumentParser(description='Train the motion to fit to effectors')
parser.add_argument('-i', '--input', type=str, help='Input motion file', required=True)
parser.add_argument('-r', '--robot', type=str, help='Input robot model file', required=True)
parser.add_argument('-t', '--timestep', type=float, help='Timestep', default=0.0165/8)
parser.add_argument('-s', '--frame-skip', type=int, help='Frame skip', default=8)
return parser
def main(args):
motion = flom.load(args.input)
weights = train(motion, args.robot, args.timestep, args.frame_skip)
print(weights)
if __name__ == '__main__':
parser = make_parser()
args = parser.parse_args()
main(args)
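# Example invocation (a sketch; the motion and robot file names are
# hypothetical):
#   python <this script> -i walk.fom -r robot.urdf -t 0.0020625 -s 8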
|
python
|
"""
"""
from django.core.urlresolvers import reverse
from django.test import TestCase
from wagtail.tests.utils import WagtailTestUtils
class BaseTestIndexView(TestCase, WagtailTestUtils):
"""
Base test case for CRUD index view.
"""
url_namespace = None
template_dir = None
def _create_sequential_instance(self, index):
"""
Stub method for extending class to create sequential
model instances.
:param index: the sequential index to use.
"""
        raise NotImplementedError(
'This method must be implemented by {0}'.format(
self.__class__.__name__
)
)
def setUp(self):
self.login()
def get(self, params=None):
if not params:
params = {}
return self.client.get(
reverse('{0}:index'.format(self.url_namespace)), params)
def populate(self):
"""
        Populates several model class instances.
"""
for i in range(50):
self._create_sequential_instance(i)
def test_get(self):
# Generate the response.
response = self.get()
# Check assertions.
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(
response,
'{0}/index.html'.format(self.template_dir)
)
def test_search(self):
# Generate the response.
response = self.get({'q': 'keyword'})
# Check assertions.
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], 'keyword')
def test_pagination(self):
# Create model class instances.
self.populate()
# Generate the response.
response = self.get({'p': 2})
# Check assertions.
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(
response,
'{0}/index.html'.format(self.template_dir)
)
self.assertEqual(response.context['page_obj'].number, 2)
def test_pagination_invalid(self):
# Create model class instances.
self.populate()
# Generate the response.
response = self.get({'p': 'fake'})
# Check assertions.
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(
response,
'{0}/index.html'.format(self.template_dir)
)
self.assertEqual(response.context['page_obj'].number, 1)
def test_pagination_out_of_range(self):
# Create model class instances.
self.populate()
# Generate the response.
response = self.get({'p': 99999})
# Check assertions.
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(
response,
'{0}/index.html'.format(self.template_dir)
)
self.assertEqual(
response.context['page_obj'].number,
response.context['paginator'].num_pages
)
def test_ordering(self):
orderings = ['title', '-created_at']
for ordering in orderings:
response = self.get({'ordering': ordering})
self.assertEqual(response.status_code, 200)
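# A sketch of how a concrete app would extend these base classes (the model
# and namespace names below are hypothetical):
#
#   class TestThingIndexView(BaseTestIndexView):
#       url_namespace = 'things'
#       template_dir = 'things'
#
#       def _create_sequential_instance(self, index):
#           Thing.objects.create(title='Thing {0}'.format(index))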
class BaseTestCreateView(TestCase, WagtailTestUtils):
"""
Base test case for CRUD add view.
"""
url_namespace = None
template_dir = None
model_class = None
def _get_post_data(self):
"""
Stub method for extending class to return data dictionary
to create a new model instance on POST.
:rtype: dict.
"""
        raise NotImplementedError(
'This method must be implemented by {0}'.format(
self.__class__.__name__
)
)
def setUp(self):
self.login()
def test_get(self):
# Generate the response.
response = self.client.get(
reverse('{0}:add'.format(self.url_namespace))
)
# Check assertions.
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(
response,
'{0}/add.html'.format(self.template_dir)
)
def test_post(self):
# Get POST data.
data = self._get_post_data()
# Generate the response.
response = self.client.post(
reverse('{0}:add'.format(self.url_namespace)),
data
)
# Check assertions.
self.assertRedirects(
response,
reverse('{0}:index'.format(self.url_namespace))
)
self.assertTrue(
self.model_class.objects.filter(**data).exists()
)
class BaseTestUpdateView(TestCase, WagtailTestUtils):
"""
Base test case for CRUD edit view.
"""
url_namespace = None
template_dir = None
model_class = None
def _get_instance(self):
"""
Stub method for extending class to return saved model class
instance.
:rtype: django.db.models.Model.
"""
        raise NotImplementedError(
'This method must be implemented by {0}'.format(
self.__class__.__name__
)
)
def _get_post_data(self):
"""
Stub method for extending class to return data dictionary
to create a new model instance on POST.
:rtype: dict.
"""
        raise NotImplementedError(
'This method must be implemented by {0}'.format(
self.__class__.__name__
)
)
def setUp(self):
# Create the instance and login.
self.instance = self._get_instance()
self.login()
def test_get(self):
# Generate the response.
response = self.client.get(
reverse(
'{0}:edit'.format(self.url_namespace),
args=(self.instance.pk,)
)
)
# Check assertions.
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(
response,
'{0}/edit.html'.format(self.template_dir)
)
def test_post(self):
# Get POST data.
data = self._get_post_data()
# Generate the response.
response = self.client.post(
reverse(
'{0}:edit'.format(self.url_namespace),
args=(self.instance.pk,)
),
data
)
# Check assertions.
self.assertRedirects(
response,
reverse('{0}:index'.format(self.url_namespace)))
self.assertTrue(
self.model_class.objects.filter(**data).exists()
)
class BaseTestDeleteView(TestCase, WagtailTestUtils):
"""
Base test case for CRUD delete view.
"""
url_namespace = None
template_dir = None
model_class = None
def _get_instance(self):
"""
Stub method for extending class to return saved model class
instance.
:rtype: django.db.models.Model.
"""
        raise NotImplementedError(
'This method must be implemented by {0}'.format(
self.__class__.__name__
)
)
def setUp(self):
# Create the instance and login.
self.instance = self._get_instance()
self.login()
def test_get(self):
# Generate the response.
response = self.client.get(
reverse(
'{0}:delete'.format(self.url_namespace),
args=(self.instance.pk,)
)
)
# Check assertions.
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(
response,
'{0}/confirm_delete.html'.format(self.template_dir)
)
def test_delete(self):
# Generate the response.
response = self.client.post(
reverse(
'{0}:delete'.format(self.url_namespace),
args=(self.instance.pk,)
),
{'foo': 'bar'}
)
# Check assertions.
self.assertRedirects(
response,
reverse('{0}:index'.format(self.url_namespace))
)
self.assertFalse(
self.model_class.objects.filter(pk=self.instance.pk).exists()
)
class BaseTestChooserView(TestCase, WagtailTestUtils):
"""
Base test for chooser view.
"""
url_namespace = None
template_dir = None
model_class = None
def _create_sequential_instance(self, index):
"""
Stub method for extending class to create sequential
model instances.
:param index: the sequential index to use.
"""
        raise NotImplementedError(
'This method must be implemented by {0}'.format(
self.__class__.__name__
)
)
def setUp(self):
self.login()
def get(self, params=None):
if not params:
params = {}
return self.client.get(
reverse('{0}:choose'.format(self.url_namespace)),
params
)
def populate(self):
"""
        Populates several model class instances.
"""
for i in range(50):
self._create_sequential_instance(i)
def test_get(self):
# Generate the response.
response = self.get()
# Check assertions.
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(
response,
'{0}/chooser.html'.format(self.template_dir)
)
self.assertTemplateUsed(
response,
'{0}/results.html'.format(self.template_dir)
)
self.assertTemplateUsed(
response,
'{0}/chooser.js'.format(self.template_dir)
)
def test_search(self):
# Generate the response.
response = self.get({'q': 'keyword'})
# Check assertions.
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], 'keyword')
def test_pagination(self):
# Create model class instances.
self.populate()
# Generate the response.
response = self.get({'p': 2})
# Check assertions.
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(
response,
'{0}/results.html'.format(self.template_dir)
)
self.assertEqual(response.context['page_obj'].number, 2)
def test_pagination_invalid(self):
# Create model class instances.
self.populate()
# Generate the response.
response = self.get({'p': 'fake'})
# Check assertions.
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(
response,
'{0}/results.html'.format(self.template_dir)
)
self.assertEqual(response.context['page_obj'].number, 1)
def test_pagination_out_of_range(self):
# Create model class instances.
self.populate()
# Generate the response.
response = self.get({'p': 99999})
# Check assertions.
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(
response,
'{0}/results.html'.format(self.template_dir)
)
self.assertEqual(
response.context['page_obj'].number,
response.context['paginator'].num_pages
)
class BaseTestChosenView(TestCase, WagtailTestUtils):
url_namespace = None
template_dir = None
model_class = None
def _get_instance(self):
"""
Stub method for extending class to return saved model class
instance.
:rtype: django.db.models.Model.
"""
        raise NotImplementedError(
'This method must be implemented by {0}'.format(
self.__class__.__name__
)
)
def setUp(self):
# Create the instance and login.
self.instance = self._get_instance()
self.login()
def test_get(self):
# Generate the response.
response = self.client.get(
reverse(
'{0}:chosen'.format(self.url_namespace),
args=(self.instance.id,)
)
)
# Check assertions.
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(
response,
'{0}/chosen.js'.format(self.template_dir)
)
class BaseTestChooserCreateView(TestCase, WagtailTestUtils):
"""
    Base test case for the chooser's create view.
"""
url_namespace = None
template_dir = None
model_class = None
def _get_post_data(self):
"""
Stub method for extending class to return data dictionary
to create a new model instance on POST.
:rtype: dict.
"""
        raise NotImplementedError(
'This method must be implemented by {0}'.format(
self.__class__.__name__
)
)
def setUp(self):
self.login()
def test_get(self):
# Generate the response.
response = self.client.get(
reverse('{0}:choose'.format(self.url_namespace))
)
# Check assertions.
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(
response,
'{0}/chooser.html'.format(self.template_dir)
)
self.assertTemplateUsed(
response,
'{0}/results.html'.format(self.template_dir)
)
self.assertTemplateUsed(
response,
'{0}/chooser.js'.format(self.template_dir)
)
def test_post(self):
# Get POST data.
data = self._get_post_data()
# Generate the response.
response = self.client.post(
reverse('{0}:choose'.format(self.url_namespace)),
data
)
# Check assertions.
self.assertTemplateUsed(
response,
'{0}/chosen.js'.format(self.template_dir)
)
self.assertContains(
response,
'modal.respond'
)
self.assertTrue(
self.model_class.objects.filter(**data).exists()
)
|
python
|
from ect_def import add_dict
"""
Rules of Follow
1) if A is a nonterminal and start sign then FOLLOW(A) include $
2) if B -> aAb, b != epsilon then FOLLOW(A) include FIRST(b) without epsilon
3) if B -> aA or B -> aAb b=>epsilon then add FOLLOW(B) to FOLLOW(A)
"""
def getFollow(terminals:list, non_terminals:list, cfg:dict, first:dict, start_nonterminal:str) :
follow = {}
#rule 1
add_dict(follow, start_nonterminal, "$")
for non_terminal in non_terminals:
# rule 2
for cfg_result in cfg[non_terminal]:
splited = cfg_result.split(' ')
for index, word in enumerate(splited):
                # if word is a nonterminal and a next word exists
if word in non_terminals and index < len(splited)-1:
next_word = splited[index+1]
if next_word in terminals:
add_dict(follow, word, splited[index+1])
else :
if len(first[next_word]) == 1 and 'epsilon' in first[next_word]:
continue
else :
for first_elm in first[next_word]:
if first_elm != 'epsilon' and not(word in follow.keys() and first_elm in follow[word]):
add_dict(follow, word, first_elm)
# rule3
include_relation = {}
for non_terminal in non_terminals:
for cfg_result in cfg[non_terminal]:
splited = cfg_result.split(' ')
for index, word in enumerate(splited):
                # if word is a nonterminal and the last word, then FOLLOW(word) includes FOLLOW(non_terminal)
if word in non_terminals and index == len(splited) - 1:
if word == non_terminal:
continue
if word in include_relation.keys():
if not(non_terminal in include_relation[word]) :
add_dict(include_relation, word, non_terminal)
else :
add_dict(include_relation, word, non_terminal)
                # if word is a nonterminal, not the last word, and all following words can derive epsilon, then FOLLOW(word) includes FOLLOW(non_terminal)
elif word in non_terminals and index != len(splited) - 1:
possible_epsilon = True
for i in range(index+1,len(splited)):
if splited[i] in terminals:
possible_epsilon = False
continue
if not('epsilon' in first[splited[i]]):
possible_epsilon = False
if possible_epsilon == True:
add_dict(include_relation, word, non_terminal)
# add follow with include relation until no change
while(True):
change_count = 0
for key in include_relation.keys():
for value in include_relation[key]:
if not(value in follow.keys()):
continue
else:
for add_value in follow[value]:
if add_dict(follow, key, add_value):
change_count += 1
if change_count == 0 :
break
return follow
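# A minimal usage sketch (hypothetical grammar; assumes `add_dict` from
# ect_def appends `value` to `d[key]` and returns True when a new entry was
# added, matching how it is used above).
if __name__ == "__main__":
    terminals = ['a', 'b', '$']
    non_terminals = ['S', 'A']
    cfg = {'S': ['A a'], 'A': ['b', 'epsilon']}
    first = {'S': ['b', 'a'], 'A': ['b', 'epsilon']}
    # Expected: FOLLOW(S) = {$}, FOLLOW(A) = {a}
    print(getFollow(terminals, non_terminals, cfg, first, 'S'))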
|
python
|
# Copyright 2018-2020 Jakub Kuczys (https://github.com/jack1142)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Dict, List, Literal, Tuple
from ..schema import COG_KEYS_ORDER, REPO_KEYS_ORDER, SHARED_FIELDS_KEYS_ORDER
from ..typedefs import CogInfoDict
if TYPE_CHECKING:
from ..context import InfoGenMainCommand
__all__ = ("check_key_order",)
def check_key_order(ctx: InfoGenMainCommand) -> bool:
"""Temporary order checking, until strictyaml adds proper support for sorting."""
success = True
success &= _check_repo_info_and_shared_fields_key_order(ctx)
success &= _check_cog_names_alphaorder(ctx)
success &= _check_cog_info_key_order(ctx)
return success
def _check_repo_info_and_shared_fields_key_order(ctx: InfoGenMainCommand) -> bool:
to_check: Dict[Literal["repo", "shared_fields"], List[str]] = {
"repo": REPO_KEYS_ORDER,
"shared_fields": SHARED_FIELDS_KEYS_ORDER,
}
success = True
for key, order in to_check.items():
section = ctx.data[key]
original_keys = list(section.keys())
sorted_keys = sorted(section.keys(), key=order.index)
if original_keys != sorted_keys:
print(
"\033[93m\033[1mWARNING:\033[0m "
f"Keys in `{key}` section have wrong order - use this order: "
f"{', '.join(sorted_keys)}"
)
success = False
return success
def _check_cog_names_alphaorder(ctx: InfoGenMainCommand) -> bool:
original_cog_names = list(ctx.cogs.keys())
sorted_cog_names = sorted(ctx.cogs.keys())
if original_cog_names != sorted_cog_names:
print(
"\033[93m\033[1mWARNING:\033[0m "
"Cog names in `cogs` section aren't sorted. Use alphabetical order."
)
return False
return True
def _check_cog_info_key_order(ctx: InfoGenMainCommand) -> bool:
success = True
for pkg_name, cog_info in ctx.cogs.items():
# strictyaml breaks ordering of keys for optionals with default values
original_keys = list((k for k, v in cog_info.items() if v))
sorted_keys = sorted(
(k for k, v in cog_info.items() if v), key=COG_KEYS_ORDER.index
)
if original_keys != sorted_keys:
print(
"\033[93m\033[1mWARNING:\033[0m "
f"Keys in `cogs->{pkg_name}` section have wrong order"
f" - use this order: {', '.join(sorted_keys)}"
)
print(original_keys)
print(sorted_keys)
success = False
success &= _check_cog_info_collections_alphaorder(pkg_name, cog_info)
return success
def _check_cog_info_collections_alphaorder(
pkg_name: str, cog_info: CogInfoDict
) -> bool:
collections: Tuple[Literal["required_cogs", "requirements", "tags"], ...] = (
"required_cogs",
"requirements",
"tags",
)
success = True
for key in collections:
list_or_dict = cog_info[key]
if isinstance(list_or_dict, dict):
original_list = list(list_or_dict.keys())
else:
original_list = list_or_dict
sorted_list = sorted(original_list)
if original_list != sorted_list:
friendly_name = key.capitalize().replace("_", " ")
print(
"\033[93m\033[1mWARNING:\033[0m "
f"{friendly_name} for `{pkg_name}` cog aren't sorted."
" Use alphabetical order."
)
print(original_list)
print(sorted_list)
success = False
return success
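# A minimal illustration of the ordering check used above (hypothetical keys,
# independent of InfoGenMainCommand): keys are compared against their position
# in a canonical order list via `order.index`.
if __name__ == "__main__":
    order = ["name", "short", "description"]
    section = {"name": "X", "description": "Y", "short": "Z"}
    print(sorted(section.keys(), key=order.index))  # ['name', 'short', 'description']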
|
python
|
# Exercises on Lists, from Hashtag's Python Impressionador course
## 1. Revenue of the Best and Worst Month of the Year
# What was the sales value of the best month of the year?
# And the value of the worst month?
meses = ['jan', 'fev', 'mar', 'abr', 'mai', 'jun', 'jul', 'ago', 'set', 'out', 'nov', 'dez']
vendas_1sem = [25000, 29000, 22200, 17750, 15870, 19900]
vendas_2sem = [19850, 20120, 17540, 15555, 49051, 9650]
# Concatenating the two half-year lists
vendas = vendas_1sem + vendas_2sem
# Finding the highest and lowest values
maiorValor = max(vendas)
menorValor = min(vendas)
# Finding the month (index) of the highest and lowest values
melhorMes = vendas.index(maiorValor)
piorMes = vendas.index(menorValor)
## 2. Continuation
# Now relate the two lists to print 'The best month of the year was {} with {} in sales', and the same for the worst month.
# Also compute the total revenue for the year and how much the best month represented of that total.
# Note: for the total revenue you can use sum(lista), which adds up all items of a list
print(f'The best month of the year was {meses[melhorMes]} with {maiorValor} in sales, \nand the worst month of the year was {meses[piorMes]} with {menorValor} in sales.')
# Computing the total with the built-in sum() function
total = sum(vendas)
print(f'Total revenue was R$ {total:.2f}.')
# Percentage
percentual = (maiorValor / total)
print(f'The best month represents {percentual:.2%} of total sales.')
## 3. Create a list with the top 3 sales values of the year (without doing it "by eye")
# Hint: the remove method removes an item from a list.
top3 = []
# Copying the list with copy() so the positions relative to the months are preserved
lista_temp = vendas.copy()
# The highest value is simply the first entry of the top 3
# Loop to find the 3 highest values: append the max of the temporary list to top3, then remove it from the temporary list
x = 1
while x < 4:
    top3.append(max(lista_temp))
    lista_temp.remove(max(lista_temp))
    x += 1
# Relating the top values to the list of months
# A simple for loop over range(len(top3))
# Each top3 entry corresponds to a value in the original vendas list, so we can map each value's position to the matching month in meses
print('The months with the highest sales were:')
for i in range(0, len(top3)):
    print(f'{meses[vendas.index(top3[i])]} with {top3[i]}')
|
python
|
"""
Module to look for, and parse, Function and settings.
"""
import os
import json
from . import logger
def is_function(folder: str) -> bool:
return os.path.isfile("{}/function.json".format(folder))
def get_functions(path: str) -> list:
functions = []
for file in os.listdir(path):
candidate = "{}/{}".format(path, file)
if os.path.isdir(candidate):
if is_function(candidate):
functions.append((candidate, file))
return functions
def load_json_file(path: str) -> dict:
with open(path, "r") as file:
return json.load(file)
def load_function_settings(path: str) -> dict:
return load_json_file("{}/function.json".format(path))
def load_project_settings(path: str) -> dict:
return load_json_file("{}/host.json".format(path))
def load_project(path: str) -> dict:
try:
project_settings = load_project_settings(path)
except json.decoder.JSONDecodeError:
logger.error("Unable to parse host.json: invalid JSON.")
project_settings = {}
except FileNotFoundError:
logger.error("Unable to parse host.json: file not found.")
project_settings = {}
functions_settings = []
functions = get_functions(path)
for function in functions:
try:
functions_settings.append((function[1], load_function_settings(function[0])))
except json.decoder.JSONDecodeError:
logger.error("[{}] Unable to parse Function settings: invalid JSON.".format(function[1]))
return {"project": project_settings, "functions": functions_settings}
|
python
|
from django.contrib import admin
from django.apps import apps
models = apps.get_models()
# Register every installed model with the default admin site; note that
# `admin.register(model)` alone is a decorator factory and does not
# register anything by itself.
for model in models:
    try:
        admin.site.register(model)
    except admin.sites.AlreadyRegistered:
        pass
|
python
|
"""
To be filled in with official datajoint information soon
"""
from .connection import conn, Connection
|
python
|
"""
pipeline effects
"""
import sys
import abc
import enum
import logging
from itertools import zip_longest
from typing import Dict, Optional
from .actions import Action, SendOutputAction, CheckOutputAction
from .exceptions import CheckDelivery, Retry
from .utils import NamedSerializable, class_from_string
_registered_effects: Dict = {}
logger = logging.getLogger()
def register_effect(effect_cls):
"""Register effect in library.
Will raise exception if `name` already registered.
"""
if effect_cls.name in _registered_effects: # pragma: no cover
raise Exception(
"Effect with name %s already registered" % effect_cls.name
)
_registered_effects[effect_cls.name] = effect_cls
return effect_cls
def load_effect(data):
"""Load effect.
"""
cls = _registered_effects[data[0]]
return cls.load(data[1], data[2])
def get_class_instance(cls_name, args, kwargs):
"""Get instance from class name and arguments.
"""
cls = class_from_string(cls_name)
return cls(*args, **kwargs)
class EffectStatus(enum.Enum):
"""Route status.
"""
PENDING = 1
FINISHED = 2
FAILED = 3
class OutputStatus(enum.Enum):
"""Output status for message.
TODO: move to better place
"""
PENDING = 1
CHECK = 2
SUCCESS = 3
FAIL = 4
RETRY = 5
SKIP = 6
class Effect(NamedSerializable, abc.ABC):
"""Abstract pipeline effect.
Effect used in delivery pipeline generator. Pipeline return effects instead
of performing heavy operations. Any effect can be serialized and
transferred to the place of execution.
    The method `next_action` must be implemented on all derived classes and
must return an `Action` instance or `None` if no next action available for
this effect and it can be marked as applied by `MessageConsumer`.
"""
name: str
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
assert self.name, "Effect must define `name` property"
@abc.abstractmethod
def next_action(self, state) -> Optional[Action]:
"""Get next effect action.
Receive state from last `apply` call.
Return `Action` instance or `None` if no more actions available for
this effect.
"""
pass # pragma: no cover
@abc.abstractmethod
def apply(self, message):
"""Apply next action and return next state.
"""
pass # pragma: no cover
# pylint: disable=no-self-use
def serialize_state(self, state):
"""Serialize effect state.
"""
return state # pragma: no cover
def load_state(self, data):
"""Load serialized effect state.
"""
return data # pragma: no cover
def pretty(self, state): # pragma: no cover
"""Pretty print effect.
"""
return ''
def __eq__(self, other):
if not isinstance(other, self.__class__): # pragma: no cover
raise TypeError("Effect and %s can't be compared" % type(other))
return self.serialize() == other.serialize()
@register_effect
class SendEffect(Effect):
"""Effect: send message through outputs.
Accepts outputs as the args.
"""
name = 'send'
def next_action(self, state=None):
"""Next effect action.
"""
state = self.reset_state(state)
position = self.next_action_pos(state)
if position is None:
return None
selected_output = self.args[position]
if state[position] == OutputStatus.CHECK:
return CheckOutputAction(selected_output)
return SendOutputAction(selected_output)
def next_action_pos(self, state):
"""Next effect action position.
"""
state = self.reset_state(state, reset_pending=True)
selected_output = None
# search next pending backend
for i, (_, status) in enumerate(zip(self.args, state)):
if status == OutputStatus.PENDING:
selected_output = i
break
else:
for i, (_, status) in enumerate(zip(self.args, state)):
if status == OutputStatus.CHECK:
selected_output = i
break
return selected_output
def reset_state(self, state, reset_pending=False):
"""Reset state.
        `reset_pending=True` forces resetting all RETRY statuses to PENDING.
        TODO: also reset CHECK to CHECK_PENDING
        :param bool reset_pending: when True, reset RETRY statuses to PENDING
"""
if state is None or state == []:
# create default state with all backends pending
state = [OutputStatus.PENDING for b in self.args]
assert len(state) == len(self.args), "State and args length must match"
if reset_pending and OutputStatus.PENDING not in state:
for i, status in enumerate(state):
if status == OutputStatus.RETRY:
state[i] = OutputStatus.PENDING
return state
def apply(self, message):
"""Send message through next pending output.
Modifies message route. Return state.
"""
state = message.get_route_state(self)
state = self.reset_state(state)
position = self.next_action_pos(state)
action = self.next_action(state)
retry = message.get_route_retry(self)
try:
result = action.execute(message, retry)
if result is False: # ignore None
state[position] = OutputStatus.FAIL
else:
state[position] = OutputStatus.SUCCESS
except CheckDelivery:
state[position] = OutputStatus.CHECK
except Retry:
prev = message.get_route_retry(self)
message.set_route_retry(self, prev + 1)
state[position] = OutputStatus.RETRY
message.log.info("Delivery retried (%i)", prev + 1)
return state
def load_state(self, data):
if not data:
data = []
return [OutputStatus(status) for status in data]
def serialize_state(self, state):
if not state:
state = []
return [status.value for status in state]
def serialize_args(self):
return [b.serialize() for b in self.args]
@classmethod
def load_args(cls, args):
return [get_class_instance(*b) for b in args]
def pretty(self, state):
"""Pretty format effect.
"""
action_format = "{a.__class__.__name__} <{s}>"
if not state:
state = self.reset_state(state)
return '\n\t\t\t'.join([
action_format.format(a=a, s=s) for a, s in zip_longest(
self.args, state
)
])
# @register_effect
# class CallEffect(Effect):
# name = 'call'
# def next_effect(self, state):
# """Execute callable in message consumer.
# """
# return CallAction(self.args[0], *self.args[1:], **self.kwargs)
send = SendEffect
# call = CallEffect
for name, effect in _registered_effects.items():
setattr(sys.modules[__name__], name, effect)
|
python
|
import numpy as np
from . import backends
from importlib import import_module
class Predictor():
def __init__(self, model, config={}, backend=backends.backend()):
self.model = model
self.config = config
self.backend = backend
        assert model
self.postprocessors = []
postprocessors = self.config.get('postprocessors')
if postprocessors:
print('___ loading postprocessors ___')
for f in postprocessors:
full_function = list(f.keys())[0]
module_name, function_name = full_function.rsplit('.', 1)
parameters = f[full_function]
print(module_name, function_name, parameters)
mod = import_module(module_name)
met = getattr(mod, function_name)
self.postprocessors.append(
{'function': met, 'parameters': parameters})
def postprocessing(self, img):
for f in self.postprocessors:
            if isinstance(f['parameters'], list):
img = f['function'](img, *f['parameters'])
else:
img = f['function'](img, **f['parameters'])
return img
def predict(self, img):
prediction = self.backend.predict(self, img)
return self.postprocessing(prediction)
def batch_predict(self, img_batch):
prediction = self.backend.batch_predict(self, img_batch)
return self.postprocessing(prediction)
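# A minimal usage sketch (hypothetical model and backend): postprocessors are
# resolved by dotted path, so any importable function that takes the image as
# its first argument works, e.g. numpy.clip below.
if __name__ == "__main__":
    class _IdentityBackend:
        def predict(self, predictor, img):
            return img

    config = {'postprocessors': [{'numpy.clip': [0.0, 1.0]}]}
    predictor = Predictor(model=object(), config=config,
                          backend=_IdentityBackend())
    print(predictor.predict(np.array([-0.5, 0.3, 1.7])))  # [0.  0.3 1. ]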
|
python
|
MAX_ARRAY_COUNT = MAX_ROWS = 9
MAX_ARRAY_SUM = 45
COMPLETE_ARRAY = [1, 2, 3, 4, 5, 6, 7, 8, 9]
SUBGRIDS_BY_ROWS = [
[0, 0, 0, 1, 1, 1, 2, 2, 2],
[0, 0, 0, 1, 1, 1, 2, 2, 2],
[0, 0, 0, 1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4, 5, 5, 5],
[3, 3, 3, 4, 4, 4, 5, 5, 5],
[3, 3, 3, 4, 4, 4, 5, 5, 5],
[6, 6, 6, 7, 7, 7, 8, 8, 8],
[6, 6, 6, 7, 7, 7, 8, 8, 8],
[6, 6, 6, 7, 7, 7, 8, 8, 8],
]
def _filter_zeros(xs):
"""Return a list of numbers with 0s removed from the list."""
return list(filter(lambda x: x != 0, xs))
def _subgrid(r, c):
"""Given a row/col coordinate, identify which subgrid it's in."""
return SUBGRIDS_BY_ROWS[r][c]
def validate_row(numbers):
"""
Given a row of numbers, verify it could be a sudoku row.
Arguments:
- list of integers
Returns: n/a
Notes:
- A valid list
- must be 9 numbers.
- each number must be between 0-9.
- 0's can be repeated in the list.
- numbers 1-9 must be unique
- If a row is invalid, an exception is thrown.
"""
if type(numbers).__name__ != "list":
raise TypeError("Invalid argument, must be a list")
if len(numbers) != MAX_ARRAY_COUNT:
raise ValueError("Invalid array: too many numbers")
no_zeros = _filter_zeros(numbers)
if len(set(no_zeros)) != len(no_zeros):
raise ValueError("Invalid row: row has duplicate numbers")
if sum(numbers) > MAX_ARRAY_SUM:
raise ValueError("Invalid row: row total is too high")
# TODO: What if each "cell" was an object, that contained it's row, column,
# subgrid?
class Grid:
"""
Provide instances of sudoku board/grids.
The values in the board are stored in 3 attributes of the object. The
trade off, in theory, is by keeping the rows, columns, and subgrids
duplicated, it should be more efficient to check if a board is solved.
An alternative would be to store the board as a 2D array, and the concept
    of rows, columns, and subgrids can be calculated when checking for a cell's
    solution. However, given that each cell may have to be looked up multiple
times I opted to triplicate the data...which feels gross.
"""
def __init__(self):
self.grid_type = "grid"
self.rows = []
self.columns = [[], [], [], [], [], [], [], [], []]
self.subgrids = {}
for i in range(9):
self.subgrids[i] = []
def add_row(self, numbers):
"""
Add a row to the grid.
Arguments:
- list of positive integers, 1-9
Returns: n/a
Usage:
g.add_row([1, 2, 3, 4, 5, 6, 7, 8, 9])
g.add_row([1, 2, 3, 0, 0, 0, 7, 8, 9])
Notes:
- The list of numbers is validated and will be rejected if invalid.
- Must have 9 digits
- Can include multiple 0's for unknown cells
- Any numbers in the range 1-9 must be unique
- The grid is not checked to see if a row will make a board that's
impossible to solve.
"""
validate_row(numbers)
if len(self.rows) >= MAX_ROWS:
raise RuntimeError("Grid is full: no more rows can be added")
row_number = len(self.rows)
for col, val in enumerate(numbers):
self.columns[col].append(val)
self.subgrids[_subgrid(row_number, col)].append(val)
self.rows.append(numbers)
# TODO: should this print() or return an array of arrays?
def show(self):
"""Print the grid in its current state."""
for row in self.rows:
print(row)
def solve(self):
"""
Solve the sudoku puzzle.
Arguments: n/a
Returns: n/a
Usage:
g.solve()
Notes:
- This method will mutate the instance's attributes. As it finds
solutions to cells it will update accordingly.
- If a puzzle can't be solved (the puzzle is iterated through with
no changes), an exception is thrown to avoid an infinite loop.
"""
while self.solved() is False:
changes = 0
for r, row in enumerate(self.rows):
for c, _ in enumerate(row):
if self.rows[r][c] == 0:
result = self._solve_cell(r, c)
if result > 0:
self._update_cell(r, c, result)
changes += 1
if changes == 0:
raise Exception("Puzzle is unsolvable")
def solved(self):
"""
Check if puzzle is solved.
Arguments: n/a
Returns:
- bool, True or False
"""
for row in self.rows:
if sorted(row) != COMPLETE_ARRAY:
return False
for col in self.columns:
if sorted(col) != COMPLETE_ARRAY:
return False
for subgrid in self.subgrids:
if sorted(self.subgrids[subgrid]) != COMPLETE_ARRAY:
return False
return True
def _solve_cell(self, r, c):
"""
        Given a cell (row, column), try to identify its correct value.
Arguments:
- row, integer
- column, integer
Returns:
- value, integer
Details:
- Row and column correspond to a row and column in the puzzle.
- The check is to try and find a unique integer not yet used in
- the row
- the column
- and the subgrid
"""
subgrid = self.subgrids[_subgrid(r, c)]
row = self.rows[r]
col = self.columns[c]
available = set.difference(
set(COMPLETE_ARRAY), set(subgrid), set(row), set(col)
)
if len(available) == 1:
return available.pop()
return 0
# TODO: we store the same data 3 ways...I wonder if there's a way to do this
# just once to make updating easier?
def _update_cell(self, r, c, val):
"""
Given a row/col coordinate and value, update the grid with the value.
Arguments:
- row, integer
- column, integer
Returns: n/a
Details:
- Since the instance tracks rows, columns, and subgrids separately
this method is used to keep them updated so they're in sync.
"""
self.rows[r][c] = val
self.columns[c][r] = val
subgrid = self.subgrids[_subgrid(r, c)]
subgrid_index = (r % 3) * 3 + c % 3
subgrid[subgrid_index] = val
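# A minimal usage sketch: a grid with one blank per row, which the
# single-candidate solver above can always fill (the values come from a
# standard published sudoku solution).
if __name__ == "__main__":
    g = Grid()
    g.add_row([5, 3, 0, 6, 7, 8, 9, 1, 2])
    g.add_row([6, 7, 2, 1, 9, 0, 3, 4, 8])
    g.add_row([0, 9, 8, 3, 4, 2, 5, 6, 7])
    g.add_row([8, 5, 9, 7, 6, 1, 4, 0, 3])
    g.add_row([4, 2, 6, 8, 0, 3, 7, 9, 1])
    g.add_row([7, 1, 3, 9, 2, 4, 8, 5, 0])
    g.add_row([9, 6, 1, 0, 3, 7, 2, 8, 4])
    g.add_row([2, 0, 7, 4, 1, 9, 6, 3, 5])
    g.add_row([3, 4, 5, 2, 8, 6, 0, 7, 9])
    g.solve()
    g.show()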
|
python
|
import logging
from django import http
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.core.mail import EmailMultiAlternatives
from django.http import Http404, HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.template.loader import render_to_string
from django.urls import reverse
from flags.state import flag_enabled
from ..base.models import FeedbackURLConfig, MessagingLog, User
from ..base.utils import get_admin_email
from ..base.views import (
ReportAbuseView,
SendMessageView,
get_private_profiles,
get_profiles_data,
)
from ..localgroups.models import LocalGroup
from .forms import (
DeleteProfileForm,
EditProfileCareerForm,
EditProfileCauseAreasForm,
EditProfileCommunityForm,
EditProfileForm,
)
from .models import (
CauseArea,
ExpertiseArea,
GivingPledge,
Membership,
OrganisationalAffiliation,
Profile,
ProfileAnalyticsLog,
ProfileSlug,
)
def profile_detail_or_redirect(request, slug, first_visit=False):
slug_entry = get_object_or_404(ProfileSlug, slug=slug)
profile = slug_entry.content_object
if not (profile and request.user.has_perm("profiles.view_profile", profile)):
raise Http404("No profile exists with that slug.")
if slug_entry.redirect:
return redirect("profile", slug=profile.slug, permanent=True)
return render(
request, "eahub/profile.html", {"profile": profile, "first_visit": first_visit}
)
def profile_redirect_from_legacy_record(request, legacy_record):
user = request.user
profile = get_object_or_404(
Profile.objects.visible_to_user(user), legacy_record=legacy_record
)
assert user.has_perm("profiles.view_profile", profile)
return redirect("profile", slug=profile.slug, permanent=True)
@login_required
def my_profile(request, first_visit=False):
if not hasattr(request.user, "profile"):
raise http.Http404("user has no profile")
return profile_detail_or_redirect(
request, slug=request.user.profile.slug, first_visit=first_visit
)
@login_required
def my_profile_first_visit(request):
return my_profile(request, True)
class ReportProfileAbuseView(ReportAbuseView):
def profile(self):
return Profile.objects.get(slug=self.kwargs["slug"])
def get_type(self):
return "profile"
class SendProfileMessageView(SendMessageView):
def get_recipient(self):
profile = Profile.objects.get(slug=self.kwargs["slug"])
if profile is None:
raise Exception("Could not find profile")
return profile
def form_valid(self, form):
message = form.cleaned_data["your_message"]
recipient = self.get_recipient()
sender_name = form.cleaned_data["your_name"]
subject = f"{sender_name} wants to connect with {recipient.name}!"
sender_email_address = form.cleaned_data["your_email_address"]
feedback_url = FeedbackURLConfig.get_solo().site_url
admins_email = get_admin_email()
profile_edit_url = self.request.build_absolute_uri(reverse("edit_profile"))
txt_message = render_to_string(
"emails/message_profile.txt",
{
"sender_name": sender_name,
"recipient": recipient.name,
"message": message,
"admin_email": admins_email,
"feedback_url": feedback_url,
"profile_edit_url": profile_edit_url,
},
)
html_message = render_to_string(
"emails/message_profile.html",
{
"sender_name": sender_name,
"recipient": recipient.name,
"message": message,
"admin_email": admins_email,
"feedback_url": feedback_url,
"profile_edit_url": profile_edit_url,
},
)
email = EmailMultiAlternatives(
subject=subject,
body=txt_message,
from_email=admins_email,
to=[recipient.user.email],
reply_to=[sender_email_address],
)
email.attach_alternative(html_message, "text/html")
email.send()
log = MessagingLog(
sender_email=sender_email_address,
recipient_email=recipient.user.email,
recipient_type=MessagingLog.USER,
)
log.save()
messages.success(
self.request, "Your message to " + recipient.name + " has been sent"
)
return redirect(reverse("profile", args=([recipient.slug])))
def get(self, request, *args, **kwargs):
if not request.user.has_perm("profiles.message_users"):
raise PermissionDenied
recipient = self.get_recipient()
if not flag_enabled("MESSAGING_FLAG", request=request):
raise Http404("Messaging toggled off")
if recipient.get_can_receive_message():
return super().get(request, *args, **kwargs)
else:
raise Http404("Messaging not enabled for this user")
def post(self, request, *args, **kwargs):
if not request.user.has_perm("profiles.message_users"):
raise PermissionDenied
recipient = self.get_recipient()
if not flag_enabled("MESSAGING_FLAG", request=request):
raise Http404("Messaging toggled off")
if recipient.get_can_receive_message():
return super().post(request, *args, **kwargs)
else:
raise Http404("Messaging not enabled for this user")
@login_required
def edit_profile(request):
if not hasattr(request.user, "profile"):
raise http.Http404("user has no profile")
profile = Profile.objects.get(pk=request.user.profile.id)
if request.method == "POST":
form = EditProfileForm(
request.POST, request.FILES, instance=request.user.profile
)
if form.is_valid():
profile = form.save(commit=False)
profile = profile.geocode()
profile.save()
return redirect("my_profile")
else:
form = EditProfileForm(instance=request.user.profile)
opportunities = []
if profile.open_to_job_offers:
opportunities.append("job offers")
if profile.available_to_volunteer:
opportunities.append("volunteering opportunities")
if profile.available_as_speaker:
opportunities.append("speaking opportunities")
return render(
request,
"eahub/edit_profile.html",
{"form": form, "profile": profile, "opportunities": opportunities},
)
def reorder_cause_areas(causes):
return sorted(causes, key=lambda x: x[1].label)
@login_required
def edit_profile_cause_areas(request):
if not hasattr(request.user, "profile"):
raise http.Http404("user has no profile")
if request.method == "POST":
form = EditProfileCauseAreasForm(request.POST, instance=request.user.profile)
if form.is_valid():
profile = form.save(commit=False)
cause_areas = request.POST.getlist("cause_areas")
profile.cause_areas = cause_areas
giving_pledges = request.POST.getlist("giving_pledges")
profile.giving_pledges = giving_pledges
profile.save()
return redirect("my_profile")
else:
form = EditProfileCauseAreasForm(instance=request.user.profile)
return render(
request,
"eahub/edit_profile_cause_areas.html",
{
"form": form,
"profile": Profile.objects.get(pk=request.user.profile.id),
"cause_area_choices": reorder_cause_areas(CauseArea.choices()),
"giving_pledge_choices": GivingPledge.choices,
},
)
@login_required
def edit_profile_career(request):
if not hasattr(request.user, "profile"):
raise http.Http404("user has no profile")
if request.method == "POST":
form = EditProfileCareerForm(request.POST, instance=request.user.profile)
if form.is_valid():
profile = form.save(commit=False)
expertise_areas = request.POST.getlist("expertise_areas")
profile.expertise_areas = expertise_areas
career_interest_areas = request.POST.getlist("career_interest_areas")
profile.career_interest_areas = career_interest_areas
profile.save()
return redirect("my_profile")
else:
form = EditProfileCareerForm(instance=request.user.profile)
return render(
request,
"eahub/edit_profile_career.html",
{
"form": form,
"profile": Profile.objects.get(pk=request.user.profile.id),
"expertise_area_choices": ExpertiseArea.choices,
},
)
def reorder_orgs(orgs):
return sorted(orgs, key=lambda x: x[1].label)
@login_required
def edit_profile_community(request):
if not hasattr(request.user, "profile"):
raise http.Http404("user has no profile")
if request.method == "POST":
form = EditProfileCommunityForm(request.POST, instance=request.user.profile)
old_local_groups = [
group.name
for group in LocalGroup.objects.filter(
membership__profile=request.user.profile
)
]
if form.is_valid():
profile = form.save(commit=False)
profile.local_groups.clear()
organisational_affiliations = request.POST.getlist(
"organisational_affiliations"
)
profile.organisational_affiliations = [
int(x) for x in organisational_affiliations
]
profile.save()
group_affiliations = request.POST.getlist("local_groups")
local_groups = LocalGroup.objects.filter(id__in=group_affiliations)
for group in local_groups:
membership = Membership(profile=profile, local_group=group)
membership.save()
if old_local_groups != [x.name for x in local_groups.all()]:
log = ProfileAnalyticsLog()
log.profile = request.user.profile
log.action = "Update"
log.old_value = old_local_groups
log.new_value = [x.name for x in local_groups.all()]
log.field = "local_groups"
log.save()
return redirect("my_profile")
else:
form = EditProfileCommunityForm(instance=request.user.profile)
return render(
request,
"eahub/edit_profile_community.html",
{
"form": form,
"profile": Profile.objects.get(pk=request.user.profile.id),
"organisation_choices": reorder_orgs(OrganisationalAffiliation.choices()),
},
)
@login_required
def delete_profile(request):
if request.method == "POST":
logging.info(
"user_id={} email={} has deleted their account".format(
request.user.id, request.user.email
)
)
user = User.objects.get(id=request.user.id)
user.delete()
return redirect("account_logout")
else:
form = DeleteProfileForm()
return render(request, "eahub/delete_profile.html", {"form": form})
def profiles(request):
profiles_data = get_profiles_data(request.user)
private_profiles = get_private_profiles(request.user)
return render(
request,
"eahub/profiles.html",
{
"page_name": "Profiles",
"profiles": profiles_data["rows"],
"map_locations": {
"profiles": profiles_data["map_data"],
"private_profiles": private_profiles,
},
},
)
|
python
|
from django.urls import path
from . import views
app_name = 'kandidaturen'  # for URL namespacing
urlpatterns = [
path("", views.main_screen, name="homepage"),
path("erstellen", views.kandidaturErstellenView, name="erstellenView"),
path("erstellen/speichern", views.erstellen, name="erstellen"),
path("<int:kandidatur_id>/bearbeiten", views.kandidaturBearbeitenView, name="bearbeitenView"),
path("<int:kandidatur_id>/bearbeiten/speichern", views.speichern, name="speichern"),
path('ajax/laden', views.kandidatur_laden, name='kandidatur_laden'),
path("ajax/kandidaturen-loeschen", views.kandidaturen_loeschen, name="kandidaturen_loeschen"),
path('ajax/bereiche-laden', views.bereiche_laden, name='bereiche_laden'),
path('ajax/funktionen-laden', views.funktionen_laden, name='aemter_laden'),
path('ajax/funktionen-html-laden', views.funktionen_html_laden, name='aemter_html_laden'),
path('ajax/funktion-loeschen', views.funktion_loeschen, name='amt_loeschen'),
path('ajax/email-html-laden', views.email_html_laden, name='email_html_laden'),
path('ajax/email-loeschen', views.email_loeschen, name='email_loeschen'),
path('ajax/suchen', views.suchen, name="suchen"),
path('ajax/kandidatur-aufnehmen', views.kandidatur_aufnehmen, name="kandidatur_aufnehmen")
]
|
python
|
import pyfiglet
ascii_banner = pyfiglet.figlet_format("G o d s - e y e")
print(ascii_banner)
|
python
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""image"""
import numbers
import numpy as np
import mindspore.common.dtype as mstype
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops.primitive import constexpr
from mindspore._checkparam import Rel, Validator as validator
from .conv import Conv2d
from .container import CellList
from .pooling import AvgPool2d
from .activation import ReLU
from ..cell import Cell
__all__ = ['ImageGradients', 'SSIM', 'MSSSIM', 'PSNR', 'CentralCrop']
class ImageGradients(Cell):
r"""
Returns two tensors, the first is along the height dimension and the second is along the width dimension.
Assume an image shape is :math:`h*w`. The gradients along the height and the width are :math:`dy` and :math:`dx`,
respectively.
.. math::
dy[i] = \begin{cases} image[i+1, :]-image[i, :], &if\ 0<=i<h-1 \cr
0, &if\ i==h-1\end{cases}
dx[i] = \begin{cases} image[:, i+1]-image[:, i], &if\ 0<=i<w-1 \cr
0, &if\ i==w-1\end{cases}
Inputs:
- **images** (Tensor) - The input image data, with format 'NCHW'.
Outputs:
- **dy** (Tensor) - vertical image gradients, the same type and shape as input.
- **dx** (Tensor) - horizontal image gradients, the same type and shape as input.
Examples:
>>> net = nn.ImageGradients()
>>> image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mstype.int32)
>>> net(image)
[[[[2,2]
[0,0]]]]
[[[[1,0]
[1,0]]]]
"""
def __init__(self):
super(ImageGradients, self).__init__()
def construct(self, images):
check = _check_input_4d(F.shape(images), "images", self.cls_name)
images = F.depend(images, check)
batch_size, depth, height, width = P.Shape()(images)
if height == 1:
dy = P.Fill()(P.DType()(images), (batch_size, depth, 1, width), 0)
else:
dy = images[:, :, 1:, :] - images[:, :, :height - 1, :]
dy_last = P.Fill()(P.DType()(images), (batch_size, depth, 1, width), 0)
dy = P.Concat(2)((dy, dy_last))
if width == 1:
dx = P.Fill()(P.DType()(images), (batch_size, depth, height, 1), 0)
else:
dx = images[:, :, :, 1:] - images[:, :, :, :width - 1]
dx_last = P.Fill()(P.DType()(images), (batch_size, depth, height, 1), 0)
dx = P.Concat(3)((dx, dx_last))
return dy, dx
def _convert_img_dtype_to_float32(img, max_val):
"""convert img dtype to float32"""
    # Usually max_val is 1.0 or 255. Scale the pixel values if max_val > 1,
    # otherwise just cast.
ret = F.cast(img, mstype.float32)
max_val = F.scalar_cast(max_val, mstype.float32)
if max_val > 1.:
scale = 1. / max_val
ret = ret * scale
return ret
@constexpr
def _get_dtype_max(dtype):
"""get max of the dtype"""
np_type = mstype.dtype_to_nptype(dtype)
if issubclass(np_type, numbers.Integral):
dtype_max = np.float64(np.iinfo(np_type).max)
else:
dtype_max = 1.0
return dtype_max
@constexpr
def _check_input_4d(input_shape, param_name, func_name):
if len(input_shape) != 4:
raise ValueError(f"{func_name} {param_name} should be 4d, but got shape {input_shape}")
return True
@constexpr
def _check_input_filter_size(input_shape, param_name, filter_size, func_name):
_check_input_4d(input_shape, param_name, func_name)
validator.check(param_name + " shape[2]", input_shape[2], "filter_size", filter_size, Rel.GE, func_name)
validator.check(param_name + " shape[3]", input_shape[3], "filter_size", filter_size, Rel.GE, func_name)
@constexpr
def _check_input_dtype(input_dtype, param_name, allow_dtypes, cls_name):
validator.check_type_name(param_name, input_dtype, allow_dtypes, cls_name)
def _conv2d(in_channels, out_channels, kernel_size, weight, stride=1, padding=0):
return Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,
weight_init=weight, padding=padding, pad_mode="valid")
def _create_window(size, sigma):
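    """Build a normalized 2-D Gaussian window with shape (1, 1, size, size)."""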
x_data, y_data = np.mgrid[-size // 2 + 1:size // 2 + 1, -size // 2 + 1:size // 2 + 1]
x_data = np.expand_dims(x_data, axis=-1).astype(np.float32)
x_data = np.expand_dims(x_data, axis=-1) ** 2
y_data = np.expand_dims(y_data, axis=-1).astype(np.float32)
y_data = np.expand_dims(y_data, axis=-1) ** 2
sigma = 2 * sigma ** 2
g = np.exp(-(x_data + y_data) / sigma)
return np.transpose(g / np.sum(g), (2, 3, 0, 1))
def _split_img(x):
_, c, _, _ = F.shape(x)
img_split = P.Split(1, c)
output = img_split(x)
return output, c
def _compute_per_channel_loss(c1, c2, img1, img2, conv):
"""computes ssim index between img1 and img2 per single channel"""
dot_img = img1 * img2
mu1 = conv(img1)
mu2 = conv(img2)
mu1_sq = mu1 * mu1
mu2_sq = mu2 * mu2
mu1_mu2 = mu1 * mu2
sigma1_tmp = conv(img1 * img1)
sigma1_sq = sigma1_tmp - mu1_sq
sigma2_tmp = conv(img2 * img2)
sigma2_sq = sigma2_tmp - mu2_sq
sigma12_tmp = conv(dot_img)
sigma12 = sigma12_tmp - mu1_mu2
a = (2 * mu1_mu2 + c1)
b = (mu1_sq + mu2_sq + c1)
v1 = 2 * sigma12 + c2
v2 = sigma1_sq + sigma2_sq + c2
ssim = (a * v1) / (b * v2)
cs = v1 / v2
return ssim, cs
def _compute_multi_channel_loss(c1, c2, img1, img2, conv, concat, mean):
"""computes ssim index between img1 and img2 per color channel"""
split_img1, c = _split_img(img1)
split_img2, _ = _split_img(img2)
multi_ssim = ()
multi_cs = ()
for i in range(c):
ssim_per_channel, cs_per_channel = _compute_per_channel_loss(c1, c2, split_img1[i], split_img2[i], conv)
multi_ssim += (ssim_per_channel,)
multi_cs += (cs_per_channel,)
multi_ssim = concat(multi_ssim)
multi_cs = concat(multi_cs)
ssim = mean(multi_ssim, (2, 3))
cs = mean(multi_cs, (2, 3))
return ssim, cs
class SSIM(Cell):
r"""
Returns SSIM index between img1 and img2.
Its implementation is based on Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). `Image quality
assessment: from error visibility to structural similarity <https://ieeexplore.ieee.org/document/1284395>`_.
IEEE transactions on image processing.
.. math::
l(x,y)&=\frac{2\mu_x\mu_y+C_1}{\mu_x^2+\mu_y^2+C_1}, C_1=(K_1L)^2.\\
c(x,y)&=\frac{2\sigma_x\sigma_y+C_2}{\sigma_x^2+\sigma_y^2+C_2}, C_2=(K_2L)^2.\\
s(x,y)&=\frac{\sigma_{xy}+C_3}{\sigma_x\sigma_y+C_3}, C_3=C_2/2.\\
        SSIM(x,y)&=l*c*s\\&=\frac{(2\mu_x\mu_y+C_1)(2\sigma_{xy}+C_2)}{(\mu_x^2+\mu_y^2+C_1)(\sigma_x^2+\sigma_y^2+C_2)}.
Args:
max_val (Union[int, float]): The dynamic range of the pixel values (255 for 8-bit grayscale images).
Default: 1.0.
filter_size (int): The size of the Gaussian filter. Default: 11. The value must be greater than or equal to 1.
filter_sigma (float): The standard deviation of Gaussian kernel. Default: 1.5. The value must be greater than 0.
k1 (float): The constant used to generate c1 in the luminance comparison function. Default: 0.01.
k2 (float): The constant used to generate c2 in the contrast comparison function. Default: 0.03.
Inputs:
- **img1** (Tensor) - The first image batch with format 'NCHW'. It must be the same shape and dtype as img2.
- **img2** (Tensor) - The second image batch with format 'NCHW'. It must be the same shape and dtype as img1.
Outputs:
Tensor, has the same dtype as img1. It is a 1-D tensor with shape N, where N is the batch num of img1.
Examples:
>>> net = nn.SSIM()
>>> img1 = Tensor(np.random.random((1,3,16,16)), mindspore.float32)
>>> img2 = Tensor(np.random.random((1,3,16,16)), mindspore.float32)
>>> ssim = net(img1, img2)
[0.12174469]
"""
def __init__(self, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03):
super(SSIM, self).__init__()
validator.check_value_type('max_val', max_val, [int, float], self.cls_name)
validator.check_number('max_val', max_val, 0.0, Rel.GT, self.cls_name)
self.max_val = max_val
self.filter_size = validator.check_int(filter_size, 1, Rel.GE, 'filter_size', self.cls_name)
self.filter_sigma = validator.check_positive_float(filter_sigma, 'filter_sigma', self.cls_name)
self.k1 = validator.check_value_type('k1', k1, [float], self.cls_name)
self.k2 = validator.check_value_type('k2', k2, [float], self.cls_name)
window = _create_window(filter_size, filter_sigma)
self.conv = _conv2d(1, 1, filter_size, Tensor(window))
self.conv.weight.requires_grad = False
self.reduce_mean = P.ReduceMean()
self.concat = P.Concat(axis=1)
def construct(self, img1, img2):
_check_input_dtype(F.dtype(img1), "img1", [mstype.float32, mstype.float16], self.cls_name)
_check_input_filter_size(F.shape(img1), "img1", self.filter_size, self.cls_name)
P.SameTypeShape()(img1, img2)
dtype_max_val = _get_dtype_max(F.dtype(img1))
max_val = F.scalar_cast(self.max_val, F.dtype(img1))
max_val = _convert_img_dtype_to_float32(max_val, dtype_max_val)
img1 = _convert_img_dtype_to_float32(img1, dtype_max_val)
img2 = _convert_img_dtype_to_float32(img2, dtype_max_val)
c1 = (self.k1 * max_val) ** 2
c2 = (self.k2 * max_val) ** 2
ssim_ave_channel, _ = _compute_multi_channel_loss(c1, c2, img1, img2, self.conv, self.concat, self.reduce_mean)
loss = self.reduce_mean(ssim_ave_channel, -1)
return loss
def _downsample(img1, img2, op):
a = op(img1)
b = op(img2)
return a, b
class MSSSIM(Cell):
r"""
Returns MS-SSIM index between img1 and img2.
Its implementation is based on Wang, Zhou, Eero P. Simoncelli, and Alan C. Bovik. `Multiscale structural similarity
for image quality assessment <https://ieeexplore.ieee.org/document/1292216>`_.
Signals, Systems and Computers, 2004.
.. math::
l(x,y)&=\frac{2\mu_x\mu_y+C_1}{\mu_x^2+\mu_y^2+C_1}, C_1=(K_1L)^2.\\
c(x,y)&=\frac{2\sigma_x\sigma_y+C_2}{\sigma_x^2+\sigma_y^2+C_2}, C_2=(K_2L)^2.\\
s(x,y)&=\frac{\sigma_{xy}+C_3}{\sigma_x\sigma_y+C_3}, C_3=C_2/2.\\
        MSSSIM(x,y)&=l^{\alpha_M}*{\prod_{1\leq j\leq M} (c^{\beta_j}*s^{\gamma_j})}.
Args:
max_val (Union[int, float]): The dynamic range of the pixel values (255 for 8-bit grayscale images).
Default: 1.0.
        power_factors (Union[tuple, list]): Iterable of weights for each scale.
Default: (0.0448, 0.2856, 0.3001, 0.2363, 0.1333). Default values obtained by Wang et al.
filter_size (int): The size of the Gaussian filter. Default: 11.
filter_sigma (float): The standard deviation of Gaussian kernel. Default: 1.5.
k1 (float): The constant used to generate c1 in the luminance comparison function. Default: 0.01.
k2 (float): The constant used to generate c2 in the contrast comparison function. Default: 0.03.
Inputs:
- **img1** (Tensor) - The first image batch with format 'NCHW'. It must be the same shape and dtype as img2.
- **img2** (Tensor) - The second image batch with format 'NCHW'. It must be the same shape and dtype as img1.
Outputs:
Tensor, the value is in range [0, 1]. It is a 1-D tensor with shape N, where N is the batch num of img1.
Examples:
>>> net = nn.MSSSIM(power_factors=(0.033, 0.033, 0.033))
>>> img1 = Tensor(np.random.random((1,3,128,128)))
>>> img2 = Tensor(np.random.random((1,3,128,128)))
>>> msssim = net(img1, img2)
"""
def __init__(self, max_val=1.0, power_factors=(0.0448, 0.2856, 0.3001, 0.2363, 0.1333), filter_size=11,
filter_sigma=1.5, k1=0.01, k2=0.03):
super(MSSSIM, self).__init__()
validator.check_value_type('max_val', max_val, [int, float], self.cls_name)
validator.check_number('max_val', max_val, 0.0, Rel.GT, self.cls_name)
self.max_val = max_val
validator.check_value_type('power_factors', power_factors, [tuple, list], self.cls_name)
self.filter_size = validator.check_int(filter_size, 1, Rel.GE, 'filter_size', self.cls_name)
self.filter_sigma = validator.check_positive_float(filter_sigma, 'filter_sigma', self.cls_name)
self.k1 = validator.check_value_type('k1', k1, [float], self.cls_name)
self.k2 = validator.check_value_type('k2', k2, [float], self.cls_name)
window = _create_window(filter_size, filter_sigma)
self.level = len(power_factors)
self.conv = []
for i in range(self.level):
self.conv.append(_conv2d(1, 1, filter_size, Tensor(window)))
self.conv[i].weight.requires_grad = False
self.multi_convs_list = CellList(self.conv)
self.weight_tensor = Tensor(power_factors, mstype.float32)
self.avg_pool = AvgPool2d(kernel_size=2, stride=2, pad_mode='valid')
self.relu = ReLU()
self.reduce_mean = P.ReduceMean()
self.prod = P.ReduceProd()
self.pow = P.Pow()
self.pack = P.Pack(axis=-1)
self.concat = P.Concat(axis=1)
def construct(self, img1, img2):
_check_input_4d(F.shape(img1), "img1", self.cls_name)
_check_input_4d(F.shape(img2), "img2", self.cls_name)
_check_input_dtype(F.dtype(img1), 'img1', mstype.number_type, self.cls_name)
P.SameTypeShape()(img1, img2)
dtype_max_val = _get_dtype_max(F.dtype(img1))
max_val = F.scalar_cast(self.max_val, F.dtype(img1))
max_val = _convert_img_dtype_to_float32(max_val, dtype_max_val)
img1 = _convert_img_dtype_to_float32(img1, dtype_max_val)
img2 = _convert_img_dtype_to_float32(img2, dtype_max_val)
c1 = (self.k1 * max_val) ** 2
c2 = (self.k2 * max_val) ** 2
sim = ()
mcs = ()
for i in range(self.level):
sim, cs = _compute_multi_channel_loss(c1, c2, img1, img2,
self.multi_convs_list[i], self.concat, self.reduce_mean)
mcs += (self.relu(cs),)
img1, img2 = _downsample(img1, img2, self.avg_pool)
mcs = mcs[0:-1:1]
mcs_and_ssim = self.pack(mcs + (self.relu(sim),))
mcs_and_ssim = self.pow(mcs_and_ssim, self.weight_tensor)
ms_ssim = self.prod(mcs_and_ssim, -1)
loss = self.reduce_mean(ms_ssim, -1)
return loss
class PSNR(Cell):
r"""
Returns Peak Signal-to-Noise Ratio of two image batches.
It produces a PSNR value for each image in batch.
Assume inputs are :math:`I` and :math:`K`, both with shape :math:`h*w`.
:math:`MAX` represents the dynamic range of pixel values.
.. math::
MSE&=\frac{1}{hw}\sum\limits_{i=0}^{h-1}\sum\limits_{j=0}^{w-1}[I(i,j)-K(i,j)]^2\\
PSNR&=10*log_{10}(\frac{MAX^2}{MSE})
Args:
max_val (Union[int, float]): The dynamic range of the pixel values (255 for 8-bit grayscale images).
The value must be greater than 0. Default: 1.0.
Inputs:
- **img1** (Tensor) - The first image batch with format 'NCHW'. It must be the same shape and dtype as img2.
- **img2** (Tensor) - The second image batch with format 'NCHW'. It must be the same shape and dtype as img1.
Outputs:
Tensor, with dtype mindspore.float32. It is a 1-D tensor with shape N, where N is the batch num of img1.
Examples:
>>> net = nn.PSNR()
>>> img1 = Tensor(np.random.random((1,3,16,16)))
>>> img2 = Tensor(np.random.random((1,3,16,16)))
>>> psnr = net(img1, img2)
[7.8297315]
"""
def __init__(self, max_val=1.0):
super(PSNR, self).__init__()
validator.check_value_type('max_val', max_val, [int, float], self.cls_name)
validator.check_number('max_val', max_val, 0.0, Rel.GT, self.cls_name)
self.max_val = max_val
def construct(self, img1, img2):
_check_input_4d(F.shape(img1), "img1", self.cls_name)
_check_input_4d(F.shape(img2), "img2", self.cls_name)
P.SameTypeShape()(img1, img2)
dtype_max_val = _get_dtype_max(F.dtype(img1))
max_val = F.scalar_cast(self.max_val, F.dtype(img1))
max_val = _convert_img_dtype_to_float32(max_val, dtype_max_val)
img1 = _convert_img_dtype_to_float32(img1, dtype_max_val)
img2 = _convert_img_dtype_to_float32(img2, dtype_max_val)
mse = P.ReduceMean()(F.square(img1 - img2), (-3, -2, -1))
psnr = 10 * P.Log()(F.square(max_val) / mse) / F.scalar_log(10.0)
return psnr
@constexpr
def _raise_dims_rank_error(input_shape, param_name, func_name):
"""raise error if input is not 3d or 4d"""
raise ValueError(f"{func_name} {param_name} should be 3d or 4d, but got shape {input_shape}")
@constexpr
def _get_bbox(rank, shape, size_h, size_w):
"""get bbox start and size for slice"""
if rank == 3:
c, h, w = shape
else:
n, c, h, w = shape
bbox_h_start = int((float(h) - size_h) / 2)
bbox_w_start = int((float(w) - size_w) / 2)
bbox_h_size = h - bbox_h_start * 2
bbox_w_size = w - bbox_w_start * 2
if rank == 3:
bbox_begin = (0, bbox_h_start, bbox_w_start)
bbox_size = (c, bbox_h_size, bbox_w_size)
else:
bbox_begin = (0, 0, bbox_h_start, bbox_w_start)
bbox_size = (n, c, bbox_h_size, bbox_w_size)
return bbox_begin, bbox_size
class CentralCrop(Cell):
"""
    Crop the central region of the images with the central_fraction.
Args:
central_fraction (float): Fraction of size to crop. It must be float and in range (0.0, 1.0].
Inputs:
- **image** (Tensor) - A 3-D tensor of shape [C, H, W], or a 4-D tensor of shape [N, C, H, W].
Outputs:
Tensor, 3-D or 4-D float tensor, according to the input.
Examples:
>>> net = nn.CentralCrop(central_fraction=0.5)
>>> image = Tensor(np.random.random((4, 3, 4, 4)), mindspore.float32)
>>> output = net(image)
"""
def __init__(self, central_fraction):
super(CentralCrop, self).__init__()
validator.check_value_type("central_fraction", central_fraction, [float], self.cls_name)
self.central_fraction = validator.check_float_range(central_fraction, 0.0, 1.0, Rel.INC_RIGHT,
'central_fraction', self.cls_name)
self.slice = P.Slice()
def construct(self, image):
image_shape = F.shape(image)
rank = len(image_shape)
h, w = image_shape[-2], image_shape[-1]
        if rank not in (3, 4):
return _raise_dims_rank_error(image_shape, "image", self.cls_name)
if self.central_fraction == 1.0:
return image
size_h = self.central_fraction * h
size_w = self.central_fraction * w
bbox_begin, bbox_size = _get_bbox(rank, image_shape, size_h, size_w)
image = self.slice(image, bbox_begin, bbox_size)
return image
|
python
|