content (string, 0–894k characters) | type (2 classes) |
---|---|
from datetime import date
nascimento = int(input('qual o ano do seu nascimento: '))
anoatual = date.today().year
idade = anoatual - nascimento
if idade <= 9:
    print('Você tem {} anos, sua categoria é Mirim'.format(idade))
elif idade <= 14:
    print('Você tem {} anos, sua categoria é Infantil'.format(idade))
elif idade < 20:
    print('Você tem {} anos, sua categoria é Junior'.format(idade))
elif idade == 20:
    print('Você tem {} anos, sua categoria é Senior'.format(idade))
else:
    print('Você tem {} anos, sua categoria é Master'.format(idade))
| python |
# coding=utf-8
class Human(object):
def __init__(self, input_gender):
self.gender = input_gender
    def printGender(self):
        print(self.gender)
li_lei = Human('male')  # Here, 'male' is passed as the input_gender argument to __init__().
print(li_lei.gender)  # Compare this line's output with the next line's.
li_lei.printGender()  # Compare this line's output with the previous line's. | python |
from pytorch_grad_cam import GradCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM, EigenCAM, FullGrad
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
from pytorch_grad_cam.utils.image import show_cam_on_image
from torchvision.models import resnet50
import torch
model = resnet50(pretrained=True)
target_layers = [model.layer4[-1]]
# Placeholder: replace with a preprocessed image batch of shape (N, 3, H, W); see the sketch after this snippet.
input_tensor = None
# Note: input_tensor can be a batch tensor with several images!
# Construct the CAM object once, and then re-use it on many images:
cam = GradCAM(model=model, target_layers=target_layers, use_cuda=torch.cuda.is_available())
# You can also use it within a with statement, to make sure it is freed,
# In case you need to re-create it inside an outer loop:
# with GradCAM(model=model, target_layers=target_layers, use_cuda=args.use_cuda) as cam:
# ...
# We have to specify the target we want to generate
# the Class Activation Maps for.
# If targets is None, the highest scoring category
# will be used for every image in the batch.
# Here we use ClassifierOutputTarget, but you can define your own custom targets
# that are, for example, combinations of categories, or specific outputs in a non-standard model.
targets = [ClassifierOutputTarget(0)]
# You can also pass aug_smooth=True and eigen_smooth=True, to apply smoothing.
grayscale_cam = cam(input_tensor=input_tensor, targets=targets)
# In this example grayscale_cam has only one image in the batch:
grayscale_cam = grayscale_cam[0, :]
# show_cam_on_image expects the original image as a float32 array in [0, 1] (rgb_img below), not the normalized tensor:
visualization = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True) | python |
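# The snippet above leaves input_tensor as a placeholder and show_cam_on_image needs the
# original image as a float array in [0, 1]. A minimal preprocessing sketch, assuming a
# hypothetical image file "both.png" and standard torchvision transforms (not part of the
# original snippet):
import numpy as np
from PIL import Image
from torchvision import transforms

rgb_img = np.array(Image.open("both.png").convert("RGB"), dtype=np.float32) / 255.0
preprocess = transforms.Compose([
    transforms.ToTensor(),  # HWC float in [0, 1] -> CHW tensor, no rescaling for float input
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
input_tensor = preprocess(rgb_img).unsqueeze(0)  # shape (1, 3, H, W)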
#!/usr/bin/env python3
from email.message import EmailMessage
import smtplib, ssl
import getpass
message = EmailMessage()
sender = "[email protected]"
recipient = "[email protected]"
message['From'] = sender
message['To'] = recipient
message['Subject'] = 'Greetings from {} to {}!'.format(sender, recipient)
body = """Hey there!
I'm learning to send emails using Python!"""
message.set_content(body)
mail_server = smtplib.SMTP('localhost')
mail_server.send_message(message)
#mail_server.set_debuglevel(1)
#print(mail_pass)
#print(message)
| python |
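# The ssl and getpass imports above are only needed if you authenticate against a real SMTP
# server rather than a local relay. A hedged sketch of that variant; the host name and port
# below are placeholders, not values from the original snippet:
mail_pass = getpass.getpass("SMTP password: ")
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.example.com", 465, context=context) as secure_server:
    secure_server.login(sender, mail_pass)
    secure_server.send_message(message)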
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for tensorflow_probability.spinoffs.oryx.experimental.nn.normalization."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import random
from jax import test_util as jtu
import numpy as np
from oryx.core import state
from oryx.experimental.nn import normalization
class NormalizationTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self._seed = random.PRNGKey(0)
@parameterized.named_parameters(
('hwc', (0, 1), (7,), (1, 1, 7)),
('chw', (1, 2), (5,), (5, 1, 1)))
def test_spec(self, axis, param_shape, moving_shape):
key = self._seed
net_init = normalization.BatchNorm(axis)
in_shape = (5, 6, 7)
out_shape = net_init.spec(state.Shape(in_shape)).shape
net = net_init.init(key, state.Shape(in_shape))
self.assertEqual(out_shape, in_shape)
beta, gamma = net.params
self.assertEqual(param_shape, beta.shape)
self.assertEqual(param_shape, gamma.shape)
moving_mean, moving_var = net.state.moving_mean, net.state.moving_var
self.assertEqual(moving_shape, moving_mean.shape)
self.assertEqual(moving_shape, moving_var.shape)
@parameterized.named_parameters(
('center_scale', True, True),
('no_center', False, True),
('no_scale', True, False),
('no_center_no_scale', False, False))
def test_params(self, center, scale):
key = self._seed
net_init = normalization.BatchNorm(center=center, scale=scale)
in_shape = (5, 6, 7)
out_shape = net_init.spec(state.Shape(in_shape)).shape
net = net_init.init(key, state.Shape(in_shape))
self.assertEqual(out_shape, in_shape)
beta, gamma = net.params
if center:
self.assertEqual(beta.shape, (7,))
np.testing.assert_almost_equal(np.zeros_like(beta), beta)
else:
self.assertEqual(beta, ())
if scale:
self.assertEqual(gamma.shape, (7,))
np.testing.assert_almost_equal(np.ones_like(gamma), gamma)
else:
self.assertEqual(gamma, ())
def test_call_no_batch(self):
epsilon = 1e-5
axis = (0, 1)
net_rng, data_rng = random.split(self._seed)
net_init = normalization.BatchNorm(axis, epsilon=epsilon)
in_shape = (5, 6, 7)
net = net_init.init(net_rng, state.Shape(in_shape))
x = random.normal(data_rng, in_shape)
net_y = net(x)
np.testing.assert_allclose(x, net_y)
with self.assertRaises(ValueError):
net_y = net(x[None])
@parameterized.named_parameters(
('center_scale', True, True),
('no_center', False, True),
('no_scale', True, False),
('no_center_no_scale', False, False))
def test_call(self, center, scale):
epsilon = 1e-5
axis = (0, 1)
net_rng, data_rng = random.split(self._seed)
net_init = normalization.BatchNorm(axis, center=center, scale=scale)
in_shape = (5, 6, 7)
net = net_init.init(net_rng, state.Shape(in_shape))
beta, gamma = net.params
x = random.normal(data_rng, (10,) + in_shape)
batch_axis = (0,) + tuple(a + 1 for a in axis)
mean = np.mean(np.array(x), batch_axis, keepdims=True)[0]
var = np.var(np.array(x), batch_axis, keepdims=True)[0]
z = (x - mean) / np.sqrt(var + epsilon)
if center and scale:
y = gamma * z + beta
elif center:
y = z + beta
elif scale:
y = gamma * z
else:
y = z
net_y = jax.vmap(net)(x)
np.testing.assert_almost_equal(y, np.array(net_y), decimal=6)
def test_no_training(self):
epsilon = 1e-5
axis = (0, 1)
net_rng, data_rng = random.split(self._seed)
net_init = normalization.BatchNorm(axis, center=False, scale=False)
in_shape = (5, 6, 7)
net = net_init.init(net_rng, state.Shape(in_shape))
x = random.normal(data_rng, (4,) + in_shape)
z = x / np.sqrt(1.0 + epsilon)
y = jax.vmap(lambda x: net(x, training=False))(x)
np.testing.assert_almost_equal(z, np.array(y), decimal=6)
def test_updates_moving_mean_var(self):
axis = (0, 1)
net_rng, data_rng = random.split(self._seed)
net_init = normalization.BatchNorm(axis, momentum=0.9)
in_shape = (5, 6, 7)
net = net_init.init(net_rng, state.Shape(in_shape))
self.assertAlmostEqual(0.1, net.info.decay)
x = random.normal(data_rng, (4,) + in_shape)
batch_axis = (0,) + tuple(a + 1 for a in axis)
mean = np.mean(np.array(x), batch_axis, keepdims=True)[0]
var = np.var(np.array(x), batch_axis, keepdims=True)[0]
net_state = net.state
# Initial values
np.testing.assert_almost_equal(np.zeros_like(mean), net_state.moving_mean)
np.testing.assert_almost_equal(np.ones_like(var), net_state.moving_var)
# Update state (moving_mean, moving_var)
for _ in range(100):
net = jax.vmap(net.update, out_axes=None)(x)
# Final values
np.testing.assert_almost_equal(mean, net.state.moving_mean, decimal=4)
np.testing.assert_almost_equal(var, net.state.moving_var, decimal=4)
def test_check_grads(self):
axis = (0, 1, 2)
in_shape = (4, 5, 6, 7)
net_rng, data_rng = random.split(self._seed)
net_init = normalization.BatchNorm(axis)
net = net_init.init(net_rng, state.Shape(in_shape))
x = random.normal(data_rng, in_shape)
jtu.check_grads(net, (x,), 2)
def mse(x, y):
return jax.numpy.mean(jax.numpy.square(y - x))
def reconstruct_loss(net, x, **kwargs):
preds, net = jax.vmap(
lambda x: net.call_and_update(x, **kwargs), # pylint: disable=unnecessary-lambda
out_axes=(0, None))(x)
return mse(x, preds), net
class GradTest(absltest.TestCase):
def setUp(self):
super().setUp()
self._seed = random.PRNGKey(0)
def test_batch_norm_moving_vars_grads(self):
net_rng, data_rng = random.split(self._seed)
axis = (0, 1)
in_shape = (2, 2, 2)
network_init = normalization.BatchNorm(axis)
network = network_init.init(net_rng, state.Shape(in_shape))
grad_fn = jax.grad(reconstruct_loss, has_aux=True)
x0 = random.normal(data_rng, (2,) + in_shape)
grads, _ = grad_fn(network, x0)
grads_moving_mean, grads_moving_var = grads.state
np.testing.assert_almost_equal(np.zeros_like(grads_moving_mean),
grads_moving_mean)
np.testing.assert_almost_equal(np.zeros_like(grads_moving_var),
grads_moving_var)
def test_batch_norm(self):
net_rng, data_rng = random.split(self._seed)
axis = (0, 1)
in_shape = (2, 2, 2)
network_init = normalization.BatchNorm(axis)
initial_network = network_init.init(net_rng, state.Shape(in_shape))
grad_fn = jax.grad(reconstruct_loss, has_aux=True)
x0 = random.normal(data_rng, (2,) + in_shape)
# reconstruct_loss updates network state
initial_loss, network = reconstruct_loss(initial_network, x0)
# grad also updates network state
grads, new_network = grad_fn(network, x0)
self.assertGreater(initial_loss, 0.0)
# Make sure grad_fn updates the state.
self.assertGreater(mse(initial_network.state.moving_mean,
new_network.state.moving_mean),
0.0)
self.assertGreater(mse(initial_network.state.moving_var,
new_network.state.moving_var),
0.0)
final_network = new_network.replace(params=jax.tree_util.tree_multimap(
lambda w, g: w - 0.1 * g, network.params, grads.params))
final_loss, final_network = reconstruct_loss(final_network, x0)
self.assertLess(final_loss, initial_loss)
self.assertGreater(mse(new_network.state.moving_mean,
final_network.state.moving_mean), 0.0)
self.assertGreater(mse(new_network.state.moving_var,
final_network.state.moving_var), 0.0)
if __name__ == '__main__':
absltest.main()
| python |
import os
import sys
import math
import json
import numpy as np
from pycocotools.coco import COCO
import pickle
sys.path.insert(0,'..' )
from config import cfg
COCO_TO_OURS = [0, 15, 14, 17, 16, 5, 2, 6, 3, 7, 4, 11, 8, 12, 9, 13, 10]
def processing(ann_path, filelist_path, masklist_path, json_path, mask_dir):
coco = COCO(ann_path)
ids = list(coco.imgs.keys())
lists = []
filelist_fp = open(filelist_path, 'w')
masklist_fp = open(masklist_path, 'w')
for i, img_id in enumerate(ids):
ann_ids = coco.getAnnIds(imgIds=img_id)
img_anns = coco.loadAnns(ann_ids)
numPeople = len(img_anns)
name = coco.imgs[img_id]['file_name']
height = coco.imgs[img_id]['height']
width = coco.imgs[img_id]['width']
person_centers = []
info = dict()
info['filename'] = name
info['info'] = []
for p in range(numPeople):
if img_anns[p]['num_keypoints'] < 5 or img_anns[p]['area'] < 32 * 32:
continue
kpt = img_anns[p]['keypoints']
dic = dict()
# person center
person_center = [img_anns[p]['bbox'][0] + img_anns[p]['bbox'][2] / 2.0, img_anns[p]['bbox'][1] + img_anns[p]['bbox'][3] / 2.0]
scale = img_anns[p]['bbox'][3] / float(cfg.INPUT_SIZE)
            # skip this person if the distance to an existing person is too small
flag = 0
for pc in person_centers:
dis = math.sqrt((person_center[0] - pc[0]) * (person_center[0] - pc[0]) + (person_center[1] - pc[1]) * (person_center[1] - pc[1]))
if dis < pc[2] * 0.3:
flag = 1;
break
if flag == 1:
continue
dic['pos'] = person_center
dic['keypoints'] = np.zeros((18, 3)).tolist()
dic['scale'] = scale
for part in range(17):
dic['keypoints'][COCO_TO_OURS[part]][0] = kpt[part * 3]
dic['keypoints'][COCO_TO_OURS[part]][1] = kpt[part * 3 + 1]
                # visibility flag: 2 = visible, 1 = labeled but not visible, 0 = not labeled
dic['keypoints'][COCO_TO_OURS[part]][2] = kpt[part * 3 + 2]
# generate neck point based on LShoulder and RShoulder
dic['keypoints'][1][0] = (kpt[5 * 3] + kpt[6 * 3]) * 0.5
dic['keypoints'][1][1] = (kpt[5 * 3 + 1] + kpt[6 * 3 + 1]) * 0.5
if kpt[5 * 3 + 2] == 0 or kpt[6 * 3 + 2] == 0:
dic['keypoints'][1][2] = 0
else:
dic['keypoints'][1][2] = 1
info['info'].append(dic)
person_centers.append(np.append(person_center, max(img_anns[p]['bbox'][2], img_anns[p]['bbox'][3])))
if len(info['info']) > 0:
lists.append(info)
filelist_fp.write(name + '\n')
mask_all = np.zeros((height, width), dtype=np.uint8)
mask_miss = np.zeros((height, width), dtype=np.uint8)
flag = 0
for p in img_anns:
if p['iscrowd'] == 1:
mask_crowd = coco.annToMask(p)
temp = np.bitwise_and(mask_all, mask_crowd)
mask_crowd = mask_crowd - temp
flag += 1
continue
else:
mask = coco.annToMask(p)
mask_all = np.bitwise_or(mask, mask_all)
if p['num_keypoints'] <= 0:
mask_miss = np.bitwise_or(mask, mask_miss)
if flag < 1:
mask_miss = np.logical_not(mask_miss)
elif flag == 1:
mask_miss = np.logical_not(np.bitwise_or(mask_miss, mask_crowd))
mask_all = np.bitwise_or(mask_all, mask_crowd)
else:
raise Exception('crowd segments > 1')
        pickle.dump(mask_miss, open(os.path.join(mask_dir, name.split('.')[0] + '.npy'), 'wb'))
        masklist_fp.write(os.path.join(mask_dir, name.split('.')[0] + '.npy') + '\n')
        if i % 1000 == 0:
            print("Processed {} of {}".format(i, len(ids)))
masklist_fp.close()
filelist_fp.close()
fp = open(json_path, 'w')
fp.write(json.dumps(lists))
fp.close()
    print('done!')
if __name__ == '__main__':
processing(cfg.TRAIN_ANNO_PATH,
cfg.TRAIN_IMAGELIST_FILE,
cfg.TRAIN_MASKLIST_FILE,
cfg.TRAIN_KPTJSON_FILE,
cfg.TRAIN_MASK_PATH)
processing(cfg.TEST_ANNO_PATH,
cfg.TEST_IMAGELIST_FILE,
cfg.TEST_MASKLIST_FILE,
cfg.TEST_KPTJSON_FILE,
cfg.TEST_MASK_PATH)
| python |
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
SPEC = {
'settings': {
'build_gs_bucket': 'chromium-v8',
# WARNING: src-side runtest.py is only tested with chromium CQ builders.
# Usage not covered by chromium CQ is not supported and can break
# without notice.
'src_side_runtest_py': True,
},
'builders': {
'Linux - Future': {
'chromium_config': 'chromium',
'chromium_apply_config': [
'mb',
'ninja_confirm_noop',
'chrome_with_codecs'
],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'builder_tester',
'compile_targets': [
'chromium_swarm_tests',
],
'testing': {
'platform': 'linux',
},
'enable_swarming': True,
'checkout_dir': 'linux',
},
'Linux - Future (dbg)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb', 'ninja_confirm_noop'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 64,
},
'bot_type': 'builder_tester',
'testing': {
'platform': 'linux',
},
'enable_swarming': True,
'checkout_dir': 'linux',
},
'Linux V8 API Stability': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['v8_canary', 'with_branch_heads'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'builder_tester',
'compile_targets': [
'all',
],
'test_results_config': 'staging_server',
'testing': {
'platform': 'linux',
},
},
},
}
| python |
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from yaql.language import exceptions
import yaql.tests
class TestQueries(yaql.tests.TestCase):
def test_where(self):
data = [1, 2, 3, 4, 5, 6]
self.assertEqual([4, 5, 6], self.eval('$.where($ > 3)', data=data))
def test_select(self):
data = [1, 2, 3]
self.assertEqual([1, 4, 9], self.eval('$.select($ * $)', data=data))
def test_keyword_collection_access(self):
data = [{'a': 2}, {'a': 4}]
self.assertEqual([2, 4], self.eval('$.a', data=data))
self.assertEqual([2, 4], self.eval('$.select($).a', data=data))
def test_skip(self):
data = [1, 2, 3, 4]
self.assertEqual([2, 3, 4], self.eval('$.skip(1)', data=data))
def test_limit(self):
data = [1, 2, 3, 4]
self.assertEqual([1, 2], self.eval('$.limit(2)', data=data))
self.assertEqual([1, 2], self.eval('$.take(2)', data=data))
def test_append(self):
data = [1, 2]
self.assertEqual([1, 2, 3, 4], self.eval('$.append(3, 4)', data=data))
def test_complex_query(self):
data = [1, 2, 3, 4, 5, 6]
self.assertEqual(
[4],
self.eval('$.where($ < 4).select($ * $).skip(1).limit(1)',
data=data))
def test_distinct(self):
data = [1, 2, 3, 2, 4, 8]
self.assertEqual([1, 2, 3, 4, 8], self.eval('$.distinct()', data=data))
self.assertEqual([1, 2, 3, 4, 8], self.eval('distinct($)', data=data))
def test_distinct_structures(self):
data = [{'a': 1}, {'b': 2}, {'a': 1}]
self.assertEqual(
[{'a': 1}, {'b': 2}],
self.eval('$.distinct()', data=data))
def test_distinct_with_selector(self):
data = [['a', 1], ['b', 2], ['c', 1], ['d', 3], ['e', 2]]
self.assertCountEqual([['a', 1], ['b', 2], ['d', 3]],
self.eval('$.distinct($[1])', data=data))
self.assertCountEqual([['a', 1], ['b', 2], ['d', 3]],
self.eval('distinct($, $[1])', data=data))
def test_any(self):
self.assertFalse(self.eval('$.any()', data=[]))
self.assertTrue(self.eval('$.any()', data=[0]))
def test_all(self):
self.assertTrue(self.eval('$.all()', data=[]))
self.assertFalse(self.eval('$.all()', data=[1, 0]))
self.assertTrue(self.eval('$.all()', data=[1, 2]))
self.assertFalse(self.eval('$.all($ > 1)', data=[2, 1]))
self.assertTrue(self.eval('$.all($ > 1)', data=[2, 3]))
def test_enumerate(self):
data = [1, 2, 3]
self.assertEqual([[0, 1], [1, 2], [2, 3]],
self.eval('$.enumerate()', data=data))
self.assertEqual([[3, 1], [4, 2], [5, 3]],
self.eval('$.enumerate(3)', data=data))
self.assertEqual([[0, 1], [1, 2], [2, 3]],
self.eval('enumerate($)', data=data))
self.assertEqual([[3, 1], [4, 2], [5, 3]],
self.eval('enumerate($, 3)', data=data))
def test_concat(self):
data = [1, 2, 3]
self.assertEqual(
[1, 2, 3, 2, 4, 6],
self.eval('$.select($).concat($.select(2 * $))', data=data))
self.assertEqual(
[1, 2, 3, 2, 4, 6, 1, 2, 3],
self.eval('concat($, $.select(2 * $), $)', data=data))
def test_len(self):
data = [1, 2, 3]
self.assertEqual(3, self.eval('len($)', data=data))
self.assertEqual(3, self.eval('$.len()', data=data))
self.assertEqual(3, self.eval('$.count()', data=data))
self.assertRaises(
exceptions.FunctionResolutionError,
self.eval, 'count($)', data=data)
def test_sum(self):
data = range(4)
self.assertEqual(6, self.eval('$.sum()', data=data))
self.assertEqual(106, self.eval('$.sum(100)', data=data))
self.assertEqual(100, self.eval('[].sum(100)'))
def test_memorize(self):
generator_func = lambda: (i for i in range(3)) # noqa: E731
self.assertRaises(
TypeError,
self.eval, '$.len() + $.sum()', data=generator_func())
self.assertEqual(
6,
self.eval('let($.memorize()) -> $.len() + $.sum()',
data=generator_func()))
def test_first(self):
self.assertEqual(2, self.eval('list(2, 3).first()'))
self.assertEqual(4, self.eval('list(2, 3).select($ * 2).first()'))
self.assertIsNone(self.eval('list().first(null)'))
self.assertRaises(StopIteration, self.eval, 'list().first()')
self.assertEqual(99, self.eval('list().first(99)'))
def test_single(self):
self.assertEqual(2, self.eval('list(2).single()'))
self.assertRaises(StopIteration, self.eval, 'list().single()')
self.assertRaises(StopIteration, self.eval, 'list(1, 2).single()')
def test_last(self):
self.assertEqual(3, self.eval('list(2, 3).last()'))
self.assertEqual(6, self.eval('list(2, 3).select($ * 2).last()'))
self.assertIsNone(self.eval('list().last(null)'))
self.assertEqual(99, self.eval('list().last(99)'))
self.assertRaises(StopIteration, self.eval, 'list().last()')
def test_range(self):
self.assertEqual([0, 1], self.eval('range(2)'))
self.assertEqual([1, 2, 3], self.eval('range(1, 4)'))
self.assertEqual([4, 3, 2], self.eval('range(4, 1, -1)'))
def test_select_many(self):
self.assertEqual([0, 0, 1, 0, 1, 2],
self.eval('range(4).selectMany(range($))'))
def test_select_many_scalar(self):
# check that string is not interpreted as a sequence and that
# selectMany works when selector returns scalar
self.assertEqual(
['xx', 'xx'],
self.eval('range(2).selectMany(xx)'))
def test_order_by(self):
self.assertEqual(
[1, 2, 3, 4],
self.eval('$.orderBy($)', data=[4, 2, 1, 3]))
self.assertEqual(
[4, 3, 2, 1],
self.eval('$.orderByDescending($)', data=[4, 2, 1, 3]))
def test_order_by_multilevel(self):
self.assertEqual(
[[1, 0], [1, 5], [2, 2]],
self.eval(
'$.orderBy($[0]).thenBy($[1])',
data=[[2, 2], [1, 5], [1, 0]]))
self.assertEqual(
[[1, 5], [1, 0], [2, 2]],
self.eval(
'$.orderBy($[0]).thenByDescending($[1])',
data=[[2, 2], [1, 5], [1, 0]]))
self.assertEqual(
[[2, 2], [1, 0], [1, 5]],
self.eval(
'$.orderByDescending($[0]).thenBy($[1])',
data=[[2, 2], [1, 5], [1, 0]]))
self.assertEqual(
[[2, 2], [1, 5], [1, 0]],
self.eval(
'$.orderByDescending($[0]).thenByDescending($[1])',
data=[[2, 2], [1, 5], [1, 0]]))
def test_group_by(self):
data = {'a': 1, 'b': 2, 'c': 1, 'd': 3, 'e': 2}
self.assertCountEqual(
[
[1, [['a', 1], ['c', 1]]],
[2, [['b', 2], ['e', 2]]],
[3, [['d', 3]]]
],
self.eval('$.items().orderBy($[0]).groupBy($[1])', data=data))
self.assertCountEqual(
[[1, ['a', 'c']], [2, ['b', 'e']], [3, ['d']]],
self.eval('$.items().orderBy($[0]).groupBy($[1], $[0])',
data=data))
self.assertCountEqual(
[[1, 'ac'], [2, 'be'], [3, 'd']],
self.eval('$.items().orderBy($[0]).'
'groupBy($[1], $[0], $.sum())', data=data))
self.assertCountEqual(
[[1, ['a', 1, 'c', 1]], [2, ['b', 2, 'e', 2]], [3, ['d', 3]]],
self.eval('$.items().orderBy($[0]).'
'groupBy($[1],, $.sum())',
data=data))
self.assertCountEqual(
[[1, ['a', 1, 'c', 1]], [2, ['b', 2, 'e', 2]], [3, ['d', 3]]],
self.eval('$.items().orderBy($[0]).'
'groupBy($[1], aggregator => $.sum())',
data=data))
def test_group_by_old_syntax(self):
# Test the syntax used in 1.1.1 and earlier, where the aggregator
# function was passed the key as well as the value list, and returned
# the key along with the aggregated value. This ensures backward
# compatibility with existing expressions.
data = {'a': 1, 'b': 2, 'c': 1, 'd': 3, 'e': 2}
        self.assertCountEqual(
[[1, 'ac'], [2, 'be'], [3, 'd']],
self.eval('$.items().orderBy($[0]).'
'groupBy($[1], $[0], [$[0], $[1].sum()])', data=data))
        self.assertCountEqual(
[[1, ['a', 1, 'c', 1]], [2, ['b', 2, 'e', 2]], [3, ['d', 3]]],
self.eval('$.items().orderBy($[0]).'
'groupBy($[1],, [$[0], $[1].sum()])',
data=data))
        self.assertCountEqual(
[[1, ['a', 1, 'c', 1]], [2, ['b', 2, 'e', 2]], [3, ['d', 3]]],
self.eval('$.items().orderBy($[0]).'
'groupBy($[1], aggregator => [$[0], $[1].sum()])',
data=data))
def test_join(self):
self.assertEqual(
[[2, 1], [3, 1], [3, 2], [4, 1], [4, 2], [4, 3]],
self.eval('$.join($, $1 > $2, [$1, $2])', data=[1, 2, 3, 4]))
self.assertEqual(
[[1, 3], [1, 4], [2, 3], [2, 4]],
self.eval('[1,2].join([3, 4], true, [$1, $2])'))
def test_zip(self):
self.assertEqual(
[[1, 4], [2, 5]],
self.eval('[1, 2, 3].zip([4, 5])'))
self.assertEqual(
[[1, 4, 6], [2, 5, 7]],
self.eval('[1, 2, 3].zip([4, 5], [6, 7, 8])'))
def test_zip_longest(self):
self.assertEqual(
[[1, 4], [2, 5], [3, None]],
self.eval('[1, 2, 3].zipLongest([4, 5])'))
self.assertEqual(
[[1, 4, 6], [2, 5, None], [3, None, None]],
self.eval('[1, 2, 3].zipLongest([4, 5], [6])'))
self.assertEqual(
[[1, 4], [2, 5], [3, 0]],
self.eval('[1, 2, 3].zipLongest([4, 5], default => 0)'))
def test_repeat(self):
self.assertEqual(
[None, None],
self.eval('null.repeat(2)'))
self.assertEqual(
[1, 1, 1, 1, 1],
self.eval('1.repeat().limit(5)'))
def test_cycle(self):
self.assertEqual(
[1, 2, 1, 2, 1],
self.eval('[1, 2].cycle().take(5)'))
def test_take_while(self):
self.assertEqual(
[1, 2, 3],
self.eval('[1, 2, 3, 4, 5].takeWhile($ < 4)'))
def test_skip_while(self):
self.assertEqual(
[4, 5],
self.eval('[1, 2, 3, 4, 5].skipWhile($ < 4)'))
def test_index_of(self):
self.assertEqual(1, self.eval('[1, 2, 3, 2, 1].indexOf(2)'))
self.assertEqual(-1, self.eval('[1, 2, 3, 2, 1].indexOf(22)'))
def test_last_index_of(self):
self.assertEqual(3, self.eval('[1, 2, 3, 2, 1].lastIndexOf(2)'))
self.assertEqual(-1, self.eval('[1, 2, 3, 2, 1].lastIndexOf(22)'))
def test_index_where(self):
self.assertEqual(1, self.eval('[1, 2, 3, 2, 1].indexWhere($ = 2)'))
self.assertEqual(-1, self.eval('[1, 2, 3, 2, 1].indexWhere($ = 22)'))
def test_last_index_where(self):
self.assertEqual(3, self.eval('[1, 2, 3, 2, 1].lastIndexWhere($ = 2)'))
self.assertEqual(
-1, self.eval('[1, 2, 3, 2, 1].lastIndexWhere($ = 22)'))
def test_slice(self):
self.assertEqual(
[[1, 2], [3, 4], [5]],
self.eval('range(1, 6).slice(2)'))
self.assertEqual(
[[1, 2], [3, 4], [5]],
self.eval('[1,2,3,4,5].slice(2)'))
def test_split_where(self):
self.assertEqual(
[[], [2, 3], [5]],
self.eval('range(1, 6).splitWhere($ mod 3 = 1)'))
def test_split_at(self):
self.assertEqual(
[[1, 2], [3, 4, 5]],
self.eval('range(1, 6).splitAt(2)'))
def test_slice_where(self):
self.assertEqual(
[['a', 'a'], ['b'], ['a', 'a']],
self.eval('[a,a,b,a,a].sliceWhere($ != a)'))
def test_aggregate(self):
self.assertEqual(
'aabaa',
self.eval('[a,a,b,a,a].aggregate($1 + $2)'))
self.assertRaises(
TypeError,
self.eval, '[].aggregate($1 + $2)')
self.assertEqual(
1,
self.eval('[].aggregate($1 + $2, 1)'))
self.assertEqual(
'aabaa',
self.eval('[a,a,b,a,a].reduce($1 + $2)'))
self.assertEqual(
0,
self.eval('[].reduce(max($1, $2), 0)'))
def test_accumulate(self):
self.assertEqual(
['a', 'aa', u'aab', 'aaba', 'aabaa'],
self.eval('[a,a,b,a,a].accumulate($1 + $2)'))
self.assertEqual(
[1],
self.eval('[].accumulate($1 + $2, 1)'))
def test_default_if_empty(self):
self.assertEqual(
[1, 2],
self.eval('[].defaultIfEmpty([1, 2])'))
self.assertEqual(
[3, 4],
self.eval('[3, 4].defaultIfEmpty([1, 2])'))
self.assertEqual(
[1, 2],
self.eval('[].select($).defaultIfEmpty([1, 2])'))
self.assertEqual(
[3, 4],
self.eval('[3, 4].select($).defaultIfEmpty([1, 2])'))
def test_generate(self):
self.assertEqual(
[0, 2, 4, 6, 8],
self.eval('generate(0, $ < 10, $ + 2)'))
self.assertEqual(
[0, 4, 16, 36, 64],
self.eval('generate(0, $ < 10, $ + 2, $ * $)'))
def test_generate_many(self):
friends = {
'John': ['Jim'],
'Jim': ['Jay', 'Jax'],
'Jax': ['John', 'Jacob', 'Jonathan'],
'Jacob': ['Jonathan', 'Jenifer'],
}
self.assertEqual(
['John', 'Jim', 'Jay', 'Jax', 'Jacob', 'Jonathan', 'Jenifer'],
self.eval(
'generateMany(John, $data.get($, []), decycle => true)',
friends))
self.assertEqual(
['John', 'Jim', 'Jay', 'Jax', 'Jacob', 'Jonathan', 'Jenifer'],
self.eval(
'generateMany(John, $data.get($, []), '
'decycle => true, depthFirst => true)', friends))
self.assertEqual(
['Jay'],
self.eval('generateMany(Jay, $data.get($, []))', friends))
self.assertEqual(
['JAX', 'JOHN', 'JACOB', 'JONATHAN', 'JIM', 'JENIFER', 'JAY'],
self.eval(
'generateMany(Jax, $data.get($, []), $.toUpper(), '
'decycle => true)', friends))
def test_max(self):
self.assertEqual(
0,
self.eval('[].max(0)'))
self.assertRaises(
TypeError,
self.eval, '[].max()')
self.assertEqual(
234,
self.eval('[44, 234, 23].max()'))
def test_min(self):
self.assertEqual(
0,
self.eval('[].min(0)'))
self.assertRaises(
TypeError,
self.eval, '[].min()')
self.assertEqual(
23,
self.eval('[44, 234, 23].min()'))
def test_reverse(self):
self.assertEqual(
[9, 4, 1],
self.eval('range(1, 4).select($*$).reverse()'))
def test_merge_with(self):
dict1 = {'a': 1, 'b': 'x', 'c': [1, 2], 'x': {'a': 1}}
dict2 = {'d': 5, 'b': 'y', 'c': [2, 3], 'x': {'b': 2}}
self.assertEqual(
{'a': 1, 'c': [1, 2, 3], 'b': 'y', 'd': 5, 'x': {'a': 1, 'b': 2}},
self.eval(
'$.d1.mergeWith($.d2)',
data={'d1': dict1, 'd2': dict2}))
dict1 = {'a': 1, 'b': 2, 'c': [1, 2]}
dict2 = {'d': 5, 'b': 3, 'c': [2, 3]}
self.assertEqual(
{'a': 1, 'c': [1, 2, 2, 3], 'b': 3, 'd': 5},
self.eval(
'$.d1.mergeWith($.d2, $1 + $2)',
data={'d1': dict1, 'd2': dict2}))
self.assertEqual(
{'a': 1, 'b': 3, 'c': [2, 3], 'd': 5},
self.eval(
'$.d1.mergeWith($.d2, $1 + $2, maxLevels => 1)',
data={'d1': dict1, 'd2': dict2}))
self.assertEqual(
{'a': 1, 'b': 2, 'c': [1, 2, 3], 'd': 5},
self.eval(
'$.d1.mergeWith($.d2,, min($1, $2))',
data={'d1': dict1, 'd2': dict2}))
def test_is_iterable(self):
self.assertEqual(
True,
self.eval('isIterable([])'))
self.assertEqual(
True,
self.eval('isIterable([1,2])'))
self.assertEqual(
True,
self.eval('isIterable(set(1,2))'))
self.assertEqual(
False,
self.eval('isIterable(1)'))
self.assertEqual(
False,
self.eval('isIterable("foo")'))
self.assertEqual(
False,
self.eval('isIterable({"a" => 1})'))
def test_infinite_collections(self):
self.assertRaises(
exceptions.CollectionTooLargeException,
self.eval, 'len(list(sequence()))')
self.assertRaises(
exceptions.CollectionTooLargeException,
self.eval, 'list(sequence())')
self.assertRaises(
exceptions.CollectionTooLargeException,
self.eval, 'len(dict(sequence().select([$, $])))')
self.assertRaises(
exceptions.CollectionTooLargeException,
self.eval, 'dict(sequence().select([$, $]))')
self.assertRaises(
exceptions.CollectionTooLargeException,
self.eval, 'sequence()')
self.assertRaises(
exceptions.CollectionTooLargeException,
self.eval, 'set(sequence())')
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import sys
import rosunit
from mock import patch
from parameterized import parameterized, param
from fiware_ros_bridge.logging import getLogger
class TestGetLogger(unittest.TestCase):
@parameterized.expand([
param(logm='debugf', rosm='logdebug'),
param(logm='infof', rosm='loginfo'),
param(logm='warnf', rosm='logwarn'),
param(logm='errorf', rosm='logerr'),
param(logm='fatalf', rosm='logfatal'),
])
@patch('fiware_ros_bridge.logging.rospy')
def test_log_wo_params(self, mocked_rospy, logm, rosm):
name = 'foo'
message = 'test message'
log_message = '[{name}:{caller}] {message}'.format(
name=name,
caller=self.__class__.__name__ + '.' + sys._getframe().f_code.co_name,
message=message,
)
logger = getLogger(name)
assert logger.name == name
getattr(logger, logm)(message)
getattr(mocked_rospy, rosm).assert_called_once_with(log_message)
@parameterized.expand([
param(logm='debugf', rosm='logdebug'),
param(logm='infof', rosm='loginfo'),
param(logm='warnf', rosm='logwarn'),
param(logm='errorf', rosm='logerr'),
param(logm='fatalf', rosm='logfatal'),
])
@patch('fiware_ros_bridge.logging.rospy')
def test_log_w_params(self, mocked_rospy, logm, rosm):
name = 'foo'
message = 'test message'
arg0 = 'arg0'
arg1 = 'arg1'
kwargs0 = 'kwargs0'
kwargs1 = 'kwargs1'
log_message = '[{name}:{caller}] {message}, {arg1}, {kwargs0}, {arg0}, {kwargs1}'.format(
name=name,
caller=self.__class__.__name__ + '.' + sys._getframe().f_code.co_name,
message=message,
arg0=arg0,
arg1=arg1,
kwargs0=kwargs0,
kwargs1=kwargs1,
)
logger = getLogger(name)
assert logger.name == name
getattr(logger, logm)(message + ', {1}, {kwargs0}, {0}, {kwargs1}', arg0, arg1, kwargs1=kwargs1, kwargs0=kwargs0)
getattr(mocked_rospy, rosm).assert_called_once_with(log_message)
if __name__ == '__main__':
rosunit.unitrun('fiware_ros_bridge', 'test_logging', TestGetLogger)
| python |
from sims4.tuning.tunable import HasTunableSingletonFactory, AutoFactoryInit, OptionalTunable, TunableVariant
from ui.ui_dialog import UiDialogOk, UiDialogOkCancel
import enum
import services
class SituationTravelRequestType(enum.Int):
ALLOW = ...
CAREER_EVENT = ...
DISALLOW = ...
class _SituationTravelRequestDisallow(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'dialog': OptionalTunable(description='\n If enabled, show a dialog informing the player of the travel\n prohibition. If disabled, silently fail.\n ', tunable=UiDialogOk.TunableFactory(description='\n The dialog to show when an incoming request is denied.\n '))}
def __call__(self, user_facing_situation, travel_situation_type, travel_request_fn, **kwargs):
if self.dialog is not None:
dialog = self.dialog(services.active_sim_info())
dialog.show_dialog()
@property
def restrict(self):
return SituationTravelRequestType.DISALLOW
class _SituationTravelRequestAllow(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'dialog': OptionalTunable(description='\n If enabled, display a prompt requiring player confirmation. If\n disabled, immediately end this situation and allow the travel\n request to go through.\n ', tunable=UiDialogOkCancel.TunableFactory())}
def __call__(self, user_facing_situation, travel_situation_type, travel_request_fn, **kwargs):
if self.dialog is None:
return travel_request_fn()
def on_response(dialog):
if dialog.accepted:
travel_request_fn()
dialog = self.dialog(services.active_sim_info())
dialog.show_dialog(on_response=on_response)
@property
def restrict(self):
return SituationTravelRequestType.ALLOW
class TunableSituationTravelRequestBehaviorVariant(TunableVariant):
def __init__(self, *args, **kwargs):
super().__init__(*args, disallow=_SituationTravelRequestDisallow.TunableFactory(), allow=_SituationTravelRequestAllow.TunableFactory(), default='disallow', **kwargs)
| python |
"""
Intersecting Linked Lists
Given two singly linked lists that intersect at some point, find the intersecting node. The lists are non-cyclical.
In this example, assume nodes with the same value are the exact same node objects.
Input: 3 -> 7 -> 8 -> 10, 99 -> 1 -> 8 -> 10
Output: 8
=========================================
Find the longer linked list and advance its pointer by the length difference (so both lists have the same number of remaining nodes).
After that, move both pointers in lockstep and compare the nodes.
Time Complexity: O(N + M)
Space Complexity: O(1)
"""
############
# Solution #
############
# import ListNode class from ll_helpers.py
from ll_helpers import ListNode
def find_intersecting_node(ll1, ll2):
# count how many nodes contains the first ll
count1 = 0
temp1 = ll1
while temp1 is not None:
count1 += 1
temp1 = temp1.next
# count how many nodes contains the second ll
count2 = 0
temp2 = ll2
while temp2 is not None:
count2 += 1
temp2 = temp2.next
# move only one of the lls for the difference
m = min(count1, count2)
for i in range(count1 - m):
ll1 = ll1.next
for i in range(count2 - m):
ll2 = ll2.next
# find the intersecting node
intersect = None
while ll1 is not None:
# if the values are different, this is not the intersecting node
if ll1.val != ll2.val:
intersect = None
else:
            # if the values are equal and no intersecting node has been found yet,
            # then this is the intersecting node
            if intersect is None:
intersect = ll1
ll1 = ll1.next
ll2 = ll2.next
return intersect
###########
# Testing #
###########
# import build_ll method from ll_helpers.py
from ll_helpers import build_ll
# Test 1
# Correct result => 8
ll1 = build_ll([3, 7, 8, 10])
ll2 = build_ll([1, 8, 10])
print(find_intersecting_node(ll1, ll2).val)
| python |
from setuptools import setup
from torch.utils.cpp_extension import CUDAExtension, BuildExtension
setup(name='syncbn_gpu',
ext_modules=[CUDAExtension('syncbn_gpu', ['syncbn_cuda.cpp', 'syncbn_cuda_kernel.cu'])],
cmdclass={'build_ext': BuildExtension}) | python |
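# As an alternative to building the extension ahead of time with setup.py, the same sources
# can be JIT-compiled at import time with torch.utils.cpp_extension.load. This is a sketch of
# that workflow, not part of the original setup script:
from torch.utils.cpp_extension import load

syncbn_gpu = load(name='syncbn_gpu',
                  sources=['syncbn_cuda.cpp', 'syncbn_cuda_kernel.cu'],
                  verbose=True)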
# -*- coding: utf-8 -*-
class MetadataError(Exception):
pass
class CopyError(RuntimeError):
pass
def err_contains_group(path):
raise ValueError('path %r contains a group' % path)
def err_contains_array(path):
raise ValueError('path %r contains an array' % path)
def err_array_not_found(path):
raise ValueError('array not found at path %r' % path)
def err_group_not_found(path):
raise ValueError('group not found at path %r' % path)
def err_path_not_found(path):
raise ValueError('nothing found at path %r' % path)
def err_bad_compressor(compressor):
raise ValueError('bad compressor; expected Codec object, found %r' %
compressor)
def err_fspath_exists_notdir(fspath):
raise ValueError('path exists but is not a directory: %r' % fspath)
def err_read_only():
raise PermissionError('object is read-only')
def err_boundscheck(dim_len):
raise IndexError('index out of bounds for dimension with length {}'
.format(dim_len))
def err_negative_step():
raise IndexError('only slices with step >= 1 are supported')
def err_too_many_indices(selection, shape):
raise IndexError('too many indices for array; expected {}, got {}'
.format(len(shape), len(selection)))
def err_vindex_invalid_selection(selection):
raise IndexError('unsupported selection type for vectorized indexing; only '
'coordinate selection (tuple of integer arrays) and mask selection '
'(single Boolean array) are supported; got {!r}'.format(selection))
| python |
# Write a function to swap two numbers in place (that is, without temporary variables).
# Hint 491: Try picturing the two numbers, a and b, on a number line.
# Hint 715: Let diff be the difference between a and b. Can you use diff in some way? Then can you get rid of this temporary variable?
# Hint 736: You could also try using XOR.
def swap(numberA, numberB):
numberA = numberA ^ numberB
numberB = numberA ^ numberB
numberA = numberA ^ numberB
return (numberA, numberB)
print(swap(20,10))
# Solution: swap using XOR. First store A ^ B in A (A = A ^ B). Then apply XOR again and store it in B (B = A ^ B),
# which moves the original A into B. Finally apply XOR once more and store it in A (A = A ^ B), which recovers the original B in A. | python |
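# Hint 715 suggests an arithmetic variant of the same idea. A sketch of that approach
# (fine for Python integers; in fixed-width languages beware of overflow):
def swap_diff(a, b):
    a = a + b  # a now holds the sum of both values
    b = a - b  # b = (a + b) - b, i.e. the original a
    a = a - b  # a = (a + b) - original a, i.e. the original b
    return (a, b)

print(swap_diff(20, 10))  # expected: (10, 20)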
msg = ['We see immediately that one needs little information to begin to break down the process.','An enciphering-deciphering machine (in general outline) of my invention has been sent to your organization.','The significance of this general conjecture, assuming its truth, is easy to see. It means that it may be feasible to design ciphers that are effectively unbreakable.','If qualified opinions incline to believe in the exponential conjecture, then I think we cannot afford not to make use of it.']
for item in msg:
print (len(item))
| python |
"""
Sazonov, S. Yu., Ostriker, J. P., & Sunyaev, R. A. 2004, MNRAS, 347, 144
"""
import numpy as np
# Parameters for the Sazonov & Ostriker AGN template
_Alpha = 0.24
_Beta = 1.60
_Gamma = 1.06
_E_1 = 83e3
_K = 0.0041
_E_0 = (_Beta - _Alpha) * _E_1
_A = np.exp(2e3 / _E_1) * 2e3**_Alpha
_B = ((_E_0**(_Beta - _Alpha)) \
* np.exp(-(_Beta - _Alpha))) / \
(1.0 + (_K * _E_0**(_Beta - _Gamma)))
# Normalization constants to make the SOS04 spectrum continuous.
_SX_Normalization = 1.0
_UV_Normalization = _SX_Normalization * ((_A * 2e3**-_Alpha) * \
np.exp(-2e3 / _E_1)) / ((1.2 * 2e3**-1.7) * np.exp(2000.0 / 2000.))
_IR_Normalization = _UV_Normalization * ((1.2 * 10**-1.7) \
* np.exp(10.0 / 2e3)) / (1.2 * 159 * 10**-0.6)
_HX_Normalization = _SX_Normalization * (_A * _E_0**-_Alpha * \
np.exp(-_E_0 / _E_1)) / (_A * _B * (1.0 + _K * _E_0**(_Beta - _Gamma)) * \
_E_0**-_Beta)
def Spectrum(E, t=0.0, **kwargs):
"""
Broadband quasar template spectrum.
References
----------
Sazonov, S., Ostriker, J.P., & Sunyaev, R.A. 2004, MNRAS, 347, 144.
"""
op = (E < 10)
uv = (E >= 10) & (E < 2e3)
xs = (E >= 2e3) & (E < _E_0)
xh = (E >= _E_0) & (E < 4e5)
if type(E) in [int, float]:
if op:
F = _IR_Normalization * 1.2 * 159 * E**-0.6
elif uv:
F = _UV_Normalization * 1.2 * E**-1.7 * np.exp(E / 2000.0)
elif xs:
F = _SX_Normalization * _A * E**-_Alpha * np.exp(-E / _E_1)
elif xh:
F = _HX_Normalization * _A * _B * (1.0 + _K * \
E**(_Beta - _Gamma)) * E**-_Beta
else:
F = 0
else:
F = np.zeros_like(E)
F += op * _IR_Normalization * 1.2 * 159 * E**-0.6
F += uv * _UV_Normalization * 1.2 * E**-1.7 * np.exp(E / 2000.0)
F += xs * _SX_Normalization * _A * E**-_Alpha * np.exp(-E / _E_1)
F += xh * _HX_Normalization * _A * _B * (1.0 + _K * \
E**(_Beta - _Gamma)) * E**-_Beta
return E * F
| python |
import sympy as sp
import numpy as np
import pickle
class SymbolicRateMatrixArrhenius(sp.Matrix):
"""
Symbolic representation of Arrhenius process rate matrix.
"""
class Symbols:
@classmethod
def _barrier_element_symbol(cls, i, j):
if i == j:
return 0
return sp.symbols('B_%d%d' % (i + 1, j + 1), real=True)
def __init__(self, N):
self.E_i = sp.symbols('E_1:%d' % (N + 1), real=True)
self.B_ij = sp.Matrix(N, N, self._barrier_element_symbol)
self.T = sp.symbols('T', real=True)
@classmethod
def _create_elements(cls, N):
symbols = cls.Symbols(N)
def create_symbolic_rate_matrix_element(i, j):
if i == j:
return 0
return sp.exp(- (symbols.B_ij[i, j] - symbols.E_i[j]) / symbols.T)
rate_matrix_symbolic = sp.Matrix(N, N, create_symbolic_rate_matrix_element)
# Set each diagonal element as minus the sum of the other elements in its column (ensures Detailed Balance)
rate_matrix_symbolic -= sp.diag(*np.sum(rate_matrix_symbolic, axis=0))
return rate_matrix_symbolic, symbols
def __new__(cls, N):
"""
Parameters
----------
N : int
Number of states.
"""
elements, symbols = cls._create_elements(N)
self = super().__new__(cls, elements)
self.symbols = symbols
return self
def subs_symbols(self, energies=None, barriers=None, temperature=None):
"""
Return a new rate matrix with subs applied to each entry.
Parameters
----------
energies : 1-D array or sequence of float
Energies of the states of the Arrhenius process, in ascending order.
barriers : 2-D array
Matrix of energy barriers between states.
temperature : float
Temperature.
Returns
-------
new : SymbolicRateMatrixArrhenius
New instance of RateMatrixArrhenius with subs applied.
"""
subs_dict = {}
if energies is not None:
subs_dict.update(zip(self.symbols.E_i, energies))
if barriers is not None:
subs_dict.update(zip(np.ravel(self.symbols.B_ij), np.ravel(barriers)))
del subs_dict[0]
if temperature is not None:
subs_dict.update({self.symbols.T: temperature})
expr = self.subs(subs_dict)
if not expr.free_symbols:
expr = np.array(expr).astype(np.float64)
return expr
def lambdify(self, symmetric_barriers=False):
params = (self.symbols.T,) + self.symbols.E_i
if symmetric_barriers:
barriers_subs = dict(zip(np.ravel(np.triu(self.symbols.B_ij.T)),
np.ravel(np.triu(self.symbols.B_ij))))
barriers_free_symbols = set(barriers_subs.values())
expr = self.subs(barriers_subs)
else:
barriers_free_symbols = set(self.symbols.B_ij.values())
expr = self
params += tuple(filter(lambda b: b in barriers_free_symbols, self.symbols.B_ij.values()))
return sp.lambdify(params, expr)
class _SymbolicThreeStateEigensystem:
FILE_NAME_EIGENSYSTEM = 'three_state_eigensystem_symbolic.pickle'
@classmethod
def _file_path(cls):
import os
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
return os.path.join(__location__, cls.FILE_NAME_EIGENSYSTEM)
@classmethod
def _save_eigensystem(cls):
r_sym = SymbolicRateMatrixArrhenius(3)
eigensystem_right = r_sym.eigenvects()
eigensystem_left = r_sym.T.eigenvects()
eigenvalues, _, V = zip(*eigensystem_right)
_, _, U = zip(*eigensystem_left)
# The eigenvalues returned by sympy are ordered as: lam1, lam3, lam2 (seen in numerical checks)
u1, u3, u2 = [sp.Matrix(U[i][0]) for i in [0, 1, 2]]
lam1, lam3, lam2 = eigenvalues
v1, v3, v2 = [sp.Matrix(V[i][0]) for i in [0, 1, 2]]
# Normalization of left eigenvectors by the sum of their components
u1 = sp.simplify(u1 / (np.sum(u1) / 3.))
u2 = u2 / (np.sum(u2) / 3.)
u3 = u3 / (np.sum(u3) / 3.)
# Normalization of right eigenvectors by the inner product with the left eigenvectors
v1 = v1 / u1.dot(v1)
v2 = v2 / u2.dot(v2)
v3 = v3 / u3.dot(v3)
es = (u1, u2, u3), (lam1, lam2, lam3), (v1, v2, v3)
pickle.dump(es, open(cls._file_path(), 'wb'))
@classmethod
def load_eigensystem(cls):
return pickle.load(open(cls._file_path(), 'rb'))
def symbolic_three_state_eigensystem():
return _SymbolicThreeStateEigensystem.load_eigensystem()
| python |
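# A brief usage sketch of SymbolicRateMatrixArrhenius; the energies, barriers and temperature
# below are hypothetical values, not taken from the original module:
import numpy as np

R_sym = SymbolicRateMatrixArrhenius(3)
energies = [0.0, 0.5, 1.0]
barriers = np.array([[0.0, 1.5, 2.0],
                     [1.5, 0.0, 1.8],
                     [2.0, 1.8, 0.0]])
R_num = R_sym.subs_symbols(energies=energies, barriers=barriers, temperature=0.3)
# With every symbol substituted, subs_symbols returns a plain float array whose columns sum to zero.
print(np.allclose(R_num.sum(axis=0), 0.0))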
"""Author: Brandon Trabucco.
Verify that the installation of GloVe is functional.
"""
import glove
config = glove.configuration.Configuration(
embedding=50,
filedir="./embeddings/",
length=127,
start_word="</StArT/>",
end_word="</StoP/>",
unk_word="</UnKnOwN/>")
vocab, embeddings = glove.load(config)
assert len(vocab.reverse_vocab) == 127, ""
for w in vocab.reverse_vocab:
assert w in vocab.vocab, ""
assert vocab.word_to_id(config.start_word) == vocab.start_id, ""
assert vocab.word_to_id(config.end_word) == vocab.end_id, ""
assert vocab.word_to_id(config.unk_word) == vocab.unk_id, ""
assert vocab.word_to_id("./.2!#&*@^@%") == vocab.unk_id, ""
assert vocab.id_to_word(vocab.start_id) == config.start_word, ""
assert vocab.id_to_word(vocab.end_id) == config.end_word, ""
assert vocab.id_to_word(vocab.unk_id) == config.unk_word, ""
assert vocab.id_to_word(11182819) == config.unk_word, ""
assert embeddings.shape[0] == 127, ""
assert embeddings.shape[1] == 50, ""
assert embeddings.size == 127 * 50, ""
print("All test cases passed.") | python |
# -*- coding: utf-8 -*-
'''
Program idea:
There are two local speech libraries: the American-English library Speech_US and the British-English library Speech_EN.
The Youdao API is called to fetch the pronunciation MP3, which is stored in the corresponding library.
Main interfaces:
word_pronounce()          pronounce a word
multi_thread_download()   multi-threaded batch download of word pronunciations
'''
import urllib.request
from concurrent.futures import ThreadPoolExecutor
import os
from playsound import playsound
class pronounciation():
def __init__(self, type=0, word='hellow'):
        '''
        Call the Youdao API.
        type = 0: American pronunciation
        type = 1: British pronunciation
        Check whether the two speech-library directories exist under the current directory;
        create them if they do not.
        '''
        word = word.lower()  # lower case
        self._type = type  # pronunciation type
        self._word = word  # the word
        # root directory of this file
        self._dirRoot = os.path.dirname(os.path.abspath(__file__))
        if 0 == self._type:
            self._dir_speech = os.path.join(self._dirRoot + '/..', 'Speech_US')  # American library
        else:
            self._dir_speech = os.path.join(self._dirRoot + '/..', 'Speech_EN')  # British library
        # check whether the American library exists
        # print(os.path)
        if not os.path.exists('../Speech_US'):
            # if not, create it
            os.makedirs('../Speech_US')
        # check whether the British library exists
        if not os.path.exists('../Speech_EN'):
            # if not, create it
            os.makedirs('../Speech_EN')
def word_input(self, word_and_type):
        '''
        For testing: reads words into a list of the form [(word, type), (word, type), (word, type)]
        '''
word = 'hello'
print('input word \nEnds with a #')
while word != '#':
word = input('word: ')
if word == '#':
break
type = input('type( US(0) or EN(1) or both(2) ): ')
if type == '1':
t = 1
elif type == '0':
t = 0
else:
t = 2
word_and_type.append((word, t))
def print_wordlist(self, word_and_type):
for cur in word_and_type:
print('word: ' + cur[0] + ' type: ' + str(cur[1]))
def down(self, w_t):
        '''
        Download the MP3 for a word.
        Check whether the speech library already contains the corresponding MP3;
        download it only if it does not.
        '''
word = w_t[0].lower()
type = w_t[1]
dir_speech = self._get_dir_speech(type)
tmp = self._get_mp3_file_path(word, type, dir_speech)[0]
filePath = self._get_mp3_file_path(word, type, dir_speech)[1]
fileName = self._get_mp3_file_path(word, type, dir_speech)[2]
if tmp is False:
cur_url = self._getURL(word, type)
            # build the URL
            # call the downloader and save to the target folder
            # print('%s.mp3 does not exist\nwill download URL:\n' % word, self._url, '\nto:\n', self._filePath)
            # download to the target path
            # print('%s.mp3 is downloading\n' % fileName)
            urllib.request.urlretrieve(cur_url, filename=filePath)
            # print('%s.mp3 download finished\n' % fileName)
        else:
            pass
            # print('%s.mp3 already exists, no download needed' % fileName)
        # return the path of the sound file
return filePath
def _getURL(self, word, type):
        '''
        Private helper: build the target URL for the pronunciation, e.g.
        http://dict.youdao.com/dictvoice?type=0&audio=
        '''
url = r'http://dict.youdao.com/dictvoice?type=' + str(
type) + r'&audio=' + word
return url
def _get_mp3_file_path(self, word, type, dir_speech):
        '''
        Get the local file path of the word's MP3.
        Returns a tuple (exists, filePath, fileName), where exists tells
        whether the MP3 file is already present.
        '''
        word = word.lower()  # lower case
        # print('word: '+self._word+' type: '+str(self._type)+'\n')
if type == 0:
fileName = word + '_US.mp3'
else:
fileName = word + '_EN.mp3'
filePath = os.path.join(dir_speech, fileName)
        # check whether this MP3 file exists
        if os.path.exists(filePath):
            # the MP3 exists
            return (True, filePath, fileName)
        else:
            # the MP3 does not exist
return (False, filePath, fileName)
    def _get_dir_speech(self, type):  # return the absolute path of the directory holding the MP3 files
        if 0 == type:
            dir_speech = os.path.join(self._dirRoot + '/..', 'Speech_US')  # American library
        else:
            dir_speech = os.path.join(self._dirRoot + '/..', 'Speech_EN')  # British library
return dir_speech
def word_pronounce(self, w_t=('hello', 0)):
        '''
        Pronounce a word.
        If the pronunciation has already been downloaded, play it directly;
        otherwise download it first and then play it.
        The input parameter is a 2-tuple:
        first element:  the word
        second element: pronunciation type (0: American, 1: British,
                        2: ask again inside the function <for the case where both variants were downloaded>)
        '''
self._word = w_t[0]
self._type = w_t[1]
if w_t[1] == 2:
print('US(0) or EN(1): ')
self._type = input()
dir_speech = self._get_dir_speech(self._type)
tmp = self._get_mp3_file_path(self._word, self._type, dir_speech)
if tmp[0] is False:
# print("该单词尚未下载\n")
# print("即将下载\n")
self.down(w_t)
self.word_pronounce(w_t)
else:
playsound(tmp[1])
def multi_thread_download(self, word_and_type, num=9):
        '''
        Multi-threaded batch download of word pronunciations.
        The input has two parts:
        1. A list of 2-tuples, where each tuple is (word, type);
           type is the pronunciation kind (0: American, 1: British, 2: download both),
           i.e. a list like [(word, type), (word, type), (word, type)].
        2. The thread-pool size, default 9 (optional).
           A good pool size is 2N+1, where N is the number of CPUs.
        '''
        # multi-threading approach based on https://www.jb51.net/article/170571.htm
        pool = ThreadPoolExecutor(num)  # size of the thread pool
for cur_w_t in word_and_type:
if cur_w_t[1] == 2:
new1_w_t = (cur_w_t[0], 0)
new2_w_t = (cur_w_t[0], 1)
word_and_type.append(new1_w_t)
word_and_type.append(new2_w_t)
continue
pool.submit(self.down, cur_w_t)
'''
if __name__ == "__main__":
word_and_type = []
ss = pronounciation()
ss.word_input(word_and_type)  # input helper, for testing only
ss.multi_thread_download(word_and_type)
ss.word_pronounce(('Lebron', 0))
'''
| python |
# **********************************************************************
# Copyright (C) 2020 Johns Hopkins University Applied Physics Laboratory
#
# All Rights Reserved.
# For any other permission, please contact the Legal Office at JHU/APL.
# **********************************************************************
| python |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 29 23:33:28 2020
@author: abhi0
"""
import re
from typing import List

class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
tempTilda=''
for i in digits:
tempTilda=tempTilda+str(i)
temp=re.split('',tempTilda)
temp=temp[1:len(temp)-1]
sumIp=1
sumOp=[]
carOp=[]
carFlag=1
for i in reversed(temp):
if sumIp==1 and carFlag==1:
tempPrime=int(i)+1
else:
tempPrime=int(i)
if tempPrime>9:
sumOp.append(0)
carOp.append(1)
carFlag=1
sumIp=1
else:
sumOp.append(tempPrime)
carOp.append(0)
carFlag=0
sumIp=0
totSum=[]
if carOp[len(carOp)-1]==1:
totSum.append(carOp[len(carOp)-1])
totSum.extend(sumOp)
print(totSum)
else:
totSum.extend(sumOp[::-1])
return totSum
| python |
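# A quick check of the plusOne method above (illustrative only):
print(Solution().plusOne([1, 2, 9]))  # expected: [1, 3, 0]
print(Solution().plusOne([9, 9]))     # expected: [1, 0, 0]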
from unyt import unyt_array, unyt_quantity
from astropy.units import Quantity
import logging
from more_itertools import always_iterable
import numpy as np
pyxsimLogger = logging.getLogger("pyxsim")
ufstring = "%(name)-3s : [%(levelname)-9s] %(asctime)s %(message)s"
cfstring = "%(name)-3s : [%(levelname)-18s] %(asctime)s %(message)s"
pyxsim_sh = logging.StreamHandler()
# create formatter and add it to the handlers
formatter = logging.Formatter(ufstring)
pyxsim_sh.setFormatter(formatter)
# add the handler to the logger
pyxsimLogger.addHandler(pyxsim_sh)
pyxsimLogger.setLevel('INFO')
pyxsimLogger.propagate = False
mylog = pyxsimLogger
def parse_value(value, default_units, ds=None):
if isinstance(value, Quantity):
value = unyt_quantity.from_astropy(value)
if ds is None:
quan = unyt_quantity
else:
quan = ds.quan
if isinstance(value, unyt_quantity):
return quan(value.v, value.units).in_units(default_units)
elif isinstance(value, tuple):
return quan(value[0], value[1]).in_units(default_units)
else:
return quan(value, default_units)
def isunitful(a):
if isinstance(a, (Quantity, unyt_array)):
return True
elif isinstance(a, tuple):
try:
unyt_array(a[0], a[1])
return True
except:
pass
return False
def ensure_list(obj):
return list(always_iterable(obj))
def validate_parameters(first, second, skip=None):
if skip is None:
skip = []
keys1 = list(first.keys())
keys2 = list(second.keys())
keys1.sort()
keys2.sort()
if keys1 != keys2:
raise RuntimeError("The two inputs do not have the same parameters!")
for k1, k2 in zip(keys1, keys2):
if k1 not in skip:
v1 = first[k1][()]
            v2 = second[k2][()]
if isinstance(v1, (str, bytes)) or isinstance(v2, (str, bytes)):
check_equal = v1 == v2
else:
check_equal = np.allclose(np.array(v1), np.array(v2), rtol=0.0, atol=1.0e-10)
if not check_equal:
raise RuntimeError(f"The values for the parameter '{k1}' in the two inputs"
f" are not identical ({v1} vs. {v2})!")
def merge_files(input_files, output_file, overwrite=False,
add_exposure_times=False):
r"""
Helper function for merging PhotonList or EventList HDF5 files.
Parameters
----------
input_files : list of strings
List of filenames that will be merged together.
output_file : string
Name of the merged file to be outputted.
overwrite : boolean, default False
If a the output file already exists, set this to True to
overwrite it.
add_exposure_times : boolean, default False
If set to True, exposure times will be added together. Otherwise,
the exposure times of all of the files must be the same.
Examples
--------
>>> from pyxsim import merge_files
>>> merge_files(["events_0.h5","events_1.h5","events_3.h5"], "events.h5",
... overwrite=True, add_exposure_times=True)
Notes
-----
Currently, to merge files it is mandated that all of the parameters have the
same values, with the exception of the exposure time parameter "exp_time". If
add_exposure_times=False, the maximum exposure time will be used.
"""
from collections import defaultdict
from pathlib import Path
import h5py
if Path(output_file).exists() and not overwrite:
raise IOError(f"Cannot overwrite existing file {output_file}. "
"If you want to do this, set overwrite=True.")
f_in = h5py.File(input_files[0], "r")
f_out = h5py.File(output_file, "w")
exp_time_key = ""
p_out = f_out.create_group("parameters")
for key, param in f_in["parameters"].items():
if key.endswith("exp_time"):
exp_time_key = key
else:
p_out[key] = param[()]
skip = [exp_time_key] if add_exposure_times else []
for fn in input_files[1:]:
f = h5py.File(fn, "r")
validate_parameters(f_in["parameters"], f["parameters"], skip=skip)
f.close()
f_in.close()
data = defaultdict(list)
tot_exp_time = 0.0
for i, fn in enumerate(input_files):
f = h5py.File(fn, "r")
if add_exposure_times:
tot_exp_time += f["/parameters"][exp_time_key][()]
else:
tot_exp_time = max(tot_exp_time, f["/parameters"][exp_time_key][()])
for key in f["/data"]:
data[key].append(f["/data"][key][:])
f.close()
p_out[exp_time_key] = tot_exp_time
d = f_out.create_group("data")
for k in data:
d.create_dataset(k, data=np.concatenate(data[k]))
f_out.close()
| python |
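# A short usage sketch of parse_value above; the numbers and units are illustrative only:
d1 = parse_value((500.0, "kpc"), "cm")          # (value, unit) tuple -> unyt_quantity in cm
d2 = parse_value(Quantity(500.0, "kpc"), "cm")  # astropy Quantity -> converted to unyt, then to cm
d3 = parse_value(500.0, "kpc")                  # bare float -> assumed to already be in kpc
print(d1, d2, d3)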
import PIL
from PIL import Image, ImageOps, ImageEnhance
import numpy as np
import sys
import os, cv2
import csv
import pandas as pd
myDir = "..\GujOCR\Output"
#Useful function
def createFileList(myDir, format='.png'):
fileList = []
print(myDir)
for root, dirs, files in os.walk(myDir, topdown=False):
for name in files:
if name.endswith(format):
fullName = os.path.join(root, name)
fileList.append(fullName)
return fileList
columnNames = list()
for i in range(784):
pixel = 'p'
pixel += str(i)
columnNames.append(pixel)
l = os.listdir("..\GujOCR\Output")
print(l)
dic = {val : idx for idx, val in enumerate(l)}
print(dic)
train_data = pd.DataFrame(columns = columnNames)
train_data.to_csv("trainset28.csv",index = False)
label_count = list()
print(len(l))
for i in range(len(l)):
mydir = 'OUTPUT/' + l[i]
fileList = createFileList(mydir)
for file in fileList:
# print("hello")
img_file = Image.open(file) # imgfile.show()
width, height = img_file.size
format = img_file.format
mode = img_file.mode
label_count.append(dic[l[i]])
inverted_image = img_file.convert('RGB')
im_invert = ImageOps.invert(inverted_image)
size = (28, 28)
new_image = img_file.resize(size)
enhancer = ImageEnhance.Contrast(new_image)
new_image = enhancer.enhance(3)
img_grey = new_image.convert('L')
        value = np.asarray(img_grey.getdata(), dtype=int).reshape((img_grey.size[1], img_grey.size[0]))
value = value.flatten()
with open("trainset28.csv", 'a', newline='') as f:
writer = csv.writer(f)
writer.writerow(value)
read_data = pd.read_csv('trainset28.csv')
read_data['Label'] = label_count
print(read_data)
#Write back dataframe to csv
read_data.to_csv("training_label28.csv",index = False)
print(train_data)
| python |
from __future__ import unicode_literals
import frappe
import re
def execute():
for srl in frappe.get_all('Salary Slip',['name']):
if srl.get("name"):
substring = re.search("\/(.*?)\/",srl.get("name")).group(1)
emp = frappe.db.get_value('Employee',{'name':substring},'user_id')
if "Employee" in frappe.get_roles(emp) and "HR Manager" not in frappe.get_roles(emp) and len(frappe.get_all('User Permission',filters={'allow':"Salary Slip",'for_value':srl.get("name"),'user':emp}))==0:
print(emp,"***",substring)
permission=frappe.new_doc('User Permission')
permission.user= emp
permission.allow= 'Salary Slip'
permission.for_value= srl.get("name")
permission.apply_to_all_doctypes = 0
permission.applicable_for = 'Salary Slip'
permission.save()
#homzhub_customization.homzhub_customization.patches.set_salary_permission.execute | python |
# coding: utf-8
r"""timeout decorators for Windows and Linux
Beware that the Windows and the Linux decorator versions
do not raise the same exception if the timeout is exceeded
"""
import platform
# import errno
# import os
import signal
import multiprocessing
import multiprocessing.pool
from functools import wraps
# Python 2 compatibility.
try:
TimeoutError
except NameError:
TimeoutError = RuntimeError
def timeout(max_timeout):
r"""Use the right timeout based on platform.system()
Parameters
----------
max_timeout : int or float
The maximum time in seconds for the decorated function to complete
"""
if platform.system() == "Windows":
return timeout_windows(max_timeout)
elif platform.system() == "Linux":
return timeout_linux(max_timeout)
else:
raise NotImplementedError
def timeout_windows(max_timeout):
"""Timeout decorator, parameter in seconds.
Parameters
----------
max_timeout : int or float
The maximum time in seconds for the decorated function to complete
Raises
------
multiprocessing.TimeoutError
if the function call exceeds max_timeout
"""
def timeout_decorator(item):
"""Wrap the original function."""
@wraps(item)
def func_wrapper(*args, **kwargs):
"""Closure for function."""
pool = multiprocessing.pool.ThreadPool(processes=1)
async_result = pool.apply_async(item, args, kwargs)
# raises a TimeoutError if execution exceeds max_timeout
return async_result.get(max_timeout)
return func_wrapper
return timeout_decorator
# class TimeoutError(Exception):
# r"""Error for the Linux version of the timeout decorator"""
# pass
def timeout_linux(max_timeout):
"""Timeout decorator, parameter in seconds.
Parameters
----------
max_timeout : int or float
The maximum time in seconds for the decorated function to complete
Raises
------
TimeoutError
if the function call exceeds max_timeout
"""
def decorator(func):
def _handle_timeout(signum, frame):
raise TimeoutError
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(max_timeout)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wraps(func)(wrapper)
return decorator
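# --- Usage sketch (added for illustration; `slow_add` is a hypothetical function, not
# part of the original module). On Windows the call raises multiprocessing.TimeoutError,
# on Linux it raises TimeoutError, and on other platforms the decorator itself raises
# NotImplementedError.
if __name__ == "__main__":
    import time
    @timeout(2)
    def slow_add(a, b):
        time.sleep(5)  # deliberately exceeds the 2 second limit
        return a + b
    try:
        print(slow_add(1, 2))
    except (TimeoutError, multiprocessing.TimeoutError):
        print("slow_add() exceeded the 2 second timeout")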
| python |
import argparse
import calendar
import dotenv
import json
import libraries.api
import libraries.handle_file
import libraries.record
import logging
import logging.config
import os
import pandas as pd
import requests
import time
from csv import writer
from oauthlib.oauth2 import BackendApplicationClient, TokenExpiredError
from requests.auth import HTTPBasicAuth
from requests_oauthlib import OAuth2Session
from typing import Callable, Dict, Set, TextIO
dotenv_file = dotenv.find_dotenv()
dotenv.load_dotenv(dotenv_file)
logging.config.fileConfig('logging.conf', disable_existing_loggers=False)
logger = logging.getLogger(__name__)
class RecordsBuffer:
"""
A buffer of records. DO NOT INSTANTIATE THIS CLASS DIRECTLY.
Instead, instantiate one of its subclasses:
- AlmaRecordsBuffer: A buffer of records with MMS ID and OCLC number
- WorldCatRecordsBuffer: A buffer of records with OCLC number only
Attributes
----------
auth: HTTPBasicAuth
The HTTP Basic Auth object used when requesting an access token
oauth_session: OAuth2Session
The OAuth 2 Session object used to request an access token and make HTTP
requests to the WorldCat Metadata API (note that the OAuth2Session class
is a subclass of requests.Session)
Methods
-------
get_transaction_id()
Builds transaction_id to include with WorldCat Metadata API request
make_api_request(api_request, api_url)
Makes the specified API request to the WorldCat Metadata API
"""
def __init__(self) -> None:
"""Initializes a RecordsBuffer object by creating its OAuth2Session."""
logger.debug('Started RecordsBuffer constructor...')
self.contents = None
logger.debug(f'{type(self.contents)=}')
# Create OAuth2Session for WorldCat Metadata API
logger.debug('Creating OAuth2Session...')
self.auth = HTTPBasicAuth(os.environ['WORLDCAT_METADATA_API_KEY'],
os.environ['WORLDCAT_METADATA_API_SECRET'])
logger.debug(f'{type(self.auth)=}')
client = BackendApplicationClient(
client_id=os.environ['WORLDCAT_METADATA_API_KEY'],
scope=['WorldCatMetadataAPI refresh_token'])
token = {
'access_token': os.environ['WORLDCAT_METADATA_API_ACCESS_TOKEN'],
'expires_at': float(
os.environ['WORLDCAT_METADATA_API_ACCESS_TOKEN_EXPIRES_AT']),
'token_type': os.environ['WORLDCAT_METADATA_API_ACCESS_TOKEN_TYPE']
}
self.oauth_session = OAuth2Session(client=client, token=token)
logger.debug(f'{type(self.oauth_session)=}')
logger.debug('OAuth2Session created.')
logger.debug('Completed RecordsBuffer constructor.')
def __len__(self) -> int:
"""Returns the number of records in this records buffer.
Returns
-------
int
The number of records in this records buffer
Raises
------
TypeError
If the contents attribute is not defined (i.e. is None)
"""
return len(self.contents)
def get_transaction_id(self) -> str:
"""Builds transaction_id to include with WorldCat Metadata API request.
Returns
-------
str
The transaction_id
"""
transaction_id = ''
if ('OCLC_INSTITUTION_SYMBOL' in os.environ
or 'WORLDCAT_PRINCIPAL_ID' in os.environ):
# Add OCLC Institution Symbol, if present
transaction_id = os.getenv('OCLC_INSTITUTION_SYMBOL', '')
if transaction_id != '':
transaction_id += '_'
# Add timestamp and, if present, your WorldCat Principal ID
transaction_id += time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
if 'WORLDCAT_PRINCIPAL_ID' in os.environ:
transaction_id += f"_{os.getenv('WORLDCAT_PRINCIPAL_ID')}"
logger.debug(f'{transaction_id=}')
return transaction_id
def make_api_request(
self,
api_request: Callable[..., requests.models.Response],
api_url: str) -> requests.models.Response:
"""Makes the specified API request to the WorldCat Metadata API.
Parameters
----------
api_request: Callable[..., requests.models.Response]
The specific WorldCat Metadata API request to make
api_url: str
The specific WorldCat Metadata API URL to use
Returns
-------
requests.models.Response
The API response returned by the api_request function
"""
transaction_id = self.get_transaction_id()
if transaction_id != '':
api_url += f"&transactionID={transaction_id}"
headers = {"Accept": "application/json"}
response = None
# Make API request
try:
response = api_request(api_url, headers=headers)
except TokenExpiredError as e:
logger.debug(f'Access token {self.oauth_session.access_token} '
f'expired. Requesting new access token...')
datetime_format = '%Y-%m-%d %H:%M:%SZ'
# Confirm the epoch is January 1, 1970, 00:00:00 (UTC).
# See https://docs.python.org/3.8/library/time.html for an
# explanation of the term 'epoch'.
system_epoch = time.strftime(datetime_format, time.gmtime(0))
expected_epoch = '1970-01-01 00:00:00Z'
if system_epoch != expected_epoch:
logger.warning(f"The system's epoch ({system_epoch}) is not "
f"equal to the expected epoch ({expected_epoch}). There "
f"may therefore be issues in determining whether the "
f"WorldCat Metadata API's refresh token has expired.")
# Convert the WORLDCAT_METADATA_API_REFRESH_TOKEN_EXPIRES_AT value
# to a float representing seconds since the epoch.
# Note that the WORLDCAT_METADATA_API_REFRESH_TOKEN_EXPIRES_AT value
# is a string in ISO 8601 format, except that it substitutes the 'T'
# delimiter (which separates the date from the time) for a space, as
# in '2021-09-30 22:43:07Z'.
refresh_token_expires_at = 0.0
if 'WORLDCAT_METADATA_API_REFRESH_TOKEN_EXPIRES_AT' in os.environ:
logger.debug(f'WORLDCAT_METADATA_API_REFRESH_TOKEN_EXPIRES_AT '
f'variable exists in .env file, so using this value: '
f'{os.getenv("WORLDCAT_METADATA_API_REFRESH_TOKEN_EXPIRES_AT")}'
f' (UTC), which will be converted to seconds since the '
f'epoch')
refresh_token_expires_at = calendar.timegm(
time.strptime(
os.getenv(
'WORLDCAT_METADATA_API_REFRESH_TOKEN_EXPIRES_AT'),
datetime_format))
refresh_token_expires_in = refresh_token_expires_at - time.time()
logger.debug(f'{refresh_token_expires_at=} seconds since the epoch')
logger.debug(f'Current time: {time.time()} seconds since the epoch,'
f' which is {time.strftime(datetime_format, time.gmtime())} '
f'(UTC). So the Refresh Token (if one exists) expires in '
f'{refresh_token_expires_in} seconds.')
# Obtain a new Access Token
token = None
if ('WORLDCAT_METADATA_API_REFRESH_TOKEN' in os.environ
and refresh_token_expires_in > 25):
# Use Refresh Token to request new Access Token
token = self.oauth_session.refresh_token(
os.environ['OCLC_AUTHORIZATION_SERVER_TOKEN_URL'],
refresh_token=os.getenv(
'WORLDCAT_METADATA_API_REFRESH_TOKEN'),
auth=self.auth)
else:
# Request Refresh Token and Access Token
token = self.oauth_session.fetch_token(
os.environ['OCLC_AUTHORIZATION_SERVER_TOKEN_URL'],
auth=self.auth)
logger.debug(f"Refresh token granted ({token['refresh_token']})"
f", which expires at {token['refresh_token_expires_at']}")
# Set Refresh Token environment variables and update .env file
libraries.handle_file.set_env_var(
'WORLDCAT_METADATA_API_REFRESH_TOKEN',
token['refresh_token'])
libraries.handle_file.set_env_var(
'WORLDCAT_METADATA_API_REFRESH_TOKEN_EXPIRES_AT',
token['refresh_token_expires_at'])
logger.debug(f'{token=}')
logger.debug(f'New access token granted: '
f'{self.oauth_session.access_token}')
# Set environment variables based on new Access Token info and
# update .env file accordingly
libraries.handle_file.set_env_var(
'WORLDCAT_METADATA_API_ACCESS_TOKEN',
token['access_token'])
libraries.handle_file.set_env_var(
'WORLDCAT_METADATA_API_ACCESS_TOKEN_TYPE',
token['token_type'])
logger.debug(f"{token['expires_at']=}")
libraries.handle_file.set_env_var(
'WORLDCAT_METADATA_API_ACCESS_TOKEN_EXPIRES_AT',
str(token['expires_at']))
response = api_request(api_url, headers=headers)
libraries.api.log_response_and_raise_for_status(response)
return response
class AlmaRecordsBuffer(RecordsBuffer):
"""
A buffer of Alma records, each with an MMS ID and OCLC number.
Attributes
----------
oclc_num_dict: Dict[str, str]
A dictionary containing each record's original OCLC number (key) and its
MMS ID (value)
records_with_current_oclc_num: TextIO
The CSV file object where records with a current OCLC number are added
records_with_current_oclc_num_writer: writer
The CSV writer object for the records_with_current_oclc_num file object
records_with_old_oclc_num: TextIO
The CSV file object where records with an old OCLC number are added
records_with_old_oclc_num_writer: writer
The CSV writer object for the records_with_old_oclc_num file object
records_with_errors: TextIO
The CSV file object where records are added if an error is encountered
records_with_errors_writer: writer
The CSV writer object for the records_with_errors file object
Methods
-------
add(orig_oclc_num, mms_id)
Adds the given record to this buffer (i.e. to oclc_num_dict)
process_records(results)
Checks each record in oclc_num_dict for the current OCLC number
remove_all_records()
Removes all records from this buffer (i.e. clears oclc_num_dict)
"""
def __init__(self,
records_with_current_oclc_num: TextIO,
records_with_old_oclc_num: TextIO,
records_with_errors: TextIO) -> None:
"""Instantiates an AlmaRecordsBuffer object.
Parameters
----------
records_with_current_oclc_num: TextIO
The CSV file object where records with a current OCLC number are
added
records_with_old_oclc_num: TextIO
The CSV file object where records with an old OCLC number are added
records_with_errors: TextIO
The CSV file object where records are added if an error is
encountered
"""
logger.debug('Started AlmaRecordsBuffer constructor...')
self.oclc_num_dict = {}
logger.debug(f'{type(self.oclc_num_dict)=}')
self.records_with_current_oclc_num = records_with_current_oclc_num
self.records_with_current_oclc_num_writer = \
writer(records_with_current_oclc_num)
self.records_with_old_oclc_num = records_with_old_oclc_num
self.records_with_old_oclc_num_writer = \
writer(records_with_old_oclc_num)
self.records_with_errors = records_with_errors
self.records_with_errors_writer = writer(records_with_errors)
# Create OAuth2Session for WorldCat Metadata API
super().__init__()
self.contents = self.oclc_num_dict
logger.debug(f'{type(self.contents)=}')
logger.debug('Completed AlmaRecordsBuffer constructor.\n')
def __str__(self) -> str:
"""Returns a string listing the contents of this records buffer.
        Specifically, this method lists the contents of the OCLC Number
        dictionary.
Returns
-------
str
The contents of the OCLC Number dictionary
"""
return (f'Records buffer contents ({{OCLC Number: MMS ID}}): '
f'{self.oclc_num_dict}')
def add(self, orig_oclc_num: str, mms_id: str) -> None:
"""Adds the given record to this buffer (i.e. to oclc_num_dict).
Parameters
----------
orig_oclc_num: str
The record's original OCLC number
mms_id: str
The record's MMS ID
Raises
------
AssertionError
If the original OCLC number is already in the OCLC Number dictionary
"""
assert orig_oclc_num not in self.oclc_num_dict, (f'OCLC number '
f'{orig_oclc_num} already exists in records buffer with MMS ID '
f'{self.oclc_num_dict[orig_oclc_num]}')
self.oclc_num_dict[orig_oclc_num] = mms_id
logger.debug(f'Added {orig_oclc_num} to records buffer.')
def process_records(self, results: Dict[str, int]) -> None:
"""Checks each record in oclc_num_dict for the current OCLC number.
This is done by making a GET request to the WorldCat Metadata API:
https://worldcat.org/bib/checkcontrolnumbers?oclcNumbers={oclcNumbers}
Parameters
----------
results: Dict[str, int]
A dictionary containing the total number of records in the following
categories: records with the current OCLC number, records with an
old OCLC number, records with errors
Raises
------
json.decoder.JSONDecodeError
If there is an error decoding the API response
"""
logger.debug('Started processing records buffer...')
api_response_error_msg = ('Problem with Get Current OCLC Number API '
'response')
# Build URL for API request
url = (f"{os.environ['WORLDCAT_METADATA_SERVICE_URL']}"
f"/bib/checkcontrolnumbers"
f"?oclcNumbers={','.join(self.oclc_num_dict.keys())}")
try:
api_response = super().make_api_request(
self.oauth_session.get,
url
)
json_response = api_response.json()
logger.debug(f'Get Current OCLC Number API response:\n'
f'{json.dumps(json_response, indent=2)}')
for record_index, record in enumerate(json_response['entry'],
start=1):
found_requested_oclc_num = record['found']
is_current_oclc_num = not record['merged']
# Look up MMS ID based on OCLC number
mms_id = self.oclc_num_dict[record['requestedOclcNumber']]
logger.debug(f'Started processing record #{record_index} (OCLC '
f'number {record["requestedOclcNumber"]})...')
logger.debug(f'{is_current_oclc_num=}')
if not found_requested_oclc_num:
                    logger.error(f'{api_response_error_msg}: OCLC number '
                        f'{record["requestedOclcNumber"]} not found')
results['num_records_with_errors'] += 1
# Add record to
# records_with_errors_when_getting_current_oclc_number.csv
if self.records_with_errors.tell() == 0:
# Write header row
self.records_with_errors_writer.writerow([
'MMS ID',
'OCLC Number',
'Error'
])
self.records_with_errors_writer.writerow([
mms_id,
record['requestedOclcNumber'],
f'{api_response_error_msg}: OCLC number not found'
])
elif is_current_oclc_num:
results['num_records_with_current_oclc_num'] += 1
# Add record to already_has_current_oclc_number.csv
if self.records_with_current_oclc_num.tell() == 0:
# Write header row
self.records_with_current_oclc_num_writer.writerow([
'MMS ID',
'Current OCLC Number'
])
self.records_with_current_oclc_num_writer.writerow([
mms_id,
record['currentOclcNumber']
])
else:
results['num_records_with_old_oclc_num'] += 1
# Add record to needs_current_oclc_number.csv
if self.records_with_old_oclc_num.tell() == 0:
# Write header row
self.records_with_old_oclc_num_writer.writerow([
'MMS ID',
'Current OCLC Number',
'Original OCLC Number'
])
self.records_with_old_oclc_num_writer.writerow([
mms_id,
record['currentOclcNumber'],
record['requestedOclcNumber']
])
logger.debug(f'Finished processing record #{record_index}.\n')
except json.decoder.JSONDecodeError:
# except (requests.exceptions.JSONDecodeError,
# json.decoder.JSONDecodeError):
logger.exception(f'{api_response_error_msg}: Error decoding JSON')
logger.exception(f'{api_response.text=}')
# Re-raise exception so that the script is halted (since future API
# requests may result in the same error)
raise
logger.debug('Finished processing records buffer.')
def remove_all_records(self) -> None:
"""Removes all records from this buffer (i.e. clears oclc_num_dict)."""
self.oclc_num_dict.clear()
logger.debug(f'Cleared records buffer.')
logger.debug(self.__str__() + '\n')
class WorldCatRecordsBuffer(RecordsBuffer):
"""
A buffer of WorldCat records, each with an OCLC number.
Attributes
----------
oclc_num_set: Set[str]
A set containing each record's OCLC number
records_with_holding_already_set: TextIO
The CSV file object where records whose holding is already set are added
(i.e. records that were not updated)
records_with_holding_already_set_writer: writer
The CSV writer object for the records_with_holding_already_set file
object
records_with_holding_successfully_set: TextIO
The CSV file object where records whose holding was successfully set are
added (i.e. records that were successfully updated)
records_with_holding_successfully_set_writer: writer
The CSV writer object for the records_with_holding_successfully_set file
object
records_with_errors: TextIO
The CSV file object where records are added if an error is encountered
records_with_errors_writer: writer
The CSV writer object for the records_with_errors file object
Methods
-------
add(oclc_num)
Adds the given record to this buffer (i.e. to oclc_num_set)
process_records(results)
Attempts to set the institution holding for each record in oclc_num_set
remove_all_records()
Removes all records from this buffer (i.e. clears oclc_num_set)
"""
def __init__(self,
records_with_holding_already_set: TextIO,
records_with_holding_successfully_set: TextIO,
records_with_errors: TextIO) -> None:
"""Instantiates a WorldCatRecordsBuffer object.
Parameters
----------
records_with_holding_already_set: TextIO
The CSV file object where records whose holding is already set are
added (i.e. records that were not updated)
records_with_holding_successfully_set: TextIO
The CSV file object where records whose holding was successfully set
are added (i.e. records that were successfully updated)
records_with_errors: TextIO
The CSV file object where records are added if an error is
encountered
"""
logger.debug('Started WorldCatRecordsBuffer constructor...')
self.oclc_num_set = set()
logger.debug(f'{type(self.oclc_num_set)=}')
self.records_with_holding_already_set = records_with_holding_already_set
self.records_with_holding_already_set_writer = \
writer(records_with_holding_already_set)
self.records_with_holding_successfully_set = \
records_with_holding_successfully_set
self.records_with_holding_successfully_set_writer = \
writer(records_with_holding_successfully_set)
self.records_with_errors = records_with_errors
self.records_with_errors_writer = writer(records_with_errors)
# Create OAuth2Session for WorldCat Metadata API
super().__init__()
self.contents = self.oclc_num_set
logger.debug(f'{type(self.contents)=}')
logger.debug('Completed WorldCatRecordsBuffer constructor.\n')
def __str__(self) -> str:
"""Returns a string listing the contents of this records buffer.
        Specifically, this method lists the contents of the OCLC Number set.
Returns
-------
str
The contents of the OCLC Number set
"""
return (f'Records buffer contents (OCLC Numbers): {self.oclc_num_set}')
def add(self, oclc_num: str) -> None:
"""Adds the given record to this buffer (i.e. to oclc_num_set).
Parameters
----------
oclc_num: str
The record's OCLC number
Raises
------
AssertionError
If the OCLC number is already in the OCLC Number set
"""
assert oclc_num not in self.oclc_num_set, (f'OCLC number {oclc_num} '
f'already exists in records buffer')
self.oclc_num_set.add(oclc_num)
logger.debug(f'Added {oclc_num} to records buffer.')
def process_records(self, results: Dict[str, int]) -> None:
"""Attempts to set the holding for each record in oclc_num_set.
This is done by making a POST request to the WorldCat Metadata API:
https://worldcat.org/ih/datalist?oclcNumbers={oclcNumbers}
Parameters
----------
results: Dict[str, int]
A dictionary containing the total number of records in the following
categories: records successfully set, records already set, records
with errors
Raises
------
json.decoder.JSONDecodeError
If there is an error decoding the API response
"""
logger.debug('Started processing records buffer...')
api_response_error_msg = ('Problem with Set Holding API response')
# Build URL for API request
url = (f"{os.environ['WORLDCAT_METADATA_SERVICE_URL']}"
f"/ih/datalist?oclcNumbers={','.join(self.oclc_num_set)}")
try:
api_response = super().make_api_request(
self.oauth_session.post,
url
)
json_response = api_response.json()
logger.debug(f'Set Holding API response:\n'
f'{json.dumps(json_response, indent=2)}')
for record_index, record in enumerate(json_response['entry'],
start=1):
is_current_oclc_num = (record['requestedOclcNumber']
== record['currentOclcNumber'])
new_oclc_num = ''
oclc_num_msg = ''
if not is_current_oclc_num:
new_oclc_num = record['currentOclcNumber']
oclc_num_msg = (f'OCLC number '
f'{record["requestedOclcNumber"]} has been updated to '
f'{new_oclc_num}. Consider updating Alma record.')
logger.warning(oclc_num_msg)
oclc_num_msg = f'Warning: {oclc_num_msg}'
logger.debug(f'Started processing record #{record_index} (OCLC '
f'number {record["requestedOclcNumber"]})...')
logger.debug(f'{is_current_oclc_num=}')
logger.debug(f'{record["httpStatusCode"]=}')
logger.debug(f'{record["errorDetail"]=}')
if record['httpStatusCode'] == 'HTTP 200 OK':
results['num_records_successfully_set'] += 1
# Add record to records_with_holding_successfully_set.csv
if self.records_with_holding_successfully_set.tell() == 0:
# Write header row
self.records_with_holding_successfully_set_writer.writerow([
'Requested OCLC Number',
'New OCLC Number (if applicable)',
'Warning'
])
self.records_with_holding_successfully_set_writer.writerow([
record['requestedOclcNumber'],
new_oclc_num,
oclc_num_msg
])
elif record['httpStatusCode'] == 'HTTP 409 Conflict':
results['num_records_already_set'] += 1
# Add record to records_with_holding_already_set.csv
if self.records_with_holding_already_set.tell() == 0:
# Write header row
self.records_with_holding_already_set_writer.writerow([
'Requested OCLC Number',
'New OCLC Number (if applicable)',
'Error'
])
self.records_with_holding_already_set_writer.writerow([
record['requestedOclcNumber'],
new_oclc_num,
(f"{api_response_error_msg}: {record['errorDetail']}. "
f"{oclc_num_msg}")
])
else:
logger.exception(f"{api_response_error_msg} for OCLC "
f"Number {record['requestedOclcNumber']}: "
f"{record['errorDetail']} ({record['httpStatusCode']})."
)
results['num_records_with_errors'] += 1
# Add record to records_with_errors_when_setting_holding.csv
if self.records_with_errors.tell() == 0:
# Write header row
self.records_with_errors_writer.writerow([
'Requested OCLC Number',
'New OCLC Number (if applicable)',
'Error'
])
self.records_with_errors_writer.writerow([
record['requestedOclcNumber'],
new_oclc_num,
(f"{api_response_error_msg}: {record['httpStatusCode']}"
f": {record['errorDetail']}. {oclc_num_msg}")
])
logger.debug(f'Finished processing record #{record_index}.\n')
except json.decoder.JSONDecodeError:
# except (requests.exceptions.JSONDecodeError,
# json.decoder.JSONDecodeError):
logger.exception(f'{api_response_error_msg}: Error decoding JSON')
logger.exception(f'{api_response.text=}')
# Re-raise exception so that the script is halted (since future API
# requests may result in the same error)
raise
logger.debug('Finished processing records buffer.')
def remove_all_records(self) -> None:
"""Removes all records from this buffer (i.e. clears oclc_num_set)."""
self.oclc_num_set.clear()
logger.debug(f'Cleared records buffer.')
logger.debug(self.__str__() + '\n')
def init_argparse() -> argparse.ArgumentParser:
"""Initializes and returns ArgumentParser object."""
parser = argparse.ArgumentParser(
usage=('%(prog)s [-h] [-v] --input_file INPUT_FILE --operation '
'{get_current_oclc_number, set_holding}'),
description=('For each row in the input file, perform the specified '
'operation.')
)
parser.add_argument(
'-v', '--version', action='version',
version=f'{parser.prog} version 1.0.0'
)
parser.add_argument(
'--input_file',
required=True,
type=str,
help=('the name and path of the file to be processed, which must be in '
'CSV format (e.g. '
'csv/master_list_records_with_potentially_old_oclc_num.csv)')
)
parser.add_argument(
'--operation',
required=True,
choices=['get_current_oclc_number', 'set_holding'],
help=('the operation to be performed on each row of the input file '
'(either get_current_oclc_number or set_holding)')
)
return parser
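# Example invocations (illustrative; the script name below is a placeholder, and only
# the first CSV path comes from the --input_file help text above):
#   python <script>.py --input_file csv/master_list_records_with_potentially_old_oclc_num.csv \
#       --operation get_current_oclc_number
#   python <script>.py --input_file csv/oclc_numbers.csv --operation set_holding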
def main() -> None:
"""Performs the specified operation on every record in the input file.
    Gathers as many OCLC numbers as the WorldCat Metadata API allows per request
    before sending the appropriate request.
Operations:
- get_current_oclc_number
For each row, check whether the given OCLC number is the current one:
-- If so, then add the record to csv/already_has_current_oclc_number.csv
-- If not, then add the record to csv/needs_current_oclc_number.csv
-- If an error is encountered, then add the record to
csv/records_with_errors_when_getting_current_oclc_number.csv
- set_holding
For each row, set holding for the given OCLC number
-- If holding is set successfully, then add the record to
csv/records_with_holding_successfully_set.csv
-- If holding was already set, then add the record to
csv/records_with_holding_already_set.csv
-- If an error is encountered, then add the record to
csv/records_with_errors_when_setting_holding.csv
"""
# Initialize parser and parse command-line args
parser = init_argparse()
args = parser.parse_args()
# Convert input file into pandas DataFrame
data = None
if args.input_file.endswith('.csv'):
data = pd.read_csv(args.input_file, dtype='str', keep_default_na=False)
else:
        logger.error(f'Invalid format for input file ({args.input_file}). '
            f'Must be a CSV file (.csv)')
return
records_already_processed = set()
logger.debug(f'{records_already_processed=}\n')
logger.debug(f'{args.operation=}')
results = None
filename_for_records_to_update = None
filename_for_records_with_no_update_needed = None
filename_for_records_with_errors = None
if args.operation == 'get_current_oclc_number':
results = {
'num_records_with_current_oclc_num': 0,
'num_records_with_old_oclc_num': 0,
'num_records_with_errors': 0
}
filename_for_records_to_update = 'csv/needs_current_oclc_number.csv'
filename_for_records_with_no_update_needed = \
'csv/already_has_current_oclc_number.csv'
filename_for_records_with_errors = \
'csv/records_with_errors_when_getting_current_oclc_number.csv'
else:
results = {
'num_records_successfully_set': 0,
'num_records_already_set': 0,
'num_records_with_errors': 0
}
filename_for_records_to_update = \
'csv/records_with_holding_successfully_set.csv'
filename_for_records_with_no_update_needed = \
'csv/records_with_holding_already_set.csv'
filename_for_records_with_errors = \
'csv/records_with_errors_when_setting_holding.csv'
with open(filename_for_records_to_update, mode='a',
newline='') as records_to_update, \
open(filename_for_records_with_no_update_needed, mode='a',
newline='') as records_with_no_update_needed, \
open(filename_for_records_with_errors, mode='a',
newline='') as records_with_errors:
records_with_errors_writer = writer(records_with_errors)
records_buffer = None
if args.operation == 'get_current_oclc_number':
records_buffer = AlmaRecordsBuffer(
records_with_no_update_needed,
records_to_update,
records_with_errors
)
else:
records_buffer = WorldCatRecordsBuffer(
records_with_no_update_needed,
records_to_update,
records_with_errors
)
logger.debug(f'{type(records_buffer)=}')
logger.debug(records_buffer)
logger.debug(f'{type(records_buffer.contents)=}')
logger.debug(f'{len(records_buffer)=}\n')
        # Loop over each row in the DataFrame, adding it to the records buffer and
        # processing the buffer whenever it reaches the API's per-request maximum
for index, row in data.iterrows():
logger.debug(f'Started processing row {index + 2} of input file...')
error_occurred = False
error_msg = None
try:
mms_id = None
orig_oclc_num = None
if args.operation == 'get_current_oclc_number':
mms_id = row['MMS ID']
orig_oclc_num = \
row["Unique OCLC Number from Alma Record's 035 $a"]
mms_id = libraries.record.get_valid_record_identifier(
mms_id,
'MMS ID'
)
else:
orig_oclc_num = row['OCLC Number']
# Make sure OCLC Number is valid
orig_oclc_num = libraries.record.get_valid_record_identifier(
orig_oclc_num, 'OCLC number')
orig_oclc_num = \
libraries.record.remove_leading_zeros(orig_oclc_num)
if args.operation == 'get_current_oclc_number':
assert mms_id not in records_already_processed, (f'Record '
f'with MMS ID {mms_id} has already been processed.')
records_already_processed.add(mms_id)
else:
assert orig_oclc_num not in records_already_processed, (
f'Record with OCLC Number {orig_oclc_num} has already '
f'been processed.')
records_already_processed.add(orig_oclc_num)
if len(records_buffer) < int(os.environ[
'WORLDCAT_METADATA_API_MAX_RECORDS_PER_REQUEST']):
if args.operation == 'get_current_oclc_number':
records_buffer.add(orig_oclc_num, mms_id)
else:
records_buffer.add(orig_oclc_num)
else:
# records_buffer has the maximum records possible per API
# request, so process these records
logger.debug('Records buffer is full.\n')
records_buffer.process_records(results)
# Now that its records have been processed, clear buffer
records_buffer.remove_all_records()
# Add current row's data to the empty buffer
if args.operation == 'get_current_oclc_number':
records_buffer.add(orig_oclc_num, mms_id)
else:
records_buffer.add(orig_oclc_num)
except AssertionError as assert_err:
if args.operation == 'get_current_oclc_number':
logger.exception(f"An assertion error occurred when "
f"processing MMS ID '{row['MMS ID']}' (at row "
f"{index + 2} of input file): {assert_err}")
else:
logger.exception(f"An assertion error occurred when "
f"processing OCLC Number '{row['OCLC Number']}' (at "
f"row {index + 2} of input file): {assert_err}")
error_msg = f"Assertion Error: {assert_err}"
error_occurred = True
finally:
if error_occurred:
results['num_records_with_errors'] += 1
# Add record to records_with_errors spreadsheet
if args.operation == 'get_current_oclc_number':
if records_with_errors.tell() == 0:
# Write header row
records_with_errors_writer.writerow([
'MMS ID',
'OCLC Number',
'Error'
])
records_with_errors_writer.writerow([
mms_id,
orig_oclc_num,
error_msg
])
else:
if records_with_errors.tell() == 0:
# Write header row
records_with_errors_writer.writerow([
'Requested OCLC Number',
'New OCLC Number (if applicable)',
'Error'
])
records_with_errors_writer.writerow([
orig_oclc_num,
'',
error_msg
])
logger.debug(f'Finished processing row {index + 2} of input '
f'file.\n')
# If records_buffer is not empty, process remaining records
if len(records_buffer) > 0:
records_buffer.process_records(results)
# logger.debug(f'{records_already_processed=}\n')
logger.debug(f'{len(records_already_processed)=}\n')
print(f'\nEnd of script. Processed {len(data.index)} rows from input file:')
if args.operation == 'get_current_oclc_number':
print(f'- {results["num_records_with_current_oclc_num"]} record(s) '
f'with current OCLC number\n'
f'- {results["num_records_with_old_oclc_num"]} record(s) with '
f'old OCLC number\n'
f'- {results["num_records_with_errors"]} record(s) with errors')
else:
print(f'- {results["num_records_successfully_set"]} record(s) updated, '
f'i.e. holding was successfully set\n'
f'- {results["num_records_already_set"]} record(s) not updated '
f'because holding was already set\n'
f'- {results["num_records_with_errors"]} record(s) with errors')
if __name__ == "__main__":
main()
| python |
import json
from csv import DictReader
def parse_txt(fd, settings):
return fd.read().splitlines()
def parse_csv(fd, settings):
return [dict(x) for x in DictReader(fd)]
def parse_json(fd, settings):
return json.load(fd)
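# A minimal dispatch sketch (added for illustration; the PARSERS mapping and the
# parse_file helper are assumptions, not part of the original module):
import os
PARSERS = {'.txt': parse_txt, '.csv': parse_csv, '.json': parse_json}
def parse_file(path, settings=None):
    """Open `path` and run the parser that matches its file extension."""
    ext = os.path.splitext(path)[1].lower()
    with open(path, newline='') as fd:
        return PARSERS[ext](fd, settings)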
| python |
import torch
import torch.nn as nn
import pytorchvideo
AVAILABLE_3D_BACKBONES = [
"i3d_r50",
"c2d_r50",
"csn_r101",
"r2plus1d_r50",
"slow_r50",
"slowfast_r50",
"slowfast_r101",
"slowfast_16x8_r101_50_50",
"x3d_xs",
"x3d_s",
"x3d_m",
"x3d_l",
]
class CNN3D(nn.Module):
"""
Initializes the 3D Convolution backbone.
**Supported Backbones**
- `i3d_r50`
- `c2d_r50`
- `csn_r101`
    - `r2plus1d_r50`
- `slow_r50`
- `slowfast_r50`
- `slowfast_r101`
- `slowfast_16x8_r101_50_50`
- `x3d_xs`
- `x3d_s`
- `x3d_m`
- `x3d_l`
Args:
in_channels (int): Number of input channels
backbone (string): Backbone to use
pretrained (bool, optional): Whether to use pretrained Backbone. Default: ``True``
**kwargs (optional): Will be passed to pytorchvideo.models.hub models;
"""
def __init__(self, in_channels, backbone, pretrained=True, **kwargs):
super().__init__()
self.backbone = self.get_3d_backbone(
backbone, in_channels, pretrained, **kwargs
)
self.n_out_features = 400 # list(self.backbone.modules())[-2].out_features
def forward(self, x):
"""
forward step
"""
x = self.backbone(x)
return x.transpose(0, 1) # Batch-first
def get_3d_backbone(
self,
name,
in_channels=3,
pretrained: bool = False,
progress: bool = True,
**kwargs
):
        assert name in AVAILABLE_3D_BACKBONES, "Please use any backbone from " + str(
AVAILABLE_3D_BACKBONES
)
import pytorchvideo.models.hub as ptv_hub
model = getattr(ptv_hub, name)(
pretrained=pretrained, progress=progress, **kwargs
)
if in_channels != 3:
reshape_conv_input_size(in_channels, model)
return model
def reshape_conv_input_size(in_channels, model):
"""
Change convolution layer to adopt to various input channels
"""
assert in_channels == 1 or in_channels >= 4
for module in model.modules():
if isinstance(module, nn.Conv3d):
break
module.in_channels = in_channels
weight = module.weight.detach()
if in_channels == 1:
module.weight = nn.parameter.Parameter(weight.sum(1, keepdim=True))
else:
curr_in_channels = module.weight.shape[1]
to_concat = torch.Tensor(
module.out_channels,
module.in_channels - curr_in_channels,
*module.kernel_size,
)
module.weight = nn.parameter.Parameter(
torch.cat([module.weight, to_concat], axis=1)
)
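# --- Usage sketch (added for illustration): build a backbone without pretrained weights
# and run a dummy clip shaped (batch, channels, frames, height, width), the layout the
# pytorchvideo hub models expect. Note that forward() returns the logits transposed.
if __name__ == "__main__":
    model = CNN3D(in_channels=3, backbone="i3d_r50", pretrained=False)
    clip = torch.randn(2, 3, 8, 224, 224)  # two RGB clips of 8 frames each
    print(model(clip).shape)  # (num_classes, batch) after the transpose in forward()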
| python |
# Copyright 2016 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from vmware_nsx.plugins.nsx_v3 import cert_utils
from vmware_nsx.shell.admin.plugins.common import constants
from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils
from vmware_nsx.shell import resources as shell
from vmware_nsxlib.v3 import client_cert
from vmware_nsxlib.v3 import trust_management
from neutron_lib.callbacks import registry
from neutron_lib import context
from neutron_lib import exceptions
from oslo_config import cfg
LOG = logging.getLogger(__name__)
CERT_DEFAULTS = {'key-size': 2048,
'sig-alg': 'sha256',
'valid-days': 3650,
'country': 'US',
'state': 'California',
'org': 'default org',
'unit': 'default unit',
'host': 'defaulthost.org'}
def get_nsx_trust_management(**kwargs):
username, password = None, None
if kwargs.get('property'):
properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
username = properties.get('user')
password = properties.get('password')
nsx_client = utils.get_nsxv3_client(username, password, True)
nsx_trust = trust_management.NsxLibTrustManagement(nsx_client, {})
return nsx_trust
def get_certificate_manager(**kwargs):
storage_driver_type = cfg.CONF.nsx_v3.nsx_client_cert_storage.lower()
LOG.info("Certificate storage is %s", storage_driver_type)
if storage_driver_type == 'nsx-db':
storage_driver = cert_utils.DbCertificateStorageDriver(
context.get_admin_context())
elif storage_driver_type == 'none':
storage_driver = cert_utils.DummyCertificateStorageDriver()
# TODO(annak) - add support for barbican storage driver
return client_cert.ClientCertificateManager(
cert_utils.NSX_OPENSTACK_IDENTITY,
get_nsx_trust_management(**kwargs),
storage_driver)
def verify_client_cert_on():
if cfg.CONF.nsx_v3.nsx_use_client_auth:
return True
LOG.info("Operation not applicable since client authentication "
"is disabled")
return False
@admin_utils.output_header
def generate_cert(resource, event, trigger, **kwargs):
"""Generate self signed client certificate and private key
"""
if not verify_client_cert_on():
return
if cfg.CONF.nsx_v3.nsx_client_cert_storage.lower() == "none":
LOG.info("Generate operation is not supported "
"with storage type 'none'")
return
# update cert defaults based on user input
properties = CERT_DEFAULTS.copy()
if kwargs.get('property'):
properties.update(admin_utils.parse_multi_keyval_opt(
kwargs['property']))
try:
prop = 'key-size'
key_size = int(properties.get(prop))
prop = 'valid-days'
valid_for_days = int(properties.get(prop))
except ValueError:
LOG.info("%s property must be a number", prop)
return
signature_alg = properties.get('sig-alg')
# TODO(annak): use nsxlib constants when they land
subject = {}
subject['country'] = properties.get('country')
subject['state'] = properties.get('state')
subject['organization'] = properties.get('org')
    subject['unit'] = properties.get('unit')
subject['hostname'] = properties.get('host')
with get_certificate_manager(**kwargs) as cert:
if cert.exists():
LOG.info("Deleting existing certificate")
# Need to delete cert first
cert.delete()
try:
cert.generate(subject, key_size, valid_for_days, signature_alg)
except exceptions.InvalidInput as e:
LOG.info(e)
return
LOG.info("Client certificate generated succesfully")
@admin_utils.output_header
def delete_cert(resource, event, trigger, **kwargs):
"""Delete client certificate and private key """
if not verify_client_cert_on():
return
with get_certificate_manager(**kwargs) as cert:
if cfg.CONF.nsx_v3.nsx_client_cert_storage.lower() == "none":
filename = get_cert_filename(**kwargs)
if not filename:
LOG.info("Please specify file containing the certificate "
"using filename property")
return
cert.delete_pem(filename)
else:
if not cert.exists():
LOG.info("Nothing to clean")
return
cert.delete()
LOG.info("Client certificate deleted succesfully")
@admin_utils.output_header
def show_cert(resource, event, trigger, **kwargs):
"""Show client certificate details """
if not verify_client_cert_on():
return
with get_certificate_manager(**kwargs) as cert:
if cert.exists():
cert_pem, key_pem = cert.get_pem()
expires_on = cert.expires_on()
expires_in_days = cert.expires_in_days()
cert_data = cert.get_subject()
cert_data['alg'] = cert.get_signature_alg()
cert_data['key_size'] = cert.get_key_size()
if expires_in_days >= 0:
LOG.info("Client certificate is valid. "
"Expires on %(date)s UTC (in %(days)d days).",
{'date': expires_on,
'days': expires_in_days})
else:
LOG.info("Client certificate expired on %s.", expires_on)
LOG.info("Key Size %(key_size)s, "
"Signature Algorithm %(alg)s\n"
"Subject: Country %(country)s, State %(state)s, "
"Organization %(organization)s, Unit %(unit)s, "
"Common Name %(hostname)s", cert_data)
LOG.info(cert_pem)
else:
LOG.info("Client certificate is not registered "
"in storage")
def get_cert_filename(**kwargs):
filename = cfg.CONF.nsx_v3.nsx_client_cert_file
if kwargs.get('property'):
properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
filename = properties.get('filename', filename)
if not filename:
LOG.info("Please specify file containing the certificate "
"using filename property")
return filename
@admin_utils.output_header
def import_cert(resource, event, trigger, **kwargs):
"""Import client certificate that was generated externally"""
if not verify_client_cert_on():
return
if cfg.CONF.nsx_v3.nsx_client_cert_storage.lower() != "none":
LOG.info("Import operation is supported "
"with storage type 'none' only")
return
with get_certificate_manager(**kwargs) as cert:
if cert.exists():
LOG.info("Deleting existing certificate")
cert.delete()
filename = get_cert_filename(**kwargs)
if not filename:
return
cert.import_pem(filename)
LOG.info("Client certificate imported succesfully")
@admin_utils.output_header
def show_nsx_certs(resource, event, trigger, **kwargs):
"""Show client certificates associated with openstack identity in NSX"""
# Note - this operation is supported even if the feature is disabled
nsx_trust = get_nsx_trust_management(**kwargs)
ids = nsx_trust.get_identities(cert_utils.NSX_OPENSTACK_IDENTITY)
if not ids:
LOG.info("Principal identity %s not found",
cert_utils.NSX_OPENSTACK_IDENTITY)
return
LOG.info("Certificate(s) associated with principal identity %s\n",
cert_utils.NSX_OPENSTACK_IDENTITY)
cert = None
for identity in ids:
if 'certificate_id' in identity:
cert = nsx_trust.get_cert(identity['certificate_id'])
LOG.info(cert['pem_encoded'])
if not cert:
LOG.info("No certificates found")
registry.subscribe(generate_cert,
constants.CERTIFICATE,
shell.Operations.GENERATE.value)
registry.subscribe(show_cert,
constants.CERTIFICATE,
shell.Operations.SHOW.value)
registry.subscribe(delete_cert,
constants.CERTIFICATE,
shell.Operations.CLEAN.value)
registry.subscribe(import_cert,
constants.CERTIFICATE,
shell.Operations.IMPORT.value)
registry.subscribe(show_nsx_certs,
constants.CERTIFICATE,
shell.Operations.NSX_LIST.value)
| python |
def distance(x, y):
    """L2 (Euclidean) distance between x and y along the last dimension."""
    return (x-y).norm(2,-1)
def invprod(x, y):
    """Reciprocal of sigmoid(x * y) summed over the last dimension (a similarity score)."""
return 1/(((x*y).sigmoid()).sum(-1)) | python |
import os
import cv2
import numpy as np
if __name__ == '__main__':
    # Start a DICOM server that receives DICOM files sent by the X-ray machine
from pydicom.uid import ImplicitVRLittleEndian
from pynetdicom import AE, debug_logger, evt
from pynetdicom.sop_class import XRayAngiographicImageStorage
from pynetdicom.sop_class import _VERIFICATION_CLASSES as VC
debug_logger()
def handle_store(event, storage_dir):
"""Handle EVT_C_STORE events."""
try:
os.makedirs(storage_dir, exist_ok=True)
        except OSError:
return 0xC001
ds = event.dataset
if len(ds.PixelData) == 2097152:
img = np.frombuffer(ds.PixelData, dtype=np.uint16)
img = (img.reshape((ds.Rows, ds.Columns)) / 256).astype(np.uint8)
elif len(ds.PixelData) == 3145728:
img = np.frombuffer(ds.PixelData, dtype=np.uint8)
img = img.reshape((ds.Rows, ds.Columns, 3))
else:
            raise Exception('Unsupported pixel data format...')
        img = np.rot90(img, 1)  # TODO: rotate with -1 for the lab setup, 1 for the hospital setup
bmp = os.path.join(storage_dir, ds.SOPInstanceUID + '.bmp')
print(bmp, 'saved...')
cv2.imwrite(bmp, img)
return 0x0000
handlers = [(evt.EVT_C_STORE, handle_store, ['static/data'])]
ae = AE()
ae.add_supported_context(XRayAngiographicImageStorage, ImplicitVRLittleEndian)
for key in VC:
ae.add_supported_context(VC[key])
print('server starting...')
ae.start_server(('0.0.0.0', 5104), block=True, evt_handlers=handlers)
| python |
from app.data_models.relationship_store import Relationship, RelationshipStore
relationships = [
{
"list_item_id": "123456",
"to_list_item_id": "789101",
"relationship": "Husband or Wife",
},
{
"list_item_id": "123456",
"to_list_item_id": "ghijkl",
"relationship": "Husband or Wife",
},
]
def test_serialisation():
relationship_store = RelationshipStore(relationships)
assert relationship_store.serialize() == relationships
def test_deserialisation():
relationship_store = RelationshipStore(relationships)
assert Relationship(**relationships[0]) in relationship_store
assert len(relationship_store) == 2
def test_clear(): # pylint: disable=redefined-outer-name
relationship_store = RelationshipStore(relationships)
relationship_store.clear()
assert not relationship_store
assert relationship_store.is_dirty
def test_add_relationship():
relationship = Relationship(**relationships[0])
relationship_store = RelationshipStore()
relationship_store.add_or_update(relationship)
assert (
relationship_store.get_relationship(
relationship.list_item_id, relationship.to_list_item_id
)
== relationship
)
assert len(relationship_store) == 1
assert relationship_store.is_dirty
def test_add_relationship_that_already_exists():
relationship = relationships[0]
relationship_store = RelationshipStore([relationship])
relationship_store.add_or_update(Relationship(**relationship))
assert len(relationship_store) == 1
assert not relationship_store.is_dirty
def test_get_relationship():
relationship_store = RelationshipStore(relationships)
relationship = relationship_store.get_relationship(
list_item_id="123456", to_list_item_id="789101"
)
assert relationship
def test_get_relationship_that_doesnt_exist():
relationship_store = RelationshipStore(relationships)
relationship = relationship_store.get_relationship(
list_item_id="123456", to_list_item_id="yyyyyy"
)
assert not relationship
def test_remove_relationship():
relationship_store = RelationshipStore(relationships)
relationship_store.remove_relationship(
list_item_id="123456", to_list_item_id="789101"
)
assert relationship_store.is_dirty
assert len(relationship_store) == 1
def test_remove_relationship_that_doesnt_exist():
relationship_store = RelationshipStore(relationships)
relationship_store.remove_relationship(
list_item_id="123456", to_list_item_id="yyyyyy"
)
assert not relationship_store.is_dirty
assert len(relationship_store) == 2
def test_remove_id_in_multiple_relationships():
relationship_store = RelationshipStore(relationships)
relationship_store.remove_all_relationships_for_list_item_id("123456")
assert not relationship_store
assert relationship_store.is_dirty
def test_remove_id_in_single_relationship():
relationship_store = RelationshipStore(relationships)
relationship_store.remove_all_relationships_for_list_item_id("789101")
remaining_relationship = Relationship(**relationships[1])
assert len(relationship_store) == 1
assert (
relationship_store.get_relationship(
remaining_relationship.list_item_id, remaining_relationship.to_list_item_id
)
== remaining_relationship
)
assert relationship_store.is_dirty
def test_update_existing_relationship():
relationship_store = RelationshipStore(relationships)
relationship = Relationship(**relationships[0])
relationship.relationship = "test"
relationship_store.add_or_update(relationship)
assert len(relationship_store) == 2
updated_relationship = relationship_store.get_relationship(
relationship.list_item_id, relationship.to_list_item_id
)
assert updated_relationship.relationship == "test"
assert relationship_store.is_dirty
| python |
import os
import gc
import gym
import random
import numpy as np
from collections import deque
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
class Actor(nn.Module):
    def __init__(self, state_dim, action_size=2, action_limit=1.):
        super(Actor, self).__init__()
        self.state_dim = state_dim
        self.action_dim = action_size
        self.action_lim = action_limit
        ''' fully-connected policy network '''
        hidden_layers = [64, 32, 8]
        modules = []
        seq = [state_dim] + hidden_layers
        for in_dim, out_dim in zip(seq[: -1], seq[1:]):
            modules.append(nn.Linear(in_dim, out_dim))
            modules.append(nn.ReLU())
        self.hidden = nn.Sequential(*modules)
        self.out = nn.Linear(seq[-1], action_size)
        self._init_weight()
    def forward(self, state):
        x = self.hidden(state)
        x = self.out(x)
        # scale the tanh output to the action range (avoid in-place ops on autograd outputs)
        action = torch.tanh(x) * self.action_lim
        return action
    def _init_weight(self):
        for m in self.hidden:
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.constant_(m.bias, 0.01)
        nn.init.normal_(self.out.weight)
        nn.init.constant_(self.out.bias, 0.01)
class Critic(nn.Module):
    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()
        self.state_dim = state_dim
        self.action_dim = action_dim
        # state branch
        s_layer = [64, 32, 8]
        modules = []
        seq = [state_dim] + s_layer
        for in_dim, out_dim in zip(seq[: -1], seq[1:]):
            modules.append(nn.Linear(in_dim, out_dim))
            modules.append(nn.ReLU())
        self.s_hidden = nn.Sequential(*modules)
        # action branch
        a_layer = [32, 8]
        modules = []
        seq = [action_dim] + a_layer
        for in_dim, out_dim in zip(seq[: -1], seq[1:]):
            modules.append(nn.Linear(in_dim, out_dim))
            modules.append(nn.ReLU())
        self.a_hidden = nn.Sequential(*modules)
        self.out = nn.Linear(a_layer[-1] + s_layer[-1], 1)
        self._init_weight()
def _init_weight(self):
for m in self.s_hidden:
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0.01)
for m in self.a_hidden:
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0.01)
nn.init.normal_(self.out.weight)
nn.init.constant_(self.out.bias, 0.01)
def forward(self, state, action):
'''
Q(s, a)
'''
s = self.s_hidden(state)
a = self.a_hidden(action)
x = torch.cat((s, a), dim=1)
x = self.out(x)
return x
class Noise(object):
"""
implement ornstein-uhlenbeck noise
Example:
>>> no = Noise(1)
>>> states = []
>>> for i in range(1000):
... states.append(no.sample())
>>> import matplotlib.pyplot as plt
>>> plt.plot(states)
>>> plt.show()
"""
def __init__(self, action_dim, mu=0, theta=0.15, sigma=0.2):
self.action_dim = action_dim
self.mu = mu
self.theta = theta
self.sigma = sigma
self.X = mu * np.ones(action_dim)
def reset(self):
self.X = np.ones(self.action_dim) * self.mu
def sample(self):
dx = self.theta * (self.mu - self.X)
dx += self.sigma * np.random.randn(len(self.X))
self.X += dx
return self.X
class Trainer(object):
def __init__(self, buffer, state_dim, action_dim, action_limit, batch_size=128, lr=0.001, gamma=0.99, tau=0.001):
self.state_dim = state_dim
self.action_dim = action_dim
self.action_lim = action_limit
self.buffer = buffer
self.iter = 0
self.batch_size = batch_size
self.tau = tau
self.gamma = gamma
self.noise = Noise(action_dim)
self.actor = Actor(state_dim, action_dim, action_limit)
self.target_actor = Actor(state_dim, action_dim, action_limit)
self.actor_optimizer = optim.Adam(self.actor.parameters(), lr)
self.critic = Critic(state_dim, action_dim)
self.target_critic = Critic(state_dim, action_dim)
self.critic_optimizer = optim.Adam(self.critic.parameters(), lr)
self._update(self.target_actor, self.actor)
self._update(self.target_critic, self.critic)
def _update(self, tar, src):
for tar_param, param in zip(tar.parameters(), src.parameters()):
tar_param.data.copy_(param.data)
def _soft_update(self, tar, src):
for target_param, param in zip(tar.parameters(), src.parameters()):
target_param.data.copy_(
target_param.data * (1 - self.tau) + param.data * self.tau
)
def get_exploitation_action(self, state):
state = torch.from_numpy(state)
action = self.target_actor.forward(state).detach()
return action.data.numpy()
def get_exploration_action(self, state):
state = torch.from_numpy(state)
action = self.actor.forward(state).detach()
new_action = action.data.numpy() + (self.noise.sample() * self.action_lim)
return new_action
def optimize(self):
s1, a1, r1, s2 = self.buffer.sample(self.batch_size)
s1 = torch.from_numpy(s1)
a1 = torch.from_numpy(a1)
r1 = torch.from_numpy(r1)
s2 = torch.from_numpy(s2)
''' optimize critic '''
a2 = self.target_actor.forward(s2).detach()
next_val = torch.squeeze(self.target_critic.forward(s2, a2).detach())
val_expected = r1 + self.gamma * next_val
val_predicted = torch.squeeze(self.critic.forward(s1, a1))
critic_loss = F.mse_loss(val_predicted, val_expected)
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
''' optimize actor '''
pred_a1 = self.actor.forward(s1)
actor_loss = -1 * torch.sum(self.critic.forward(s1, pred_a1))
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
self._soft_update(self.target_actor, self.actor)
self._soft_update(self.target_critic, self.critic)
if self.iter % 100 == 0:
print(f'Iteration :- {self.iter}, Loss_actor :- {actor_loss.data.numpy()}, Loss_critic :- {critic_loss.data.numpy()}')
self.iter += 1
def save(self, eps_cnt):
if not os.path.exists('./model/'):
os.makedirs('./model/')
torch.save(self.target_actor.state_dict(), f'./model/{eps_cnt}_actor.pt')
torch.save(self.target_critic.state_dict(), f'./model/{eps_cnt}_critic.pt')
print('Models saved successfully')
def load(self, eps_cnt):
self.actor.load_state_dict(torch.load(f'./model/{eps_cnt}_actor.pt'))
self.critic.load_state_dict(torch.load(f'./model/{eps_cnt}_critic.pt'))
self._update(self.target_actor, self.actor)
self._update(self.target_critic, self.critic)
print('Models loaded successfully')
class Buffer(object):
def __init__(self, size):
self.buffer = deque(maxlen=size)
self.max_size = size
self.len = 0
def sample(self, cnt):
"""
samples a random batch from the replay memory buffer
:param cnt: batch size
:return: batch (numpy array)
"""
        cnt = min(cnt, self.len)
        batch = random.sample(self.buffer, cnt)
s_arr = np.float32([arr[0] for arr in batch])
a_arr = np.float32([arr[1] for arr in batch])
r_arr = np.float32([arr[2] for arr in batch])
s1_arr = np.float32([arr[3] for arr in batch])
return s_arr, a_arr, r_arr, s1_arr
def add(self, s, a, r, s1):
"""
add a particular transaction in the memory buffer
:param s: current state
:param a: action taken
:param r: reward received
:param s1: next state
"""
transaction = (s, a, r, s1)
self.len += 1
if self.len > self.max_size:
self.len = self.max_size
self.buffer.append(transaction)
def length(self):
return self.len
if __name__ == '__main__':
max_episodes = 400
# state_dim = 10
# action_dim = 2
# action_max = 1
max_step = 1000
env = gym.make('BipedalWalker-v2')
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
action_max = env.action_space.high[0]
print(
f'State Dimension : {state_dim}',
f'action Dimension : {action_dim}',
f'action limitation : {action_max}',
sep='\n'
)
ram = Buffer(max_episodes)
trainer = Trainer(ram, state_dim, action_dim, action_max)
for eps in range(max_episodes):
observation = env.reset()
print(f'[EPISODE {eps}]')
for r in range(max_step):
state = np.float32(observation)
action = trainer.get_exploration_action(state)
new_observation, reward, done, info = env.step(action)
if done:
new_state = None
else:
new_state = np.float32(new_observation)
                # push this experience in ram (skip terminal transitions, whose next state is None)
                ram.add(state, action, reward, new_state)
observation = new_observation
trainer.optimize()
if done:
break
gc.collect()
if eps % 100 == 0:
trainer.save(eps)
print('Complete!')
| python |
#All MPOS
MPOS = {"Abilene": {"Jones": "253", "Taylor": "441"},
"Amarillo": {"Potter": "375", "Randall": "381"},
"Brownsville": {"Cameron": "061"},
"Bryan-College Station": {"Brazos": "041"},
"Capital Area": {"Bastrop": "021", "Burnet": "053", "Caldwell": "055", "Hays": "209", "Travis": "453", "Williamson": "491"},
"Corpus Christi": {"Aransas": "007", "Nueces": "355", "San Patricio": "409"},
"El Paso": {"Atascosa": "013", "El Paso": "141"},
"Harlingen-San Benito": {"Cameron": "061"},
"Hidalgo": {"Hidalgo": "215"},
"Killeen-Temple": {"Bell": "027", "Coryell": "099", "Lampasas": "281" },
"Laredo": {"Webb": "479"},
"Longview": {"Gregg": "183", "Harrison": "203", "Rusk": "401", "Upshur": "459"},
"LRGV": {"Cameron": "061", "Hidalgo": "215"},
"Lubbock": {"Lubbock": "303"},
"Midland-Odessa": {"Ector": "135", "Midland": "329"},
"San Angelo": {"Tom Green": "451"},
"Sherman-Denison": {"Grayson": "181"},
"South East Texas": {"Hardin": "199", "Jefferson": "245", "Orange": "361"},
"Texarkana": {"Bowie": "037", "Comal": "091"},
"Victoria": {"Victoria": "469"},
"Waco": {"McLennan": "309"},
"Witchita Falls": {"Archer": "009", "Wichita": "485"}
}
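# A small lookup sketch (added for illustration; `county_fips` is a hypothetical helper):
# the three-digit values above are county FIPS suffixes, so prefixing them with "48"
# (the Texas state FIPS code) yields the full five-digit county FIPS code.
def county_fips(mpo, county, state_fips="48"):
    return state_fips + MPOS[mpo][county]
# e.g. county_fips("Waco", "McLennan") -> "48309"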
| python |
from drivers import *
print "Driver loaded"
from drivers.nidaq.asserv import Asserv
from PyDAQmx import *
import numpy as np
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
import sys
default_fm_dev = 400 # Modulation depth (Hz for 5 V)
fs = E8254A(gpibAdress=19,name="freqSynth")
default_frequency = fs.frequency
sampling_rate = 1e6 # Hz
modulation_frequency = 271 # Hz
cycle_number = 50 # Number of cycles between fc correction
n_samples_per_cycle = int(sampling_rate/(modulation_frequency*2))*2 #Make sure that this is divisible by 2
modulation_frequency = sampling_rate/n_samples_per_cycle
discarded_samples = n_samples_per_cycle/4
gain = 100000
amplitude = 1 # V
waveform = np.hstack([-amplitude *np.ones(n_samples_per_cycle/2),
amplitude *np.ones(n_samples_per_cycle/2)])
# dds_frequency = default_frequency
asserv = Asserv(dds_frequency=default_frequency, gain=gain, device="Dev2", outChan="ao2",
                inChanList=["ai0"], numSamp=n_samples_per_cycle, nbSampCropped=discarded_samples,
                vpp=2*amplitude, freq=sampling_rate, inRange=(-5., 5.), outRange=(-10., 10.),
                waveform=waveform, cycle_number=cycle_number)
app = QtGui.QApplication([])
win = pg.GraphicsWindow()
win.resize(1000,600)
win.setWindowTitle('Pyqtgraph : Live NIDAQmx data')
pg.setConfigOptions(antialias=True)
p1 = win.addPlot(title="correction_DDS", col = 0, row = 0)
p1.addLegend()
p2 = win.addPlot(title="error signal", col = 0, row = 1)
p2.addLegend()
p3 = win.addPlot(title="laser power", col = 0, row = 2)
p3.addLegend()
p4 = win.addPlot(title="aux photodiode", col = 0, row = 3)
p4.addLegend()
p5 = win.addPlot(title="therminstance", col = 0, row = 4)
p5.addLegend()
curve = p1.plot(pen = 'm', name = 'DDS_freq')
curve2 = p2.plot(pen = 'c', name = 'error_signal')
curve3 = p3.plot(pen = 'r', name = 'transmitted_power')
curve4 = p4.plot(pen = 'g', name = 'aux photodiode')
curve5 = p5.plot(pen = 'y', name = 'thermistance')
def update() :
x, y1, y2, y3, y4, y5 = asserv.graph[0], asserv.graph[1], asserv.graph[2], asserv.graph[3], asserv.graph[4], asserv.graph[5]
curve.setData(x=x, y=y1)
curve2.setData(x=x, y=y2)
curve3.setData(x=x, y=y3)
curve4.setData(x=x, y=y4)
curve5.setData(x=x, y=y5)
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(50)
asserv.start()
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
ret = QtGui.QApplication.instance().exec_()
print "Closing"
asserv.stop()
sys.exit(ret)
| python |
#!/usr/bin/env python3
#
# Copyright 2018 Brian T. Park <[email protected]>
#
# MIT License
#
"""Monitor the output of the given serial port and echo the output to the
STDOUT. If nothing is seen on the serial output for more than 10 seconds, an
error message is printed.
If the --test flag is given, the output is assumed to come from an AUnit unit
test, and the script validates that the test ran successfully. The script exits
with a status 0 if the test is successful, otherwise exits with a status 1.
Usage:
    serial_monitor.py [--help] [--log_level] [--list | --test | --monitor]
        [--port /dev/ttyPort] [--baud 115200] [--eof eof]
Flags:
--list List the known tty ports. (default)
--monitor Monitor the serial port and echo the lines to the STDOUT.
--test Verify an AUnit test suite.
--port {tty} Set the tty port.
--baud {baud} Set the baud rate.
--log_level (INFO|DEBUG|ERROR) Set the logging level.
--eof eof The End-of-File string marker.
"""
import argparse
import serial
import serial.tools.list_ports
import logging
import re
from time import sleep
# Logging message format.
LOG_FORMAT = '%(asctime)s %(levelname)s %(name)s: %(message)s'
# Logging date format.
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S%z'
# Time out after this many seconds if the serial port produces no output.
TIMEOUT_ON_IDLE = 10
# Starting point of the number of seconds to wait for the serial port.
# Actual wait time increases using exponential back off.
WAIT_TIME_BASE = 1
# Number of attempts to try opening the serial port.
NUM_ATTEMPTS = 4
# Regular expressions that match the start and end of an AUnit test run.
TEST_START_RE = re.compile(r'TestRunner started')
TEST_END_RE = re.compile(r'TestRunner summary.*(\d+) failed.*(\d+) timed out')
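# Illustrative lines (not verbatim AUnit output) that the two patterns above are meant
# to match:
#   "TestRunner started on 5 test(s)."
#   "TestRunner summary: 4 passed, 0 failed, 0 skipped, 0 timed out, out of 5 test(s)."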
# Constants for the test_mode finite state machine
TEST_MODE_UNKNOWN = 0
TEST_MODE_START_FOUND = 1
TEST_MODE_END_SUMMARY_FOUND = 2
def monitor(port, baud, eof, timeout):
"""Read the serial output and echo the lines to the STDOUT."""
logging.info('Reading the serial port %s at %s baud' % (port, baud))
ser = open_port(port, baud, timeout)
logging.info('Monitoring port %s...' % port)
try:
while True:
line = ser.readline()
line = line.decode('ascii')
if line == '':
logging.error(
f"No output detected after {timeout} seconds... exiting."
)
break
line = line.rstrip()
print(line)
if eof and eof in line:
# The line with eof is *included* in the output.
logging.info(f"Detected '{eof}' EOF string... exiting.")
break
finally:
ser.close()
def validate_test(port, baud, timeout):
"""Read and verify an AUnit test looking and matching specific lines from
the TestRunner of AUnit in the serial output.
"""
logging.info('Reading the AUnit test on serial port %s at %s baud' %
(port, baud))
ser = open_port(port, baud, timeout)
try:
summary_line = ''
test_mode = TEST_MODE_UNKNOWN
while True:
line = ser.readline()
line = line.decode('ascii')
if line == '': break
line = line.rstrip()
print(line)
if test_mode == TEST_MODE_UNKNOWN:
match = TEST_START_RE.match(line)
if match:
test_mode = TEST_MODE_START_FOUND
continue
match = TEST_END_RE.match(line)
if match:
logging.error("Found 'TestRunner summary' " +
"without 'TestRunner started'")
break
elif test_mode == TEST_MODE_START_FOUND:
match = TEST_START_RE.match(line)
if match:
logging.error("Unexpected 'TestRunner started'")
break
match = TEST_END_RE.match(line)
if match:
test_mode = TEST_MODE_END_SUMMARY_FOUND
summary_line = line
break
finally:
ser.close()
if test_mode != TEST_MODE_END_SUMMARY_FOUND:
        raise Exception('No TestRunner summary detected after %s seconds... exiting.'
                        % timeout)
if summary_line:
match = TEST_END_RE.match(line)
if match:
num_failed = match.group(1)
num_expired = match.group(2)
if num_failed != '0' or num_expired != '0':
raise Exception('Found %s failed and/or %s timed out' %
(num_failed, num_expired))
else:
raise Exception('Unexpected TestRunner output')
# See https://stackoverflow.com/questions/12090503
def list_ports():
"""List the available serial ports."""
for comport in serial.tools.list_ports.comports():
print(comport)
def open_port(port, baud, timeout):
"""Open the given port. Boards like Teensy, Leonardo, and Micro do not
create a virtual serial port until the Arduino program runs, so we make
multiple attempts (NUM_ATTEMPTS) to open the port using an exponential back
off wait time.
"""
wait_time = WAIT_TIME_BASE
count = 1
ser = serial.Serial(port=None, baudrate=baud, timeout=timeout)
ser.port = port
while True:
try:
logging.info('Opening serial port %s' % port)
ser.open()
break
except:
if count >= NUM_ATTEMPTS:
break
logging.info('Failed... waiting %s seconds to retry...' %
wait_time)
sleep(wait_time)
count += 1
wait_time *= 1.5
if not ser.is_open:
raise Exception('Unable to open serial port %s after %s attempts' %
(port, NUM_ATTEMPTS))
return ser
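# With the constants above (WAIT_TIME_BASE = 1, NUM_ATTEMPTS = 4, factor 1.5), a board
# that is slow to enumerate is retried after waits of roughly 1 s, 1.5 s and 2.25 s
# before open_port() gives up and raises.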
def main():
parser = argparse.ArgumentParser(
description='Read the given Arduino serial port')
parser.add_argument(
'--log_level', action='store', default='DEBUG', help='Logging level')
parser.add_argument(
'--port', action='store', default='/dev/ttyUSB0', help='port')
parser.add_argument(
'--baud', action='store', default='115200', help='baud')
parser.add_argument(
'--list',
action='store_true',
help='List the available ports (default)')
parser.add_argument(
'--test', action='store_true', help='Verify an AUnit test')
parser.add_argument(
'--monitor', action='store_true', help='Monitor the serial port')
parser.add_argument(
'--eof', action='store', default='', help='End of File string')
    parser.add_argument(
        '--timeout',
        action='store',
        type=float,
        default=TIMEOUT_ON_IDLE,
        help='Seconds of serial inactivity before giving up')
args = parser.parse_args()
# Configure logging.
logging.basicConfig(
level=args.log_level, format=LOG_FORMAT, datefmt=DATE_FORMAT)
if args.monitor:
monitor(args.port, args.baud, args.eof, args.timeout)
elif args.test:
validate_test(args.port, args.baud, args.timeout)
else:
list_ports()
if __name__ == '__main__':
main()
| python |
# Lint as: python3
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for computations.py (and __init__.py)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
import tensorflow as tf
from tensorflow_federated.python.common_libs import test
from tensorflow_federated.python.core import api as tff
class ComputationsTest(test.TestCase):
def test_tf_comp_first_mode_of_usage_as_non_polymorphic_wrapper(self):
# Wrapping a lambda with a parameter.
foo = tff.tf_computation(lambda x: x > 10, tf.int32)
self.assertEqual(str(foo.type_signature), '(int32 -> bool)')
self.assertEqual(foo(9), False)
self.assertEqual(foo(11), True)
# Wrapping an existing Python function with a parameter.
bar = tff.tf_computation(tf.add, (tf.int32, tf.int32))
self.assertEqual(str(bar.type_signature), '(<int32,int32> -> int32)')
# Wrapping a no-parameter lambda.
baz = tff.tf_computation(lambda: tf.constant(10))
self.assertEqual(str(baz.type_signature), '( -> int32)')
self.assertEqual(baz(), 10)
# Wrapping a no-parameter Python function.
def bak_fn():
return tf.constant(10)
bak = tff.tf_computation(bak_fn)
self.assertEqual(str(bak.type_signature), '( -> int32)')
self.assertEqual(bak(), 10)
def test_tf_fn_with_variable(self):
@tff.tf_computation
def read_var():
v = tf.Variable(10, name='test_var')
return v
self.assertEqual(read_var(), 10)
def test_tf_comp_second_mode_of_usage_as_non_polymorphic_decorator(self):
# Decorating a Python function with a parameter.
@tff.tf_computation(tf.int32)
def foo(x):
return x > 10
self.assertEqual(str(foo.type_signature), '(int32 -> bool)')
self.assertEqual(foo(9), False)
self.assertEqual(foo(10), False)
self.assertEqual(foo(11), True)
# Decorating a no-parameter Python function.
@tff.tf_computation
def bar():
return tf.constant(10)
self.assertEqual(str(bar.type_signature), '( -> int32)')
self.assertEqual(bar(), 10)
def test_tf_comp_with_sequence_inputs_and_outputs_does_not_fail(self):
@tff.tf_computation(tff.SequenceType(tf.int32))
def _(x):
return x
def test_with_sequence_of_pairs(self):
pairs = tf.data.Dataset.from_tensor_slices(
(list(range(5)), list(range(5, 10))))
@tff.tf_computation
def process_pairs(ds):
return ds.reduce(0, lambda state, pair: state + pair[0] + pair[1])
self.assertEqual(process_pairs(pairs), 45)
def test_with_four_element_dataset_pipeline(self):
@tff.tf_computation
def comp1():
return tf.data.Dataset.range(5)
@tff.tf_computation(tff.SequenceType(tf.int64))
def comp2(ds):
return ds.map(lambda x: tf.cast(x + 1, tf.float32))
@tff.tf_computation(tff.SequenceType(tf.float32))
def comp3(ds):
return ds.repeat(5)
@tff.tf_computation(tff.SequenceType(tf.float32))
def comp4(ds):
return ds.reduce(0.0, lambda x, y: x + y)
@tff.tf_computation
def comp5():
return comp4(comp3(comp2(comp1())))
self.assertEqual(comp5(), 75.0)
def test_tf_comp_third_mode_of_usage_as_polymorphic_callable(self):
# Wrapping a lambda.
foo = tff.tf_computation(lambda x: x > 0)
self.assertEqual(foo(-1), False)
self.assertEqual(foo(0), False)
self.assertEqual(foo(1), True)
# Decorating a Python function.
@tff.tf_computation
def bar(x, y):
return x > y
self.assertEqual(bar(0, 1), False)
self.assertEqual(bar(1, 0), True)
self.assertEqual(bar(0, 0), False)
def test_fed_comp_typical_usage_as_decorator_with_unlabeled_type(self):
@tff.federated_computation((tff.FunctionType(tf.int32, tf.int32), tf.int32))
def foo(f, x):
assert isinstance(f, tff.Value)
assert isinstance(x, tff.Value)
assert str(f.type_signature) == '(int32 -> int32)'
assert str(x.type_signature) == 'int32'
result_value = f(f(x))
assert isinstance(result_value, tff.Value)
assert str(result_value.type_signature) == 'int32'
return result_value
self.assertEqual(
str(foo.type_signature), '(<(int32 -> int32),int32> -> int32)')
@tff.tf_computation(tf.int32)
def third_power(x):
return x**3
self.assertEqual(foo(third_power, 10), int(1e9))
self.assertEqual(foo(third_power, 1), 1)
def test_fed_comp_typical_usage_as_decorator_with_labeled_type(self):
@tff.federated_computation((
('f', tff.FunctionType(tf.int32, tf.int32)),
('x', tf.int32),
))
def foo(f, x):
return f(f(x))
@tff.tf_computation(tf.int32)
def square(x):
return x**2
@tff.tf_computation(tf.int32, tf.int32)
def square_drop_y(x, y): # pylint: disable=unused-argument
return x * x
self.assertEqual(
str(foo.type_signature), '(<f=(int32 -> int32),x=int32> -> int32)')
self.assertEqual(foo(square, 10), int(1e4))
self.assertEqual(square_drop_y(square_drop_y(10, 5), 100), int(1e4))
self.assertEqual(square_drop_y(square_drop_y(10, 100), 5), int(1e4))
with self.assertRaisesRegexp(TypeError,
'is not assignable from source type'):
self.assertEqual(foo(square_drop_y, 10), 100)
def test_with_tf_datasets(self):
@tff.tf_computation(tff.SequenceType(tf.int64))
def foo(ds):
return ds.reduce(np.int64(0), lambda x, y: x + y)
self.assertEqual(str(foo.type_signature), '(int64* -> int64)')
@tff.tf_computation
def bar():
return tf.data.Dataset.range(10)
self.assertEqual(str(bar.type_signature), '( -> int64*)')
self.assertEqual(foo(bar()), 45)
def test_no_argument_fed_comp(self):
@tff.federated_computation
def foo():
return 10
self.assertEqual(str(foo.type_signature), '( -> int32)')
self.assertEqual(foo(), 10)
if __name__ == '__main__':
test.main()
| python |
import pandas as pd
pd.options.display.max_columns = None
from sklearn.preprocessing import OrdinalEncoder
from torchvision import datasets, transforms
import torch
import plotly.express as px
import os, sys
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
from utils.dataset import NumpyDataset, TorchDataSet
class MNISTData(TorchDataSet):
def __init__(self, split=False, normalize=False, shuffle=True, seed=None):
X, y = self.get_X_y()
super().__init__(X=X, y=y, one_hot_target=False, normalize=normalize, split=split, dataloader_shuffle=shuffle, seed=seed, label_type='categoric')
# self.get_tensors()
def get_X_y(self):
mnist_train = datasets.MNIST(root="./mnist-model/datasets/mnist_train",
download=True, train=True,
transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]))
mnist_test = datasets.MNIST(root="./mnist-model/datasets/mnist_test",
download=True, train=False,
transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]))
X = mnist_train.data
X = torch.cat((X, mnist_test.data), dim=0).reshape(-1, 1, 28, 28)
y = mnist_train.targets
y = torch.cat((y, mnist_test.targets), dim=0)
return X.detach().numpy(), y.detach().numpy()
if __name__ == "__main__":
mnist = MNISTData(split=True, normalize=True)
X = mnist.X
print(X.shape)
print(X.dtype)
print(torch.unique(mnist.y))
print(mnist.y_sets[0].shape) | python |
import asyncio
import logging
from ottoengine import const, helpers
from ottoengine.model import dataobjects
_LOG = logging.getLogger(__name__)
# _LOG.setLevel(logging.DEBUG)
class RuleActionItem(object):
""" This is a single action step in an action sequence """
def get_dict_config(self) -> dict:
# This will be overridden by the subclasses
raise NotImplementedError("get_dict_config was not properly overridden")
def serialize(self) -> dict:
        # This MAY be overridden by the subclass to accommodate special handling
return self.get_dict_config()
async def async_execute(self, engine) -> bool:
'''Runs the action.
Returns True if action was successful.
Returns False if the action was unsuccessful.
'''
# This will be overridden by the subclasses
raise NotImplementedError("async_execute was not properly overridden")
class ServiceAction(RuleActionItem):
# domain: light
# service: turn_on
# data:
# entity_id: group.bedroom
# brightness: 100
def __init__(self, domain, service, entity_id=None, data_dict={}):
self._domain = domain
self._service = service # string
self._data_dict = data_dict # {} dictionary
if entity_id is not None:
self._data_dict["entity_id"] = entity_id
# Override
async def async_execute(self, engine):
_LOG.info("Service called - domain: {}, service: {}, data: {}".format(
self._domain, self._service, self._data_dict)
)
await engine.call_service(
dataobjects.ServiceCall(self._domain, self._service, self._data_dict)
)
return True
@staticmethod
def from_dict(dict_obj):
# j = json
# kwargs = {
# "domain": j['domain'],
# "service": j["service"]
# }
# if "data" in j:
# kwargs["data"] = j["data"]
# return ServiceAction(**kwargs)
domain = dict_obj.get(const.DOMAIN)
service = dict_obj.get(const.SERVICE)
data = dict_obj.get(const.DATA, [])
return ServiceAction(domain, service, data_dict=data)
# Override
def get_dict_config(self) -> dict:
d = {
"domain": self._domain,
"service": self._service,
}
if self._data_dict:
d["data"] = self._data_dict
return d
class ConditionAction(RuleActionItem):
# This is just a condition object
def __init__(self, condition_obj):
self._condition_obj = condition_obj
# No from_dict function since this is just a condition object
# We use the _condition_from_dict() function in persistence.py instead
# Override
async def async_execute(self, engine):
'''Tests the condition. Returns the result of the test'''
result = False
if self._condition_obj.evaluate(engine):
result = True
_LOG.info("Condition action is {}: {}".format(result, self._condition_obj.serialize()))
return result
# Override
def get_dict_config(self) -> dict:
return self._condition_obj.get_condition_config()
class DelayAction(RuleActionItem):
# delay: 00:01:30
def __init__(self, delay_delta):
self._delay_delta = delay_delta # datetime.timedelta
# Override
async def async_execute(self, engine):
delay_secs = self._delay_delta.total_seconds()
_LOG.info("Delay action for {} seconds".format(delay_secs))
await asyncio.sleep(delay_secs)
return True
@staticmethod
def from_dict(json):
return DelayAction(helpers.hms_string_to_timedelta(json["delay"]))
# Override
def get_dict_config(self) -> dict:
# To re-create: timedelta(days, seconds, microseconds)
return {
"delay": helpers.timedelta_to_hms_string(self._delay_delta)
}
class LogAction(RuleActionItem):
# log_message: message
def __init__(self, message):
self._message = message
@staticmethod
def from_dict(json):
return LogAction(json.get("log_message"))
# Overrides
async def async_execute(self, engine):
_LOG.info("LogAction: {}".format(self._message))
return True
def get_dict_config(self) -> dict:
return {"log_message": self._message}
| python |
import tests2 as t
t.testing(method = 'KIR', initial = 'sin', velocity = 'const')
t.testing(method = 'KIR', initial = 'sin', velocity = 'x')
t.testing(method = 'KIR', initial = 'sin', velocity = 'func')
t.testing(method = 'KIR', initial = 'peak', velocity = 'const')
t.testing(method = 'KIR', initial = 'peak', velocity = 'x')
t.testing(method = 'KIR', initial = 'peak', velocity = 'func')
t.testing(method = 'KIR', initial = 'rectangle', velocity = 'const')
t.testing(method = 'KIR', initial = 'rectangle', velocity = 'x')
t.testing(method = 'KIR', initial = 'rectangle', velocity = 'func')
t.testing(method = 'McCormack', initial = 'sin', velocity = 'const')
t.testing(method = 'McCormack', initial = 'sin', velocity = 'x')
t.testing(method = 'McCormack', initial = 'sin', velocity = 'func')
t.testing(method = 'McCormack', initial = 'peak', velocity = 'const')
t.testing(method = 'McCormack', initial = 'peak', velocity = 'x')
t.testing(method = 'McCormack', initial = 'peak', velocity = 'func')
t.testing(method = 'McCormack', initial = 'rectangle', velocity = 'const')
t.testing(method = 'McCormack', initial = 'rectangle', velocity = 'x')
t.testing(method = 'McCormack', initial = 'rectangle', velocity = 'func')
t.testing(method = 'Beam-Warming', initial = 'sin', velocity = 'const')
t.testing(method = 'Beam-Warming', initial = 'sin', velocity = 'x')
t.testing(method = 'Beam-Warming', initial = 'sin', velocity = 'func')
t.testing(method = 'Beam-Warming', initial = 'peak', velocity = 'const')
t.testing(method = 'Beam-Warming', initial = 'peak', velocity = 'x')
t.testing(method = 'Beam-Warming', initial = 'peak', velocity = 'func')
t.testing(method = 'Beam-Warming', initial = 'rectangle', velocity = 'const')
t.testing(method = 'Beam-Warming', initial = 'rectangle', velocity = 'x')
t.testing(method = 'Beam-Warming', initial = 'rectangle', velocity = 'func')
t.testing(method = 'Lax-Wendroff', initial = 'sin', velocity = 'const')
t.testing(method = 'Lax-Wendroff', initial = 'sin', velocity = 'x')
t.testing(method = 'Lax-Wendroff', initial = 'sin', velocity = 'func')
t.testing(method = 'Lax-Wendroff', initial = 'peak', velocity = 'const')
t.testing(method = 'Lax-Wendroff', initial = 'peak', velocity = 'x')
t.testing(method = 'Lax-Wendroff', initial = 'peak', velocity = 'func')
t.testing(method = 'Lax-Wendroff', initial = 'rectangle', velocity = 'const')
t.testing(method = 'Lax-Wendroff', initial = 'rectangle', velocity = 'x')
t.testing(method = 'Lax-Wendroff', initial = 'rectangle', velocity = 'func')
t.testing(method = 'Fedorenko', initial = 'sin', velocity = 'const')
t.testing(method = 'Fedorenko', initial = 'sin', velocity = 'x')
t.testing(method = 'Fedorenko', initial = 'sin', velocity = 'func')
t.testing(method = 'Fedorenko', initial = 'peak', velocity = 'const')
t.testing(method = 'Fedorenko', initial = 'peak', velocity = 'x')
t.testing(method = 'Fedorenko', initial = 'peak', velocity = 'func')
t.testing(method = 'Fedorenko', initial = 'rectangle', velocity = 'const')
t.testing(method = 'Fedorenko', initial = 'rectangle', velocity = 'x')
t.testing(method = 'Fedorenko', initial = 'rectangle', velocity = 'func')
t.testing(method = 'Rusanov', initial = 'sin', velocity = 'const')
t.testing(method = 'Rusanov', initial = 'sin', velocity = 'x')
t.testing(method = 'Rusanov', initial = 'sin', velocity = 'func')
t.testing(method = 'Rusanov', initial = 'peak', velocity = 'const')
t.testing(method = 'Rusanov', initial = 'peak', velocity = 'x')
t.testing(method = 'Rusanov', initial = 'peak', velocity = 'func')
t.testing(method = 'Rusanov', initial = 'rectangle', velocity = 'const')
t.testing(method = 'Rusanov', initial = 'rectangle', velocity = 'x')
t.testing(method = 'Rusanov', initial = 'rectangle', velocity = 'func')
| python |
import numpy as np
import tqdm
def add_iteration_column_np(df):
"""
Only used for numerical integral timings, but perhaps also useful for other timings with some
adaptations. Adds iteration information, which can be deduced from the order, ppid, num_cpu
and name (because u0_int is only done once, we have to add a special check for that).
"""
iteration = np.empty(len(df), dtype='int64')
it = 0
N_names = len(df.name.unique())
# local_N_num_int is the number of numerical integrals in the local (current) iteration
# it determines after how long the next iteration starts
local_N_num_int = df.num_cpu.iloc[0] * N_names
# the current iteration starts here:
current_iteration_start = 0
current_ppid = df.ppid.iloc[0]
for irow, row in tqdm.tqdm(enumerate(df.itertuples())):
# for irow in tqdm.tqdm(range(len(df))):
# if current_ppid != df.ppid.iloc[irow] or ((irow - current_iteration_start) == local_N_num_int):
if current_ppid != row.ppid or ((irow - current_iteration_start) == local_N_num_int):
# current_ppid = df.ppid.iloc[irow]
current_ppid = row.ppid
current_iteration_start = irow
it += 1
# num_cpu = df.num_cpu.iloc[irow]
num_cpu = row.num_cpu
local_N_names = len(df[irow:irow + N_names * num_cpu].name.unique())
local_N_num_int = num_cpu * local_N_names
iteration[irow] = it
# if (irow + 1) % local_N_num_int == 0:
# it += 1
df['iteration'] = iteration
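# Hypothetical usage sketch (column names as assumed by the docstring above):
#   add_iteration_column_np(df_numints)
#   df_numints.groupby('iteration').size()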
# following stuff thanks to Carlos, Janneke, Atze, Berend and Lourens for discussion and suggestions on Slack:
class IterationGrouper:
"""
N.B.: the used df must have a reset index!
Use df = df.reset_index(drop=True) if necessary before grouping with this
class.
"""
def __init__(self, df):
self._group_id = 0
self._count = {}
self._max = {}
self._df = df
def __call__(self, index):
row = self._df.iloc[index]
        # Use row['name']: plain row.name on a Series returns the index label,
        # not the 'name' column.
        name = row['name']
        if name not in self._count:
            self._max[name] = row.num_cpu
            self._count[name] = 1
        else:
            if self._count[name] < self._max[name]:
                self._count[name] += 1
            else:
                self._group_id += 1
                self._count = {}
                self._count[name] = 1
                self._max[name] = row.num_cpu
return self._group_id
df_numints_selection0 = df_numints.iloc[:100000].copy()
df_numints_selection1 = df_numints.iloc[:100000].copy()
df_numints_selection2 = df_numints.iloc[:100000].copy().reset_index(drop=True)
load_timing.add_iteration_column(df_numints_selection0)
add_iteration_column_np(df_numints_selection1)
for it, (count, group) in enumerate(df_numints_selection2.groupby(IterationGrouper(df_numints_selection2))):
    df_numints_selection2.loc[group.index, 'iteration'] = it  # set_value was removed in pandas 1.0
| python |
"""
0.92%
"""
import collections
class MinStack(object):
def __init__(self):
"""
initialize your data structure here.
"""
self.stack = collections.deque()
self.minlist = []
def push(self, x):
"""
:type x: int
:rtype: void
"""
self.stack.append(x)
self.minlist.append(x)
self.minlist = sorted(self.minlist)
def pop(self):
"""
:rtype: void
"""
p = self.stack.pop()
self.minlist.remove(p)
return p
def top(self):
"""
:rtype: int
"""
        return self.stack[-1]
def getMin(self):
"""
:rtype: int
"""
return self.minlist[0] if self.minlist else None
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(x)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin() | python |
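# A minimal alternative sketch (not part of the submission above): keeping a second
# stack of running minimums makes push/pop/top/getMin all O(1), instead of re-sorting
# self.minlist on every push. Class and attribute names below are illustrative only.
class MinStackO1(object):
    def __init__(self):
        self.stack = []
        self.mins = []  # mins[-1] always holds the current minimum

    def push(self, x):
        self.stack.append(x)
        # push whichever is smaller: x or the previous minimum
        self.mins.append(x if not self.mins or x < self.mins[-1] else self.mins[-1])

    def pop(self):
        self.mins.pop()
        return self.stack.pop()

    def top(self):
        return self.stack[-1]

    def getMin(self):
        return self.mins[-1] if self.mins else None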
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It"s nice, because now 1) we have a top level
# README file and 2) it"s easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="kaggle_learn",
version="0.0.1",
author="Bangda Sun",
author_email="[email protected]",
description=("Generic data science toolbox"),
license="MIT",
url="https://github.com/bangdasun/kaggle_learn",
# url="http://packages.python.org/an_example_pypi_project",
# packages=["an_example_pypi_project", "tests"],
long_description=read("README.md"),
install_requires=[
"numpy",
"pandas",
"scikit-learn",
"matplotlib",
"tensorflow",
"keras"
],
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
],
)
| python |
index = {'Halifax': 'Q2141',
'Los Angeles': 'Q65',
'Wilkesboro': 'Q1025995',
'New York': 'Q1384',
'Uvalde': 'Q868860',
'Saint James': 'Q7401398',
'Ottawa': 'Q1930',
'Newton': 'Q49196',
'Mahé':'Q277480',
'Milwaukee': 'Q37836',
'Pomona': 'Q486868',
'Pasco': 'Q844016',
'Triumph': 'Q7844478',
'United States': 'Q30',
'Canada': 'Q16',
'India': 'Q668',
'Trinidad and Tobago': 'Q754',
'acetaminophen': 'Q57055',
'aspirin': 'Q18216',
'ibuprofen': 'Q186969',
'naproxen': 'Q1215575',
'sertraline': 'Q407617'} | python |
num=input("enter any number")
if num > 0:
print("positive")
elif num < 0:
print("negative")
else:
print("it is a zero")
| python |
import pyviz3d.visualizer as viz
import numpy as np
import math
def main():
v = viz.Visualizer()
v.add_arrow('Arrow_1', start=np.array([0, 0.2, 0]), end=np.array([1, 0.2, 0]))
v.add_arrow('Arrow_2', start=np.array([0, 0.5, 0.5]), end=np.array([0.5, 0, 0.5]), color=np.array([0, 0, 255]))
v.add_arrow('Arrow_3', start=np.array([0, 1, 0]), end=np.array([1, 1, 1]), color=np.array([30, 255, 50]),
alpha=0.5, stroke_width=0.04, head_width=0.1)
v.save('example_arrows')
if __name__ == '__main__':
main()
| python |
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
from sklearn.metrics import confusion_matrix
import sys
def main():
print(sys.argv)
BlockId = sys.argv[1]
data = pd.read_csv('./model/upload/data.csv')
# data = pd.read_csv('./test_data/data.csv')
del data['Unnamed: 32']
# data = data[:50]
X = data.iloc[:, 2:].values
y = data.iloc[:, 1].values
labelencoder_X1 = LabelEncoder()
y = labelencoder_X1.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
model = Sequential()
model.add(Dense(16, activation='relu', input_dim=30))
model.add(Dropout(0.1))
model.add(Dense(16, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(1, activation='sigmoid'))
model.load_weights("./model/downloadedWeights/"+ BlockId +".h5")
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=100, epochs=5)
scores = model.evaluate(X_test, y_test)
print("Loss: ", scores[0]) #Loss
print("Accuracy: ", scores[1]) #Accuracy
#Saving Model
model.save("./output.h5")
if __name__ == '__main__':
main() | python |
# encoding=utf8
import jenkins_job_wrecker.modules.base
from jenkins_job_wrecker.helpers import get_bool, gen_raw
from jenkins_job_wrecker.modules.triggers import Triggers
PARAMETER_MAPPER = {
'stringparameterdefinition': 'string',
'booleanparameterdefinition': 'bool',
'choiceparameterdefinition': 'choice',
'textparameterdefinition': 'text',
'fileparameterdefinition': 'file',
}
class Properties(jenkins_job_wrecker.modules.base.Base):
component = 'properties'
def gen_yml(self, yml_parent, data):
parameters = []
properties = []
for child in data:
object_name = child.tag.split('.')[-1].lower()
object_name = object_name.replace('-', '').replace('_', '')
if object_name == 'parametersdefinitionproperty':
self.registry.dispatch(self.component, 'parameters', child, parameters)
continue
elif object_name == 'pipelinetriggersjobproperty':
# Pipeline scripts put triggers in properties section
trigger = Triggers(self.registry)
for grandchild in child:
# Find the triggers tag and then generate the yaml
if grandchild.tag == 'triggers':
trigger.gen_yml(yml_parent, grandchild)
continue
self.registry.dispatch(self.component, object_name, child, properties)
if len(properties) > 0:
yml_parent.append(['properties', properties])
if len(parameters) > 0:
yml_parent.append(['parameters', parameters])
def githubprojectproperty(top, parent):
github = {}
for child in top:
if child.tag == 'projectUrl':
github['url'] = child.text
elif child.tag == 'displayName':
pass
else:
raise NotImplementedError("cannot handle XML %s" % child.tag)
parent.append({'github': github})
def envinjectjobproperty(top, parent):
env_info = {}
for child in top:
if child.tag == 'info':
for grandchild in child:
if grandchild.tag == 'loadFilesFromMaster':
env_info['load-from-master'] = get_bool(grandchild.text)
elif grandchild.tag == 'groovyScriptContent':
if grandchild.text:
env_info['groovy-content'] = grandchild.text
elif grandchild.tag == 'secureGroovyScript':
for ggchild in grandchild:
if ggchild.tag == 'script':
if ggchild.text:
env_info['groovy-content'] = ggchild.text
elif ggchild.tag == 'sandbox':
# No support in jjb for this, fail quietly for
# this one
pass
else:
raise NotImplementedError("cannot handle XML %s" % ggchild.tag)
elif grandchild.tag == 'scriptContent':
if grandchild.text:
env_info['script-content'] = grandchild.text
elif grandchild.tag == 'scriptFilePath':
if grandchild.text:
env_info['script-file'] = grandchild.text
elif grandchild.tag == 'propertiesContent':
if grandchild.text:
env_info['properties-content'] = grandchild.text
elif grandchild.tag == 'propertiesFilePath':
if grandchild.text:
env_info['properties-file'] = grandchild.text
else:
raise NotImplementedError("cannot handle XML %s" % grandchild.tag)
elif child.tag == 'on':
env_info['enabled'] = get_bool(child.text)
elif child.tag == 'keepJenkinsSystemVariables':
env_info['keep-system-variables'] = get_bool(child.text)
elif child.tag == 'keepBuildVariables':
env_info['keep-build-variables'] = get_bool(child.text)
elif child.tag == 'overrideBuildParameters':
env_info['override-build-parameters'] = get_bool(child.text)
else:
raise NotImplementedError("cannot handle XML %s" % child.tag)
parent.append({'inject': env_info})
def parameters(top, parent):
for params in top:
if params.tag != 'parameterDefinitions':
raise NotImplementedError("cannot handle XML %s" % params.tag)
for param in params:
param_name = param.tag.split('.')[-1].lower()
if param_name not in PARAMETER_MAPPER:
gen_raw(param, parent)
continue
param_type = PARAMETER_MAPPER[param_name]
parameter = {}
for setting in param:
key = {'defaultValue': 'default'}.get(setting.tag, setting.tag)
if setting.text is None:
parameter[key] = ''
elif param_type == 'bool' and (setting.text == 'true' or setting.text == 'false'):
parameter[key] = (setting.text == 'true')
elif param_type == 'choice' and setting.tag == 'choices':
choices = []
for sub_setting in setting:
if(sub_setting.attrib['class'] == 'string-array'):
for item in sub_setting:
choices.append(item.text)
else:
raise NotImplementedError(sub_setting.attrib['class'])
parameter[key] = choices
else:
parameter[key] = setting.text
parent.append({param_type: parameter})
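# Illustrative sketch of what the mapping above produces (values are hypothetical): an XML
# <hudson.model.StringParameterDefinition> carrying name FOO, description DESC and
# defaultValue BAR is appended to the output list as
#   {'string': {'name': 'FOO', 'description': 'DESC', 'default': 'BAR'}}
# matching the jenkins-job-builder parameter syntax the rest of this module targets.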
def throttlejobproperty(top, parent):
throttle = {}
for child in top:
if child.tag == 'maxConcurrentPerNode':
throttle['max-per-node'] = child.text
elif child.tag == 'maxConcurrentTotal':
throttle['max-total'] = child.text
elif child.tag == 'throttleOption':
throttle['option'] = child.text
elif child.tag == 'throttleEnabled':
throttle['enabled'] = get_bool(child.text)
elif child.tag == 'categories':
throttle['categories'] = []
elif child.tag == 'configVersion':
pass # assigned by jjb
else:
raise NotImplementedError("cannot handle XML %s" % child.tag)
parent.append({'throttle': throttle})
def slacknotifierslackjobproperty(top, parent):
slack = {}
notifications = {
"notifySuccess": "notify-success",
"notifyAborted": "notify-aborted",
"notifyNotBuilt": "notify-not-built",
"notifyUnstable": "notify-unstable",
"notifyFailure": "notify-failure",
"notifyBackToNormal": "notify-back-to-normal",
"notifyRepeatedFailure": "notify-repeated-failure"
}
for child in top:
if child.tag == 'teamDomain':
slack['team-domain'] = child.text
elif child.tag == 'token':
slack['token'] = child.text
elif child.tag == 'room':
slack['room'] = child.text
elif child.tag == 'includeTestSummary':
slack['include-test-summary'] = (child.text == 'true')
elif child.tag == 'showCommitList':
slack['show-commit-list'] = (child.text == 'true')
elif child.tag == 'includeCustomMessage':
slack['include-custom-message'] = (child.text == 'true')
elif child.tag == 'customMessage':
slack['custom-message'] = child.text
elif child.tag == 'startNotification':
slack['start-notification'] = (child.text == 'true')
elif child.tag in notifications:
slack[notifications[child.tag]] = (child.text == 'true')
else:
raise NotImplementedError("cannot handle XML %s" % child.tag)
parent.append({'slack': slack})
def builddiscarderproperty(top, parent):
discarder = {}
mapping = {'daysToKeep': 'days-to-keep',
'numToKeep': 'num-to-keep',
'artifactDaysToKeep': 'artifact-days-to-keep',
'artifactNumToKeep': 'artifact-num-to-keep'}
for child in top[0]:
discarder[mapping[child.tag]] = int(child.text)
parent.append({'build-discarder': discarder})
def disableconcurrentbuildsjobproperty(top, parent):
# Pipeline job specific tag.
# concurrent is false by default anyway, so just going to ignore it
# Check cli.py root_to_yaml func for more info
pass
def authorizationmatrixproperty(top, parent):
# mirror image of: https://opendev.org/jjb/jenkins-job-builder/src/commit/074985c7ff9360bb58be80ffab686746267f814f/jenkins_jobs/modules/properties.py#L530
credentials = 'com.cloudbees.plugins.credentials.CredentialsProvider.'
ownership = 'com.synopsys.arc.jenkins.plugins.ownership.OwnershipPlugin.'
permissions = {
''.join((credentials, 'Create')): 'credentials-create',
''.join((credentials, 'Delete')): 'credentials-delete',
''.join((credentials, 'ManageDomains')): 'credentials-manage-domains',
''.join((credentials, 'Update')): 'credentials-update',
''.join((credentials, 'View')): 'credentials-view',
'hudson.model.Item.Build': 'job-build',
'hudson.model.Item.Cancel': 'job-cancel',
'hudson.model.Item.Configure': 'job-configure',
'hudson.model.Item.Create': 'job-create',
'hudson.model.Item.Delete': 'job-delete',
'hudson.model.Item.Discover': 'job-discover',
'hudson.model.Item.ExtendedRead': 'job-extended-read',
'hudson.model.Item.Move': 'job-move',
'hudson.model.Item.Read': 'job-read',
'hudson.model.Item.ViewStatus': 'job-status',
'hudson.model.Item.Workspace': 'job-workspace',
''.join((ownership, 'Jobs')): 'ownership-jobs',
'hudson.model.Run.Delete': 'run-delete',
'hudson.model.Run.Replay': 'run-replay',
'hudson.model.Run.Update': 'run-update',
'hudson.scm.SCM.Tag': 'scm-tag'
}
authorization = {}
for child in top:
if child.tag == 'inheritanceStrategy':
class_ = child.get('class')
if class_ != 'org.jenkinsci.plugins.matrixauth.inheritance.InheritParentStrategy':
raise NotImplementedError('cannot handle inheritance strategy - not implemented in JJB')
elif child.tag == 'permission':
permission, name = child.text.split(':', 1)
if name not in authorization:
authorization[name] = []
authorization[name].append(permissions[permission])
else:
raise NotImplementedError('cannot handle XML {}'.format(child.tag))
parent.append({'authorization': authorization})
| python |
import itertools
import pymel.core as pm
import flottitools.test as mayatest
import flottitools.utils.materialutils as matutils
import flottitools.utils.skeletonutils as skelutils
import flottitools.utils.skinutils as skinutils
class TestGetSkinCluster(mayatest.MayaTestCase):
def test_get_skin_cluster_from_cube(self):
cube = self.create_cube()
joint = self.create_joint()
skin_cluster = self.pm.skinCluster(joint, cube)
        result = skinutils.get_skincluster(cube)
self.assertEqual(result, skin_cluster)
def test_get_from_shape_node(self):
test_cube, test_joints, test_skincluster = self.create_skinned_cube()
shape = test_cube.getShape()
result = skinutils.get_skincluster(shape)
self.assertEqual(test_skincluster, result)
def test_returns_none_if_no_skincluster(self):
test_cube = self.create_cube()
self.assertIsNone(skinutils.get_skincluster(test_cube))
def test_returns_none_if_no_shape(self):
test_node = self.create_transform_node()
self.assertIsNone(skinutils.get_skincluster(test_node))
def test_get_skin_cluster_from_vert(self):
test_cube, test_joints, test_skincluster = self.create_skinned_cube()
test_vert = test_cube.vtx[0]
result = skinutils.get_skincluster(test_vert)
self.assertEqual(test_skincluster, result)
class TestBindMeshToJoints(mayatest.MayaTestCase):
def setUp(self):
super(TestBindMeshToJoints, self).setUp()
self.test_cube = self.create_cube()
self.test_joints = [self.create_joint() for _ in range(5)]
def test_returns_skincluster(self):
skincl = skinutils.bind_mesh_to_joints(self.test_cube, self.test_joints)
self.assertIsNotNone(skincl)
def test_raises_with_no_mesh_to_skin(self):
self.assertRaises(RuntimeError, lambda: skinutils.bind_mesh_to_joints(None, self.test_joints))
def test_raises_with_no_joint(self):
self.assertRaises(RuntimeError, lambda: skinutils.bind_mesh_to_joints(self.test_cube, None))
def test_maintains_max_influences_default_four(self):
skincl = skinutils.bind_mesh_to_joints(self.test_cube, self.test_joints)
inf_values = pm.skinPercent(skincl, self.test_cube.vtx[0], q=True, value=True)
inf_count = len([i for i in inf_values if i != 0.0])
self.assertEqual(4, inf_count)
def test_maintains_max_influences_five(self):
skincl = skinutils.bind_mesh_to_joints(self.test_cube, self.test_joints, maximumInfluences=5)
inf_values = pm.skinPercent(skincl, self.test_cube.vtx[0], q=True, value=True)
inf_count = len([i for i in inf_values if i != 0.0])
self.assertEqual(5, inf_count)
def test_extra_joints_in_skeleton(self):
skincl = skinutils.bind_mesh_to_joints(self.test_cube, self.test_joints[2:4])
result = skincl.influenceObjects()
self.assertListEqual(self.test_joints[2:4], result)
def test_voxel_method(self):
# the geodesic voxel bind method requires a GPU so the command cannot be run in Maya standalone.
# skincl = skinutils.bind_mesh_geodesic_voxel(self.test_cube, self.test_joints, maximumInfluences=1)
# self.assertIsNotNone(skincl)
pass
class TestGetVertsWithExceedingInfluences(mayatest.MayaTestCase):
def test_get_verts_with_more_than_four_infs(self):
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skincl = skinutils.bind_mesh_to_joints(test_cube, test_joints, maximumInfluences=5)
flagged_vert_indexes = skinutils.get_vert_indexes_with_exceeding_influences(
test_cube, skin_cluster=skincl, max_influences=4)
flagged_verts = [test_cube.vtx[i] for i in flagged_vert_indexes.keys()]
flagged_verts.sort()
expected = list(test_cube.vtx)
expected.sort()
self.assertListEqual(expected, flagged_verts)
def test_no_bad_verts(self):
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skincl = skinutils.bind_mesh_to_joints(test_cube, test_joints, maximumInfluences=4)
flagged_vert_indexes = skinutils.get_vert_indexes_with_exceeding_influences(
test_cube, skin_cluster=skincl, max_influences=4)
flagged_verts = [test_cube.vtx[i] for i in flagged_vert_indexes.keys()]
self.assertListEqual([], flagged_verts)
class TestGetNonZeroInfluencesFromVert(mayatest.MayaTestCase):
def test_get_non_zero_influences_from_vert(self):
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skincl = skinutils.bind_mesh_to_joints(test_cube, test_joints, maximumInfluences=5)
non_zero_infs = skinutils.get_weighted_influences(test_cube.vtx[0], skincl)
self.assertEqual(5, len(non_zero_infs))
class TestGetSkinnedMeshesFromScene(mayatest.MayaTestCase):
def test_get_skinned_meshes_from_scene(self):
test_skinned_cubes = [self.create_cube() for x in range(3)]
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skinclusters = []
for each in test_skinned_cubes:
skincl = skinutils.bind_mesh_to_joints(each, test_joints, maximumInfluences=5)
skinclusters.append(skincl)
skinned_meshes_from_scene = skinutils.get_skinned_meshes_from_scene()
skinned_meshes_from_scene.sort()
test_skinned_cubes.sort()
self.assertListEqual(test_skinned_cubes, skinned_meshes_from_scene)
def test_skinned_curve_in_scene(self):
"""
Should only return skinned meshes in the scene. Not skinned curves.
"""
test_skinned_cubes = [self.create_cube() for x in range(3)]
test_curve = self.pm.curve(p=[(0, 0, 0), (3, 5, 6), (5, 6, 7), (9, 9, 9)])
test_joints = [self.create_joint() for _ in range(5)]
curve_skincl = skinutils.bind_mesh_to_joints(test_curve, test_joints)
skinclusters = []
for each in test_skinned_cubes:
skincl = skinutils.bind_mesh_to_joints(each, test_joints, maximumInfluences=5)
skinclusters.append(skincl)
skinned_meshes_from_scene = skinutils.get_skinned_meshes_from_scene()
skinned_meshes_from_scene.sort()
test_skinned_cubes.sort()
self.assertListEqual(test_skinned_cubes, skinned_meshes_from_scene)
def test_multiple_mats_assigned_to_skinned_mesh(self):
test_skinned_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skincl = skinutils.bind_mesh_to_joints(test_skinned_cube, test_joints, maximumInfluences=5)
mat1, _ = matutils.create_material('foo')
mat2, _ = matutils.create_material('bar')
matutils.assign_material(test_skinned_cube, mat1)
matutils.assign_material(test_skinned_cube.f[0], mat2)
skinned_meshes_from_scene = skinutils.get_skinned_meshes_from_scene()
self.assertListEqual([test_skinned_cube], skinned_meshes_from_scene)
class TestGetPrunedInfluencesToWeights(mayatest.MayaTestCase):
def test_no_op_with_four_infs(self):
influences_to_weights = {'foo': 0.5, 'bar': 0.1, 'spam': 0.1, 'eggs': 0.3}
result = skinutils.get_pruned_influences_to_weights(influences_to_weights)
self.assertDictEqual(influences_to_weights, result)
def test_max_3_influences(self):
influences_to_weights = {'foo': 0.5, 'bar': 0.2, 'spam': 0.2, 'eggs': 0.1}
result = skinutils.get_pruned_influences_to_weights(influences_to_weights, max_influences=3)
expected = {'foo': 0.5, 'bar': 0.2, 'spam': 0.2, 'eggs': 0.0}
self.assertDictEqual(expected, result)
def test_five_influences(self):
influences_to_weights = {'foo': 0.5, 'bar': 0.2, 'spam': 0.1, 'eggs': 0.1, 'ham': 0.05}
result = skinutils.get_pruned_influences_to_weights(influences_to_weights)
expected = {'foo': 0.5, 'bar': 0.2, 'spam': 0.1, 'eggs': 0.1, 'ham': 0.0}
self.assertDictEqual(expected, result)
def test_five_influences_with_equal_min_values(self):
influences_to_weights = {'foo': 0.5, 'bar': 0.2, 'spam': 0.2, 'eggs': 0.05, 'ham': 0.05}
result = skinutils.get_pruned_influences_to_weights(influences_to_weights)
expected = {'foo': 0.5, 'bar': 0.2, 'spam': 0.2, 'eggs': 0.0, 'ham': 0.0}
self.assertDictEqual(expected, result)
def test_divisor_is_2(self):
influences_to_weights = {'foo': 1.0, 'bar': 0.4, 'spam': 0.2, 'eggs': 0.2}
result = skinutils.get_pruned_influences_to_weights(influences_to_weights, divisor=2.0)
expected = {'foo': 0.5, 'bar': 0.2, 'spam': 0.1, 'eggs': 0.1}
self.assertDictEqual(expected, result)
def test_too_many_infs_all_equal(self):
influences_to_weights = {'foo': 0.2, 'bar': 0.2, 'spam': 0.2, 'eggs': 0.2, 'ham': 0.2}
result = skinutils.get_pruned_influences_to_weights(influences_to_weights)
expected = {'foo': 0.2, 'bar': 0.2, 'spam': 0.0, 'eggs': 0.2, 'ham': 0.2}
self.assertDictEqual(expected, result)
def test_far_too_many_infs_all_equal(self):
influences_to_weights = {'foo': 0.2, 'bar': 0.2, 'spam': 0.2, 'eggs': 0.2, 'ham': 0.2,
'foo2': 0.2, 'bar2': 0.2, 'spam2': 0.2, 'eggs2': 0.2, 'ham2': 0.2}
result = skinutils.get_pruned_influences_to_weights(influences_to_weights)
expected = {'foo': 0.0, 'bar': 0.2, 'spam': 0.0, 'eggs': 0.2, 'ham': 0.0,
'foo2': 0.0, 'bar2': 0.2, 'spam2': 0.0, 'eggs2': 0.2, 'ham2': 0.0}
self.assertDictEqual(expected, result)
class TestPruneExceedingInfluences(mayatest.MayaTestCase):
def test_prune_exceeding_influences(self):
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skincl = skinutils.bind_mesh_to_joints(test_cube, test_joints, maximumInfluences=5)
influences_to_weights = skinutils.get_weighted_influences(test_cube.vtx[0], skincl)
skinutils.prune_exceeding_influences(test_cube.vtx[0], skincl, influences_to_weights)
result = skinutils.get_weighted_influences(test_cube.vtx[0], skincl)
self.assertEqual(4, len(result))
class TestGetNonNormalizedVerts(mayatest.MayaTestCase):
def test_zero_bad_verts(self):
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skincl = skinutils.bind_mesh_to_joints(test_cube, test_joints, maximumInfluences=4)
skincl.setNormalizeWeights(2) # 2 == post normalize method
result = skinutils.get_non_normalized_vert_indexes(test_cube.vtx, skincl)
self.assertEqual(0, len(result))
def test_one_bad_vert(self):
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skincl = skinutils.bind_mesh_to_joints(test_cube, test_joints, maximumInfluences=4)
skincl.setNormalizeWeights(2) # 2 == post normalize method
pm.skinPercent(skincl, test_cube.vtx[0], transformValue=(test_joints[0], 1.5))
result = skinutils.get_non_normalized_vert_indexes(test_cube.vtx, skincl)
self.assertEqual(1, len(result))
def test_returns_total(self):
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skincl = skinutils.bind_mesh_to_joints(test_cube, test_joints, maximumInfluences=4)
skincl.setNormalizeWeights(2) # 2 == post normalize method
pm.skinPercent(skincl, test_cube.vtx[0], transformValue=(test_joints[0], 1.5))
pm.skinPercent(skincl, test_cube.vtx[1], transformValue=(test_joints[0], 1.5))
expected = {0: 2.25, 1: 2.25}
result = skinutils.get_non_normalized_vert_indexes(test_cube.vtx, skincl)
self.assertDictEqual(expected, result)
class TestMoveWeights(mayatest.MayaTestCase):
def setUp(self):
super(TestMoveWeights, self).setUp()
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
self.skincl = skinutils.bind_mesh_to_joints(test_cube, test_joints, maximumInfluences=4)
self.vert = test_cube.vtx[0]
self.origin_inf = test_joints[0]
self.destination_inf = test_joints[1]
self.initial_origin_weight = self.pm.skinPercent(self.skincl, self.vert, q=True, transform=self.origin_inf)
self.initial_destination_weight = self.pm.skinPercent(
self.skincl, self.vert, q=True, transform=self.destination_inf)
def test_move_weight_single_vert_expected_dest_weight(self):
# test_cube = self.create_cube()
# test_joints = [self.create_joint() for _ in range(5)]
# skincl = skinutils.bind_mesh_to_joints(test_cube, test_joints, maximumInfluences=4)
# vert = test_cube.vtx[0]
# origin_inf = test_joints[0]
# destination_inf = test_joints[1]
# initial_origin_weight = self.pm.skinPercent(skincl, vert, q=True, transform=origin_inf)
# initial_destination_weight = self.pm.skinPercent(skincl, vert, q=True, transform=destination_inf)
skinutils.move_weights(self.skincl, self.vert, self.origin_inf, self.destination_inf)
expected_dest_weight = self.initial_origin_weight + self.initial_destination_weight
result_dest_weight = self.pm.skinPercent(self.skincl, self.vert, q=True, transform=self.destination_inf)
self.assertEqual(expected_dest_weight, result_dest_weight)
def test_single_vert_expected_origin_weight(self):
skinutils.move_weights(self.skincl, self.vert, self.origin_inf, self.destination_inf)
expected_origin_weight = 0.0
result_origin_weight = self.pm.skinPercent(self.skincl, self.vert, q=True, transform=self.origin_inf)
self.assertEqual(expected_origin_weight, result_origin_weight)
class TestMaxInfluencesNormalizeWeightsDisabled(mayatest.MayaTestCase):
def test_max_influences_normalize_weights_disabled(self):
pass
class TestPruneExceedingSkinnedMesh(mayatest.MayaTestCase):
def test_prune_exceeding_skinned_mesh(self):
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skincl = skinutils.bind_mesh_to_joints(test_cube, test_joints, maximumInfluences=5)
initial_influences = []
for vert in test_cube.vtx:
initial_inf = skinutils.get_weighted_influences(vert, skincl)
initial_influences.append(len(initial_inf))
expected_initial = [5, 5, 5, 5, 5, 5, 5, 5]
self.assertListEqual(expected_initial, initial_influences)
skinutils.prune_exceeding_skinned_mesh(test_cube, skincluster=skincl)
results = []
for vert in test_cube.vtx:
result = skinutils.get_weighted_influences(vert, skincl)
results.append(len(result))
expected = [4, 4, 4, 4, 4, 4, 4, 4]
self.assertListEqual(expected, results)
class TestDeltaMeshSkinning(mayatest.MayaTestCase):
def test_modifies_skinning(self):
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
[pm.move(j, (1,0,0)) for j in test_joints]
skinutils.bind_mesh_to_joints(test_cube, test_joints, maximumInfluences=1)
start_infs = skinutils.get_weighted_influences(test_cube.vtx[0])
self.assertEqual(1, len(start_infs))
skinutils.apply_delta_mush_skinning(test_cube, cleanup=True)
after_infs = skinutils.get_weighted_influences(test_cube.vtx[0])
self.assertEqual(4, len(after_infs))
def test_clean_up_mush_nodes(self):
pass
def test_clean_up_extra_meshes(self):
pass
class TestApplyDeltaMush(mayatest.MayaTestCase):
def test_creates_mush_node(self):
test_cube = self.create_cube()
result = skinutils.apply_delta_mush(test_cube)
mush_nodes = pm.ls(type=pm.nt.DeltaMush)
self.assertEqual(mush_nodes, [result])
def test_default_settings(self):
test_cube = self.create_cube()
mush_node = skinutils.apply_delta_mush(test_cube)
self.scene_nodes.append(mush_node)
expected = {'smoothingIterations': 20,
'smoothingStep': 1.0,
'pinBorderVertices': False,
'envelope': 1.0,
'inwardConstraint': 0.0,
'outwardConstraint': 0.0,
'distanceWeight': 1.0,
'displacement': 1.0}
result = {'smoothingIterations': mush_node.smoothingIterations.get(),
'smoothingStep': mush_node.smoothingStep.get(),
'pinBorderVertices': mush_node.pinBorderVertices.get(),
'envelope': mush_node.envelope.get(),
'inwardConstraint': mush_node.inwardConstraint.get(),
'outwardConstraint': mush_node.outwardConstraint.get(),
'distanceWeight': mush_node.distanceWeight.get(),
'displacement': mush_node.displacement.get()}
self.assertDictEqual(expected, result)
def test_not_default_settings(self):
test_cube = self.create_cube()
kwargs = {'smoothingIterations': 10,
'smoothingStep': 0.5,
'pinBorderVertices': True,
'envelope': 0.5,
'inwardConstraint': 0.5,
'outwardConstraint': 1.0}
mush_node = skinutils.apply_delta_mush(test_cube, 0.0, 0.0, **kwargs)
self.scene_nodes.append(mush_node)
expected = {'distanceWeight': 0.0,
'displacement': 0.0}
expected.update(kwargs)
result = {'smoothingIterations': mush_node.smoothingIterations.get(),
'smoothingStep': mush_node.smoothingStep.get(),
'pinBorderVertices': mush_node.pinBorderVertices.get(),
'envelope': mush_node.envelope.get(),
'inwardConstraint': mush_node.inwardConstraint.get(),
'outwardConstraint': mush_node.outwardConstraint.get(),
'distanceWeight': mush_node.distanceWeight.get(),
'displacement': mush_node.displacement.get()}
self.assertDictEqual(expected, result)
class TestBakeDeformer(mayatest.MayaTestCase):
def test_one_skeleton(self):
source_cube = self.create_cube()
target_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skinutils.bind_mesh_to_joints(source_cube, test_joints)
target_skincl = skinutils.bind_mesh_to_joints(target_cube, test_joints)
self.scene_nodes.append(skinutils.apply_delta_mush(source_cube))
pm.skinPercent(target_skincl, target_cube.vtx, transformValue=(test_joints[-1], 1.0))
previous_val = pm.skinPercent(target_skincl, target_cube.vtx[0], query=True, transform=test_joints[-1])
# pm.skinPercent(skincluster, vertex, transformValue=pruned_infs_to_weights.items())
target_skincl = skinutils.bake_deformer_to_skin(source_cube, target_cube)
result = pm.skinPercent(target_skincl, target_cube.vtx[0], query=True, transform=test_joints[-1])
self.assertNotEqual(previous_val, result)
def test_two_skeletons(self):
source_cube = self.create_cube()
target_cube = self.create_cube()
source_joints = [self.create_joint() for _ in range(5)]
pm.select(clear=True)
target_joints = [self.create_joint() for _ in range(5)]
skinutils.bind_mesh_to_joints(source_cube, source_joints)
target_skincl = skinutils.bind_mesh_to_joints(target_cube, target_joints)
self.scene_nodes.append(skinutils.apply_delta_mush(source_cube))
pm.skinPercent(target_skincl, target_cube.vtx, transformValue=(target_joints[-1], 1.0))
previous_val = pm.skinPercent(target_skincl, target_cube.vtx[0], query=True, transform=target_joints[-1])
# pm.skinPercent(skincluster, vertex, transformValue=pruned_infs_to_weights.items())
target_skincl = skinutils.bake_deformer_to_skin(source_cube, target_cube, source_joints, target_joints)
result = pm.skinPercent(target_skincl, target_cube.vtx[0], query=True, transform=target_joints[-1])
self.assertNotEqual(previous_val, result)
def test_respects_max_influences(self):
source_cube = self.create_cube()
target_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skinutils.bind_mesh_to_joints(source_cube, test_joints)
skinutils.bind_mesh_to_joints(target_cube, test_joints)
self.scene_nodes.append(skinutils.apply_delta_mush(source_cube))
expected = 3
target_skincl = skinutils.bake_deformer_to_skin(source_cube, target_cube, max_influences=expected)
result = target_skincl.getMaximumInfluences()
self.assertEqual(expected, result)
def test_normalizes_weights(self):
source_cube = self.create_cube()
target_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skinutils.bind_mesh_to_joints(source_cube, test_joints)
target_skincl = skinutils.bind_mesh_to_joints(target_cube, test_joints)
target_skincl.setNormalizeWeights(False)
pm.skinPercent(target_skincl, target_cube.vtx, transformValue=(test_joints[-1], 2.0))
weights = [sum(pm.skinPercent(target_skincl, v, value=True, q=True)) for v in target_cube.vtx]
[self.assertLess(1.0, w) for w in weights]
self.scene_nodes.append(skinutils.apply_delta_mush(source_cube))
target_skincl = skinutils.bake_deformer_to_skin(source_cube, target_cube, cleanup=True)
# target_skincl.forceNormalizeWeights()
weights = [sum(pm.skinPercent(target_skincl, v, value=True, q=True)) for v in target_cube.vtx]
[self.assertGreaterEqual(1.0, w) for w in weights]
class CopyWeights(mayatest.MayaTestCase):
def test_simple(self):
source_cube = self.create_cube()
target_cube = self.create_cube()
source_joints = [self.create_joint() for _ in range(5)]
[pm.move(j, (0.1, 0.1, 0.1)) for j in source_joints]
source_skincl = skinutils.bind_mesh_to_joints(source_cube, source_joints)
expected = [pm.skinPercent(source_skincl, v, value=True, q=True) for v in source_cube.vtx]
pm.select(clear=True)
target_joints = [self.create_joint() for _ in range(5)]
[pm.move(j, (0.1, 0.1, 0.1)) for j in target_joints]
target_skincl = skinutils.bind_mesh_to_joints(target_cube, target_joints)
pm.skinPercent(target_skincl, target_cube.vtx, transformValue=(target_joints[-1], 1.0))
skinutils.copy_weights(source_cube, target_cube)
        result = [pm.skinPercent(target_skincl, v, value=True, q=True) for v in target_cube.vtx]
for e, r in zip(expected, result):
[self.assertAlmostEqual(expected_weight, result_weight) for expected_weight, result_weight in zip(e, r)]
class TestGetRootFromSkinnedMesh(mayatest.MayaTestCase):
def test_get_root_joint_from_skinned_mesh(self):
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skinutils.bind_mesh_to_joints(test_cube, test_joints)
result = skinutils.get_root_joint_from_skinned_mesh(test_cube)
self.assertEqual(test_joints[0], result)
class TestGetVertsToWeightedInfluences(mayatest.MayaTestCase):
def test_get_verts_to_weighted_influences(self):
test_cube, test_joints, skin_cluster = self.create_skinned_cube()
expected = {}
inf_index = 0
for vert in test_cube.vtx:
expected[vert.index()] = {test_joints[inf_index]: 1.0}
pm.skinPercent(skin_cluster, vert, transformValue=expected[vert.index()].items())
inf_index += 1
if inf_index > 4:
inf_index = 0
result = skinutils.get_vert_indexes_to_weighted_influences(skin_cluster)
self.assertDictEqual(expected, result)
def test_multiple_influences_per_vert(self):
test_cube, test_joints, skin_cluster = self.create_skinned_cube()
expected = {}
inf_index = 0
weight_values = [0.3, 0.2, 0.4, 0.1]
for vert in test_cube.vtx:
inf_wts = {}
for weight in weight_values:
inf_wts[test_joints[inf_index]] = weight
inf_index += 1
if inf_index > 4:
inf_index = 0
pm.skinPercent(skin_cluster, vert, transformValue=inf_wts.items())
expected[vert.index()] = inf_wts
result = skinutils.get_vert_indexes_to_weighted_influences(skin_cluster)
self.assertDictEqual(expected, result)
def test_subset_of_meshes_verts(self):
test_cube, test_joints, skin_cluster = self.create_skinned_cube()
expected = {}
inf_index = 0
weight_values = [0.3, 0.2, 0.4, 0.1]
for vert in test_cube.vtx:
inf_wts = {}
for weight in weight_values:
inf_wts[test_joints[inf_index]] = weight
inf_index += 1
if inf_index > 4:
inf_index = 0
pm.skinPercent(skin_cluster, vert, transformValue=inf_wts.items())
expected[vert.index()] = inf_wts
for i in [0, 1, 7]:
expected.pop(i)
result = skinutils.get_vert_indexes_to_weighted_influences(skin_cluster, test_cube.vtx[2:6])
self.assertDictEqual(expected, result)
def test_skin_cluster_has_removed_influences(self):
"""An influence index can be greater than the length all influences in the skin_cluster"""
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(15)]
skin_cluster = self.pm.skinCluster(test_joints, test_cube)
for index in [13, 10, 9]:
skin_cluster.removeInfluence(test_joints[index])
self.scene_nodes.append(skin_cluster)
expected = {}
for vert in test_cube.vtx:
expected[vert.index()] = {test_joints[-1]: 1.0}
pm.skinPercent(skin_cluster, vert, transformValue=expected[vert.index()].items())
result = skinutils.get_vert_indexes_to_weighted_influences(skin_cluster)
self.assertDictEqual(expected, result)
def test_removed_influence_had_non_zero_weights_before(self):
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(15)]
skin_cluster = self.pm.skinCluster(test_joints, test_cube)
test_indices = [13, 10, 9]
for vert in test_cube.vtx:
for index in test_indices:
pm.skinPercent(skin_cluster, vert, transformValue=(test_joints[index], 0.5))
for index in test_indices[1:]:
skin_cluster.removeInfluence(test_joints[index])
expected = {}
for vert in test_cube.vtx:
expected[vert.index()] = {test_joints[0]: 1.0}
pm.skinPercent(skin_cluster, vert, transformValue=(expected[vert.index()].items()))
self.scene_nodes.append(skin_cluster)
result = skinutils.get_vert_indexes_to_weighted_influences(skin_cluster)
self.assertDictEqual(expected, result)
class TestGetInfluenceIndex(mayatest.MayaTestCase):
def test_influence_passed_as_pynode(self):
test_cube, test_joints, skin_cluster = self.create_skinned_cube()
expected = 3
result = skinutils.get_influence_index(test_joints[expected], skin_cluster)
self.assertEqual(expected, result)
def test_influence_passed_as_string(self):
test_cube, test_joints, skin_cluster = self.create_skinned_cube()
expected = 3
result = skinutils.get_influence_index(test_joints[expected].name(), skin_cluster)
self.assertEqual(expected, result)
def test_more_than_one_joint_with_same_name_pynode(self):
test_cube, test_joints, skin_cluster = self.create_skinned_cube()
dummy_joints = [self.create_joint() for _ in range(5)]
expected = 3
test_joints[expected].rename('foo')
dummy_joints[expected].rename('foo')
result = skinutils.get_influence_index(test_joints[expected], skin_cluster)
self.assertEqual(expected, result)
def test_more_than_one_joint_with_same_name_string(self):
test_cube, test_joints, skin_cluster = self.create_skinned_cube()
dummy_joints = [self.create_joint() for _ in range(5)]
expected = 3
test_joints[expected].rename('foo')
dummy_joints[expected].rename('foo')
result = skinutils.get_influence_index(test_joints[expected].nodeName(), skin_cluster)
self.assertEqual(expected, result)
class TestMoveWeightAndRemoveInfluence(mayatest.MayaTestCase):
def test_removes_influence(self):
test_cube, test_joints, skin_cluster = self.create_skinned_cube()
skinutils.move_weight_and_remove_influence(test_joints[-1], test_joints[0], skin_cluster)
self.assertFalse(test_joints[-1] in skin_cluster.getInfluence())
def test_moves_weights_to_parent(self):
test_cube, test_joints, skin_cluster = self.create_skinned_cube()
values = [0, 0.25, 0.25, 0.25, 0.25]
infs_to_wts = dict(zip(test_joints, values))
with skinutils.max_influences_normalize_weights_disabled(skin_cluster):
for vertex in test_cube.vtx:
pm.skinPercent(skin_cluster, vertex, transformValue=infs_to_wts.items())
skinutils.move_weight_and_remove_influence(test_joints[-1], test_joints[-2], skin_cluster)
result = skinutils.get_weighted_influences(test_cube.vtx[0], skin_cluster)
expected_values = [0.25, 0.25, 0.5]
expected = dict(zip(test_joints[1:-1], expected_values))
self.assertDictEqual(expected, result)
class TestCopyWeightsVertOrder(mayatest.MayaTestCase):
def test_simple(self):
source_test_cube, source_test_joints, source_skin_cluster = self.create_skinned_cube()
target_test_cube, target_test_joints, target_skin_cluster = self.create_skinned_cube()
inf_map = dict([(sj, [tj]) for sj, tj in zip(source_test_joints, target_test_joints)])
for vertex in source_test_cube.vtx:
pm.skinPercent(source_skin_cluster, vertex, transformValue=(source_test_joints[0], 1.0))
skinutils.copy_weights_vert_order(source_test_cube, target_test_cube, inf_map)
result = skinutils.get_weighted_influences(target_test_cube.vtx[0])
expected = {target_test_joints[0]: 1.0}
self.assertDictEqual(expected, result)
class TestGetInfluenceMapByInfluenceIndex(mayatest.MayaTestCase):
def test_update_inf_map_by_skincluster_index(self):
source_cube, source_joints, source_skin_cluster = self.create_skinned_cube()
target_cube, target_joints, target_skin_cluster = self.create_skinned_cube()
expected_map = dict([(x, [y]) for x, y in zip(source_joints, target_joints)])
result_map, result_remaining = skinutils.update_inf_map_by_skincluster_index(source_joints,
target_joints,
source_skin_cluster,
target_skin_cluster)
self.assertDictEqual(result_map, expected_map)
self.assertListEqual([], result_remaining)
def test_skincluster_index_influence_lists_order_differ(self):
source_cube, source_joints, source_skin_cluster = self.create_skinned_cube()
target_cube, target_joints, target_skin_cluster = self.create_skinned_cube()
expected_map = dict([(x, [y]) for x, y in zip(source_joints, target_joints)])
target_joints.reverse()
result_map, result_remaining = skinutils.update_inf_map_by_skincluster_index(source_joints,
target_joints,
source_skin_cluster,
target_skin_cluster)
self.assertDictEqual(result_map, expected_map)
self.assertListEqual([], result_remaining)
def test_more_source_influences(self):
source_cube, source_joints, source_skin_cluster = self.create_skinned_cube(joint_count=10)
target_cube, target_joints, target_skin_cluster = self.create_skinned_cube()
expected_map = dict([(x, [y]) for x, y in zip(source_joints, target_joints)])
result_map, result_remaining = skinutils.update_inf_map_by_skincluster_index(source_joints,
target_joints,
source_skin_cluster,
target_skin_cluster)
self.assertDictEqual(result_map, expected_map)
self.assertListEqual([], result_remaining)
def test_more_target_influences(self):
source_cube, source_joints, source_skin_cluster = self.create_skinned_cube()
target_cube, target_joints, target_skin_cluster = self.create_skinned_cube(joint_count=10)
expected_map = dict([(x, [y]) for x, y in zip(source_joints, target_joints)])
expected_remaining = target_joints[5:]
result_map, result_remaining = skinutils.update_inf_map_by_skincluster_index(source_joints,
target_joints,
source_skin_cluster,
target_skin_cluster)
self.assertDictEqual(result_map, expected_map)
self.assertListEqual(expected_remaining, result_remaining)
class TestCopyWeights(mayatest.MayaTestCase):
def test_copy_weights_vert_order_same_skeleton(self):
source_cube, source_joints, source_skincluster = self.create_skinned_cube()
target_cube = self.create_cube()
target_skincluster = skinutils.bind_mesh_to_joints(target_cube, source_joints)
transform_values = dict(itertools.zip_longest(source_joints[:4], [0.25], fillvalue=0.25))
transform_values[source_joints[-1]] = 0.0
pm.skinPercent(source_skincluster, source_cube.vtx[0], transformValue=transform_values.items())
        source_weightedinfs = skinutils.get_weighted_influences(source_cube.vtx[0], source_skincluster)
transform_values = dict(itertools.zip_longest(source_joints[1:], [0.25], fillvalue=0.25))
transform_values[source_joints[0]] = 0.0
pm.skinPercent(target_skincluster, target_cube.vtx[0], transformValue=transform_values.items())
target_weightedinfs = skinutils.get_weighted_influences(target_cube.vtx[0], target_skincluster)
self.assertNotEqual(source_weightedinfs, target_weightedinfs)
skinutils.copy_weights_vert_order_inf_order(source_cube, target_cube, source_skincluster, target_skincluster)
expected = skinutils.get_weighted_influences(source_cube.vtx[0], source_skincluster)
result = skinutils.get_weighted_influences(target_cube.vtx[0], target_skincluster)
self.assertDictEqual(expected, result)
class TestGetBindPose(mayatest.MayaTestCase):
def test_get_bind_pose_from_skinned_mesh(self):
test_cube, test_joints, test_skincluster = self.create_skinned_cube()
expected = pm.ls(type='dagPose')[0]
result = skinutils.get_bind_pose_from_skinned_mesh(test_cube)
self.assertEqual(expected, result)
def test_multiple_bind_poses_on_skel(self):
test_cube, test_joints, test_skincluster = self.create_skinned_cube()
expected = pm.ls(type='dagPose')[0]
dummy_cube = self.create_cube()
test_joints[2].rotateX.set(30)
skinutils.bind_mesh_to_joints(dummy_cube, test_joints)
pm.dagPose(test_joints[0], bindPose=True, save=True)
bind_poses = pm.ls(type='dagPose')
self.assertEqual(3, len(bind_poses))
result = skinutils.get_bind_pose_from_skincluster(test_skincluster)
self.assertEqual(expected, result)
class TestDuplicateSkinnedMesh(mayatest.MayaTestCase):
def test_default_params(self):
test_cube, test_joints, test_skincluster = self.create_skinned_cube()
dup_cube, dup_cluster = skinutils.duplicate_skinned_mesh(test_cube)
self.scene_nodes.extend([dup_cube, dup_cluster])
self.assertListEqual(test_joints, dup_cluster.influenceObjects())
self.assertNotEqual(test_cube, dup_cube)
test_weights = skinutils.get_vert_indexes_to_weighted_influences(test_skincluster)
dup_weights = skinutils.get_vert_indexes_to_weighted_influences(dup_cluster)
self.assertDictEqual(test_weights, dup_weights)
def test_dup_skinnedmesh_and_skel(self):
test_cube, test_joints, test_skincluster = self.create_skinned_cube()
dup_cube, dup_root, dup_cluster = skinutils.duplicate_skinned_mesh_and_skeleton(test_cube)
self.scene_nodes.extend([dup_cube, dup_root, dup_cluster])
self.assertEqual(len(test_joints), len(dup_cluster.influenceObjects()))
self.assertNotEqual(test_joints, dup_cluster.influenceObjects())
self.assertNotEqual(test_cube, dup_cube)
def test_dup_namespace(self):
test_cube, test_joints, test_skincluster = self.create_skinned_cube()
pm.namespace(set=':')
self.create_namespace('foo')
dup_cube, dup_root, dup_cluster = skinutils.duplicate_skinned_mesh_and_skeleton(test_cube, dup_namespace='foo')
self.scene_nodes.extend([dup_cube, dup_root, dup_cluster])
expected_joint_names = [x.nodeName(stripNamespace=True) for x in skelutils.get_hierarchy_from_root(test_joints[0])]
result_joint_names = [x.nodeName(stripNamespace=True) for x in skelutils.get_hierarchy_from_root(dup_root)]
self.assertListEqual(expected_joint_names, result_joint_names)
self.assertNotEqual(test_joints, dup_cluster.influenceObjects())
self.assertNotEqual(test_cube, dup_cube)
self.assertEqual('foo', dup_root.parentNamespace())
| python |
import array
import unittest
import pickle
import struct
import sys
from pyhmmer.easel import Vector, VectorF, VectorU8
class _TestVectorBase(object):
Vector = NotImplemented
def test_pickle(self):
v1 = self.Vector(range(6))
v2 = pickle.loads(pickle.dumps(v1))
self.assertSequenceEqual(v1, v2)
def test_pickle_protocol4(self):
v1 = self.Vector(range(6))
v2 = pickle.loads(pickle.dumps(v1, protocol=4))
self.assertEqual(v1.shape, v2.shape)
self.assertSequenceEqual(v1, v2)
self.assertSequenceEqual(memoryview(v1), memoryview(v2))
@unittest.skipUnless(sys.version_info >= (3, 8), "pickle protocol 5 requires Python 3.8+")
def test_pickle_protocol5(self):
v1 = self.Vector(range(6))
v2 = pickle.loads(pickle.dumps(v1, protocol=5))
self.assertEqual(v1.shape, v2.shape)
self.assertSequenceEqual(v1, v2)
self.assertSequenceEqual(memoryview(v1), memoryview(v2))
def test_empty_vector(self):
v1 = self.Vector([])
v2 = self.Vector.zeros(0)
v3 = self.Vector()
self.assertEqual(len(v1), 0)
self.assertEqual(len(v2), 0)
self.assertEqual(len(v3), 0)
self.assertEqual(v1, v2)
self.assertEqual(v1, v3)
self.assertFalse(v1)
self.assertFalse(v2)
self.assertFalse(v3)
if sys.implementation.name != "pypy":
v3 = self.Vector.zeros(3)
self.assertLess(sys.getsizeof(v1), sys.getsizeof(v3))
def test_init(self):
vec = self.Vector([1, 2, 3])
self.assertEqual(vec[0], 1)
self.assertEqual(vec[1], 2)
self.assertEqual(vec[2], 3)
def test_init_memcpy(self):
v1 = self.Vector([1, 2, 3])
a = array.array(v1.format, v1)
v2 = self.Vector(a)
self.assertEqual(v1, v2)
def test_init_error(self):
self.assertRaises(TypeError, self.Vector, 1)
self.assertRaises(TypeError, self.Vector.zeros, [1, 2, 3])
self.assertRaises(TypeError, self.Vector.zeros, "1")
def test_shape(self):
vec = self.Vector([1, 2, 3])
self.assertEqual(vec.shape, (3,))
vec2 = self.Vector.zeros(100)
self.assertEqual(vec2.shape, (100,))
vec3 = self.Vector.zeros(0)
self.assertEqual(vec3.shape, (0,))
def test_len(self):
vec = self.Vector([1, 2, 3])
self.assertEqual(len(vec), 3)
vec2 = self.Vector.zeros(100)
self.assertEqual(len(vec2), 100)
vec3 = self.Vector([])
self.assertEqual(len(vec3), 0)
def test_copy(self):
vec = self.Vector([1, 2, 3])
vec2 = vec.copy()
del vec
self.assertIsInstance(vec2, self.Vector)
self.assertEqual(vec2[0], 1)
self.assertEqual(vec2[1], 2)
self.assertEqual(vec2[2], 3)
vec3 = self.Vector([])
vec4 = vec3.copy()
self.assertEqual(vec3, vec4)
self.assertEqual(len(vec4), 0)
def test_reverse(self):
vec = self.Vector([1, 2, 3])
vec.reverse()
self.assertEqual(vec[0], 3)
self.assertEqual(vec[1], 2)
self.assertEqual(vec[2], 1)
vec2 = self.Vector([1, 2, 3, 4])
vec2.reverse()
self.assertEqual(vec2[0], 4)
self.assertEqual(vec2[1], 3)
self.assertEqual(vec2[2], 2)
self.assertEqual(vec2[3], 1)
vec3 = self.Vector([])
vec3.reverse()
self.assertEqual(vec3, self.Vector([]))
self.assertEqual(len(vec3), 0)
def test_add(self):
vec = self.Vector([1, 2, 3])
vec2 = vec + 1
self.assertEqual(vec2[0], 2)
self.assertEqual(vec2[1], 3)
self.assertEqual(vec2[2], 4)
with self.assertRaises(ValueError):
vec + self.Vector([1])
v2 = self.Vector([])
v3 = v2 + self.Vector([])
self.assertEqual(v3, self.Vector([]))
def test_iadd_scalar(self):
vec = self.Vector([1, 2, 3])
vec += 3
self.assertEqual(vec[0], 4)
self.assertEqual(vec[1], 5)
self.assertEqual(vec[2], 6)
v2 = self.Vector([])
v2 += 3
self.assertEqual(v2, self.Vector([]))
def test_iadd_vector(self):
vec = self.Vector([4, 5, 6])
vec += self.Vector([10, 11, 12])
self.assertEqual(vec[0], 14)
self.assertEqual(vec[1], 16)
self.assertEqual(vec[2], 18)
with self.assertRaises(ValueError):
vec += self.Vector([1])
v2 = self.Vector([])
v2 += self.Vector([])
self.assertEqual(v2, self.Vector([]))
def test_sub(self):
vec = self.Vector([1, 2, 3])
v2 = vec - 1
self.assertEqual(v2[0], 0)
self.assertEqual(v2[1], 1)
self.assertEqual(v2[2], 2)
v3 = self.Vector([8, 10, 12])
v4 = self.Vector([1, 2, 3])
v5 = v3 - v4
self.assertEqual(v5[0], 7)
self.assertEqual(v5[1], 8)
self.assertEqual(v5[2], 9)
def test_isub_scalar(self):
vec = self.Vector([4, 5, 6])
vec -= 2
self.assertEqual(vec[0], 2)
self.assertEqual(vec[1], 3)
self.assertEqual(vec[2], 4)
def test_isub_vector(self):
vec = self.Vector([4, 5, 6])
vec -= self.Vector([2, 3, 2])
self.assertEqual(vec[0], 2)
self.assertEqual(vec[1], 2)
self.assertEqual(vec[2], 4)
def test_mul_scalar(self):
vec = self.Vector([1, 2, 3])
v2 = vec * 3
self.assertEqual(v2[0], 3)
self.assertEqual(v2[1], 6)
self.assertEqual(v2[2], 9)
v2 = self.Vector([])
v3 = v2 * 3
self.assertEqual(v3, self.Vector([]))
def test_mul_vector(self):
vec = self.Vector([1, 2, 3])
v2 = self.Vector([3, 6, 9])
v3 = vec * v2
self.assertEqual(v3[0], 3)
self.assertEqual(v3[1], 12)
self.assertEqual(v3[2], 27)
v2 = self.Vector([])
v3 = v2 * self.Vector([])
self.assertEqual(v3, self.Vector([]))
def test_imul_scalar(self):
vec = self.Vector([1, 2, 3])
vec *= 3
self.assertEqual(vec[0], 3)
self.assertEqual(vec[1], 6)
self.assertEqual(vec[2], 9)
v2 = self.Vector([])
v2 *= 3
self.assertEqual(v2, self.Vector([]))
def test_matmul_vector(self):
u = self.Vector([4, 5, 6])
v = self.Vector([1, 2, 3])
self.assertEqual(u @ v, 1*4 + 2*5 + 3*6)
x = self.Vector([])
y = self.Vector([])
self.assertEqual(x @ y, 0)
def test_sum(self):
vec = self.Vector([1, 2, 3])
self.assertEqual(vec.sum(), 1 + 2 + 3)
vec2 = self.Vector([])
self.assertEqual(vec2.sum(), 0)
def test_slice(self):
vec = self.Vector([1, 2, 3, 4])
v1 = vec[:]
self.assertEqual(len(v1), 4)
self.assertEqual(v1[0], 1)
self.assertEqual(v1[-1], 4)
v2 = vec[1:3]
self.assertEqual(len(v2), 2)
self.assertEqual(v2[0], 2)
self.assertEqual(v2[1], 3)
v3 = vec[:-1]
self.assertEqual(len(v3), 3)
self.assertEqual(v3[-1], 3)
v4 = vec[0:10]
self.assertEqual(len(v4), 4)
self.assertEqual(v4[-1], 4)
with self.assertRaises(ValueError):
vec[::-1]
def test_min(self):
vec = self.Vector([1, 2, 3])
self.assertEqual(vec.min(), 1)
v2 = self.Vector([])
self.assertRaises(ValueError, v2.min)
def test_max(self):
vec = self.Vector([1, 2, 3])
self.assertEqual(vec.max(), 3)
v2 = self.Vector([])
self.assertRaises(ValueError, v2.max)
def test_argmin(self):
vec = self.Vector([4, 2, 8])
self.assertEqual(vec.argmin(), 1)
v2 = self.Vector([])
self.assertRaises(ValueError, v2.argmin)
def test_argmax(self):
vec = self.Vector([2, 8, 4])
self.assertEqual(vec.argmax(), 1)
v2 = self.Vector([])
self.assertRaises(ValueError, v2.argmax)
class TestVector(unittest.TestCase):
def test_abstract(self):
self.assertRaises(TypeError, Vector, [1, 2, 3])
self.assertRaises(TypeError, Vector.zeros, 1)
class TestVectorF(_TestVectorBase, unittest.TestCase):
Vector = VectorF
def test_strides(self):
vec = self.Vector([1, 2, 3])
sizeof_float = len(struct.pack('f', 1.0))
self.assertEqual(vec.strides, (sizeof_float,))
def test_normalize(self):
vec = self.Vector([1, 3])
vec.normalize()
self.assertEqual(vec[0], 1/4)
self.assertEqual(vec[1], 3/4)
vec2 = self.Vector([])
vec2.normalize()
def test_memoryview_tolist(self):
vec = self.Vector([1, 2, 3])
mem = memoryview(vec)
self.assertEqual(mem.tolist(), [1.0, 2.0, 3.0])
def test_neg(self):
vec = self.Vector([1, 2, 3])
v2 = -vec
self.assertEqual(v2[0], -1)
self.assertEqual(v2[1], -2)
self.assertEqual(v2[2], -3)
def test_div_scalar(self):
vec = self.Vector([1, 2, 3])
v2 = vec / 2
self.assertEqual(v2[0], 0.5)
self.assertEqual(v2[1], 1.0)
self.assertEqual(v2[2], 1.5)
v2 = self.Vector([])
v3 = v2 / 3
self.assertEqual(v3, self.Vector([]))
def test_div_vector(self):
vec = self.Vector([1, 2, 3])
v2 = self.Vector([2, 4, 6])
v3 = vec / v2
self.assertEqual(v3[0], 0.5)
self.assertEqual(v3[1], 0.5)
self.assertEqual(v3[2], 0.5)
v2 = self.Vector([])
v3 = v2 / self.Vector([])
self.assertEqual(v3, self.Vector([]))
def test_idiv_scalar(self):
vec = self.Vector([1, 2, 3])
vec /= 2
self.assertEqual(vec[0], 0.5)
self.assertEqual(vec[1], 1.0)
self.assertEqual(vec[2], 1.5)
vec = self.Vector([])
vec /= 3
self.assertEqual(vec, self.Vector([]))
def test_idiv_vector(self):
vec = self.Vector([1, 2, 3])
vec /= self.Vector([2, 4, 6])
self.assertEqual(vec[0], 0.5)
self.assertEqual(vec[1], 0.5)
self.assertEqual(vec[2], 0.5)
vec = self.Vector([])
vec /= self.Vector([])
self.assertEqual(vec, self.Vector([]))
class TestVectorU8(_TestVectorBase, unittest.TestCase):
Vector = VectorU8
def test_strides(self):
vec = self.Vector([1, 2, 3])
sizeof_u8 = len(struct.pack('B', 1))
self.assertEqual(vec.strides, (sizeof_u8,))
def test_isub_wrapping(self):
vec = self.Vector([0, 1, 2])
vec -= 1
self.assertEqual(vec[0], 255)
self.assertEqual(vec[1], 0)
self.assertEqual(vec[2], 1)
def test_sum_wrapping(self):
vec = self.Vector([124, 72, 116])
self.assertEqual(vec.sum(), (124 + 72 + 116) % 256)
def test_memoryview_tolist(self):
vec = self.Vector([1, 2, 3])
mem = memoryview(vec)
self.assertEqual(mem.tolist(), [1, 2, 3])
def test_eq_bytebuffer(self):
vec = self.Vector([1, 2, 3])
b1 = bytearray([1, 2, 3])
self.assertEqual(vec, b1)
b2 = array.array('B', [1, 2, 3])
self.assertEqual(vec, b2)
b3 = array.array('B', [1, 2, 3, 4])
self.assertNotEqual(vec, b3)
b4 = array.array('L', [1, 2, 3])
self.assertNotEqual(vec, b4)
def test_floordiv_scalar(self):
vec = self.Vector([1, 2, 3])
v2 = vec // 2
self.assertEqual(v2[0], 0)
self.assertEqual(v2[1], 1)
self.assertEqual(v2[2], 1)
v2 = self.Vector([])
v3 = v2 // 3
self.assertEqual(v3, self.Vector([]))
def test_floordiv_vector(self):
vec = self.Vector([1, 2, 3])
v2 = self.Vector([2, 4, 1])
v3 = vec // v2
self.assertEqual(v3[0], 0)
self.assertEqual(v3[1], 0)
self.assertEqual(v3[2], 3)
v2 = self.Vector([])
v3 = v2 // self.Vector([])
self.assertEqual(v3, self.Vector([]))
def test_ifloordiv_scalar(self):
vec = self.Vector([1, 2, 3])
vec //= 2
self.assertEqual(vec[0], 0)
self.assertEqual(vec[1], 1)
self.assertEqual(vec[2], 1)
vec = self.Vector([])
vec //= 3
self.assertEqual(vec, self.Vector([]))
def test_ifloordiv_vector(self):
vec = self.Vector([1, 2, 3])
vec //= self.Vector([2, 4, 6])
self.assertEqual(vec[0], 0)
self.assertEqual(vec[1], 0)
self.assertEqual(vec[2], 0)
vec = self.Vector([])
vec //= self.Vector([])
self.assertEqual(vec, self.Vector([]))
| python |
from distutils.core import setup
import requests.certs
import py2exe
setup(
name='hogge',
version='1.0.1',
url='https://github.com/igortg/ir_clubchamps',
license='LGPL v3.0',
author='Igor T. Ghisi',
description='',
console=[{
"dest_base": "ir_clubchamps",
"script": "main.py",
}],
    zipfile=None,
    data_files=[(".", [requests.certs.where()])],
options={
"py2exe": {
"compressed": True,
"dll_excludes": ["msvcr100.dll"],
"excludes": ["Tkinter"],
"bundle_files": 1,
"dist_dir": "ir_clubchamps"
}
},
)
| python |
import re
from abc import ABC
class TemplateFillerI(ABC):
def fill(self, template: str, entity: str, **kwargs):
return template.replace("XXX", entity)
class ItalianTemplateFiller(TemplateFillerI):
def __init__(self):
self._reduction_rules = {'diil': 'del', 'dilo': 'dello', 'dila': 'della', 'dii': 'dei', 'digli': 'degli',
'dile': 'delle', 'dil': 'dell\'',
'ail': 'al', 'alo': 'allo', 'ala': 'alla', 'ai': 'ai', 'agli': 'agli', 'ale': 'alle',
'dail': 'dal', 'dalo': 'dallo', 'dala': 'dalla', 'dai': 'dai', 'dagli': 'dagli',
'dale': 'dalle',
'inil': 'nel', 'inlo': 'nello', 'inla': 'nella', 'ini': 'nei', 'ingli': 'negli',
'inle': 'nelle',
'conil': 'col', 'conlo': 'cóllo', 'conla': 'cólla', 'coni': 'coi', 'congli': 'cogli',
'conle': 'cólle',
'suil': 'sul', 'sulo': 'sullo', 'sula': 'sulla', 'sui': 'sui', 'sugli': 'sugli',
'sule': 'sulle',
'peril': 'pel', 'perlo': 'pello', 'perla': 'pella', 'peri': 'pei', 'pergli': 'pegli',
'perle': 'pelle'}
self._template = "(?P<preposition>" + "|".join(["\\b" + preposition + "\\b"
for preposition in self._reduction_rules.keys()]) + ")"
self._finder = re.compile(self._template, re.IGNORECASE)
self._articles_gender = {'il': 'o', 'lo': 'o', 'i': 'i', 'gli': 'i', 'la': 'a', 'le': 'e'}
def fill(self, template: str, entity: str, **kwargs):
article = kwargs['article'].lower()
        article_in_entity = entity.lower().startswith(article)
if article:
if article_in_entity and re.search("(di|a|da|in|con|su|per)YYY", template):
entity = re.sub("\\b" + article + "\\b", "", entity, 1, re.IGNORECASE)
template = template.replace("YYY", article)
elif article_in_entity:
template = template.replace("YYY", "")
else:
template = template.replace("YYY", article)
template = self._reduce(template)
else:
template = template.replace("YYY", "")
gender = self._articles_gender.get(article, 'o')
template = template.replace("GGG", gender)
template = template.replace("XXX", entity)
if '\' ' + entity in template:
template = template.replace("\' ", "\'")
template = re.sub("\s{2,}", " ", template)
return template
def _reduce(self, template):
match = self._finder.search(template)
if match:
preposition = match.group('preposition').lower().strip()
template = template.replace(preposition, self._reduction_rules[preposition])
return template
class FrenchTemplateFiller(TemplateFillerI):
def __init__(self):
self._vowels = {'a', 'e', 'i', 'o', 'u', 'â', 'ê', 'î', 'ô', 'û', 'ë', 'ï', 'ü', 'y', 'ÿ', 'à', 'è', 'ù', 'é'}
def fill(self, template: str, entity: str, **kwargs):
if re.search("de\sXXX", template) and entity[0].lower() in self._vowels:
template = re.sub("de\sXXX", "d'XXX", template)
template = template.replace("XXX", entity)
template = re.sub("\s{2,}", " ", template)
return template.strip()
class GermanTemplateFiller(TemplateFillerI):
def fill(self, template: str, entity: str, **kwargs):
article = kwargs['article'].lower()
        article_in_entity = entity.lower().startswith(article)
if article_in_entity:
article = ""
template = re.sub("YYY", article, template)
template = template.replace("XXX", entity)
template = re.sub("\s{2,}", " ", template)
template = template.strip()
template = template[0].upper() + template[1:]
return template.strip()
class SpanishTemplateFiller(TemplateFillerI):
def __init__(self):
self._articles_gender = {'el': 'o', 'la': 'a', 'los': 'es', 'las': 'as'}
def fill(self, template: str, entity: str, **kwargs):
article = kwargs['article'].lower()
        article_in_entity = entity.lower().startswith(article)
skip = False
if article_in_entity and not re.search("(de)YYY", template):
skip = True
if article and not skip:
if article == "el" and re.search("(de)YYY", template):
template = template.replace("deYYY", 'del')
else:
template = template.replace("YYY", " " + article)
else:
template = template.replace("YYY", "")
gender = self._articles_gender.get(article, 'o')
template = template.replace("GGG", gender)
template = template.replace("XXX", entity)
template = re.sub("\s{2,}", " ", template)
return template
class TemplateFillerFactory(object):
@staticmethod
def make_filler(lang):
if lang == "en":
return TemplateFillerI()
if lang == "it":
return ItalianTemplateFiller()
if lang == "de":
return GermanTemplateFiller()
if lang == "es":
return SpanishTemplateFiller()
if lang == "fr":
return FrenchTemplateFiller()
return TemplateFillerI()
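# Illustrative usage sketch (added for clarity, not part of the original module).
# It assumes the placeholder conventions used above: XXX marks the entity slot,
# YYY the article slot, and GGG the gender suffix slot; the sample templates and
# their printed output are hypothetical examples only.
if __name__ == "__main__":
    english_filler = TemplateFillerFactory.make_filler("en")
    print(english_filler.fill("XXX is a country in Europe", "Italy"))
    italian_filler = TemplateFillerFactory.make_filler("it")
    # The Italian filler requires an 'article' keyword; the article is contracted
    # with the preceding preposition by _reduce(), e.g. "di" + "la" -> "della".
    print(italian_filler.fill("la capitale diYYY XXX", "Italia", article="la"))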
| python |
import gc
import os
import cv2
import numpy as np
import torch
from SRL4RL import SRL4RL_path
from SRL4RL.rl.utils.runner import StateRunner
from SRL4RL.utils.nn_torch import numpy2pytorch, pytorch2numpy, save_model
from SRL4RL.utils.utils import createFolder, loadPickle
from SRL4RL.utils.utilsEnv import (
NCWH2WHC,
add_noise,
render_env,
reset_stack,
tensor2image,
update_video,
)
from SRL4RL.utils.utilsPlot import plot_xHat, plotEmbedding, visualizeMazeExplor
from SRL4RL.xsrl.arguments import is_with_discoveryPi
np2torch = lambda x, device: numpy2pytorch(x, differentiable=False, device=device)
def omega_last_layer(x):
return torch.sigmoid(x)
def sampleNormal(mu, sig):
noise = torch.randn_like(mu)
return mu + noise * sig, noise
def resetState(obs, alpha, beta, gamma, config):
device = torch.device(config["device"])
if len(obs.shape) > 3:
numEnv = obs.shape[0]
else:
numEnv = 1
state = np.random.normal(0, 0.02, [numEnv, config["state_dim"]])
# do not add noise at reset! obs = add_noise(obs)
state = initState(numEnv, state, np2torch(obs, device), alpha, beta, gamma, config)
return state
def init_action(size, config):
return np.zeros((size, config["action_dim"]))
def initState(size, states, x, alpha, beta, gamma, config):
device = torch.device(config["device"])
with torch.no_grad():
actions = init_action(size, config)
# Compute state
o_alpha = alpha(x)
o_beta = beta(
torch.cat((np2torch(states, device), np2torch(actions, device)), dim=1)
)
input_gamma = torch.cat((o_alpha, o_beta), dim=1)
states = pytorch2numpy(gamma(input_gamma))
return states
def update_target_network(target, source, device=None):
if device:
source.to("cpu")
with torch.no_grad():
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
if device:
source.to(device)
return target
def normalizePi(pi, logPi, mu):
"""Apply squashing function.
See appendix C from https://arxiv.org/pdf/1812.05905.pdf.
"""
# action_max = envEval.action_space.high[0]
# action_min = envEval.action_space.low[0]
# action_scale = torch.tensor((action_max - action_min).item() / 2.)
# action_bias = torch.tensor((action_max + action_min) / 2.)
action_scale = 1
action_bias = 0
mu = torch.tanh(mu) * action_scale + action_bias
pi = torch.tanh(pi)
epsilon = 1e-6 # Avoid NaN (prevents division by zero or log of zero)
LogPi_jacobian = torch.log(action_scale * (1 - pi.pow(2)) + epsilon).sum(
-1, keepdim=True
)
logPi -= LogPi_jacobian
pi = pi * action_scale + action_bias
return pi, logPi, mu, LogPi_jacobian
def gaussian_logprob(noise, log_sig):
"""Compute Gaussian log probability."""
residual = (-0.5 * noise.pow(2) - log_sig).sum(-1, keepdim=True)
return residual - 0.5 * np.log(2 * np.pi) * noise.size(-1)
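def _gaussian_logprob_sanity_check(batch=4, action_dim=3):
    """Added illustrative check (not part of the original training code): under the
    reparameterization x = mu + noise * sig, gaussian_logprob should agree with
    torch.distributions.Normal(mu, sig).log_prob(x) summed over the action dimension."""
    mu = torch.zeros(batch, action_dim)
    log_sig = torch.full((batch, action_dim), -0.5)
    sig = log_sig.exp()
    # sampleNormal returns both the reparameterized sample and the raw noise
    x_t, noise = sampleNormal(mu=mu, sig=sig)
    ours = gaussian_logprob(noise, log_sig)
    ref = torch.distributions.Normal(mu, sig).log_prob(x_t).sum(-1, keepdim=True)
    assert torch.allclose(ours, ref, atol=1e-5), "gaussian_logprob mismatch"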
def policy_last_layer_op(s, pi_head, mu_tail, log_sig_tail, config):
head_out = pi_head(s)
mu = mu_tail(head_out)
log_sig_min = -10 # before: - config['action_dim'] * norm
log_sig_max = 2 # before: 12 * norm
log_sig = log_sig_tail(head_out) # +3
log_sig = torch.clamp(log_sig, min=log_sig_min, max=log_sig_max)
sig = log_sig.exp()
assert not torch.isnan(log_sig).any().item(), "isnan in log_sig!!"
log_sig_detach = log_sig
# for repameterization trick (mu + sig * N(0,1))
x_t, noise = sampleNormal(mu=mu, sig=sig)
logPi = gaussian_logprob(noise, log_sig)
pi, logPi, mu, LogPi_jacobian = normalizePi(x_t, logPi, mu)
assert not torch.isnan(head_out).any().item(), "isnan in head_out!!"
assert not torch.isnan(mu).any().item(), "isnan in mu!!"
return pi, logPi, log_sig_detach, mu, LogPi_jacobian.detach()
def policy_last_layer(
s,
pi_head,
mu_tail,
log_sig_tail,
config,
s_dvt=None,
pi_head_dvt=None,
mu_tail_dvt=None,
log_sig_tail_dvt=None,
save_pi_logs=False,
):
if s_dvt is not None:
pi_dvt, logPi_dvt, _, _, _ = policy_last_layer_op(
s_dvt, pi_head_dvt, mu_tail_dvt, log_sig_tail_dvt, config
)
pi, logPi, log_sig, mu, LogPi_jacobian = policy_last_layer_op(
s, pi_head, mu_tail, log_sig_tail, config
)
if save_pi_logs and (s_dvt is None):
return pi, logPi, log_sig.detach(), mu.detach(), LogPi_jacobian.detach()
elif save_pi_logs and (s_dvt is not None):
return (
pi,
logPi,
pi_dvt,
logPi_dvt,
log_sig.detach(),
mu.detach(),
LogPi_jacobian.detach(),
)
else:
return pi
def XSRL_nextObsEval(
alpha,
beta,
gamma,
omega,
config,
save_dir,
gradientStep=None,
saved_step=None,
suffix="last",
debug=False,
):
evaluate = suffix == "evaluate"
if evaluate:
path_eval = os.path.join(save_dir, "eval2obs")
createFolder(path_eval, "eval2obs already exist")
actionRepeat = config["actionRepeat"]
datasetEval_path = "testDatasets/testDataset_{}".format(config["new_env_name"])
if actionRepeat > 1:
datasetEval_path += "_noRepeatAction"
elif config["distractor"]:
datasetEval_path += "_withDistractor"
datasetEval_path += ".pkl"
datasetEval_path = os.path.join(SRL4RL_path, datasetEval_path)
dataset = loadPickle(datasetEval_path)
actions, observations, measures = (
dataset["actions"],
dataset["observations"],
dataset["measures"],
)
# if debug:
# last_index = actionRepeat * 200
# actions, observations, measures = actions[:-last_index], observations[:-last_index], measures[:-last_index]
measures = measures[1:][actionRepeat:][::actionRepeat]
"force the Garbage Collector to release unreferenced memory"
del dataset
gc.collect()
device = torch.device(config["device"])
Loss_obs = lambda x, y: torch.nn.MSELoss(reduction="sum")(x, y) / (
x.shape[0] * config["n_stack"]
)
loss_log = 0
print(" XSRL_nextObsEval (predicting next obs with PIeval_dataset) ......")
eval_steps = None
if config["new_env_name"] == "TurtlebotMazeEnv":
xHat_nextObsEval_step = 84
eval_steps = [87, 88, 101, 115, 117, 439, 440]
elif config["new_env_name"] == "HalfCheetahBulletEnv":
xHat_nextObsEval_step = 119
elif config["new_env_name"] == "InvertedPendulumSwingupBulletEnv":
xHat_nextObsEval_step = 45
elif config["new_env_name"] == "ReacherBulletEnv":
xHat_nextObsEval_step = 42
eval_steps = [14, 25, 396]
video_path = os.path.join(save_dir, "piEval_{}.mp4".format(suffix))
if config["new_env_name"] == "TurtlebotMazeEnv":
fps = 5
elif actionRepeat > 1:
fps = 20 // actionRepeat
else:
fps = 5
video_out = (
cv2.VideoWriter(
video_path,
cv2.VideoWriter_fourcc(*"mp4v"),
fps=fps,
frameSize=(int(588 * 2), 588),
)
if config["color"]
else cv2.VideoWriter(
video_path,
cv2.VideoWriter_fourcc(*"XVID"),
fps=fps,
frameSize=(int(588 * 2), 588),
isColor=0,
)
)
"init state with obs without noise"
if config["n_stack"] > 1:
nc = 3
observation = reset_stack(observations[0][None], config)
next_observation = reset_stack(observations[0][None], config)
else:
observation = observations[0][None]
with torch.no_grad():
stateExpl = resetState(observation, alpha, beta, gamma, config)
step_rep = 0
elapsed_steps = 0
len_traj = (len(observations) - 1) // actionRepeat - 1
assert len_traj == len(measures), "wrong division in len_traj"
all_states = np.zeros([len_traj, config["state_dim"]])
"observations[1:] -> remove reset obs and first actionRepeat time steps"
for step, (pi, next_obs) in enumerate(zip(actions, observations[1:])):
"Make a step"
if config["n_stack"] > 1:
if (step_rep + 1) > (config["actionRepeat"] - config["n_stack"]):
next_observation[
:, (step_rep - 1) * nc : ((step_rep - 1) + 1) * nc
] = next_obs
elif (step_rep + 1) == config["actionRepeat"]:
next_observation = next_obs[None]
step_rep += 1
if ((step + 1) % actionRepeat == 0) and (step + 1) > actionRepeat:
# (step + 1) > actionRepeat: let one iteration to better bootstrap the state estimation
step_rep = 0
TensA = numpy2pytorch(pi, differentiable=False, device=device).unsqueeze(
dim=0
)
"predict next states"
with torch.no_grad():
o_alpha = alpha(np2torch(observation, device))
o_beta = beta(torch.cat((np2torch(stateExpl, device), TensA), dim=1))
input_gamma = torch.cat((o_alpha, o_beta), dim=1)
s_next = gamma(input_gamma)
"Predict next observations of current elapsed_steps for all trajectories"
xHat = omega_last_layer(omega(s_next))
loss_log += pytorch2numpy(
Loss_obs(xHat, np2torch(next_observation, device))
)
"update video"
update_video(
im=255 * NCWH2WHC(next_observation[:, -3:, :, :]),
color=config["color"],
video_size=588,
video=video_out,
fpv=config["fpv"],
concatIM=255 * tensor2image(xHat[:, -3:, :, :]),
)
if type(eval_steps) is list:
saveIm = elapsed_steps in [xHat_nextObsEval_step] + eval_steps
name_ = "xHat_nextObsEval{}".format(elapsed_steps)
else:
saveIm = elapsed_steps == xHat_nextObsEval_step
name_ = "xHat_nextObsEval"
if saveIm:
"plot image to check the image prediction quality"
if config["n_stack"] > 1:
"saving other frames"
for step_r in range(config["n_stack"]):
name = "xHat_nextObsEval{}_frame{}".format(
elapsed_steps, step_r
)
plot_xHat(
NCWH2WHC(observation[:, step_r * nc : (step_r + 1) * nc]),
tensor2image(xHat[:, step_r * nc : (step_r + 1) * nc]),
imgTarget=NCWH2WHC(
next_observation[:, step_r * nc : (step_r + 1) * nc]
),
figure_path=save_dir,
with_nextObs=True,
name=name,
gradientStep=gradientStep,
suffix=suffix,
evaluate=evaluate,
)
else:
plot_xHat(
NCWH2WHC(observation[:, -3:, :, :]),
tensor2image(xHat[:, -3:, :, :]),
imgTarget=NCWH2WHC(next_observation[:, -3:, :, :]),
figure_path=save_dir,
with_nextObs=True,
name=name_,
gradientStep=gradientStep,
suffix=suffix,
evaluate=evaluate,
)
if elapsed_steps == xHat_nextObsEval_step:
if saved_step is not None:
plot_xHat(
NCWH2WHC(observation[:, -3:, :, :]),
tensor2image(xHat[:, -3:, :, :]),
imgTarget=NCWH2WHC(next_observation[:, -3:, :, :]),
figure_path=os.path.join(save_dir, "xHat_nextObsEval"),
with_nextObs=True,
name="xHat_nextObsEval",
gradientStep=gradientStep,
saved_step=saved_step,
)
if evaluate:
"plot image of all time steps"
plot_xHat(
NCWH2WHC(observation[:, -3:, :, :]),
tensor2image(xHat[:, -3:, :, :]),
imgTarget=NCWH2WHC(next_observation[:, -3:, :, :]),
figure_path=path_eval,
with_noise=config["with_noise"],
with_nextObs=True,
saved_step=elapsed_steps,
)
"save state"
all_states[elapsed_steps] = stateExpl[0]
elapsed_steps += 1
"update states"
stateExpl = pytorch2numpy(s_next)
"update inputs without noise for test"
# observation = add_noise(next_observation.copy(), noise_adder, config)
observation = next_observation.copy()
elif ((step + 1) % actionRepeat == 0) and (step + 1) == actionRepeat:
step_rep = 0
observation = next_observation.copy()
"Release everything if job is finished"
video_out.release()
cv2.destroyAllWindows()
loss_logNorm = loss_log / len_traj
print(" " * 100 + "done: nextObsEval = {:.3f}".format(loss_logNorm))
plotEmbedding(
"UMAP",
measures.copy(),
all_states,
figure_path=save_dir,
gradientStep=gradientStep,
saved_step=saved_step,
proj_dim=3,
suffix=suffix,
env_name=config["env_name"],
evaluate=evaluate,
)
plotEmbedding(
"PCA",
measures,
all_states,
figure_path=save_dir,
gradientStep=gradientStep,
saved_step=saved_step,
proj_dim=3,
suffix=suffix,
env_name=config["env_name"],
evaluate=evaluate,
)
"force the Garbage Collector to release unreferenced memory"
del (
actions,
observations,
measures,
video_out,
all_states,
stateExpl,
s_next,
observation,
next_observation,
xHat,
)
gc.collect()
return loss_logNorm
def piExplore2obs(
envExplor,
noise_adder,
alpha,
beta,
gamma,
omega,
pi_head,
mu_tail,
log_sig_tail,
config,
save_dir,
suffix="last",
debug=False,
evaluate=False,
saved_step=None,
):
device = torch.device(config["device"])
with_discoveryPi = is_with_discoveryPi(config)
if saved_step is None:
saved_step = ""
else:
saved_step = "_E{}".format(saved_step)
if config["env_name"] in ["TurtlebotEnv-v0", "TurtlebotMazeEnv-v0"]:
camera_id_eval = 1
imLabel = "map"
else:
camera_id_eval = -1
imLabel = "env"
if evaluate:
path_eval = os.path.join(save_dir, "piExplore2obs{}/".format(saved_step))
createFolder(path_eval, "piExplore2obs already exist")
path_eval_im = os.path.join(save_dir, "piExplore2im{}/".format(saved_step))
createFolder(path_eval_im, "piExplore2im already exist")
obs = envExplor.reset()
"init state with obs without noise"
if config["n_stack"] > 1:
nc = 3
actionRepeat = config["actionRepeat"]
observation = reset_stack(obs, config)
next_observation = reset_stack(obs, config)
else:
actionRepeat = 1
observation = obs
with torch.no_grad():
stateExpl = resetState(observation, alpha, beta, gamma, config)
eval_steps = 30 if debug else 500
video_path = os.path.join(save_dir, "piExplore_{}{}.mp4".format(suffix, saved_step))
fps = 5
video_out = (
cv2.VideoWriter(
video_path,
cv2.VideoWriter_fourcc(*"mp4v"),
fps=fps,
frameSize=(int(588 * 2), 588),
)
if config["color"]
else cv2.VideoWriter(
video_path,
cv2.VideoWriter_fourcc(*"XVID"),
fps=fps,
frameSize=(int(588 * 2), 588),
isColor=0,
)
)
print(" piExplore2obs (exploring and predicting next obs) ......")
for step in range(eval_steps):
"Make a step"
has_bump = True
num_bump = 0
while has_bump:
if evaluate:
assert num_bump < 500, "num_bump > 500"
num_bump += 1
if with_discoveryPi:
"update policy distribution and sample action"
with torch.no_grad():
TensA = policy_last_layer(
np2torch(stateExpl, "cpu"),
pi_head,
mu_tail,
log_sig_tail,
config=config,
).to(device)
pi = pytorch2numpy(TensA.squeeze(dim=0))
else:
pi = envExplor.action_space.sample()
TensA = numpy2pytorch(
pi, differentiable=False, device=device
).unsqueeze(dim=0)
if config["bumpDetection"]:
has_bump = envExplor.bump_detection(pi)
else:
has_bump = False
"Make a step"
for step_rep in range(actionRepeat):
obs, _, done, _ = envExplor.step(pi)
if config["n_stack"] > 1:
if (step_rep + 1) > (config["actionRepeat"] - config["n_stack"]):
next_observation[
:, (step_rep - 1) * nc : ((step_rep - 1) + 1) * nc
] = obs
elif (step_rep + 1) == actionRepeat:
assert step_rep < 2, "actionRepeat is already performed in env"
next_observation = obs
with torch.no_grad():
"predict next states"
o_alpha = alpha(np2torch(observation, device))
o_beta = beta(torch.cat((np2torch(stateExpl, device), TensA), dim=1))
input_gamma = torch.cat((o_alpha, o_beta), dim=1)
s_next = gamma(input_gamma)
"Predict next observations of current step for all trajectories"
xHat = omega_last_layer(omega(s_next))
"update video"
update_video(
im=255 * NCWH2WHC(next_observation[:, -3:, :, :]),
color=config["color"],
video_size=588,
video=video_out,
fpv=config["fpv"],
concatIM=255 * tensor2image(xHat[:, -3:, :, :]),
)
if evaluate:
im_high_render = (
render_env(
envExplor,
256,
False,
camera_id_eval,
config["color"],
downscaling=False,
)
/ 255.0
)
plot_xHat(
NCWH2WHC(observation[:, -3:, :, :]),
tensor2image(xHat[:, -3:, :, :]),
imgTarget=NCWH2WHC(next_observation[:, -3:, :, :]),
im_high_render=im_high_render,
imLabel=imLabel,
figure_path=path_eval,
with_noise=config["with_noise"],
with_nextObs=True,
saved_step=step,
)
im_high_render = render_env(
envExplor,
588,
False,
camera_id_eval,
config["color"],
downscaling=False,
)
cv2.imwrite(
path_eval_im + "ob_{:05d}".format(step) + ".png",
im_high_render[:, :, ::-1].astype(np.uint8),
)
"update inputs without noise for test"
# observation = add_noise(next_observation.copy(), noise_adder, config)
observation = next_observation.copy()
stateExpl = pytorch2numpy(s_next)
"Release everything if job is finished"
video_out.release()
cv2.destroyAllWindows()
"force the Garbage Collector to release unreferenced memory"
del video_out, stateExpl, s_next, observation, next_observation, xHat
gc.collect()
def getPiExplore(
envExplor,
noise_adder,
alpha,
beta,
gamma,
pi_head,
mu_tail,
log_sig_tail,
config,
save_dir,
n_epoch=None,
debug=False,
evaluate=False,
suffix="",
):
assert config["env_name"] in [
"TurtlebotEnv-v0",
"TurtlebotMazeEnv-v0",
], "getPiExplore only with Turtlebot"
device = torch.device(config["device"])
with_discoveryPi = is_with_discoveryPi(config)
observation = envExplor.reset()
with torch.no_grad():
stateExpl = resetState(observation, alpha, beta, gamma, config)
if debug:
eval_steps = [50, 100]
elif config["env_name"] == "TurtlebotEnv-v0":
eval_steps = [100, 200, 300]
elif config["env_name"] == "TurtlebotMazeEnv-v0":
eval_steps = [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000]
robot_pos = np.zeros((eval_steps[-1] + 1, 2))
eval_i = 0
robot_pos[0] = envExplor.object.copy()
if n_epoch:
n_epoch_ = "-%06d" % n_epoch
else:
n_epoch_ = ""
print(" getPiExplore (exploring) ......")
for step in range(eval_steps[-1]):
"Make a step"
has_bump = True
num_bump = 0
while has_bump:
if evaluate:
assert num_bump < 500, "num_bump > 500"
num_bump += 1
if with_discoveryPi:
"update policy distribution and sample action"
with torch.no_grad():
TensA = policy_last_layer(
np2torch(stateExpl, "cpu"),
pi_head,
mu_tail,
log_sig_tail,
config=config,
).to(device)
pi = pytorch2numpy(TensA.squeeze(dim=0))
else:
pi = envExplor.action_space.sample()
TensA = numpy2pytorch(
pi, differentiable=False, device=device
).unsqueeze(dim=0)
if config["bumpDetection"]:
has_bump = envExplor.bump_detection(pi)
else:
has_bump = False
"Make a step"
obs, _, done, _ = envExplor.step(pi)
"store robot pos"
robot_pos[step + 1] = envExplor.object.copy()
if (step + 1) == eval_steps[eval_i]:
visualizeMazeExplor(
config["env_name"],
robot_pos=robot_pos[: eval_steps[eval_i]].copy(),
save_dir=save_dir,
name="explore{}{}{}".format(eval_steps[eval_i], n_epoch_, suffix),
)
eval_i += 1
next_observation = obs
"predict next states"
with torch.no_grad():
o_alpha = alpha(np2torch(observation, device))
o_beta = beta(torch.cat((np2torch(stateExpl, device), TensA), dim=1))
input_gamma = torch.cat((o_alpha, o_beta), dim=1)
s_next = gamma(input_gamma)
"update inputs without noise for test"
# observation = add_noise(next_observation.copy(), noise_adder, config)
observation = next_observation
stateExpl = pytorch2numpy(s_next)
"force the Garbage Collector to release unreferenced memory"
del robot_pos, s_next, stateExpl, observation, next_observation
gc.collect()
class XSRLRunner(StateRunner):
def __init__(self, config):
super().__init__(config)
self.alpha, self.beta, self.gamma = torch.load(
os.path.join(config["srl_path"], "state_model.pt"),
map_location=torch.device("cpu"),
)
self.alpha.eval(), self.beta.eval(), self.gamma.eval()
self.initState()
def resetState(self):
self.state = self.initState().to("cpu")
self.pi = np.zeros((self.action_dim))
def update_state(self, x, demo=False):
with torch.no_grad():
"predict next state"
inputs = add_noise(x, self.noise_adder, self.noiseParams)
o_alpha = self.alpha(inputs.to(self.device)).to("cpu")
"FNNs only faster with cpu"
o_beta = self.beta(
torch.cat((self.state, np2torch(self.pi, "cpu").unsqueeze(0)), dim=1)
)
input_gamma = torch.cat((o_alpha, o_beta), dim=1)
new_state = self.gamma(input_gamma)
if demo:
self.last_inputs = pytorch2numpy(inputs)[0][-3:, :, :].transpose(1, 2, 0)
self.state = new_state
return new_state
def save_state_model(self, save_path):
print("Saving models ......")
save_model([self.alpha, self.beta, self.gamma], save_path + "state_model")
def train(self, training=True):
self.alpha.train(training)
self.beta.train(training)
self.gamma.train(training)
def to_device(self, device="cpu"):
torchDevice = torch.device(device)
self.alpha.to(torchDevice)
self.beta.to("cpu")
self.gamma.to("cpu")
| python |
# coding=utf-8
from __future__ import unicode_literals
from django.db import models
import pytz
import requests
from datetime import timedelta
import datetime
import math
import wargaming
from django.db.models.signals import pre_save
from django.db.models import Q
from django.contrib.postgres.fields import JSONField
from django.dispatch import receiver
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.utils.functional import cached_property
wot = wargaming.WoT(settings.WARGAMING_KEY, language='ru', region='ru')
wgn = wargaming.WGN(settings.WARGAMING_KEY, language='ru', region='ru')
def utc_now():
return datetime.datetime.now(tz=pytz.UTC)
def combine_dt(date, time):
return datetime.datetime.combine(date, time)
class TournamentInfo(dict):
def __init__(self, province_id, seq=None, **kwargs):
        super(TournamentInfo, self).__init__(seq or {}, **kwargs)
# {u'applications_decreased': False,
# u'apply_error_message': u'Чтобы подать заявку, войдите на сайт.',
# u'arena_name': u'Аэродром',
# u'available_applications_number': 0,
# u'battles': [],
# u'can_apply': False,
# u'front_id': u'campaign_05_ru_west',
# u'is_apply_visible': False,
# u'is_superfinal': False,
# u'next_round': None,
# u'next_round_start_time': u'19:15:00.000000',
# u'owner': None,
# u'pretenders': [{u'arena_battles_count': 49,
# u'arena_wins_percent': 38.78,
# u'cancel_action_id': None,
# u'clan_id': 94365,
# u'color': u'#b00a10',
# u'division_id': None,
# u'elo_rating_10': 1155,
# u'elo_rating_6': 1175,
# u'elo_rating_8': 1259,
# u'emblem_url': u'https://ru.wargaming.net/clans/media/clans/emblems/cl_365/94365/emblem_64x64_gm.png',
# u'fine_level': 0,
# u'id': 94365,
# u'landing': True,
# u'name': u'Deadly Decoy',
# u'tag': u'DECOY',
# u'xp': None}],
# u'province_id': u'herning',
# u'province_name': u'\u0425\u0435\u0440\u043d\u0438\u043d\u0433',
# u'province_pillage_end_datetime': None,
# u'province_revenue': 0,
# u'revenue_level': 0,
# u'round_number': 1,
# u'size': 32,
# u'start_time': u'19:00:00',
# u'turns_till_primetime': 11}
self.update(requests.get(
'https://ru.wargaming.net/globalmap/game_api/tournament_info?alias=%s' % province_id).json())
try:
province = Province.objects.get(province_id=self['province_id'], front__front_id=self['front_id'])
except Province.DoesNotExist:
return
arena_id = province.arena_id
owner = self['owner']
if owner:
update_clan_province_stat(arena_id, **owner)
for clan_data in self.clans_info.values():
update_clan_province_stat(arena_id, **clan_data)
@property
def clans_info(self):
clans = {}
for battle in self['battles']:
if 'first_competitor' in battle and battle['first_competitor']:
clans[battle['first_competitor']['id']] = battle['first_competitor']
if 'second_competitor' in battle and battle['second_competitor']:
clans[battle['second_competitor']['id']] = battle['second_competitor']
if isinstance(self['pretenders'], list):
for clan in self['pretenders']:
clans[clan['id']] = clan
if self['owner'] and self['owner']['id'] in clans:
del clans[self['owner']['id']]
return clans
@property
def pretenders(self):
return self.clans_info.keys()
def update_clan_province_stat(arena_id, tag, name, elo_rating_6, elo_rating_8, elo_rating_10,
arena_wins_percent, arena_battles_count, **kwargs):
pk = kwargs.get('id') or kwargs['clan_id']
clan = Clan.objects.update_or_create(id=pk, defaults={
'tag': tag, 'title': name,
'elo_6': elo_rating_6, 'elo_8': elo_rating_8,
'elo_10': elo_rating_10,
})[0]
ClanArenaStat.objects.update_or_create(clan=clan, arena_id=arena_id, defaults={
'wins_percent': arena_wins_percent,
'battles_count': arena_battles_count,
})
class Clan(models.Model):
tag = models.CharField(max_length=5, null=True)
title = models.CharField(max_length=255, null=True)
elo_6 = models.IntegerField(null=True)
elo_8 = models.IntegerField(null=True)
elo_10 = models.IntegerField(null=True)
def __repr__(self):
return '<Clan: %s>' % self.tag
def __str__(self):
return self.tag
def force_update(self):
clan_info = wgn.clans.info(clan_id=self.pk)[str(self.pk)]
self.tag = clan_info['tag']
self.title = clan_info['name']
self.save()
def as_json(self):
return {
'clan_id': self.pk,
'tag': self.tag,
'name': self.title,
'elo_6': self.elo_6,
'elo_8': self.elo_8,
'elo_10': self.elo_10,
}
def as_json_with_arena(self, arena_id):
data = self.as_json()
stat = self.arena_stats.filter(arena_id=arena_id)
if stat:
data['arena_stat'] = stat[0].as_json()
else:
data['arena_stat'] = ClanArenaStat(
clan=self,
arena_id=arena_id,
wins_percent=0,
battles_count=0,
).as_json()
return data
class Player(models.Model):
nickname = models.CharField(max_length=255)
clan = models.ForeignKey(Clan, null=True)
email = models.CharField(null=True, max_length=255)
password = models.CharField(null=True, max_length=255)
system_account = models.BooleanField(default=False)
class Front(models.Model):
front_id = models.CharField(max_length=254)
max_vehicle_level = models.IntegerField()
class Province(models.Model):
province_id = models.CharField(max_length=255)
front = models.ForeignKey(Front)
province_name = models.CharField(max_length=255)
province_owner = models.ForeignKey(Clan, on_delete=models.SET_NULL, null=True, blank=True)
arena_id = models.CharField(max_length=255)
arena_name = models.CharField(max_length=255)
prime_time = models.TimeField()
server = models.CharField(max_length=10)
def __repr__(self):
return '<Province: %s>' % self.province_id
def __str__(self):
return self.province_id
def force_update(self):
data = wot.globalmap.provinces(
front_id=self.front.front_id, province_id=self.province_id,
fields='arena_id,arena_name,province_name,prime_time,owner_clan_id,server')
if len(data) == 0:
raise Exception("Province '%s' not found on front '%s'", self.province_id, self.front.front_id)
data = data[0]
self.arena_id = data['arena_id']
self.arena_name = data['arena_name']
self.province_name = data['province_name']
self.prime_time = data['prime_time']
if data['owner_clan_id']:
self.province_owner = Clan.objects.get_or_create(pk=data['owner_clan_id'])[0]
self.server = data['server']
@cached_property
def tournament_info(self):
return TournamentInfo(self.province_id)
def as_json(self):
return {
'province_id': self.province_id,
'province_name': self.province_name,
'province_owner': self.province_owner and self.province_owner.as_json(),
'arena_id': self.arena_id,
'arena_name': self.arena_name,
'prime_time': self.prime_time,
'server': self.server,
'max_vehicle_level': self.front.max_vehicle_level,
}
class ClanArenaStat(models.Model):
clan = models.ForeignKey(Clan, related_name='arena_stats')
arena_id = models.CharField(max_length=255)
wins_percent = models.FloatField()
battles_count = models.IntegerField()
# level = models.IntegerField()
# base = models.IntegerField(choices=((1, 'Fist base'), (2, 'Second Base')))
def as_json(self):
return {
'wins_percent': self.wins_percent,
'battles_count': self.battles_count,
}
# CLEAN MAP
# [{u'active_battles': [],
# u'arena_id': u'10_hills',
# u'arena_name': u'\u0420\u0443\u0434\u043d\u0438\u043a\u0438',
# u'attackers': [],
# u'battles_start_at': u'2016-11-23T19:15:00',
# u'competitors': [192,
# 3861,
# 45846,
# 61752,
# 80424,
# 82433,
# 146509,
# 170851,
# 179351,
# 190526,
# 200649,
# 201252,
# 219575],
# u'current_min_bet': 0,
# u'daily_revenue': 0,
# u'front_id': u'campaign_05_ru_west',
# u'front_name': u'\u041a\u0430\u043c\u043f\u0430\u043d\u0438\u044f: \u0417\u0430\u043f\u0430\u0434',
# u'is_borders_disabled': False,
# u'landing_type': u'tournament',
# u'last_won_bet': 0,
# u'max_bets': 32,
# u'neighbours': [u'herning', u'odense', u'uddevalla'],
# u'owner_clan_id': None,
# u'pillage_end_at': None,
# u'prime_time': u'19:15',
# u'province_id': u'aarhus',
# u'province_name': u'\u041e\u0440\u0445\u0443\u0441',
# u'revenue_level': 0,
# u'round_number': None,
# u'server': u'RU6',
# u'status': None,
# u'uri': u'/#province/aarhus',
# u'world_redivision': False}]
class ProvinceAssault(models.Model):
date = models.DateField() # On what date Assault was performed
province = models.ForeignKey(Province, # On what province
related_name='assaults')
current_owner = models.ForeignKey(Clan, related_name='+', null=True)
clans = models.ManyToManyField(Clan) # By which clans
prime_time = models.TimeField()
arena_id = models.CharField(max_length=255)
round_number = models.IntegerField(null=True)
landing_type = models.CharField(max_length=255, null=True)
status = models.CharField(max_length=20, default='FINISHED', null=True)
division = JSONField(null=True)
class Meta:
ordering = ('date', )
unique_together = ('date', 'province')
def __repr__(self):
return '<ProvinceAssault @%s: %s owned by %s>' % (
self.date, self.province.province_id, str(self.current_owner))
@cached_property
def datetime(self):
if isinstance(self.date, str):
self.date = datetime.date(*[int(i) for i in self.date.split('-')])
if isinstance(self.prime_time, str):
self.prime_time = datetime.time(*[int(i) for i in self.prime_time.split(':')])
return combine_dt(self.date, self.prime_time).replace(tzinfo=pytz.UTC)
@cached_property
def planned_times(self):
if utc_now() > self.datetime:
if isinstance(self.round_number, int):
round_number = self.round_number
else:
# Bug-fix: WGAPI can return None on round number if map is new
round_number = 1
else:
            round_number = 1  # Bug-Fix: WGAPI returns the round number from the previous day
clans_count = len(self.clans.all())
if clans_count > 0:
total_rounds = round_number + int(math.ceil(math.log(clans_count, 2))) - 1
else:
total_rounds = round_number - 1
times = [
self.datetime + timedelta(minutes=30) * i
for i in range(0, total_rounds)
]
if self.current_owner:
times.append(self.datetime + timedelta(minutes=30) * total_rounds)
return times
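    # Added explanatory note (not part of the original code): the schedule assumes a
    # single-elimination bracket, so the clans still competing need ceil(log2(clans_count))
    # more rounds (e.g. 13 pretender clans -> 4 rounds) on top of the rounds already played,
    # each spaced 30 minutes after the prime time, plus one extra round appended when the
    # province has an owner who defends it in the final.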
def clan_battles(self, clan):
max_rounds = len(self.planned_times)
existing_battles = {b.round: b for b in self.battles.filter(Q(clan_a=clan) | Q(clan_b=clan))}
res = []
for round_number in range(1, max_rounds + 1):
if round_number in existing_battles:
res.append(existing_battles[round_number])
else:
# create FAKE planned battle
pb = ProvinceBattle(
assault=self,
province=self.province,
arena_id=self.arena_id,
round=round_number,
)
if round_number <= self.round_number and self.status == 'STARTED':
pb.winner = clan
if round_number == max_rounds and self.current_owner:
pb.clan_a = self.current_owner
pb.clan_b = clan
res.append(pb)
return res
@cached_property
def max_rounds(self):
return len(self.planned_times)
def as_clan_json(self, clan, current_only=True):
if current_only:
            battles = [b.as_json() for b in self.clan_battles(clan)
                       if (b.round >= self.round_number and self.status != 'FINISHED')
                       or self.datetime > utc_now()]
else:
battles = [b.as_json() for b in self.clan_battles(clan)]
if self.current_owner == clan:
mode = 'defence'
battles = battles[-1:-2:-1]
else:
mode = 'attack'
return {
'mode': mode,
'province_info': self.province.as_json(),
'prime_time': self.datetime,
'clans': {c.pk: c.as_json_with_arena(self.arena_id) for c in self.clans.all()},
'battles': battles,
}
class ProvinceBattle(models.Model):
assault = models.ForeignKey(ProvinceAssault, related_name='battles')
province = models.ForeignKey(Province, related_name='battles')
arena_id = models.CharField(max_length=255)
clan_a = models.ForeignKey(Clan, related_name='+')
clan_b = models.ForeignKey(Clan, related_name='+')
winner = models.ForeignKey(Clan, null=True, related_name='battles_winner')
start_at = models.DateTimeField()
round = models.IntegerField()
class Meta:
ordering = ('round', 'start_at')
def __repr__(self):
clan_a_tag = clan_b_tag = province_id = None
try:
clan_a_tag = self.clan_a.tag
except ObjectDoesNotExist:
clan_a_tag = None
try:
clan_b_tag = self.clan_b.tag
except ObjectDoesNotExist:
clan_b_tag = None
try:
province_id = self.province.province_id
except ObjectDoesNotExist:
province_id = None
return '<Battle round %s: %s VS %s on %s>' % (self.round, clan_a_tag, clan_b_tag, province_id)
def __str__(self):
return repr(self)
@property
def round_datetime(self):
prime_time = self.province.prime_time
date = self.assault.date
return combine_dt(date, prime_time).replace(tzinfo=pytz.UTC) + timedelta(minutes=30) * (self.round - 1)
@property
def title(self):
power = self.assault.max_rounds - self.round - 1
if power == 0:
return 'Final'
else:
return 'Round 1 / %s' % (2 ** power)
def as_json(self):
try:
clan_a = self.clan_a
except ObjectDoesNotExist:
clan_a = None
try:
clan_b = self.clan_b
except ObjectDoesNotExist:
clan_b = None
return {
'planned_start_at': self.round_datetime,
'real_start_at': self.start_at,
'clan_a': clan_a.as_json_with_arena(self.arena_id) if clan_a else None,
'clan_b': clan_b.as_json_with_arena(self.arena_id) if clan_b else None,
'winner': self.winner.as_json() if self.winner else None
}
class ProvinceTag(models.Model):
date = models.DateField()
tag = models.CharField(max_length=255)
province_id = models.CharField(max_length=255)
def __repr__(self):
return "<ProvinceTag %s: %s@%s>" % (self.date, self.tag, self.province_id)
@receiver(pre_save, sender=Clan)
def fetch_minimum_clan_info(sender, instance, **kwargs):
if (not instance.tag or not instance.title) and instance.pk:
instance.force_update()
elif not instance.pk and instance.tag:
info = [i for i in wgn.clans.list(search=instance.tag) if i['tag'] == instance.tag]
if len(info) == 1:
instance.pk = info[0]['clan_id']
instance.title = info[0]['name']
else:
# No clan with such tag, do not allow such Clan
instance.tag = None
instance.title = None
@receiver(pre_save, sender=Province)
def fetch_minimum_province_info(sender, instance, **kwargs):
required_fields = ['province_name', 'arena_id', 'arena_name', 'prime_time', 'server']
for field in required_fields:
if not getattr(instance, field):
instance.force_update()
| python |
"""
python setup.py sdist
twine upload dist/*
"""
import cv2
if cv2.cuda.getCudaEnabledDeviceCount() > 0:
print("检测到cuda环境") | python |
import librosa as lr
import numpy as np
def mu_law_encoding(data, mu):
mu_x = np.sign(data) * np.log(1 + mu * np.abs(data)) / np.log(mu + 1)
return mu_x
def mu_law_expansion(data, mu):
s = np.sign(data) * (np.exp(np.abs(data) * np.log(mu + 1)) - 1) / mu
return s
def quantize_data(data, classes):
mu_x = mu_law_encoding(data, classes)
bins = np.linspace(-1, 1, classes)
quantized = np.digitize(mu_x, bins) - 1
return quantized
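# Not part of the original file: create_chunks() below calls
# list_all_audio_files(), which is neither defined nor imported here, so this
# is a minimal stand-in (assumption) that globs a few common audio extensions.
import glob
import os

def list_all_audio_files(location):
    files = []
    for ext in ("*.wav", "*.mp3", "*.flac"):
        files.extend(glob.glob(os.path.join(location, "**", ext), recursive=True))
    return sorted(files)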
def create_chunks(location):
print("create dataset from audio files at", location)
files = list_all_audio_files(location)
processed_files = []
for i, file in enumerate(files):
print(" processed " + str(i) + " of " + str(len(files)) + " files")
file_data, _ = lr.load(path=file,
sr=None,
mono=True)
quantized_data = quantize_data(file_data, 256).astype(np.uint8)
processed_files.append(quantized_data)
return processed_files
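# Hedged usage sketch (not in the original): round-trips a synthetic signal
# through the mu-law transforms defined above and shows the quantized classes.
if __name__ == "__main__":
    x = np.linspace(-1.0, 1.0, 5)
    restored = mu_law_expansion(mu_law_encoding(x, 255), 255)
    print(np.allclose(x, restored))  # True: expansion inverts encoding
    print(quantize_data(x, 256))     # integer class ids in [0, 255]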
| python |
from random import randint
cpu = randint(0, 5)
usuario = int(input('Enter a number from 0 to 5: '))
if cpu == usuario:
    print('\033[33mYou got it right!')
else:
    print('You missed it!') | python |
from DBMS_Software.queryProcessor.ReadGlobalDataDictionary import readGlobalDataDictionary
from DBMS_Software.queryProcessor.ReadGlobalDataDictionary import fetchFileFromGCP
import os
def createSQLDump():
print("Enter the TableName:")
TableName = input()
tableLocation = readGlobalDataDictionary(TableName)
if(tableLocation == 'RemoteLocation'):
fetchFileFromGCP(TableName)
FileExtension = ".txt"
FileName = TableName + FileExtension # Framing the FileName
metaFileExtension = 'MetaData.txt'
metaDatafileName = TableName + metaFileExtension
FileObject = open(metaDatafileName, 'r')
Lines = FileObject.readlines()
for eachline in Lines:
filepath = os.path.join('E:/SQLDump_Extraction', metaDatafileName)
if not os.path.exists('E:/SQLDump_Extraction'):
os.makedirs('E:/SQLDump_Extraction')
f = open(filepath, "a")
f.write(eachline)
f.close()
filepath = os.path.join('E:/SQLDump_Extraction', FileName)
if not os.path.exists('E:/SQLDump_Extraction'):
os.makedirs('E:/SQLDump_Extraction')
f = open(filepath, "a")
| python |
"""STACK Configs."""
import os
import yaml
config = yaml.load(open('stack/config.yml', 'r'), Loader=yaml.FullLoader)
PROJECT_NAME = config['PROJECT_NAME']
STAGE = config.get('STAGE') or 'dev'
# primary bucket
BUCKET = config['BUCKET']
# Additional environement variable to set in the task/lambda
TASK_ENV: dict = dict()
# Existing VPC to point ECS/LAMBDA stacks towards. Defaults to creating a new
# VPC if no ID is supplied.
VPC_ID = os.environ.get("VPC_ID") or config['VPC_ID']
################################################################################
# #
# ECS #
# #
################################################################################
# Min/Max Number of ECS images
MIN_ECS_INSTANCES: int = config['MAX_ECS_INSTANCES']
MAX_ECS_INSTANCES: int = config['MAX_ECS_INSTANCES']
# CPU value | Memory value
# 256 (.25 vCPU) | 0.5 GB, 1 GB, 2 GB
# 512 (.5 vCPU) | 1 GB, 2 GB, 3 GB, 4 GB
# 1024 (1 vCPU) | 2 GB, 3 GB, 4 GB, 5 GB, 6 GB, 7 GB, 8 GB
# 2048 (2 vCPU) | Between 4 GB and 16 GB in 1-GB increments
# 4096 (4 vCPU) | Between 8 GB and 30 GB in 1-GB increments
TASK_CPU: int = config['TASK_CPU']
TASK_MEMORY: int = config['TASK_MEMORY']
################################################################################
# #
# LAMBDA #
# #
################################################################################
TIMEOUT: int = config['TIMEOUT']
MEMORY: int = config['MEMORY']
# stack skips setting concurrency if this value is 0
# the stack will instead use unreserved lambda concurrency
MAX_CONCURRENT: int = 500 if STAGE == "prod" else config['MAX_CONCURRENT']
# Cache
CACHE_NODE_TYPE = config['CACHE_NODE_TYPE']
CACHE_ENGINE = config['CACHE_ENGINE']
CACHE_NODE_NUM = config['CACHE_NODE_NUM']
| python |
"""
Script for testing purposes.
"""
import zmq
def run(port=5555):
context = zmq.Context()
# using zmq.ROUTER
socket = context.socket(zmq.ROUTER)
# bind socket
socket.bind('tcp://*:{}'.format(port))
while True:
msg = socket.recv_multipart()
print('Received message {}'.format(msg))
socket.send_multipart([msg[0], b'', b'RECEIVED'])
if __name__ == '__main__':
run() | python |
from itertools import product
from string import ascii_lowercase
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Period,
Series,
Timedelta,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestCounting:
def test_cumcount(self):
df = DataFrame([["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"])
g = df.groupby("A")
sg = g.A
expected = Series([0, 1, 2, 0, 3])
tm.assert_series_equal(expected, g.cumcount())
tm.assert_series_equal(expected, sg.cumcount())
def test_cumcount_empty(self):
ge = DataFrame().groupby(level=0)
se = Series(dtype=object).groupby(level=0)
# edge case, as this is usually considered float
e = Series(dtype="int64")
tm.assert_series_equal(e, ge.cumcount())
tm.assert_series_equal(e, se.cumcount())
def test_cumcount_dupe_index(self):
df = DataFrame(
[["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"], index=[0] * 5
)
g = df.groupby("A")
sg = g.A
expected = Series([0, 1, 2, 0, 3], index=[0] * 5)
tm.assert_series_equal(expected, g.cumcount())
tm.assert_series_equal(expected, sg.cumcount())
def test_cumcount_mi(self):
mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]])
df = DataFrame([["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"], index=mi)
g = df.groupby("A")
sg = g.A
expected = Series([0, 1, 2, 0, 3], index=mi)
tm.assert_series_equal(expected, g.cumcount())
tm.assert_series_equal(expected, sg.cumcount())
def test_cumcount_groupby_not_col(self):
df = DataFrame(
[["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"], index=[0] * 5
)
g = df.groupby([0, 0, 0, 1, 0])
sg = g.A
expected = Series([0, 1, 2, 0, 3], index=[0] * 5)
tm.assert_series_equal(expected, g.cumcount())
tm.assert_series_equal(expected, sg.cumcount())
def test_ngroup(self):
df = DataFrame({"A": list("aaaba")})
g = df.groupby("A")
sg = g.A
expected = Series([0, 0, 0, 1, 0])
tm.assert_series_equal(expected, g.ngroup())
tm.assert_series_equal(expected, sg.ngroup())
def test_ngroup_distinct(self):
df = DataFrame({"A": list("abcde")})
g = df.groupby("A")
sg = g.A
expected = Series(range(5), dtype="int64")
tm.assert_series_equal(expected, g.ngroup())
tm.assert_series_equal(expected, sg.ngroup())
def test_ngroup_one_group(self):
df = DataFrame({"A": [0] * 5})
g = df.groupby("A")
sg = g.A
expected = Series([0] * 5)
tm.assert_series_equal(expected, g.ngroup())
tm.assert_series_equal(expected, sg.ngroup())
def test_ngroup_empty(self):
ge = DataFrame().groupby(level=0)
se = Series(dtype=object).groupby(level=0)
# edge case, as this is usually considered float
e = Series(dtype="int64")
tm.assert_series_equal(e, ge.ngroup())
tm.assert_series_equal(e, se.ngroup())
def test_ngroup_series_matches_frame(self):
df = DataFrame({"A": list("aaaba")})
s = Series(list("aaaba"))
tm.assert_series_equal(df.groupby(s).ngroup(), s.groupby(s).ngroup())
def test_ngroup_dupe_index(self):
df = DataFrame({"A": list("aaaba")}, index=[0] * 5)
g = df.groupby("A")
sg = g.A
expected = Series([0, 0, 0, 1, 0], index=[0] * 5)
tm.assert_series_equal(expected, g.ngroup())
tm.assert_series_equal(expected, sg.ngroup())
def test_ngroup_mi(self):
mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]])
df = DataFrame({"A": list("aaaba")}, index=mi)
g = df.groupby("A")
sg = g.A
expected = Series([0, 0, 0, 1, 0], index=mi)
tm.assert_series_equal(expected, g.ngroup())
tm.assert_series_equal(expected, sg.ngroup())
def test_ngroup_groupby_not_col(self):
df = DataFrame({"A": list("aaaba")}, index=[0] * 5)
g = df.groupby([0, 0, 0, 1, 0])
sg = g.A
expected = Series([0, 0, 0, 1, 0], index=[0] * 5)
tm.assert_series_equal(expected, g.ngroup())
tm.assert_series_equal(expected, sg.ngroup())
def test_ngroup_descending(self):
df = DataFrame(["a", "a", "b", "a", "b"], columns=["A"])
g = df.groupby(["A"])
ascending = Series([0, 0, 1, 0, 1])
descending = Series([1, 1, 0, 1, 0])
tm.assert_series_equal(descending, (g.ngroups - 1) - ascending)
tm.assert_series_equal(ascending, g.ngroup(ascending=True))
tm.assert_series_equal(descending, g.ngroup(ascending=False))
def test_ngroup_matches_cumcount(self):
# verify one manually-worked out case works
df = DataFrame(
[["a", "x"], ["a", "y"], ["b", "x"], ["a", "x"], ["b", "y"]],
columns=["A", "X"],
)
g = df.groupby(["A", "X"])
g_ngroup = g.ngroup()
g_cumcount = g.cumcount()
expected_ngroup = Series([0, 1, 2, 0, 3])
expected_cumcount = Series([0, 0, 0, 1, 0])
tm.assert_series_equal(g_ngroup, expected_ngroup)
tm.assert_series_equal(g_cumcount, expected_cumcount)
def test_ngroup_cumcount_pair(self):
# brute force comparison for all small series
for p in product(range(3), repeat=4):
df = DataFrame({"a": p})
g = df.groupby(["a"])
order = sorted(set(p))
ngroupd = [order.index(val) for val in p]
cumcounted = [p[:i].count(val) for i, val in enumerate(p)]
tm.assert_series_equal(g.ngroup(), Series(ngroupd))
tm.assert_series_equal(g.cumcount(), Series(cumcounted))
def test_ngroup_respects_groupby_order(self):
np.random.seed(0)
df = DataFrame({"a": np.random.choice(list("abcdef"), 100)})
for sort_flag in (False, True):
g = df.groupby(["a"], sort=sort_flag)
df["group_id"] = -1
df["group_index"] = -1
for i, (_, group) in enumerate(g):
df.loc[group.index, "group_id"] = i
for j, ind in enumerate(group.index):
df.loc[ind, "group_index"] = j
tm.assert_series_equal(Series(df["group_id"].values), g.ngroup())
tm.assert_series_equal(Series(df["group_index"].values), g.cumcount())
@pytest.mark.parametrize(
"datetimelike",
[
[Timestamp(f"2016-05-{i:02d} 20:09:25+00:00") for i in range(1, 4)],
[Timestamp(f"2016-05-{i:02d} 20:09:25") for i in range(1, 4)],
[Timedelta(x, unit="h") for x in range(1, 4)],
[Period(freq="2W", year=2017, month=x) for x in range(1, 4)],
],
)
def test_count_with_datetimelike(self, datetimelike):
# test for #13393, where DataframeGroupBy.count() fails
# when counting a datetimelike column.
df = DataFrame({"x": ["a", "a", "b"], "y": datetimelike})
res = df.groupby("x").count()
expected = DataFrame({"y": [2, 1]}, index=["a", "b"])
expected.index.name = "x"
tm.assert_frame_equal(expected, res)
def test_count_with_only_nans_in_first_group(self):
# GH21956
df = DataFrame({"A": [np.nan, np.nan], "B": ["a", "b"], "C": [1, 2]})
result = df.groupby(["A", "B"]).C.count()
mi = MultiIndex(levels=[[], ["a", "b"]], codes=[[], []], names=["A", "B"])
expected = Series([], index=mi, dtype=np.int64, name="C")
tm.assert_series_equal(result, expected, check_index_type=False)
def test_count_groupby_column_with_nan_in_groupby_column(self):
# https://github.com/pandas-dev/pandas/issues/32841
df = DataFrame({"A": [1, 1, 1, 1, 1], "B": [5, 4, np.NaN, 3, 0]})
res = df.groupby(["B"]).count()
expected = DataFrame(
index=Index([0.0, 3.0, 4.0, 5.0], name="B"), data={"A": [1, 1, 1, 1]}
)
tm.assert_frame_equal(expected, res)
def test_groupby_count_dateparseerror(self):
dr = date_range(start="1/1/2012", freq="5min", periods=10)
# BAD Example, datetimes first
ser = Series(np.arange(10), index=[dr, np.arange(10)])
grouped = ser.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
ser = Series(np.arange(10), index=[np.arange(10), dr])
grouped = ser.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
tm.assert_series_equal(result, expected)
def test_groupby_timedelta_cython_count():
df = DataFrame(
{"g": list("ab" * 2), "delt": np.arange(4).astype("timedelta64[ns]")}
)
expected = Series([2, 2], index=Index(["a", "b"], name="g"), name="delt")
result = df.groupby("g").delt.count()
tm.assert_series_equal(expected, result)
def test_count():
n = 1 << 15
dr = date_range("2015-08-30", periods=n // 10, freq="T")
df = DataFrame(
{
"1st": np.random.choice(list(ascii_lowercase), n),
"2nd": np.random.randint(0, 5, n),
"3rd": np.random.randn(n).round(3),
"4th": np.random.randint(-10, 10, n),
"5th": np.random.choice(dr, n),
"6th": np.random.randn(n).round(3),
"7th": np.random.randn(n).round(3),
"8th": np.random.choice(dr, n) - np.random.choice(dr, 1),
"9th": np.random.choice(list(ascii_lowercase), n),
}
)
for col in df.columns.drop(["1st", "2nd", "4th"]):
df.loc[np.random.choice(n, n // 10), col] = np.nan
df["9th"] = df["9th"].astype("category")
for key in ["1st", "2nd", ["1st", "2nd"]]:
left = df.groupby(key).count()
right = df.groupby(key).apply(DataFrame.count).drop(key, axis=1)
tm.assert_frame_equal(left, right)
def test_count_non_nulls():
# GH#5610
# count counts non-nulls
df = DataFrame(
[[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, np.nan]],
columns=["A", "B", "C"],
)
count_as = df.groupby("A").count()
count_not_as = df.groupby("A", as_index=False).count()
expected = DataFrame([[1, 2], [0, 0]], columns=["B", "C"], index=[1, 3])
expected.index.name = "A"
tm.assert_frame_equal(count_not_as, expected.reset_index())
tm.assert_frame_equal(count_as, expected)
count_B = df.groupby("A")["B"].count()
tm.assert_series_equal(count_B, expected["B"])
def test_count_object():
df = DataFrame({"a": ["a"] * 3 + ["b"] * 3, "c": [2] * 3 + [3] * 3})
result = df.groupby("c").a.count()
expected = Series([3, 3], index=Index([2, 3], name="c"), name="a")
tm.assert_series_equal(result, expected)
df = DataFrame({"a": ["a", np.nan, np.nan] + ["b"] * 3, "c": [2] * 3 + [3] * 3})
result = df.groupby("c").a.count()
expected = Series([1, 3], index=Index([2, 3], name="c"), name="a")
tm.assert_series_equal(result, expected)
def test_count_cross_type():
# GH8169
vals = np.hstack(
(np.random.randint(0, 5, (100, 2)), np.random.randint(0, 2, (100, 2)))
)
df = DataFrame(vals, columns=["a", "b", "c", "d"])
df[df == 2] = np.nan
expected = df.groupby(["c", "d"]).count()
for t in ["float32", "object"]:
df["a"] = df["a"].astype(t)
df["b"] = df["b"].astype(t)
result = df.groupby(["c", "d"]).count()
tm.assert_frame_equal(result, expected)
def test_lower_int_prec_count():
df = DataFrame(
{
"a": np.array([0, 1, 2, 100], np.int8),
"b": np.array([1, 2, 3, 6], np.uint32),
"c": np.array([4, 5, 6, 8], np.int16),
"grp": list("ab" * 2),
}
)
result = df.groupby("grp").count()
expected = DataFrame(
{"a": [2, 2], "b": [2, 2], "c": [2, 2]}, index=Index(list("ab"), name="grp")
)
tm.assert_frame_equal(result, expected)
def test_count_uses_size_on_exception():
class RaisingObjectException(Exception):
pass
class RaisingObject:
def __init__(self, msg="I will raise inside Cython"):
super().__init__()
self.msg = msg
def __eq__(self, other):
# gets called in Cython to check that raising calls the method
raise RaisingObjectException(self.msg)
df = DataFrame({"a": [RaisingObject() for _ in range(4)], "grp": list("ab" * 2)})
result = df.groupby("grp").count()
expected = DataFrame({"a": [2, 2]}, index=Index(list("ab"), name="grp"))
tm.assert_frame_equal(result, expected)
| python |
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Terser rules for Bazel
The Terser rules run the Terser JS minifier with Bazel.
Wraps the Terser CLI documented at https://github.com/terser-js/terser#command-line-usage
## Installation
Add the `@bazel/terser` npm package to your `devDependencies` in `package.json`.
## Installing with user-managed dependencies
If you didn't use the `yarn_install` or `npm_install` rule, you'll have to declare a rule in your root `BUILD.bazel` file to execute terser:
```python
# Create a terser rule to use in terser_minified#terser_bin
# attribute when using user-managed dependencies
nodejs_binary(
name = "terser_bin",
entry_point = "//:node_modules/terser/bin/uglifyjs",
# Point bazel to your node_modules to find the entry point
data = ["//:node_modules"],
)
```
"""
load(":terser_minified.bzl", _terser_minified = "terser_minified")
terser_minified = _terser_minified
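# Hedged usage sketch (BUILD file); attribute names other than terser_bin are
# assumptions, not taken from this file:
#
#   terser_minified(
#       name = "bundle.min",
#       src = ":bundle.js",
#       # terser_bin is only needed with user-managed dependencies, as above
#       terser_bin = "//:terser_bin",
#   )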
| python |
"""
Referral answer related API endpoints.
"""
from django.db.models import Q
from django.http import Http404
from django_fsm import TransitionNotAllowed
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.permissions import BasePermission, IsAuthenticated
from rest_framework.response import Response
from .. import models
from ..forms import ReferralAnswerForm
from ..serializers import ReferralAnswerSerializer
from .permissions import NotAllowed
class CanCreateAnswer(BasePermission):
"""Permission to create a ReferralAnswer through the API."""
def has_permission(self, request, view):
"""
Members of a unit related to a referral can create answers for said referral.
"""
referral = view.get_referral(request)
return (
request.user.is_authenticated
and referral.units.filter(members__id=request.user.id).exists()
)
class CanRetrieveAnswer(BasePermission):
"""Permission to retrieve a ReferralAnswer through the API."""
def has_permission(self, request, view):
"""
Members of a unit related to a referral can retrieve answers for said referral.
"""
answer = view.get_object()
return (
request.user.is_authenticated
and answer.referral.units.filter(members__id=request.user.id).exists()
)
class CanUpdateAnswer(BasePermission):
"""Permission to update a ReferralAnswer through the API."""
def has_permission(self, request, view):
"""
Only the answer's author can update a referral answer.
"""
answer = view.get_object()
return request.user == answer.created_by
class ReferralAnswerViewSet(viewsets.ModelViewSet):
"""
API endpoints for referral answers.
"""
permission_classes = [NotAllowed]
queryset = models.ReferralAnswer.objects.all()
serializer_class = ReferralAnswerSerializer
def get_permissions(self):
"""
Manage permissions for default methods separately, delegating to @action defined
permissions for other actions.
"""
if self.action == "list":
permission_classes = [IsAuthenticated]
elif self.action == "create":
permission_classes = [CanCreateAnswer]
elif self.action == "retrieve":
permission_classes = [CanRetrieveAnswer]
elif self.action == "update":
permission_classes = [CanUpdateAnswer]
else:
try:
permission_classes = getattr(self, self.action).kwargs.get(
"permission_classes"
)
except AttributeError:
permission_classes = self.permission_classes
return [permission() for permission in permission_classes]
def get_referral(self, request):
"""
Helper: get the related referral, return an error if it does not exist.
"""
referral_id = request.data.get("referral") or request.query_params.get(
"referral"
)
try:
referral = models.Referral.objects.get(id=referral_id)
except models.Referral.DoesNotExist as error:
raise Http404(
f"Referral {request.data.get('referral')} not found"
) from error
return referral
def list(self, request, *args, **kwargs):
"""
Let users get a list of referral answers. Users need to filter them by their related
referral. We use the queryset & filter to manage what a given user is allowed to see.
"""
referral_id = self.request.query_params.get("referral", None)
if referral_id is None:
return Response(
status=400,
data={
"errors": ["ReferralAnswer list requests need a referral parameter"]
},
)
queryset = (
self.get_queryset()
.filter(
# The referral author is only allowed to see published answers
Q(
referral__user=request.user,
state=models.ReferralAnswerState.PUBLISHED,
referral__id=referral_id,
)
# Members of the referral's linked units are allowed to see all answers
| Q(
referral_id=referral_id,
referral__units__members=request.user,
)
)
.distinct()
)
queryset = queryset.order_by("-created_at")
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
def create(self, request, *args, **kwargs):
"""
Create a new referral answer as the client issues a POST on the referralanswers endpoint.
"""
# Make sure the referral exists and return an error otherwise.
referral = self.get_referral(request)
form = ReferralAnswerForm(
{
"content": request.data.get("content") or "",
"created_by": request.user,
"referral": referral,
"state": models.ReferralAnswerState.DRAFT,
},
)
if not form.is_valid():
return Response(status=400, data=form.errors)
referral_answer = form.save()
# Make sure the referral can support a new draft before creating attachments.
try:
referral.draft_answer(referral_answer)
referral.save()
except TransitionNotAllowed:
# If the referral cannot support a new draft answer, delete the answer
# we just created.
referral_answer.delete()
return Response(
status=400,
data={
"errors": {
f"Transition DRAFT_ANSWER not allowed from state {referral.state}."
}
},
)
for attachment_dict in request.data.get("attachments") or []:
try:
referral_answer.attachments.add(
models.ReferralAnswerAttachment.objects.get(
id=attachment_dict["id"]
)
)
referral_answer.save()
except models.ReferralAnswerAttachment.DoesNotExist:
# Since we have already created the ReferralAnswer, there's not much of a point
# in bailing out now with an error: we'd rather fail silently and let the user
# re-add the attachment if needed.
pass
return Response(status=201, data=ReferralAnswerSerializer(referral_answer).data)
def update(self, request, *args, **kwargs):
"""
Update an existing referral answer.
"""
instance = self.get_object()
# Make sure the referral exists and return an error otherwise.
referral = self.get_referral(request)
# Users can only modify their own referral answers. For other users' answers,
# they're expected to use the "Revise" feature
if not request.user.id == instance.created_by.id:
return Response(status=403)
form = ReferralAnswerForm(
{
"content": request.data.get("content") or "",
"created_by": request.user,
"referral": referral,
"state": instance.state,
},
instance=instance,
)
if not form.is_valid():
return Response(status=400, data=form.errors)
referral_answer = form.save()
return Response(status=200, data=ReferralAnswerSerializer(referral_answer).data)
@action(
detail=True,
methods=["post"],
permission_classes=[CanUpdateAnswer],
)
# pylint: disable=invalid-name
def remove_attachment(self, request, pk):
"""
Remove an attachment from this answer.
We're using an action route on the ReferralAnswer instead of a DELETE on the attachment
as the attachment can be linked to more than one answer.
"""
answer = self.get_object()
if answer.state == models.ReferralAnswerState.PUBLISHED:
return Response(
status=400,
data={
"errors": ["attachments cannot be removed from a published answer"]
},
)
try:
attachment = answer.attachments.get(id=request.data.get("attachment"))
except models.ReferralAnswerAttachment.DoesNotExist:
return Response(
status=400,
data={
"errors": [
(
f"referral answer attachment {request.data.get('attachment')} "
"does not exist"
)
]
},
)
answer.attachments.remove(attachment)
answer.refresh_from_db()
return Response(status=200, data=ReferralAnswerSerializer(answer).data)
| python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='BookInfo',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name='PeopleInfo',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=10)),
('gender', models.BooleanField()),
('book', models.ForeignKey(to='Book.BookInfo')),
],
),
]
| python |
from tests.system.common import CondoorTestCase, StopTelnetSrv, StartTelnetSrv
from tests.dmock.dmock import SunHandler
from tests.utils import remove_cache_file
import condoor
class TestSunConnection(CondoorTestCase):
@StartTelnetSrv(SunHandler, 10023)
def setUp(self):
CondoorTestCase.setUp(self)
@StopTelnetSrv()
def tearDown(self):
pass
def test_sun_connection(self):
remove_cache_file()
urls = ["telnet://admin:[email protected]:10023", "telnet://admin:admin@host1"]
conn = condoor.Connection("host", urls, log_session=self.log_session, log_level=self.log_level)
with self.assertRaises(condoor.ConnectionTimeoutError):
conn.connect(self.logfile_condoor)
conn.disconnect()
#with self.assertRaises(condoor.ConnectionTimeoutError):
# conn.reconnect(30)
    def test_sun_connection_wrong_password(self):
urls = ["telnet://admin:[email protected]:10023", "telnet://admin:admin@host1"]
conn = condoor.Connection("host", urls, log_session=self.log_session, log_level=self.log_level)
with self.assertRaises(condoor.ConnectionAuthenticationError):
conn.connect(self.logfile_condoor)
conn.disconnect()
| python |
#!/usr/bin/env python3
# encoding=utf-8
#codeby 道长且阻
#email @ydhcui/QQ664284092
from core.plugin import BaseHostPlugin
import re
import socket
import binascii
import hashlib
import struct
import time
class MongodbNoAuth(BaseHostPlugin):
bugname = "Mongodb 未授权访问"
bugrank = "高危"
def filter(self,host):
return host.port == 27017 or host.service == 'mongodb'
def verify(self,host,user='',pwd='',timeout=10):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout)
try:
sock.connect((host.host,int(host.port)))
data = binascii.a2b_hex("3a000000a741000000000000d4070000"
"0000000061646d696e2e24636d640000"
"000000ffffffff130000001069736d61"
"73746572000100000000")
sock.send(data)
result = sock.recv(1024)
if b"ismaster" in result:
data = binascii.a2b_hex("480000000200000000000000d40700"
"000000000061646d696e2e24636d64"
"000000000001000000210000000267"
"65744c6f6700100000007374617274"
"75705761726e696e67730000")
sock.send(data)
result = sock.recv(1024)
if b"totalLinesWritten" in result:
self.bugaddr = "%s:%s@%s:%s"%(user,pwd,host.host,host.port)
self.bugreq = "username:%s,password:%s" % (user,pwd)
self.bugres = str(result)
return True
except Exception as e:
print(e)
finally:
sock.close()
| python |
"""
Example of how to make a MuJoCo environment using the Gym library.
"""
from pathlib import Path
from gym.envs.mujoco.mujoco_env import MujocoEnv
from gym.utils import EzPickle
class SpiderEnv(MujocoEnv, EzPickle):
"""
Spider environment for RL. The task is for the spider to move to the target button.
The agent will get a sparse reward of 1.0 for stepping on the button.
"""
def __init__(self, action_repeat=1):
"""
Constructor for :class:`SpiderEnv`.
:param action_repeat: Number of times action should be repeated in MuJoCo
between each RL time step
"""
EzPickle.__init__(self)
self._has_button_been_pressed_before = False
MujocoEnv.__init__(
self,
str(Path("../../mujoco/spider.xml").resolve()),
frame_skip=action_repeat,
)
def reset_model(self):
"""
Reset the spider's degrees of freedom:
- qpos (joint positions); and
- qvel (joint velocities)
"""
self.set_state(self.init_qpos, self.init_qvel)
self._has_button_been_pressed_before = False
return self.state_vector()
def step(self, _action):
"""
Accepts an :param:`_action`, advances the environment by a single RL time step,
and returns a tuple (observation, reward, done, info).
        :param _action: An action provided by the RL agent
:return: A tuple containing an observation, a reward, whether the episode has
ended, and auxiliary information
"""
self.do_simulation(_action, self.frame_skip)
_observation = self.state_vector()
_reward = self._reward()
_done = self._has_button_been_pressed_before or self._is_button_pressed()
if not self._has_button_been_pressed_before and _done:
self._has_button_been_pressed_before = True
return _observation, _reward, _done, {}
def _is_button_pressed(self):
"""
        Returns whether the button is currently being pressed.
:return: True if the button is currently pressed, False otherwise
"""
return self.data.sensordata[0] > 0
def _reward(self):
"""
Returns a sparse reward from the environment.
i.e if the button is being pressed, return 1.0 otherwise return 0.0.
:return: A reward from the environment
"""
return float(self._is_button_pressed())
# Example of how the environment could be used
if __name__ == "__main__":
env = SpiderEnv(action_repeat=20)
for episode in range(3):
observation = env.reset()
for t in range(1000):
# Image observation
# See `gym.envs.mujoco.mujoco_env.MujocoEnv` for more info about params
pixels = env.render()
print("Observation: ", observation)
# Figure out an action...
action = env.action_space.sample()
print("Action: ", action)
observation, reward, done, info = env.step(action)
if done:
print("Episode {} finished after {} timesteps".format(episode, t + 1))
break
env.close()
| python |
# coding: utf-8
"""
Jamf Pro API
## Overview This is a sample Jamf Pro server which allows for usage without any authentication. The Jamf Pro environment which supports the Try it Out functionality does not run the current beta version of Jamf Pro, thus any newly added endpoints will result in an error and should be used soley for documentation purposes. # noqa: E501
The version of the OpenAPI document: 10.25.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import jamf
from jamf.models.computer_general import ComputerGeneral # noqa: E501
from jamf.rest import ApiException
class TestComputerGeneral(unittest.TestCase):
"""ComputerGeneral unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ComputerGeneral
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = jamf.models.computer_general.ComputerGeneral() # noqa: E501
if include_optional :
return ComputerGeneral(
name = 'Boalime',
last_ip_address = '247.185.82.186',
last_reported_ip = '247.185.82.186',
jamf_binary_version = '9.27',
platform = 'Mac',
barcode1 = '5 12345 678900',
barcode2 = '5 12345 678900',
asset_tag = '304822',
remote_management = jamf.models.computer_remote_management.ComputerRemoteManagement(
managed = True,
management_username = 'rootname',
management_password = 'example password', ),
supervised = True,
mdm_capable = jamf.models.computer_mdm_capability.ComputerMdmCapability(
capable = True,
capable_users = ["admin","rootadmin"], ),
report_date = '2018-10-31T18:04:13Z',
last_contact_time = '2018-10-31T18:04:13Z',
last_cloud_backup_date = '2018-10-31T18:04:13Z',
last_enrolled_date = '2018-10-31T18:04:13Z',
mdm_profile_expiration = '2018-10-31T18:04:13Z',
initial_entry_date = 'Wed Oct 31 00:00:00 GMT 2018',
distribution_point = 'distribution point name',
enrollment_method = jamf.models.enrollment_method.EnrollmentMethod(
id = '1',
object_name = '[email protected]',
object_type = 'User-initiated - no invitation', ),
site = jamf.models.v1_site.V1Site(
id = '1',
name = 'Eau Claire', ),
itunes_store_account_active = True,
enrolled_via_automated_device_enrollment = True,
user_approved_mdm = True,
extension_attributes = [
jamf.models.computer_extension_attribute.ComputerExtensionAttribute(
definition_id = '23',
name = 'Some Attribute',
description = 'Some Attribute defines how much Foo impacts Bar.',
enabled = True,
multi_value = True,
values = ["foo","bar"],
data_type = 'STRING',
options = ["foo","bar"],
input_type = 'TEXT', )
]
)
else :
return ComputerGeneral(
)
def testComputerGeneral(self):
"""Test ComputerGeneral"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| python |
from mock.mock import patch
import os
import pytest
import ca_test_common
import ceph_volume_simple_activate
fake_cluster = 'ceph'
fake_container_binary = 'podman'
fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
fake_id = '42'
fake_uuid = '0c4a7eca-0c2a-4c12-beff-08a80f064c52'
fake_path = '/etc/ceph/osd/{}-{}.json'.format(fake_id, fake_uuid)
class TestCephVolumeSimpleActivateModule(object):
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
def test_with_check_mode(self, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid,
'_ansible_check_mode': True
})
m_exit_json.side_effect = ca_test_common.exit_json
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert not result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
assert result['rc'] == 0
assert not result['stdout']
assert not result['stderr']
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_with_failure(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = 'error'
rc = 2
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
assert result['rc'] == rc
assert result['stderr'] == stderr
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_all_osds(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_all': True
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--all']
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch.object(os.path, 'exists', return_value=True)
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_path_exists(self, m_run_command, m_exit_json, m_os_path):
ca_test_common.set_module_args({
'path': fake_path
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--file', fake_path]
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch.object(os.path, 'exists', return_value=False)
@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
def test_activate_path_not_exists(self, m_fail_json, m_os_path):
ca_test_common.set_module_args({
'path': fake_path
})
m_fail_json.side_effect = ca_test_common.fail_json
with pytest.raises(ca_test_common.AnsibleFailJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['msg'] == '{} does not exist'.format(fake_path)
assert result['rc'] == 1
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_without_systemd(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid,
'systemd': False
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid, '--no-systemd']
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
@patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_with_container(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid,
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == [fake_container_binary,
'run', '--rm', '--privileged',
'--ipc=host', '--net=host',
'-v', '/etc/ceph:/etc/ceph:z',
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'-v', '/run/lvm/:/run/lvm/',
'-v', '/run/lock/lvm/:/run/lock/lvm/',
'--entrypoint=ceph-volume', fake_container_image,
'--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
| python |
import glob
import matplotlib.pyplot as plt
import pickle
import numpy as np
import os
import sys
from argparse import ArgumentParser
from utils import get_params_dict
def parseArgs():
"""Parse command line arguments
Returns
-------
a : argparse.ArgumentParser
"""
parser = ArgumentParser(description='Post process the ROC and PRC data to generate the corresponding plots.')
parser.add_argument('-v', '--verbose',dest='verbose', action='store_true',
default=False, help="verbose output [default is quiet running]")
parser.add_argument('-o','--outDir',dest='out_dir',type=str,
action='store',help="output directory. Default: results/ directory (will be created if doesn't exists).", default='results')
parser.add_argument('-t','--type', dest='type',type=str,
action='store',help="Plot type: either ROC or PRC. Default: ROC", default='ROC')
parser.add_argument('--suffix', dest='suffix',type=str,
action='store',help="A unique suffix to add to plot name. Default '' (empty string)", default='')
parser.add_argument('--curve20',dest='useCurve20', action='store_true',
default=False, help="Plot ROC/PRC cuve at maxed at 0.2 on X-axis (zoom-in version). Default: False")
parser.add_argument('infofile',type=str,
help='The text file containing names and locations of each experiment for which the ROC/PRC curve will be generated.')
args = parser.parse_args()
return args
def roc_prc_curve(arg_space, exp_dict):
suffix = '_'+arg_space.suffix if len(arg_space.suffix) > 0 else arg_space.suffix
curve20 = '_curve20' if arg_space.useCurve20 else ''
#some colors to be used for individual curves.
colors = ['darkorange', 'saddlebrown', 'crimson', 'rebeccapurple', 'limegreen', 'teal', 'dimgray']
out_dir = arg_space.out_dir.strip('/')+'/'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
pckl_text = ''
xval,yval = '',''
areaType = ''
if arg_space.type == 'ROC':
areaType = 'AUC'
pckl_text = 'roc'
xval,yval = 'fpr','tpr'
plt.plot([0,1],[0,1],'k--')
elif arg_space.type == 'PRC':
areaType = 'AUPRC'
pckl_text = 'prc'
xval,yval = 'recall','precision'
plt.plot([0,1],[0.5,0.5],'k--')
else:
print('invalid argument! --type can only have one of the following values: ROC or PRC')
return
count = 0
for key in exp_dict:
if arg_space.verbose:
            print('Running for: %s' % key)
label = key
with open(exp_dict[key]+'/modelRes_%s.pckl'%pckl_text, 'rb') as f:
pckl = pickle.load(f)
stats = np.loadtxt(exp_dict[key]+'/modelRes_results.txt',delimiter='\t',skiprows=1)
Xval = pckl[xval]
Yval = pckl[yval]
if arg_space.type == 'ROC':
test_stat = round(stats[-2],2)
else:
test_stat = round(stats[-1],2)
clr = colors[count]
plt.plot(Xval, Yval, lw=1, label='%s (%s = %.2f)'%(label,areaType,test_stat), color=clr)
count += 1
plt.grid(which='major',axis='both',linestyle='--', linewidth=1)
if arg_space.useCurve20:
plt.xlim(0, 0.2)
if arg_space.type == 'ROC':
plt.ylim(0, 0.6)
plt.xlabel('False positive rate',fontsize=10.5)
plt.ylabel('True positive rate',fontsize=10.5)
plt.legend(loc=4, fontsize=10.5)
else:
plt.ylim(0.5, 1)
plt.xlabel('Recall',fontsize=10.5)
plt.ylabel('Precision',fontsize=10.5)
plt.legend(loc=1, fontsize=10.5)
#plt.title('Precision-Recall curves')
else:
plt.xlim(0, 1)
plt.ylim(0, 1)
if arg_space.type == 'ROC':
plt.xlabel('False positive rate',fontsize=10.5)
plt.ylabel('True positive rate',fontsize=10.5)
plt.legend(loc=4, fontsize=10.5)
else:
plt.xlabel('Recall',fontsize=10.5)
plt.ylabel('Precision',fontsize=10.5)
plt.legend(loc=3, fontsize=10.5)
#plt.title('Precision-Recall curves')
plt.savefig(out_dir+'%s_curves_selected%s%s.pdf'%(pckl_text.upper(),curve20,suffix))
plt.savefig(out_dir+'%s_curves_selected%s%s.png'%(pckl_text.upper(),curve20,suffix))
plt.clf()
def main():
arg_space = parseArgs()
#create params dictionary
params_dict = get_params_dict(arg_space.infofile)
#print(params_dict)
roc_prc_curve(arg_space, params_dict)
if __name__ == "__main__":
main()
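# Hedged usage sketch (script and file names are hypothetical): each line of
# the info file presumably names an experiment and the directory holding its
# modelRes_{roc,prc}.pckl and modelRes_results.txt, e.g.
#   python plot_curves.py -t PRC -o results --suffix v1 experiments.txt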
| python |
from django.shortcuts import render_to_response, render
from django.contrib.auth.decorators import login_required
from grid_core.managers import GridManager
@login_required
def account_deshbord(request):
allfriends = GridManager.get_friends_user(request.user)
allgroups = GridManager.get_group_user(request.user)
return render(
request, "grid_my/dashbord-my.html",
{'friends': allfriends, 'groups': allgroups}
)
| python |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pydbgen/pbclass/data_define.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pydbgen/pbclass/data_define.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n!pydbgen/pbclass/data_define.proto\x1a google/protobuf/descriptor.proto:0\n\x07is_date\x12\x1d.google.protobuf.FieldOptions\x18\xd7\x86\x03 \x01(\x08:4\n\x0bis_datetime\x12\x1d.google.protobuf.FieldOptions\x18\xd8\x86\x03 \x01(\x08\x62\x06proto3')
,
dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
IS_DATE_FIELD_NUMBER = 50007
is_date = _descriptor.FieldDescriptor(
name='is_date', full_name='is_date', index=0,
number=50007, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR)
IS_DATETIME_FIELD_NUMBER = 50008
is_datetime = _descriptor.FieldDescriptor(
name='is_datetime', full_name='is_datetime', index=1,
number=50008, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR)
DESCRIPTOR.extensions_by_name['is_date'] = is_date
DESCRIPTOR.extensions_by_name['is_datetime'] = is_datetime
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(is_date)
google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(is_datetime)
# @@protoc_insertion_point(module_scope)
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import sys
import logging
import getpass
from optparse import OptionParser
import sleekxmpp
# Python versions before 3.0 do not use UTF-8 encoding
# by default. To ensure that Unicode is handled properly
# throughout SleekXMPP, we will set the default encoding
# ourselves to UTF-8.
if sys.version_info < (3, 0):
from sleekxmpp.util.misc_ops import setdefaultencoding
setdefaultencoding('utf8')
else:
raw_input = input
class IBBReceiver(sleekxmpp.ClientXMPP):
"""
A basic example of creating and using an in-band bytestream.
"""
def __init__(self, jid, password):
sleekxmpp.ClientXMPP.__init__(self, jid, password)
self.register_plugin('xep_0030') # Service Discovery
self.register_plugin('xep_0047', {
'auto_accept': True
}) # In-band Bytestreams
# The session_start event will be triggered when
# the bot establishes its connection with the server
# and the XML streams are ready for use. We want to
        # listen for this event so that we can initialize
# our roster.
self.add_event_handler("session_start", self.start)
self.add_event_handler("ibb_stream_start", self.stream_opened, threaded=True)
self.add_event_handler("ibb_stream_data", self.stream_data)
def start(self, event):
"""
Process the session_start event.
Typical actions for the session_start event are
requesting the roster and broadcasting an initial
presence stanza.
Arguments:
event -- An empty dictionary. The session_start
event does not provide any additional
data.
"""
self.send_presence()
self.get_roster()
def accept_stream(self, iq):
"""
Check that it is ok to accept a stream request.
Controlling stream acceptance can be done via either:
- setting 'auto_accept' to False in the plugin
configuration. The default is True.
- setting 'accept_stream' to a function which accepts
an Iq stanza as its argument, like this one.
The accept_stream function will be used if it exists, and the
auto_accept value will be used otherwise.
"""
return True
def stream_opened(self, stream):
print('Stream opened: %s from %s' % (stream.sid, stream.peer_jid))
# You could run a loop reading from the stream using stream.recv(),
# or use the ibb_stream_data event.
def stream_data(self, event):
print(event['data'])
if __name__ == '__main__':
# Setup the command line arguments.
optp = OptionParser()
# Output verbosity options.
optp.add_option('-q', '--quiet', help='set logging to ERROR',
action='store_const', dest='loglevel',
const=logging.ERROR, default=logging.INFO)
optp.add_option('-d', '--debug', help='set logging to DEBUG',
action='store_const', dest='loglevel',
const=logging.DEBUG, default=logging.INFO)
optp.add_option('-v', '--verbose', help='set logging to COMM',
action='store_const', dest='loglevel',
const=5, default=logging.INFO)
# JID and password options.
optp.add_option("-j", "--jid", dest="jid",
help="JID to use")
optp.add_option("-p", "--password", dest="password",
help="password to use")
opts, args = optp.parse_args()
# Setup logging.
logging.basicConfig(level=opts.loglevel,
format='%(levelname)-8s %(message)s')
if opts.jid is None:
opts.jid = raw_input("Username: ")
if opts.password is None:
opts.password = getpass.getpass("Password: ")
xmpp = IBBReceiver(opts.jid, opts.password)
# If you are working with an OpenFire server, you may need
# to adjust the SSL version used:
# xmpp.ssl_version = ssl.PROTOCOL_SSLv3
# If you want to verify the SSL certificates offered by a server:
# xmpp.ca_certs = "path/to/ca/cert"
# Connect to the XMPP server and start processing XMPP stanzas.
if xmpp.connect():
# If you do not have the dnspython library installed, you will need
# to manually specify the name of the server if it does not match
# the one in the JID. For example, to use Google Talk you would
# need to use:
#
# if xmpp.connect(('talk.google.com', 5222)):
# ...
xmpp.process(block=True)
print("Done")
else:
print("Unable to connect.")
| python |
import datetime as dt
from datetime import datetime
from datetime import timedelta
from .error import WinnowError
valid_rel_date_values = (
"last_full_week",
"last_two_full_weeks",
"last_7_days",
"last_14_days",
"last_30_days",
"last_45_days",
"last_60_days",
"next_7_days",
"next_14_days",
"next_30_days",
"next_45_days",
"next_60_days",
'next_week',
"current_week",
"current_month",
"current_and_next_month",
"current_year",
"last_month",
"next_month",
"next_year",
"past",
"past_and_today",
"future",
"future_and_today",
"yesterday",
"today",
"tomorrow",
"past_and_future",
"two_weeks_past_end_of_month",
)
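# The four helpers below are not in the original file, which calls them without
# defining or importing them; this is a minimal sketch under the assumption
# that "last day" means the first day of the following period minus one day,
# and that base_month marks where a (possibly fiscal) year starts.
def first_day_of_month(d):
    return datetime(year=d.year, month=d.month, day=1)

def last_day_of_month(d):
    if d.month == 12:
        return datetime(year=d.year + 1, month=1, day=1) - timedelta(days=1)
    return datetime(year=d.year, month=d.month + 1, day=1) - timedelta(days=1)

def first_day_of_year(d, base_month=1):
    year = d.year if d.month >= base_month else d.year - 1
    return datetime(year=year, month=base_month, day=1)

def last_day_of_year(d, base_month=1):
    start = first_day_of_year(d, base_month)
    return datetime(year=start.year + 1, month=start.month, day=1) - timedelta(days=1)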
def interpret_date_range(drange):
drange = drange.lower().replace(' ', '_')
    today = datetime.now()
    base_month = 1  # assumed start month of the year for the *_year ranges below
a_few_seconds = timedelta(seconds=3)
one_day = timedelta(days=1)
start_of_day = dt.time()
beginning_today = datetime.combine(today.date(), start_of_day)
end_today = beginning_today + one_day
weekstart = datetime.combine(today.date(), start_of_day) - timedelta(days=(today.isoweekday() % 7))
seven_days = timedelta(days=7)
fourteen_days = timedelta(days=14)
thirty_days = timedelta(days=30)
fortyfive_days = timedelta(days=45)
if drange == 'last_full_week':
return weekstart - seven_days, weekstart
elif drange == 'last_two_full_weeks':
return weekstart - fourteen_days, weekstart
elif drange == 'last_7_days':
return today - seven_days, today + a_few_seconds
elif drange == 'last_14_days':
return today - fourteen_days, today + a_few_seconds
elif drange == 'last_30_days':
return today - thirty_days, today + a_few_seconds
elif drange == 'last_45_days':
return today - fortyfive_days, today + a_few_seconds
elif drange == 'last_60_days':
return today - (2 * thirty_days), today + a_few_seconds
elif drange == 'next_7_days':
return today, today + seven_days
elif drange == 'next_14_days':
return today, today + fourteen_days
elif drange == 'next_30_days':
return today, today + thirty_days
elif drange == 'next_45_days':
return today, today + fortyfive_days
elif drange == 'next_60_days':
return today, today + (2 * thirty_days)
elif drange == 'next_week':
return weekstart + seven_days, weekstart + seven_days + seven_days
elif drange == 'current_week':
return weekstart, weekstart + seven_days
elif drange == 'current_month':
return first_day_of_month(today), last_day_of_month(today)
elif drange == 'current_and_next_month':
start_of_current = first_day_of_month(today)
return start_of_current, last_day_of_month(start_of_current + fortyfive_days)
elif drange == 'current_and_next_year':
next_year = last_day_of_year(today, base_month) + timedelta(days=2)
return first_day_of_year(today, base_month), last_day_of_year(next_year, base_month)
elif drange == 'two_weeks_past_end_of_month':
return first_day_of_month(today), last_day_of_month(today) + fourteen_days
elif drange == 'two_weeks_past_end_of_year':
return first_day_of_year(today, base_month), last_day_of_year(today, base_month) + fourteen_days
elif drange == 'current_year':
return (datetime(year=today.year, month=1, day=1),
datetime(year=today.year+1, month=1, day=1) - dt.datetime.resolution)
elif drange == 'next_year':
next_year = last_day_of_year(today, base_month=1)
return first_day_of_year(next_year + seven_days, base_month=1), last_day_of_year(next_year + seven_days, base_month=1)
elif drange == 'last_month':
last_month = first_day_of_month(today) - timedelta(days=2)
return first_day_of_month(last_month), last_day_of_month(last_month)
elif drange == 'next_month':
next_month = last_day_of_month(today) + timedelta(days=2)
return first_day_of_month(next_month), last_day_of_month(next_month)
elif drange == 'past':
return datetime.fromtimestamp(0), beginning_today - timedelta(microseconds=1)
elif drange == 'past_and_today':
return datetime.fromtimestamp(0), today
elif drange == 'future':
return today, datetime(year=today.year+1000, month=1, day=1)
elif drange == 'future_and_today':
return beginning_today, datetime(year=today.year+1000, month=1, day=1)
elif drange == 'past_and_future':
return datetime.fromtimestamp(0), datetime(year=today.year+1000, month=1, day=1)
elif drange == 'yesterday':
return beginning_today - one_day, beginning_today
elif drange == 'today':
return beginning_today, end_today
elif drange == 'tomorrow':
return end_today, end_today + one_day
else:
raise WinnowError("unknown date description '{}'".format(drange))
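# Hedged usage sketch: returns a (start, end) pair of datetime objects; the
# lookup lower-cases and underscores its argument, so "Last 7 Days" and
# "last_7_days" are treated the same.
#   start, end = interpret_date_range("last 7 days")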
| python |
"""
Create a function that prints a rectangle in the terminal.
number = int(input("Enter an integer: ")) # 5
for row in range(number):
if row == 0 or row == number - 1:
print("*" * number)
else:
print("*%s*" % (" " * (number - 2)))
"""
def print_rectangle(number):
for row in range(number):
if row == 0 or row == number - 1:
print("*" * number)
else:
print("*%s*" % (" " * (number - 2)))
print_rectangle(8)
| python |
import os.path
# manage descriptive name here...
def input_file_to_output_name(filename):
get_base_file = os.path.basename(filename)
base_filename = get_base_file.split('.')[0]
# base_filename = '/pipeline_data/' + base_filename
return base_filename | python |
# Import Modules
from module.Mask_RCNN.mrcnn import config as maskconfig
from module.Mask_RCNN.mrcnn import model as maskmodel
from module.Mask_RCNN.mrcnn import visualize
import tensorflow as tf
import numpy as np
import warnings
import json
import cv2
import os
# Ignore warnings
old_v = tf.compat.v1.logging.get_verbosity()
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
warnings.filterwarnings(action='ignore')
# Initialize Directories
MODEL_DIR = "../../../data/weight/mask_rcnn_fashion_0006.h5"
LABEL_DIR = "../../../data/image/mask_rcnn/label_descriptions.json"
MASK_DIR = "../../../module/Mask_RCNN"
IMG_DIR = "test1.jpg"
# Initialize NUM_CATS, IMAGE_SIZE
NUM_CATS = 46
IMAGE_SIZE = 512
# Load Label Descriptions to label_descriptions
with open(LABEL_DIR) as f:
label_descriptions = json.load(f)
# From label_descriptions['categories'] to label_names
label_names = [x['name'] for x in label_descriptions['categories']]
# Setup Configuration
class InferenceConfig(maskconfig.Config):
NAME = "fashion"
NUM_CLASSES = NUM_CATS + 1 # +1 for the background class
BACKBONE = 'resnet101'
IMAGE_MIN_DIM = IMAGE_SIZE
IMAGE_MAX_DIM = IMAGE_SIZE
IMAGE_RESIZE_MODE = 'none'
RPN_ANCHOR_SCALES = (16, 32, 64, 128, 256)
DETECTION_MIN_CONFIDENCE = 0.70
GPU_COUNT = 1
IMAGES_PER_GPU = 1
# Execute Inference Configuration
inference_config = InferenceConfig()
# Load Weight File
model = maskmodel.MaskRCNN(mode='inference', config=inference_config, model_dir=MASK_DIR)
model.load_weights(MODEL_DIR, by_name=True)
# Resize Image from image_path
def resize_image(image_path):
temp = cv2.imread(image_path)
temp = cv2.cvtColor(temp, cv2.COLOR_BGR2RGB)
temp = cv2.resize(temp, (IMAGE_SIZE, IMAGE_SIZE), interpolation=cv2.INTER_AREA)
return temp
# Since the submission system does not permit overlapped masks, we have to fix them
def refine_masks(masks, rois):
areas = np.sum(masks.reshape(-1, masks.shape[-1]), axis=0)
mask_index = np.argsort(areas)
union_mask = np.zeros(masks.shape[:-1], dtype=bool)
for m in mask_index:
masks[:, :, m] = np.logical_and(masks[:, :, m], np.logical_not(union_mask))
union_mask = np.logical_or(masks[:, :, m], union_mask)
for m in range(masks.shape[-1]):
mask_pos = np.where(masks[:, :, m] == True)
if np.any(mask_pos):
y1, x1 = np.min(mask_pos, axis=1)
y2, x2 = np.max(mask_pos, axis=1)
rois[m, :] = [y1, x1, y2, x2]
return masks, rois
# Python code to remove duplicate elements
def remove(duplicate):
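    # e.g. remove([1, 2, 2, 3, 3, 3]) -> ([1, 2, 3], [2, 3, 3])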
final_list = []
duplicate_list = []
for num in duplicate:
if num not in final_list:
final_list.append(num)
else:
duplicate_list.append(num)
return final_list, duplicate_list
# Single Image Masking
img = cv2.imread(IMG_DIR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
result = model.detect([resize_image(IMG_DIR)], verbose=1)
r = result[0]
if r['masks'].size > 0:
masks = np.zeros((img.shape[0], img.shape[1], r['masks'].shape[-1]), dtype=np.uint8)
for m in range(r['masks'].shape[-1]):
masks[:, :, m] = cv2.resize(r['masks'][:, :, m].astype('uint8'),
(img.shape[1], img.shape[0]), interpolation=cv2.INTER_NEAREST)
y_scale = img.shape[0] / IMAGE_SIZE
x_scale = img.shape[1] / IMAGE_SIZE
rois = (r['rois'] * [y_scale, x_scale, y_scale, x_scale]).astype(int)
masks, rois = refine_masks(masks, rois)
else:
masks, rois = r['masks'], r['rois']
visualize.display_instances(img, rois, masks, r['class_ids'],
['bg'] + label_names, r['scores'],
title='camera1', figsize=(12, 12))
visualize.display_top_masks(img, masks, r['class_ids'], label_names, limit=8)
| python |
from django.urls import path
from api import views
app_name = "api"
urlpatterns = [path("signup/", views.SignUp.as_view(), name="signup")]
| python |
import os
from glob import glob
from os.path import join, basename
import numpy as np
from utils.data_utils import default_loader
from . import CDDataset
class OSCDDataset(CDDataset):
__BAND_NAMES = (
'B01', 'B02', 'B03', 'B04', 'B05', 'B06',
'B07', 'B08', 'B8A', 'B09', 'B10', 'B11', 'B12'
)
def __init__(
self,
root, phase='train',
transforms=(None, None, None),
repeats=1,
subset='val',
cache_level=1
):
super().__init__(root, phase, transforms, repeats, subset)
# cache_level=0 for no cache, 1 to cache labels, 2 and higher to cache all.
self.cache_level = int(cache_level)
if self.cache_level > 0:
self._pool = dict()
def _read_file_paths(self):
image_dir = join(self.root, "Onera Satellite Change Detection dataset - Images")
target_dir = join(self.root, "Onera Satellite Change Detection dataset - Train Labels")
txt_file = join(image_dir, "train.txt")
# Read cities
with open(txt_file, 'r') as f:
cities = [city.strip() for city in f.read().strip().split(',')]
if self.subset == 'train':
# For training, use the first 11 pairs
cities = cities[:-3]
else:
# For validation and test, use the remaining 3 pairs
cities = cities[-3:]
# Use resampled images
t1_list = [[join(image_dir, city, "imgs_1_rect", band+'.tif') for band in self.__BAND_NAMES] for city in cities]
t2_list = [[join(image_dir, city, "imgs_2_rect", band+'.tif') for band in self.__BAND_NAMES] for city in cities]
tar_list = [join(target_dir, city, 'cm', city+'-cm.tif') for city in cities]
return t1_list, t2_list, tar_list
def fetch_image(self, image_paths):
key = '-'.join(image_paths[0].split(os.sep)[-3:-1])
if self.cache_level >= 2:
image = self._pool.get(key, None)
if image is not None:
return image
image = np.stack([default_loader(p) for p in image_paths], axis=-1).astype(np.float32)
if self.cache_level >= 2:
self._pool[key] = image
return image
def fetch_target(self, target_path):
key = basename(target_path)
if self.cache_level >= 1:
tar = self._pool.get(key, None)
if tar is not None:
return tar
# In the tif labels, 1 stands for NC and 2 for C,
# thus a -1 offset is added.
        tar = (default_loader(target_path) - 1).astype(bool)
if self.cache_level >= 1:
self._pool[key] = tar
return tar
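
# Illustrative construction (the root path below is an example, not a real dataset location):
#     ds = OSCDDataset("/data/OSCD", phase="train", subset="train", cache_level=1)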
| python |
"""All the url endpoint hooks for facebook"""
import os
from sanic.response import json, text
from sanic import Blueprint
from .base import FacebookResponse
from taggo.parsers import FacebookYamlExecutor
VERIFY_TOKEN = os.environ.get("VF_TOKEN")
fb = Blueprint('facebook', url_prefix="/fb")
@fb.post('/recieve_message')
async def recieve_message(request):
data = request.json
fb_resp = FacebookResponse(page_type=data["object"],
entries=data.get("entry"),
executor=request.app.config["command"])
await fb_resp.send()
return json({
"reply": "success"
})
@fb.get("/recieve_message")
async def ping_pong(request):
if request.raw_args.get("hub.verify_token") == VERIFY_TOKEN:
return text(request.raw_args.get("hub.challenge"))
else:
return text("Error")
@fb.get('/')
async def ping(request):
return text("Hi! Nice to meet you") | python |
from flask import render_template, url_for, request, redirect, session, flash
from home_password.models.user import User
from home_password.models.site import Site
from flask_login import login_user, current_user, logout_user
from flask import Blueprint
main = Blueprint('main', __name__)
@main.route('/')
@main.route('/login', methods=["GET",'POST'])
def login():
if request.method == "POST":
user = User.query.filter_by(username=request.form["username"]).first()
if user is not None and user.valid_login(request.form["password"]):
login_user(user)
if user.is_admin:
return redirect(url_for('admin.home'))
else:
return redirect(url_for('users.home'))
else:
flash("incorrect login","error")
return render_template('users/login.html')
@main.route('/logout')
def logout():
logout_user()
return redirect(url_for('main.login')) | python |
"""*Text handling functions*."""
import json
import subprocess
import sys
from os.path import basename, splitext
from pathlib import Path
from urllib.parse import urlparse
from loguru import logger as log
import iscc_sdk as idk
__all__ = [
"text_meta_extract",
"text_extract",
"text_name_from_uri",
]
TEXT_META_MAP = {
"custom:iscc_name": "name",
"custom:iscc_description": "description",
"custom:iscc_meta": "meta",
"dc:title": "name",
"dc:description": "description",
"dc:creator": "creator",
"dc:rights": "rights",
}
def text_meta_extract(fp):
# type: (str) -> dict
"""
Extract metadata from text document file.
:param str fp: Filepath to text document file.
:return: Metadata mapped to IsccMeta schema
:rtype: dict
"""
args = ["--metadata", "-j", "--encoding=UTF-8", fp]
result = idk.run_tika(args)
meta = json.loads(result.stdout.decode(sys.stdout.encoding, errors="ignore"))
mapped = dict()
done = set()
for tag, mapped_field in TEXT_META_MAP.items():
if mapped_field in done: # pragma nocover
continue
value = meta.get(tag)
if value:
if isinstance(value, list):
value = ", ".join(value)
log.debug(f"Mapping text metadata: {tag} -> {mapped_field} -> {value}")
mapped[mapped_field] = value
done.add(mapped_field)
return mapped
def text_extract(fp):
# type: (str) -> str
"""
Extract plaintext from a text document.
    :param str fp: Filepath to text document file.
:return: Extracted plaintext
:rtype: str
"""
args = ["--text", "--encoding=UTF-8", fp]
result = idk.run_tika(args)
text = result.stdout.decode(encoding="UTF-8").strip()
if not text:
raise idk.IsccExtractionError(f"No text extracted from {basename(fp)}")
return result.stdout.decode(encoding="UTF-8")
def text_name_from_uri(uri):
# type: (str, Path) -> str
"""
Extract "filename" part of an uri without file extension to be used as fallback title for an
asset if no title information can be acquired.
:param str uri: Url or file path
:return: derived name (might be an empty string)
:rtype: str
"""
if isinstance(uri, Path):
result = urlparse(uri.as_uri())
else:
result = urlparse(uri)
base = basename(result.path) if result.path else basename(result.netloc)
name = splitext(base)[0]
name = name.replace("-", " ")
name = name.replace("_", " ")
return name
| python |
import data_processor
import model_lib
if __name__ == "__main__":
train_set = data_processor.read_dataset("preprocessed/training_nopestudio.json")
valid_set = data_processor.read_dataset("preprocessed/validation_nopestudio.json")
combined_set = data_processor.read_dataset("preprocessed/dataset_nopestudio.json")
if train_set is None:
print("정제된 훈련 데이터가 없습니다. 새로 생성합니다.")
train_set = data_processor.process_dataset("TRAIN")
data_processor.write_dataset("training.json", train_set)
if valid_set is None:
print("정제된 검증 데이터가 없습니다. 새로 생성합니다.")
valid_set = data_processor.process_dataset("VALID")
data_processor.write_dataset("validation.json", valid_set)
if combined_set is None:
print("정제한 합본 데이터셋이 존재하지 않습니다. 새로 생성합니다.")
combined_set = data_processor.combine_dataset(
train_set,
valid_set
)
data_processor.write_dataset("dataset.json", combined_set)
combined_X = combined_set["data"]
combined_y = combined_set["target"]
while True:
print("다음 중 원하는 평가 방법을 입력")
print("1: holdout validation")
print("2: k-fold cross validation")
print("유효하지 않은 값일 경우 프로세스 종료")
evaluate_type = input()
if evaluate_type != "1" and evaluate_type != "2":
print("유효하지 않은 값 입력됨. 프로세스 종료")
break
val = input("측정을 원하는 모델을 입력(유효하지 않은 값일 경우 프로세스 종료): ")
model = model_lib.load_model(model=val, random_state=41)
if model is None:
print("유효하지 않은 값 입력됨. 프로세스 종료")
break
# pipe = make_pipeline(
# StandardScaler(),
# model
# )
if evaluate_type == "1":
model.fit(
train_set["data"],
train_set["target"],
)
model_lib.evaluate(
valid_set["data"],
valid_set["target"],
model
)
else:
model_lib.evaluate_kfold(combined_X, combined_y, model)
| python |
import numba as nb
import numpy as np
class Zobrist(object):
MAX_RAND = pow(10, 16)
BLACK_TABLE = np.random.seed(3) or np.random.randint(MAX_RAND, size=(8, 8))
WHITE_TABLE = np.random.seed(7) or np.random.randint(MAX_RAND, size=(8, 8))
@staticmethod
def from_state(state):
return Zobrist.hash(state.board,
Zobrist.BLACK_TABLE,
Zobrist.WHITE_TABLE)
@staticmethod
def update_action(previous, action, player):
return Zobrist.update(previous, action,
Zobrist.BLACK_TABLE,
Zobrist.WHITE_TABLE,
[player])
@staticmethod
def update_flip(previous, flip):
return Zobrist.update(previous, flip,
Zobrist.BLACK_TABLE,
Zobrist.WHITE_TABLE,
[1, -1])
@staticmethod
@nb.jit(nopython=True, nogil=True, cache=True)
def hash(board, black_table, white_table):
result = 0
for row, col in zip(*np.where(board == 1)):
result ^= black_table[row, col]
for row, col in zip(*np.where(board == -1)):
result ^= white_table[row, col]
return result
@staticmethod
@nb.jit(nopython=True, nogil=True, cache=True)
def update(previous, square, black_table, white_table, players):
result = previous
row, col = square
for player in players:
if player == 1:
result ^= black_table[row, col]
elif player == -1:
result ^= white_table[row, col]
return result
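
# Minimal usage sketch appended for illustration (not part of the original module).
# It relies only on the XOR self-inverse property of Zobrist hashing: applying the
# same move update twice must give back the original hash value.
if __name__ == "__main__":
    h0 = 0
    h1 = Zobrist.update_action(h0, (2, 3), 1)   # black plays at (2, 3)
    h2 = Zobrist.update_action(h1, (2, 3), 1)   # undo the same move
    assert h2 == h0 and h1 != h0
    print("incremental Zobrist update round-trips:", hex(h1))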
| python |
#!/usr/bin/env python
"""
CloudFormation Custom::FindImage resource handler.
"""
# pylint: disable=C0103
from datetime import datetime
from logging import DEBUG, getLogger
import re
from typing import Any, Dict, List, Tuple
import boto3
from iso8601 import parse_date
log = getLogger("cfntoolkit.ec2")
log.setLevel(DEBUG)
def find_image(event: Dict[str, Any]) -> Dict[str, Any]:
"""
Custom::FindImage resource
Locates the latest version of an AMI/AKI/ARI with given attributes.
"""
if event["RequestType"] not in ("Create", "Update"):
return {}
rp = dict(event["ResourceProperties"])
filters = {} # type: Dict[str, Any]
try:
owner = rp["Owner"]
except KeyError:
raise ValueError("Owner must be specified")
add_filters(rp, filters)
# Convert the filters dict to a list of {Name: key, Value: values} dicts
ec2_filters = [{"Name": key, "Values": values}
for key, values in filters.items()]
ec2 = boto3.client("ec2")
result = ec2.describe_images(Owners=[owner], Filters=ec2_filters)
images = result.get("Images")
if not images:
raise ValueError("No AMIs found that match the filters applied.")
images = filter_names_and_descriptions(images, rp)
preferred_virtualization_type = rp.get("PreferredVirtualizationType")
preferred_root_device_type = rp.get("PreferredRootDeviceType")
def sort_key(image: Dict[str, Any]) -> Tuple[bool, bool, datetime]:
"""
Prioritize AMI preferences.
"""
date = parse_date(image["CreationDate"])
is_preferred_virtualization_type = (
preferred_virtualization_type is None or
image["VirtualizationType"] == preferred_virtualization_type)
is_preferred_root_device_type = (
preferred_root_device_type is None or
image["RootDeviceType"] == preferred_root_device_type)
return (is_preferred_virtualization_type,
is_preferred_root_device_type,
date)
images.sort(key=sort_key, reverse=True)
image_ids = [image["ImageId"] for image in images]
return {
"ImageId": image_ids[0],
"MatchingImageIds": image_ids,
}
# EC2 instance families that only support paravirtualization.
PV_ONLY_INSTANCE_FAMILIES = {"c1", "m1", "m2", "t1",}
# EC2 instance families that support either paravirtualization or HVM.
PV_HVM_INSTANCE_FAMILIES = {"c3", "hi1", "hs1", "m3",}
# EC2 instance families that have instance storage.
INSTANCE_STORE_FAMILIES = {
"c1", "c3", "cc2", "cg1", "cr1", "d2", "g2", "f1", "hi1", "hs1", "i2",
"i3", "m1", "m2", "m3", "r3", "x1",
}
# Keys for various fields so we catch subtle misspellings
KEY_REQPROP_ARCHITECTURE = "Architecture"
KEY_REQPROP_ENA_SUPPORT = "EnaSupport"
KEY_REQPROP_PLATFORM = "Platform"
KEY_REQPROP_ROOT_DEVICE_TYPE = "RootDeviceType"
KEY_REQPROP_VIRTUALIZATION_TYPE = "VirtualizationType"
KEY_EC2_ARCHITECTURE = "architecture"
KEY_EC2_ENA_SUPPORT = "ena-support"
KEY_EC2_PLATFORM = "platform"
KEY_EC2_ROOT_DEVICE_TYPE = "root-device-type"
KEY_EC2_VIRTUALIZATION_TYPE = "virtualization-type"
HVM = "hvm"
PARAVIRTUAL = "paravirtual"
EBS = "ebs"
# These request properties are embedded in the filter directly (though
# renamed), with the value encapsulated as a list.
DIRECT_FILTERS = {
KEY_REQPROP_ARCHITECTURE: KEY_EC2_ARCHITECTURE,
KEY_REQPROP_ENA_SUPPORT: KEY_EC2_ENA_SUPPORT,
KEY_REQPROP_PLATFORM: KEY_EC2_PLATFORM,
KEY_REQPROP_ROOT_DEVICE_TYPE: KEY_EC2_ROOT_DEVICE_TYPE,
KEY_REQPROP_VIRTUALIZATION_TYPE: KEY_EC2_VIRTUALIZATION_TYPE,
}
def add_filters(
request_properties: Dict[str, Any],
filters: Dict[str, List]) -> None:
"""
    add_filters(request_properties: Dict[str, Any],
                filters: Dict[str, List]) -> None:
Examine request_properties for appropriate values and apply them to the
filters list.
"""
for key in DIRECT_FILTERS:
if key in request_properties:
value = request_properties.pop(key)
filter_key = DIRECT_FILTERS.get(key)
filters[filter_key] = listify(value)
add_instance_type_filter(request_properties, filters)
return
def add_instance_type_filter(
request_properties: Dict[str, Any], filters: Dict[str, List]) -> None:
"""
    add_instance_type_filter(
        request_properties: Dict[str, Any], filters: Dict[str, List]) -> None
Examine request_properties for an instance_type filter
"""
instance_type = request_properties.pop("InstanceType", None)
if instance_type is None:
return
if "." in instance_type:
instance_family = instance_type[:instance_type.find(".")]
else:
instance_family = instance_type
if instance_family in PV_ONLY_INSTANCE_FAMILIES:
# PV-only instance types
log.debug("instance_family=%s filters=%s", instance_family, filters)
if (filters.get(KEY_EC2_VIRTUALIZATION_TYPE, [PARAVIRTUAL]) !=
[PARAVIRTUAL]):
raise ValueError(
"VirtualizationType must be paravirtual for %s instance "
"types" % (instance_type,))
filters[KEY_EC2_VIRTUALIZATION_TYPE] = [PARAVIRTUAL]
# Ignore Switch hitting instance types (c3, etc.); assume all newer
# instance families are HVM-only.
elif instance_family not in PV_HVM_INSTANCE_FAMILIES:
if filters.get(KEY_EC2_VIRTUALIZATION_TYPE, [HVM]) != [HVM]:
raise ValueError(
"VirtualizationType must be hvm for %s instance types" %
(instance_type,))
filters[KEY_EC2_VIRTUALIZATION_TYPE] = [HVM]
if instance_family not in INSTANCE_STORE_FAMILIES:
# EBS-only root volume types.
if filters.get(KEY_EC2_ROOT_DEVICE_TYPE, [EBS]) != [EBS]:
raise ValueError(
"RootDeviceType must be ebs for %s instance types" %
(instance_type,))
filters["root-device-type"] = ["ebs"]
return
def filter_names_and_descriptions(
images: List, request_properties: Dict[str, Any]) -> List:
"""
filter_names_and_descriptions(
images: List, request_properties: Dict[str, Any]) -> List:
Filter image names and descriptions according to the rules given in
request_properties.
"""
for include_exclude in ["Included", "Excluded"]:
for param in ["Description", "Name"]:
key = "%s%ss" % (include_exclude, param)
value = request_properties.get(key)
if not value:
continue
regex = regex_string_list(listify(value))
# maybe_not is a passthrough when including, reverses the logic
# test when excluding.
if include_exclude == "Included":
maybe_not = lambda x: x
else:
maybe_not = lambda x: not x
images = [im for im in images
if maybe_not(regex.search(im[param]))]
if not images:
raise ValueError(
"No AMIs found that passed the %s filter" % key)
return images
def listify(value):
"""
Encapsulate value in a list if it isn't already.
"""
if isinstance(value, list):
return value
return [value]
def regex_string_list(sl: List[str]):
"""
Compile a list of strings into a regular expression.
"""
return re.compile("|".join(["(?:%s)" % el for el in sl]))
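
# Illustrative sketch (not part of the original handler). It exercises only the
# local filter-building helpers, so no AWS API call is made; the property values
# and the "m1.small" instance type are examples chosen for demonstration.
if __name__ == "__main__":
    example_props = {
        KEY_REQPROP_ARCHITECTURE: "x86_64",
        KEY_REQPROP_ROOT_DEVICE_TYPE: "ebs",
        "InstanceType": "m1.small",
    }
    example_filters = {}  # type: Dict[str, List]
    add_filters(example_props, example_filters)
    # m1 is a paravirtual-only family, so the virtualization-type filter is forced.
    assert example_filters[KEY_EC2_VIRTUALIZATION_TYPE] == [PARAVIRTUAL]
    assert example_filters[KEY_EC2_ARCHITECTURE] == ["x86_64"]
    print(example_filters)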
| python |
import rsa
from django.db import models
import base64
class RSAFieldMixin(object):
def loadKeys(self, keys=[]):
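        # Note: the mutable default argument acts as a per-process cache, so the
        # same RSA key pair is reused for every call in one process; keys are
        # regenerated on restart, which makes previously stored values undecryptable.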
if len(keys) == 0:
(pubkey, privkey) = rsa.newkeys(512)
keys.append(pubkey)
keys.append(privkey)
elif len(keys) == 2:
pubkey = keys[0]
privkey = keys[1]
else:
raise Exception("Invaild key array passed")
keys[0] = pubkey
keys[1] = privkey
return keys
def encrypt(self, value):
cryptoText = value.encode('utf8')
crypt = rsa.encrypt(cryptoText, self.loadKeys()[0])
return crypt.hex()
def decrypt(self, value):
value = bytes.fromhex(value)
text = rsa.decrypt(value, self.loadKeys()[1])
return text
def get_internal_type(self):
"""
To treat everything as text
"""
return 'CharField'
def get_prep_value(self, value):
if value:
return self.encrypt(value)
return None
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return value
def from_db_value(self, value, expression, connection):
return self.to_python(value)
def to_python(self, value):
if value is None:
return value
value = self.decrypt(value)
return super(RSAFieldMixin, self).to_python(value.decode('utf8'))
class RSACharField(RSAFieldMixin, models.CharField):
pass
class RSATextField(RSAFieldMixin, models.TextField):
pass
class RSADateTimeField(RSAFieldMixin, models.DateTimeField):
pass
class RSAIntegerField(RSAFieldMixin, models.IntegerField):
pass
class RSADateField(RSAFieldMixin, models.DateField):
pass
class RSAFloatField(RSAFieldMixin, models.FloatField):
pass
class RSAEmailField(RSAFieldMixin, models.EmailField):
pass
class RSABooleanField(RSAFieldMixin, models.BooleanField):
pass
class RSABinaryField(RSAFieldMixin, models.BinaryField):
pass
| python |
import tensorflow as tf # for deep learning
import pathlib # for loading path libs
# data loader class
class DataLoader():
# init method
def __init__(self, path_to_dir):
self.__path_to_dir = pathlib.Path(path_to_dir)
    # process image method
# @tf.function
def process_image(self, image_data):
image_raw = tf.io.read_file(image_data)
image_decoded = tf.image.decode_jpeg(image_raw) # decode a raw image
return (
tf.image.resize(image_decoded, [192, 192]) / 255.0
) # normalize and resize an image
    # retrieve root labels
def retrive_root_labels(self):
all_image_list = self.__path_to_dir.glob("*/*")
# convert image labels to str
self.__all_image_paths = [str(image) for image in all_image_list]
# extract all the labels
root_labels = [
label.name for label in self.__path_to_dir.glob("*/") if label.is_dir()
]
        # encode root labels into a dict
        root_labels = dict((name, index) for index, name in enumerate(root_labels))
        # extract the label of each image
        all_images_labels = [
root_labels[pathlib.Path(image).parent.name] for image in self.__all_image_paths
]
# return all the labels and root labels
return all_images_labels, self.__all_image_paths, root_labels
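
# Hypothetical usage sketch (the directory layout is assumed: one sub-folder per
# class containing JPEG images, and "data/flowers" is only an example path).
if __name__ == "__main__":
    loader = DataLoader("data/flowers")
    labels, paths, root_labels = loader.retrive_root_labels()
    dataset = tf.data.Dataset.from_tensor_slices((paths, labels))
    dataset = dataset.map(lambda path, label: (loader.process_image(path), label))
    print(root_labels)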
| python |
import json
from wtforms import widgets
class CheckboxInput(widgets.CheckboxInput):
def __call__(self, field, **kwargs):
kwargs.update({"class_": "checkbox-field"})
rendered_field = super().__call__(field, **kwargs)
return widgets.HTMLString(
"""
%s<label class="state" for="%s"> </label>
"""
% (rendered_field, field.id)
)
class FileInput(widgets.FileInput):
def __call__(self, field, **kwargs):
kwargs.update(
{"@change": "count = $event.target.files.length", "class": "d-hidden"}
)
rendered_field = super().__call__(field, **kwargs)
return widgets.HTMLString(
"""
<label x-data="{count: 0}" class="file-field input-group">
<div class="info" x-text="count ? count + ' files(s) selected' : 'Choose file(s)'"></div>
%s
<span class="button button-secondary input-group-addon">Browse</span>
</label>
"""
% rendered_field
)
class HorizontalSelect(widgets.Select):
def __init__(self):
self.multiple = True
def __call__(self, field, **kwargs):
kwargs.update(
{"x-ref": "field", "class": "d-hidden", "@change": "ev = $event.timeStamp"}
)
rendered_field = super().__call__(field, **kwargs)
return widgets.HTMLString(
"""
<div class="select-multi-field"
x-data="{ ev: null }"
@set-one="
$refs.field.options[$event.detail.key].selected = $event.detail.selected;
$dispatch('propagate');
"
@set-all="
Object.keys($refs.field.options).forEach(key => $refs.field.options[key].selected = $event.detail);
$dispatch('propagate');
"
@propagate="$refs.field.dispatchEvent(new Event('change'))"
>
%s
<div class="row">
<div class="col-12 col-sm-6 col-md-5 col-lg-4">
<div class="title">
<a href="#" class="pull-right" @click.prevent="$dispatch('set-all', true)">Choose all</a>
Available
</div>
<ul>
<template x-for="key in Object.keys($refs.field.options)" :key="key">
<li x-show="!$refs.field.options[key].selected">
<a href="#"
@click.prevent="$dispatch('set-one', {key, selected: true})"
x-text="$refs.field.options[key].label"
></a>
</li>
</template>
</ul>
</div>
<div class="col-12 col-sm-6 col-md-5 col-lg-4">
<div class="title">
<a href="#" class="pull-right" @click.prevent="$dispatch('set-all', false)">Remove all</a>
Selected
</div>
<ul>
<template x-for="key in Object.keys($refs.field.options)" :key="key">
<li x-show="$refs.field.options[key].selected">
<a href="#"
@click.prevent="$dispatch('set-one', {key, selected: false})"
x-text="$refs.field.options[key].label"
></a>
</li>
</template>
</ul>
</div>
</div>
</div>
"""
% rendered_field
)
class PasswordInput(widgets.PasswordInput):
def __call__(self, field, **kwargs):
kwargs.update({":type": "show ? 'text' : 'password'"})
rendered_field = super().__call__(field, **kwargs)
return widgets.HTMLString(
"""
<div class="password-field icon-input" x-data="{ show: false }">
%s
<span class="fa" :class="{'fa-eye': !show, 'fa-eye-slash': show}" @click="show = !show"></span>
</div>
"""
% rendered_field
)
class RadioInput(widgets.RadioInput):
def __call__(self, field, **kwargs):
kwargs.update({"class_": "radio-field"})
rendered_field = super().__call__(field, **kwargs)
return widgets.HTMLString(
"""
%s<label class="state" for="%s"> </label>
"""
% (rendered_field, field.id)
)
class Select(widgets.Select):
def __call__(self, field, **kwargs):
rendered_field = super().__call__(field, **kwargs)
return widgets.HTMLString(
"""
<div class="select-field icon-input">
%s
<span class="fa fa-caret-down"></span>
</div>
"""
% rendered_field
)
class TagsInput(widgets.TextInput):
def __call__(self, field, **kwargs):
kwargs.update({":value": "JSON.stringify(tags)", "class": "d-hidden"})
rendered_field = super().__call__(field, **kwargs)
return widgets.HTMLString(
"""
<div x-data='{ tags: %s, newTag: "" }'>
%s
<div class="tags-field">
<template x-for="tag in tags" :key="tag">
<span class="tag">
<span x-text="tag"></span>
<a href="#"
@click.prevent="tags = tags.filter(i => i !== tag)">
<i class="fa fa-times"></i>
</a>
</span>
</template>
<input placeholder="add a new tag ..."
x-model="newTag"
@keydown.enter.prevent="
if (newTag.trim() !== ''
&& tags.indexOf(newTag.trim()) == -1
) tags.push(newTag.trim()); newTag = ''"
@keydown.backspace="if (newTag === '') tags.pop()"
>
</div>
</div>
"""
% (json.dumps(field.data), rendered_field)
)
| python |
import heterocl as hcl
import numpy as np
def test_zero_allocate():
def kernel(A):
with hcl.for_(0, 10) as i:
with hcl.for_(i, 10) as j:
A[j] += i
return hcl.compute((0,), lambda x: A[x], "B")
A = hcl.placeholder((10,))
s = hcl.create_schedule(A, kernel)
p = hcl.Platform.aws_f1
p.config(compiler="vitis", mode="debug", backend="vhls")
try:
f = hcl.build(s, p)
except:
print("passed")
| python |
import abc
class LayerBase(object):
"""Base class for most layers; each layer contains information which is
added on top of the regulation, such as definitions, internal citations,
keyterms, etc."""
__metaclass__ = abc.ABCMeta
# @see layer_type
INLINE = 'inline'
PARAGRAPH = 'paragraph'
SEARCH_REPLACE = 'search_replace'
@abc.abstractproperty
def shorthand(self):
"""A short description for this layer. This is used in query strings
and the like to define which layers should be used"""
raise NotImplementedError
@abc.abstractproperty
def data_source(self):
"""Data is pulled from the API; this field indicates the name of the
endpoint to pull data from"""
raise NotImplementedError
@abc.abstractproperty
def layer_type(self):
"""Layer data can be applied in a few ways, attaching itself to a
node, replacing text based on offset, or replacing text based on
searching. Which type is this layer?"""
raise NotImplementedError
class InlineLayer(LayerBase):
"""Represents a layer which replaces text by looking at offsets"""
layer_type = LayerBase.INLINE
@abc.abstractmethod
def replacement_for(self, original, data):
"""Given the original text and the relevant data from a layer, create
a (string) replacement, by, for example, running the data through a
template"""
raise NotImplementedError
def apply_layer(self, text, label_id):
"""Entry point when processing the regulation tree. Given the node's
text and its label_id, yield all replacement text"""
data_with_offsets = ((entry, start, end)
for entry in self.layer.get(label_id, [])
for (start, end) in entry['offsets'])
for data, start, end in data_with_offsets:
start, end = int(start), int(end)
original = text[start:end]
replacement = self.replacement_for(original, data)
yield (original, replacement, (start, end))
class SearchReplaceLayer(LayerBase):
"""Represents a layer which replaces text by searching for and replacing a
specific substring. Also accounts for the string appearing multiple times
(via the 'locations' field)"""
layer_type = LayerBase.SEARCH_REPLACE
_text_field = 'text' # All but key terms follow this convention...
@abc.abstractmethod
def replacements_for(self, text, data):
"""Given the original text and the relevant data from a layer, create
a (string) replacement, by, for example, running the data through a
template. Returns a generator"""
raise NotImplementedError
def apply_layer(self, label_id):
"""Entry point when processing the regulation tree. Given the node's
label_id, attempt to find relevant layer data in self.layer"""
for entry in self.layer.get(label_id, []):
text = entry[self._text_field]
for replacement in self.replacements_for(text, entry):
yield (text, replacement, entry['locations'])
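
# A minimal concrete layer, added here purely as an illustrative sketch: the class
# name, shorthand, and sample layer data are made up and not part of the real
# layer registry. It shows how InlineLayer.apply_layer combines layer data and
# offsets into (original, replacement, span) triples.
class UppercaseLayer(InlineLayer):
    shorthand = 'uppercase'
    data_source = 'uppercase'

    def __init__(self, layer_data):
        self.layer = layer_data

    def replacement_for(self, original, data):
        return original.upper()


if __name__ == "__main__":
    layer = UppercaseLayer({'1001-1': [{'offsets': [[0, 4]]}]})
    print(list(layer.apply_layer("some text", "1001-1")))
    # -> [('some', 'SOME', (0, 4))]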
| python |
import os
import hashlib
from download.url_image_downloader import UrlImageDownloader
def test_download_image_from_url():
url = ('https://upload.wikimedia.org/wikipedia/commons/thumb/9/9f/RacingFlagsJune2007.jpg/575px-'
'RacingFlagsJune2007.jpg')
image_path = 'test.jpg'
# download the image
downloader = UrlImageDownloader(url, image_path)
downloader.download()
md5 = hashlib.md5()
# calculate md5 hash of the downloaded image
with open(image_path, "rb") as file:
for chunk in iter(lambda: file.read(4096), b""):
md5.update(chunk)
assert os.path.isfile(image_path)
assert md5.hexdigest() == '82a8ebf6719a24b52dec3fa6856d4870'
# remove the downloaded image
os.remove(image_path)
| python |
#!/router/bin/python
from trex_general_test import CTRexGeneral_Test
from tests_exceptions import *
from interfaces_e import IFType
from nose.tools import nottest
from misc_methods import print_r
class CTRexNbar_Test(CTRexGeneral_Test):
"""This class defines the NBAR testcase of the T-Rex traffic generator"""
def __init__(self, *args, **kwargs):
super(CTRexNbar_Test, self).__init__(*args, **kwargs)
self.unsupported_modes = ['loopback'] # obviously no NBar in loopback
pass
def setUp(self):
super(CTRexNbar_Test, self).setUp() # launch super test class setUp process
# self.router.kill_nbar_flows()
self.router.clear_cft_counters()
self.router.clear_nbar_stats()
def match_classification (self):
nbar_benchmark = self.get_benchmark_param("nbar_classification")
test_classification = self.router.get_nbar_stats()
print "TEST CLASSIFICATION:"
print test_classification
missmatchFlag = False
missmatchMsg = "NBAR classification contians a missmatch on the following protocols:"
fmt = '\n\t{0:15} | Expected: {1:>3.2f}%, Got: {2:>3.2f}%'
noise_level = 0.045 # percents
for cl_intf in self.router.get_if_manager().get_if_list(if_type = IFType.Client):
client_intf = cl_intf.get_name()
# removing noise classifications
for key, value in test_classification[client_intf]['percentage'].items():
if value <= noise_level:
print 'Removing noise classification: %s' % key
del test_classification[client_intf]['percentage'][key]
if len(test_classification[client_intf]['percentage']) != (len(nbar_benchmark) + 1): # adding 'total' key to nbar_benchmark
raise ClassificationMissmatchError ('The total size of classification result does not match the provided benchmark.')
for protocol, bench in nbar_benchmark.iteritems():
if protocol != 'total':
try:
bench = float(bench)
protocol = protocol.replace('_','-')
protocol_test_res = test_classification[client_intf]['percentage'][protocol]
deviation = 100 * abs(bench/protocol_test_res - 1) # percents
difference = abs(bench - protocol_test_res)
if (deviation > 10 and difference > noise_level): # allowing 10% deviation and 'noise_level'% difference
missmatchFlag = True
missmatchMsg += fmt.format(protocol, bench, protocol_test_res)
except KeyError as e:
missmatchFlag = True
print e
print "Changes missmatchFlag to True. ", "\n\tProtocol {0} isn't part of classification results on interface {intf}".format( protocol, intf = client_intf )
missmatchMsg += "\n\tProtocol {0} isn't part of classification results on interface {intf}".format( protocol, intf = client_intf )
except ZeroDivisionError as e:
print "ZeroDivisionError: %s" % protocol
pass
if missmatchFlag:
self.fail(missmatchMsg)
def test_nbar_simple(self):
        # test initialization
deviation_compare_value = 0.03 # default value of deviation - 3%
self.router.configure_basic_interfaces()
self.router.config_pbr(mode = "config")
self.router.config_nbar_pd()
mult = self.get_benchmark_param('multiplier')
core = self.get_benchmark_param('cores')
ret = self.trex.start_trex(
c = core,
m = mult,
p = True,
nc = True,
d = 100,
f = 'avl/sfr_delay_10_1g.yaml',
l = 1000)
trex_res = self.trex.sample_to_run_finish()
# trex_res is a CTRexResult instance- and contains the summary of the test results
# you may see all the results keys by simply calling here for 'print trex_res.result'
print ("\nLATEST RESULT OBJECT:")
print trex_res
print ("\nLATEST DUMP:")
print trex_res.get_latest_dump()
self.check_general_scenario_results(trex_res, check_latency = False)
# test_norm_cpu = 2*(trex_res.result['total-tx']/(core*trex_res.result['cpu_utilization']))
trex_tx_pckt = trex_res.get_last_value("trex-global.data.m_total_tx_pkts")
cpu_util = trex_res.get_last_value("trex-global.data.m_cpu_util")
cpu_util_hist = trex_res.get_value_list("trex-global.data.m_cpu_util")
print "cpu util is:", cpu_util
print cpu_util_hist
test_norm_cpu = 2 * trex_tx_pckt / (core * cpu_util)
print "test_norm_cpu is:", test_norm_cpu
if self.get_benchmark_param('cpu2core_custom_dev'):
# check this test by custom deviation
deviation_compare_value = self.get_benchmark_param('cpu2core_dev')
print "Comparing test with custom deviation value- {dev_val}%".format( dev_val = int(deviation_compare_value*100) )
# need to be fixed !
#if ( abs((test_norm_cpu/self.get_benchmark_param('cpu_to_core_ratio')) - 1) > deviation_compare_value):
# raise AbnormalResultError('Normalized bandwidth to CPU utilization ratio exceeds benchmark boundaries')
self.match_classification()
assert True
@nottest
def test_rx_check (self):
        # test initialization
self.router.configure_basic_interfaces()
self.router.config_pbr(mode = "config")
self.router.config_nbar_pd()
mult = self.get_benchmark_param('multiplier')
core = self.get_benchmark_param('cores')
sample_rate = self.get_benchmark_param('rx_sample_rate')
ret = self.trex.start_trex(
c = core,
m = mult,
p = True,
nc = True,
rx_check = sample_rate,
d = 100,
f = 'cap2/sfr.yaml',
l = 1000)
trex_res = self.trex.sample_to_run_finish()
# trex_res is a CTRexResult instance- and contains the summary of the test results
# you may see all the results keys by simply calling here for 'print trex_res.result'
print ("\nLATEST RESULT OBJECT:")
print trex_res
print ("\nLATEST DUMP:")
print trex_res.get_latest_dump()
self.check_general_scenario_results(trex_res)
self.check_CPU_benchmark(trex_res, 10)
# if trex_res.result['rx_check_tx']==trex_res.result['rx_check_rx']: # rx_check verification shoud pass
# assert trex_res.result['rx_check_verification'] == "OK"
# else:
# assert trex_res.result['rx_check_verification'] == "FAIL"
# the name intentionally not matches nose default pattern, including the test should be specified explicitly
def NBarLong(self):
self.router.configure_basic_interfaces()
self.router.config_pbr(mode = "config")
self.router.config_nbar_pd()
mult = self.get_benchmark_param('multiplier')
core = self.get_benchmark_param('cores')
ret = self.trex.start_trex(
c = core,
m = mult,
p = True,
nc = True,
d = 18000, # 5 hours
f = 'avl/sfr_delay_10_1g.yaml',
l = 1000)
trex_res = self.trex.sample_to_run_finish()
# trex_res is a CTRexResult instance- and contains the summary of the test results
# you may see all the results keys by simply calling here for 'print trex_res.result'
print ("\nLATEST RESULT OBJECT:")
print trex_res
self.check_general_scenario_results(trex_res, check_latency = False)
def tearDown(self):
CTRexGeneral_Test.tearDown(self)
pass
if __name__ == "__main__":
pass
| python |
from rest_framework import permissions
from rest_framework.reverse import reverse
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# Read permissions are allowed to any request,
# so we'll always allow GET, HEAD or OPTIONS requests.
if request.method in permissions.SAFE_METHODS:
return True
# Write permissions are only allowed to the owner of the snippet.
return obj.owner == request.user
class IsOwnerCheck(permissions.BasePermission):
def has_permission(self, request, view):
"""
map={"view_name":{"path_info","method "}
}
"""
maps = {
'book_list': {'url': '/demo-service/api/v1/book/', 'method': 'GET'},
'book_create': {'url': '/api/v1/book/', 'method': 'POST'}
}
results = False
view_name = view.get_view_name()
print(view_name,"xxxxxxxxxxx")
if view_name in maps.keys() and request.method in permissions.SAFE_METHODS:
mapper = maps.get(view_name)
user_role_url = mapper.get('url',None)
user_role_url_method = 'GET'
# user_role_url = request.user.permission.url
# user_role_url_method = request.user.permission.method.upper()
print(request.method,request.path_info)
if user_role_url == request.path_info and user_role_url_method ==request.method:
return True
else:
return False
else:
return False
def has_object_permission(self, request, view, obj):
""" view表示当前视图, obj为数据对象 """
return True
| python |
from ms_deisotope.peak_dependency_network.intervals import Interval, IntervalTreeNode
from glycan_profiling.task import TaskBase
from .chromatogram import Chromatogram
class ChromatogramForest(TaskBase):
"""An an algorithm for aggregating chromatograms from peaks of close mass
weighted by intensity.
This algorithm assumes that mass accuracy is correlated with intensity, so
the most intense peaks should most accurately reflect their true neutral mass.
The expected input is a list of (scan id, peak) pairs. This list is sorted by
descending peak intensity. For each pair, using binary search, locate the nearest
existing chromatogram in :attr:`chromatograms`. If the nearest chromatogram is within
:attr:`error_tolerance` ppm of the peak's neutral mass, add this peak to that
chromatogram, otherwise create a new chromatogram containing this peak and insert
it into :attr:`chromatograms` while preserving the overall sortedness. This algorithm
is carried out by :meth:`aggregate_unmatched_peaks`
This process may produce chromatograms with large gaps in them, which
may or may not be acceptable. To break gapped chromatograms into separate
entities, the :class:`ChromatogramFilter` type has a method :meth:`split_sparse`.
Attributes
----------
chromatograms : list of Chromatogram
A list of growing Chromatogram objects, ordered by neutral mass
count : int
The number of peaks accumulated
error_tolerance : float
The mass error tolerance between peaks and possible chromatograms (in ppm)
scan_id_to_rt : callable
A callable object to convert scan ids to retention time.
"""
def __init__(self, chromatograms=None, error_tolerance=1e-5, scan_id_to_rt=lambda x: x):
if chromatograms is None:
chromatograms = []
self.chromatograms = sorted(chromatograms, key=lambda x: x.neutral_mass)
self.error_tolerance = error_tolerance
self.scan_id_to_rt = scan_id_to_rt
self.count = 0
def __len__(self):
return len(self.chromatograms)
def __iter__(self):
return iter(self.chromatograms)
def __getitem__(self, i):
if isinstance(i, (int, slice)):
return self.chromatograms[i]
else:
return [self.chromatograms[j] for j in i]
def find_insertion_point(self, peak):
index, matched = binary_search_with_flag(
self.chromatograms, peak.neutral_mass, self.error_tolerance)
return index, matched
def find_minimizing_index(self, peak, indices):
best_index = None
best_error = float('inf')
for index_case in indices:
chroma = self[index_case]
err = abs(chroma.neutral_mass - peak.neutral_mass) / peak.neutral_mass
if err < best_error:
best_index = index_case
best_error = err
return best_index
def handle_peak(self, scan_id, peak):
if len(self) == 0:
index = [0]
matched = False
else:
index, matched = self.find_insertion_point(peak)
if matched:
chroma = self.chromatograms[self.find_minimizing_index(peak, index)]
most_abundant_member = chroma.most_abundant_member
chroma.insert(scan_id, peak, self.scan_id_to_rt(scan_id))
if peak.intensity < most_abundant_member:
chroma.retain_most_abundant_member()
else:
chroma = Chromatogram(None)
chroma.created_at = "forest"
chroma.insert(scan_id, peak, self.scan_id_to_rt(scan_id))
self.insert_chromatogram(chroma, index)
self.count += 1
def insert_chromatogram(self, chromatogram, index):
# TODO: Review this index arithmetic, the output isn't sorted.
        index = index[0]  # index is a list of candidate positions from binary_search_with_flag
if index != 0:
self.chromatograms.insert(index + 1, chromatogram)
else:
if len(self) == 0:
new_index = index
else:
x = self.chromatograms[index]
if x.neutral_mass < chromatogram.neutral_mass:
new_index = index + 1
else:
new_index = index
self.chromatograms.insert(new_index, chromatogram)
def aggregate_unmatched_peaks(self, *args, **kwargs):
import warnings
warnings.warn("Instead of calling aggregate_unmatched_peaks, call aggregate_peaks", stacklevel=2)
self.aggregate_peaks(*args, **kwargs)
def aggregate_peaks(self, scan_id_peaks_list, minimum_mass=300, minimum_intensity=1000.):
unmatched = sorted(scan_id_peaks_list, key=lambda x: x[1].intensity, reverse=True)
for scan_id, peak in unmatched:
if peak.neutral_mass < minimum_mass or peak.intensity < minimum_intensity:
continue
self.handle_peak(scan_id, peak)
class ChromatogramMerger(TaskBase):
def __init__(self, chromatograms=None, error_tolerance=1e-5):
if chromatograms is None:
chromatograms = []
self.chromatograms = sorted(chromatograms, key=lambda x: x.neutral_mass)
self.error_tolerance = error_tolerance
self.count = 0
self.verbose = False
def __len__(self):
return len(self.chromatograms)
def __iter__(self):
return iter(self.chromatograms)
def __getitem__(self, i):
if isinstance(i, (int, slice)):
return self.chromatograms[i]
else:
return [self.chromatograms[j] for j in i]
def find_candidates(self, new_chromatogram):
index, matched = binary_search_with_flag(
self.chromatograms, new_chromatogram.neutral_mass, self.error_tolerance)
return index, matched
def merge_overlaps(self, new_chromatogram, chromatogram_range):
has_merged = False
query_mass = new_chromatogram.neutral_mass
for chroma in chromatogram_range:
cond = (chroma.overlaps_in_time(new_chromatogram) and abs(
(chroma.neutral_mass - query_mass) / query_mass) < self.error_tolerance and
not chroma.common_nodes(new_chromatogram))
if cond:
chroma.merge(new_chromatogram)
has_merged = True
break
return has_merged
def find_insertion_point(self, new_chromatogram):
return binary_search_exact(
self.chromatograms, new_chromatogram.neutral_mass)
def handle_new_chromatogram(self, new_chromatogram):
if len(self) == 0:
index = [0]
matched = False
else:
index, matched = self.find_candidates(new_chromatogram)
if matched:
chroma = self[index]
has_merged = self.merge_overlaps(new_chromatogram, chroma)
if not has_merged:
insertion_point = self.find_insertion_point(new_chromatogram)
self.insert_chromatogram(new_chromatogram, [insertion_point])
else:
self.insert_chromatogram(new_chromatogram, index)
self.count += 1
def insert_chromatogram(self, chromatogram, index):
if index[0] != 0:
self.chromatograms.insert(index[0] + 1, chromatogram)
else:
if len(self) == 0:
new_index = index[0]
else:
x = self.chromatograms[index[0]]
if x.neutral_mass < chromatogram.neutral_mass:
new_index = index[0] + 1
else:
new_index = index[0]
self.chromatograms.insert(new_index, chromatogram)
def aggregate_chromatograms(self, chromatograms):
unmatched = sorted(chromatograms, key=lambda x: x.total_signal, reverse=True)
for chroma in unmatched:
self.handle_new_chromatogram(chroma)
def flatten_tree(tree):
output_queue = []
input_queue = [tree]
while input_queue:
next_node = input_queue.pop()
output_queue.append(next_node)
next_right = next_node.right
if next_right is not None:
input_queue.append(next_right)
next_left = next_node.left
if next_left is not None:
input_queue.append(next_left)
return output_queue[::-1]
def layered_traversal(nodes):
return sorted(nodes, key=lambda x: (x.level, x.center), reverse=True)
class ChromatogramOverlapSmoother(object):
def __init__(self, chromatograms, error_tolerance=1e-5):
self.retention_interval_tree = build_rt_interval_tree(chromatograms)
self.error_tolerance = error_tolerance
self.solution_map = {None: []}
self.chromatograms = self.smooth()
def __iter__(self):
return iter(self.chromatograms)
def __getitem__(self, i):
return self.chromatograms[i]
def __len__(self):
return len(self.chromatograms)
def aggregate_interval(self, tree):
chromatograms = [interval[0] for interval in tree.contained]
chromatograms.extend(self.solution_map[tree.left])
chromatograms.extend(self.solution_map[tree.right])
merger = ChromatogramMerger(error_tolerance=self.error_tolerance)
merger.aggregate_chromatograms(chromatograms)
self.solution_map[tree] = list(merger)
return merger
def smooth(self):
nodes = layered_traversal(flatten_tree(self.retention_interval_tree))
for node in nodes:
self.aggregate_interval(node)
final = self.solution_map[self.retention_interval_tree]
result = ChromatogramMerger()
result.aggregate_chromatograms(final)
return list(result)
def binary_search_with_flag(array, mass, error_tolerance=1e-5):
lo = 0
n = hi = len(array)
while hi != lo:
mid = (hi + lo) // 2
x = array[mid]
err = (x.neutral_mass - mass) / mass
if abs(err) <= error_tolerance:
i = mid - 1
# Begin Sweep forward
while i > 0:
x = array[i]
err = (x.neutral_mass - mass) / mass
if abs(err) <= error_tolerance:
i -= 1
continue
else:
break
low_end = i
i = mid + 1
# Begin Sweep backward
while i < n:
x = array[i]
err = (x.neutral_mass - mass) / mass
if abs(err) <= error_tolerance:
i += 1
continue
else:
break
high_end = i
return list(range(low_end, high_end)), True
elif (hi - lo) == 1:
return [mid], False
elif err > 0:
hi = mid
elif err < 0:
lo = mid
return 0, False
def binary_search_exact(array, mass):
lo = 0
hi = len(array)
while hi != lo:
mid = (hi + lo) // 2
x = array[mid]
err = (x.neutral_mass - mass)
if err == 0:
return mid
elif (hi - lo) == 1:
return mid
elif err > 0:
hi = mid
else:
lo = mid
def smooth_overlaps(chromatogram_list, error_tolerance=1e-5):
chromatogram_list = sorted(chromatogram_list, key=lambda x: x.neutral_mass)
out = []
last = chromatogram_list[0]
i = 1
while i < len(chromatogram_list):
current = chromatogram_list[i]
mass_error = abs((last.neutral_mass - current.neutral_mass) / current.neutral_mass)
if mass_error <= error_tolerance:
if last.overlaps_in_time(current):
last = last.merge(current)
last.created_at = "smooth_overlaps"
else:
out.append(last)
last = current
else:
out.append(last)
last = current
i += 1
out.append(last)
return out
class ChromatogramRetentionTimeInterval(Interval):
def __init__(self, chromatogram):
super(ChromatogramRetentionTimeInterval, self).__init__(
chromatogram.start_time, chromatogram.end_time, [chromatogram])
self.neutral_mass = chromatogram.neutral_mass
self.start_time = self.start
self.end_time = self.end
self.data['neutral_mass'] = self.neutral_mass
def build_rt_interval_tree(chromatogram_list, interval_tree_type=IntervalTreeNode):
intervals = list(map(ChromatogramRetentionTimeInterval, chromatogram_list))
interval_tree = interval_tree_type.build(intervals)
return interval_tree
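
# Illustrative example of the ppm-tolerance binary search, shown as a comment
# because this module is normally imported as part of its package; the Stub type
# below is a stand-in carrying only the neutral_mass attribute the search uses.
#
#     >>> from collections import namedtuple
#     >>> Stub = namedtuple("Stub", ["neutral_mass"])
#     >>> peaks = [Stub(m) for m in (1000.0, 1000.004, 1500.0, 2000.0)]
#     >>> binary_search_with_flag(peaks, 1000.0, error_tolerance=1e-5)
#     ([0, 1], True)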
| python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('documents', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='InformationDocument',
fields=[
('document_ptr', models.OneToOneField(primary_key=True, auto_created=True, to='documents.Document', serialize=False, parent_link=True)),
],
options={
'verbose_name_plural': 'Information documents',
'verbose_name': 'Information document',
'abstract': False,
'permissions': (('view_informationdocument', 'User/Group is allowed to view that document'),),
},
bases=('documents.document',),
),
]
| python |
import re
class Command:
def __init__(self, name, register, jump_addr=None):
self.name = name
self.register = register
self.jump_addr = jump_addr
class Program:
def __init__(self, commands, registers):
self.commands = commands
self.registers = registers
self.instr_ptr = 0
def exec_next_command(self):
cmd = self.commands[self.instr_ptr]
if cmd.name == "hlf":
self.registers[cmd.register] //= 2
self.instr_ptr += 1
elif cmd.name == "tpl":
self.registers[cmd.register] *= 3
self.instr_ptr += 1
elif cmd.name == "inc":
self.registers[cmd.register] += 1
self.instr_ptr += 1
elif cmd.name == "jmp":
self.instr_ptr += cmd.jump_addr
elif cmd.name == "jie":
self.instr_ptr += cmd.jump_addr if self.registers[cmd.register] % 2 == 0 else 1
elif cmd.name == "jio":
self.instr_ptr += cmd.jump_addr if self.registers[cmd.register] == 1 else 1
else:
raise ValueError("Unsupported command: ", cmd.name)
def run(self):
while self.instr_ptr < len(self.commands):
self.exec_next_command()
def solve(commands):
pgm = Program(commands, {"a": 0, "b": 0})
pgm.run()
return pgm.registers["b"]
def parse(file_name):
with open(file_name, "r") as f:
commands = []
for line in f.readlines():
if any([cmd in line for cmd in ["inc", "tpl", "hlf"]]):
_, cmd, r, _ = re.split(r"([a-z]+) ([a|b])", line)
commands.append(Command(cmd, r))
elif "jmp" in line:
_, cmd, jmp_addr, _ = re.split(r"([a-z]+) ([+|-][0-9]+)", line)
commands.append(Command(cmd, None, int(jmp_addr)))
if any([cmd in line for cmd in ["jie", "jio"]]):
_, cmd, r, jmp_addr, _ = re.split(r"([a-z]+) ([a|b]), ([+\-0-9]+)", line)
commands.append(Command(cmd, r, int(jmp_addr)))
return commands
if __name__ == '__main__':
print(solve(parse("data.txt")))
| python |
if __name__ == "__main__":
import argparse
import os
import torch
import torch.nn as nn
import torch.optim as optim
from mnistconvnet import MNISTConvNet
from mnist_data_loader import mnist_data_loader
from sgdol import SGDOL
# Parse input arguments.
parser = argparse.ArgumentParser(description='MNIST CNN SGDOL')
parser.add_argument('--use-cuda', action='store_true', default=False,
help='allow the use of CUDA (default: False)')
parser.add_argument('--seed', type=int, default=0, metavar='S',
help='random seed (default: 0)')
parser.add_argument('--train-epochs', type=int, default=30, metavar='N',
help='number of epochs to train (default: 30)')
parser.add_argument('--train-batchsize', type=int, default=100,
help='batchsize in training (default: 100)')
parser.add_argument('--dataroot', type=str, default='./data',
help='location to save the dataset (default: ./data)')
parser.add_argument('--optim-method', type=str, default='SGDOL',
choices=['SGDOL', 'Adam', 'SGD', 'Adagrad'],
help='the optimizer to be employed (default: SGDOL)')
parser.add_argument('--smoothness', type=float, default=10.0, metavar='M',
help='to be used in SGDOL (default: 10)')
parser.add_argument('--alpha', type=float, default=10.0,
help='to be used in SGDOL (default: 10)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate of the chosen optimizer (default: 0.001)')
args = parser.parse_args()
# Set the random seed for reproducibility.
torch.manual_seed(args.seed)
# Load data.
kwargs = {}
dataset_info = mnist_data_loader(root_dir=args.dataroot,
batch_size=args.train_batchsize,
valid_ratio=0,
**kwargs)
train_loader = dataset_info[0]
test_loader = dataset_info[4]
# Check the availability of GPU.
use_cuda = args.use_cuda and torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
# Initialize the neural network model and move it to GPU if needed.
net = MNISTConvNet()
net.to(device)
# Define the loss function.
criterion = nn.CrossEntropyLoss()
# Select optimizer.
optim_method = args.optim_method
if optim_method == 'SGDOL':
optimizer = SGDOL(net.parameters(),
smoothness=args.smoothness,
alpha=args.alpha)
elif optim_method == 'SGD':
optimizer = optim.SGD(net.parameters(),
lr=args.lr)
elif optim_method == 'Adagrad':
optimizer = optim.Adagrad(net.parameters(),
lr=args.lr)
elif optim_method == 'Adam':
optimizer = optim.Adam(net.parameters(),
lr=args.lr)
else:
raise ValueError("Invalid optimization method: {}".format(optim_method))
# Train the model.
all_train_losses = []
for epoch in range(args.train_epochs):
# Train the model for one epoch.
net.train()
for data in train_loader:
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
num_grads = 1 if args.optim_method != 'SGDOL' else 2
for _ in range(num_grads):
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# Evaluate the trained model over all training samples.
net.eval()
running_loss = 0.0
with torch.no_grad():
for data in train_loader:
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
outputs = net(inputs)
loss = criterion(outputs, labels)
running_loss += loss.item()
avg_train_loss = running_loss / len(train_loader)
all_train_losses.append(avg_train_loss)
print('Epoch %d: Training Loss: %.4f' % (epoch + 1, avg_train_loss))
# Evaluate the test error of the final model.
net.eval()
correct = 0
total = 0
with torch.no_grad():
for data in test_loader:
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
outputs = net(inputs)
_, predicted = torch.max(outputs.data, 1)
correct += (predicted == labels).sum().item()
total += labels.size(0)
test_accu = 1.0 * correct / total
print('Final Test Accuracy: %.4f\n' % (test_accu))
# Write log files.
if optim_method == 'SGDOL':
opt_para = args.smoothness
else:
opt_para = args.lr
if not os.path.exists('logs'):
os.makedirs('logs')
train_loss_fname = ''.join(['logs/',
'{0}'.format(optim_method),
'_training_loss.txt'])
with open(train_loss_fname, 'a') as f:
f.write('{0}, {1}\n'.format(opt_para, all_train_losses))
test_error_fname = ''.join(['logs/',
'{0}'.format(optim_method),
'_test_error.txt'])
with open(test_error_fname, 'a') as f:
f.write('{0}, {1}\n'.format(opt_para, test_accu))
| python |
# -*- coding: utf-8 -*-
# @Time : 2019/9/8 14:18
# @Author : zhoujun
import os
import cv2
import torch
import subprocess
import numpy as np
import pyclipper
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
def de_shrink(poly, r=1.5):
d_i = cv2.contourArea(poly) * r / cv2.arcLength(poly, True)
pco = pyclipper.PyclipperOffset()
pco.AddPath(poly, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
shrinked_poly = np.array(pco.Execute(d_i))
return shrinked_poly
def decode(preds, threshold=0.2, min_area=5):
"""
    Apply a sigmoid to the network output to turn values into confidence scores,
    then threshold them to separate text from background.
    :param preds: network output
    :param threshold: sigmoid threshold
    :param min_area: minimum number of pixels for a region to be kept
    :return: final label map and detected text boxes
"""
if subprocess.call(['make', '-C', BASE_DIR]) != 0: # return value
raise RuntimeError('Cannot compile pse: {}'.format(BASE_DIR))
from .pse import get_points, get_num
shrink_map = preds[0, :, :].detach().cpu().numpy()
score_map = shrink_map.astype(np.float32)
shrink_map = shrink_map > threshold
label_num, label = cv2.connectedComponents(shrink_map.astype(np.uint8), connectivity=4)
bbox_list = []
label_points = get_points(label, score_map, label_num)
for label_value, label_point in label_points.items():
score_i = label_point[0]
label_point = label_point[2:]
points = np.array(label_point, dtype=int).reshape(-1, 2)
if points.shape[0] < min_area:
continue
# if score_i < 0.93:
# continue
rect = cv2.minAreaRect(points)
poly = cv2.boxPoints(rect)
shrinked_poly = de_shrink(poly)
if shrinked_poly.size == 0:
continue
rect = cv2.minAreaRect(shrinked_poly)
shrinked_poly = cv2.boxPoints(rect).astype(int)
if cv2.contourArea(shrinked_poly) < 100:
continue
bbox_list.append([shrinked_poly[1], shrinked_poly[2], shrinked_poly[3], shrinked_poly[0]])
return label, np.array(bbox_list)
def decode_py(preds, threshold=0.2, min_area=5):
shrink_map = preds[0, :, :].detach().cpu().numpy()
# score_map = shrink_map.astype(np.float32)
shrink_map = shrink_map > threshold
label_num, label = cv2.connectedComponents(shrink_map.astype(np.uint8), connectivity=4)
bbox_list = []
for label_idx in range(1, label_num):
points = np.array(np.where(label == label_idx)).transpose((1, 0))[:, ::-1]
if points.shape[0] < min_area:
continue
# score_i = np.mean(score_map[label == label_idx])
# if score_i < 0.93:
# continue
rect = cv2.minAreaRect(points)
poly = cv2.boxPoints(rect).astype(int)
shrinked_poly = de_shrink(poly)
if shrinked_poly.size == 0:
continue
rect = cv2.minAreaRect(shrinked_poly)
shrinked_poly = cv2.boxPoints(rect).astype(int)
if cv2.contourArea(shrinked_poly) < 100:
continue
bbox_list.append([shrinked_poly[1], shrinked_poly[2], shrinked_poly[3], shrinked_poly[0]])
return label, np.array(bbox_list)
| python |
count = 0
print('Before', count)
for thing in [9, 41, 12, 3, 74, 15]:
count += 1
# zork = zork + 1
print(count, thing)
print('After', count)
| python |
# import src.stacking.argus_models
| python |