content (stringlengths 0–894k) | type (stringclasses, 2 values)
---|---
# Generated by Django 2.2.2 on 2019-06-30 13:19
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('favorites', '0003_auto_20190630_1317'),
]
operations = [
migrations.RenameField(
model_name='auditlog',
old_name='favourite_id',
new_name='favourite',
),
migrations.RenameField(
model_name='favorite',
old_name='category_id',
new_name='category',
),
]
|
python
|
# Generated by Django 2.2.7 on 2020-03-14 19:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("postcards", "0004_contact_language"),
]
operations = [
migrations.AlterField(
model_name="card",
name="sent_at",
field=models.DateTimeField(blank=True, null=True),
),
]
|
python
|
from projects.utils.multiprocessing import *
from projects.utils.sql import *
from projects.utils.data_table import *
|
python
|
from Constants import ALL_LEVELS, CAP_LEVELS, MISSION_LEVELS, BOWSER_STAGES, LVL_BOB, SPECIAL_LEVELS, LVL_MAIN_SCR, LVL_CASTLE_GROUNDS, BEHAVIOUR_NAMES
from randoutils import format_binary
import random
import sys
import numpy as np
from Entities.Object3D import Object3D
import logging
#from Parsers.LevelScript import LevelScriptParser
from random import shuffle
WHITELIST_SHUFFLING = [
(None, 0xBC), # Bob-Omb
(0x13003174, None), # Bob-Omb
(0x1300472C, None), # Goomba,
(0x13004770, None), # Goomba Triplet
(0x13001298, None), # Coin Triplet
(0x130001F4, None), # King Bob-Omb
(0x13002BB8, None), # King Whomp
(0x130039D4, None), # Moneybag
(None, 0x68), # Koopa (The Quick, Normal, etc)
#(0x130005B4, None), # Rotating Platform WF
(0x13002AA4, None), # Tree Behaviour
(None, 0x65), # Scuttlebug
(None, 0x19), # Tree (Snow)
(None, 0x17), # Tree (In Courses)
(None, 0x18), # Tree (Courtyard)
(None, 0x1B), # Tree (SSL)
(0x13001548, None), # Heave-Ho
(None, 0x78), # Heart
(0x13004348, None), # Red Coin
(0x13003E8C, None), # Red Coin Star
(0x13002EC0, None), # Mario Spawn
(0x13005468, None), # Skeeter (WDW Bug thing)
(0x13000BC8, None), # Thwomp
(0x13000B8C, None), # Thwomp 2
(0x1300525C, None), # Grindel
(0x13001FBC, None), # Piranha
(0x13005120, None), # Fire-Spitting
(0x13002EF8, None), # Toad
(0x130009A4, None), # Single Coin
(0x13000964, None), # Coins (x3)
(0x13000984, None), # Coins (x10)
(0x130008EC, None), # Coins (Formations)
(0x13005440, 0x58), # Clam in JRB
(0x13004634, None), # Pokey
(0x13004668, 0x55), # Pokeys Head
(0x130030A4, None), # Blue Coin
(None, 0x7C), # Sign
(0x13003EAC, 0xD7),
(None, 0x74), # Coin Type 1
(None, 0x75), # Coin Type 2
(None, 0x74), # Coin Type 3
(None, 0x75), # Multiple Coins
(None, 0xD4), # One-Up
(0x13001F3C, None), # Koopa Shell
(0x130020E8, 0x57), # Lost Penguin
(0x13002E58, None), # Wandering Penguin
(0x13004148, 0xD4), # Homing-One-Up
(0x130031DC, 0xC3), # Bob-Omb Buddy (With Message)
(0x13003228, None), # Bob-Omb Buddy (Opening Canon)
(0x1300478C, 0x66),
#(None, 0xDF), # Chuckya
(0x13000054, None), # Eye-Ball
(0x13001108, None), # Flamethrower
(0x130046DC, 0xDC), # Fly-Guy
(None, 0x89), # Item-Box
(0x13004698, None), # Bat
(0x130046DC, None), # Fly-Guy
(0x13004918, None), # Lakitu
(0x13004954, None), # Evil Lakitu
(0x130049C8, None), # Spiny
(0x13004A00, None), # Mole
(0x13004A58, None), # Mole in Hole
(0x13003700, 0x65), # Ice Bully (Big)
(0x130036C8, 0x64), # Ice Bully (Small)
(0x13001650, 0x00), # Bouncing Box
(0x130027E4, 0x65), # Boo
(0x130027D0, 0x00), # Boo (x3)
(0x13002794, 0x65), # Big Boo
(0x130007F8, 0x7A), # Star
(0x13003E3C, 0x7A), # Star
#(0x13001B70, 0x00), # Checkerboard Elevator (Logic: DON'T TOUCH FOR VANISH CAP LEVEL)
(0x13002F74, 0x00), # Mario Start 1
(0x1300442C, None), # TTC: Pendulum
(0x130054B8, None), # TTC: Pendulum
(0x13004FD4, None), # BBH: Haunted Chair
(0x13005024, None), # BBH: Piano
(0x1300506C, None), # BBH: Bookend
]
BSCRIPT_START = 0x10209C
HEIGHT_OFFSETS = {
(None, 0x89): 200,
(0x130007F8, 0x7A): 200,
(0x13002250, None): 200,
(None, 0x75): 300,
}
CANT_BE_IN_WATER = [
(None, 0x89), # Star
(0x13003700, None), # Ice Bully (Big) - otherwise you win instantly
(0x130031DC, 0xC3), # Bob-Omb Buddy (With Message)
(0x13003228, None) # Bob-Omb Buddy (Opening Canon)
]
WALKABLE_COLLISION_TYPES = [
0x00, # environment default
0x29, # default floor with noise
0x14, # slightly slippery
0x15, # anti slippery
0x0B, # close camera
0x30, # hard floor (always fall damage)
## may be harder
#0x13, # slippery
#0x2A, # slippery with noise
0x0D, # water (stationary)
]
def signed_tetra_volume(a, b, c, d):
return np.sign(np.dot(np.cross(b-a, c-a), d-a)/6.0)
def trace_geometry_intersections(level_geometry, ray, face_type = None):
# algorithm that was used for this:
# http://www.lighthouse3d.com/tutorials/maths/ray-triangle-intersection/
# or maybe this
# https://wiki2.org/en/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm
[q0, q1] = ray
ray_origin = q0
ray_vector = q1 - q0
#print("origin", ray_origin)
#print("dir", ray_vector)
ray_is_vertical = ray_vector[0] == 0.0 and ray_vector[1] == 0.0
faces = level_geometry.get_triangles(face_type) # [[[-1.0, -1.0, 0.0], [1.0, -1.0, 0.0], [0.0, 1.0, 0.0]]]
intersection_count = 0
intersection_positions = []
intersection_faces = []
for face in faces:
#print("next face", face.index)
[p1, p2, p3] = face.vertices
[xmin, xmax, ymin, ymax, zmin, zmax] = face.bounding_box
# precheck bounds
if ray_is_vertical:
      # for vertical rays we can quickly check if the coordinates are at least in the bounding box of the tri
if ray_origin[0] < xmin or ray_origin[0] > xmax or ray_origin[1] < ymin or ray_origin[1] > ymax:
#print('oob precheck')
continue
edge_a = p2 - p1
edge_b = p3 - p1
h = np.cross(ray_vector, edge_b)
a = np.dot(edge_a, h)
    if abs(a) < 1e-10:
#print("parallel")
continue
f = 1.0/a
s = ray_origin - p1
u = f * (np.dot(s, h))
if u < 0.0 or u > 1.0:
#print("u outside 0-1")
continue
q = np.cross(s, edge_a)
v = f * (np.dot(ray_vector, q))
if v < 0.0 or u + v > 1.0:
#print("v < 0 or u + v > 1")
continue
t = f * np.dot(edge_b, q)
if t > 0e-10:
#print("hit")
intersection_count += 1
intersection_positions.append(
ray_origin + ray_vector * t
)
intersection_faces.append(face)
continue
#print("doesnt reach", t)
return (intersection_count, intersection_positions, intersection_faces)
"""
[q0, q1] = ray
triangles = level_geometry.get_triangles() # [[[-1.0, -1.0, 0.0], [1.0, -1.0, 0.0], [0.0, 1.0, 0.0]]]
intersection_count = 0
intersection_positions = []
for triangle in triangles:
[p1, p2, p3] = triangle
signed_volume_a = signed_tetra_volume(q0, p1, p2, p3)
signed_volume_b = signed_tetra_volume(q1, p1, p2, p3)
if signed_volume_a != signed_volume_b:
s3 = signed_tetra_volume(q0,q1,p1,p2)
s4 = signed_tetra_volume(q0,q1,p2,p3)
s5 = signed_tetra_volume(q0,q1,p3,p1)
if s3 == s4 and s4 == s5:
intersection_count += 1
n = np.cross(p2-p1,p3-p1)
t = np.dot(p1-q0,n) / np.dot(q1-q0,n)
intersection_positions.append(
          q0 + t * (q1-q0)
)
return (intersection_count, intersection_positions)
"""
def get_closest_intersection(intersections, position):
closest_dist = 1e20 # big number as "infinity"
closest_index = 0
for index, intersection_point in enumerate(intersections):
diff = position - intersection_point
dist = np.sqrt(np.sum(np.power(diff, 2)))
if dist < closest_dist:
closest_dist = dist
closest_index = index
return closest_dist
class LevelRandomizer:
def __init__(self, rom : 'ROM'):
self.rom = rom
@staticmethod
def can_shuffle(obj : Object3D):
if obj.source == "MARIO_SPAWN":
return True
else:
for (target_bscript_address, target_model_id) in WHITELIST_SHUFFLING:
if (target_model_id is None or target_model_id == obj.model_id) and (target_bscript_address is None or target_bscript_address == obj.behaviour):
return True
return False
def get_height_offset(self, obj : Object3D):
for (target_bscript_address, target_model_id) in HEIGHT_OFFSETS:
if (target_model_id is None or target_model_id == obj.model_id) and (target_bscript_address is None or target_bscript_address == obj.behaviour):
return HEIGHT_OFFSETS[(target_bscript_address, target_model_id)]
return 1 # fallback to ensure it doesn't fail oob check or falls out of level
def can_be_in_water(self, obj : Object3D):
for (target_bscript_address, target_model_id) in CANT_BE_IN_WATER:
if (target_model_id is None or target_model_id == obj.model_id) and (target_bscript_address is None or target_bscript_address == obj.behaviour):
return False
return True
def is_in_water_box(self, water_box, position):
(
water_box_id,
water_box_start_x, water_box_start_z,
water_box_end_x, water_box_end_z,
water_box_y,
water_box_type
) = water_box
if water_box_type != "WATER":
#print("waterbox is not water, all good")
return False
if position[0] < water_box_start_x or position[0] > water_box_end_x:
#print("x is outside waterbox x, all good")
return False
if position[2] < water_box_start_z or position[2] > water_box_end_z:
#print("y is outside waterbox y, all good")
return False
if position[1] > water_box_y:
#print("item is higher than waterbox")
return False
return True
def is_valid_position(self, level_script, object3d, position):
if not self.can_be_in_water(object3d):
#print(object3d, 'cant be in water')
#print("found an object that cannot be in water", len(level_script.water_boxes))
for water_box in level_script.water_boxes:
#print(water_box)
if self.is_in_water_box(water_box, position):
logging.info("invalid position for object, in water box")
#print(position, object3d)
return False
# count floors under the position we want to test
(floors_underneath, floor_positions, floor_faces) = trace_geometry_intersections(
level_script.level_geometry,
[
position + np.array([0.0, 0.0, 1.0]),
position + np.array([0.0, 0.0, -1.0e7])
]
)
# if the amount is even, we're inside a wall or (if it's 0) oob
# if the amount is odd we're ok
is_valid_amount = floors_underneath % 2 == 1
if not is_valid_amount: return False
if floor_faces[0].collision_type not in WALKABLE_COLLISION_TYPES:
#print("invalid floor type", hex(floor_faces[0].collision_type))
return False
# require minimum distance from point from ceilings
(_, ceiling_positions, ceiling_faces) = trace_geometry_intersections(
level_script.level_geometry,
[
position + np.array([0.0, 0.0, 1.0]),
position + np.array([0.0, 0.0, +1.0e7])
]
)
closest_ceiling = get_closest_intersection(ceiling_positions, position)
if closest_ceiling < 10.0: return False
return is_valid_amount
def shuffle_objects(self):
for (level, parsed) in self.rom.levelscripts.items():
if level in SPECIAL_LEVELS:
continue
floor_triangles = parsed.level_geometry.get_triangles('FLOOR')
shufflable_objects = list(filter(LevelRandomizer.can_shuffle, parsed.objects))
other_objects = list(filter(lambda x: not LevelRandomizer.can_shuffle(x), parsed.objects))
for other_object in other_objects:
parsed.level_geometry.add_debug_marker(other_object.position, other_object, color=(100, 100, 255))
while len(shufflable_objects) > 0:
obj = shufflable_objects.pop()
face = random.choice(floor_triangles)
[p1, p2, p3] = face.vertices
r1 = random.random()
r2 = random.random()
        if r1 + r2 > 1:
          # reflect the sample back into the triangle so the point stays inside it
          r1 = 1 - r1
          r2 = 1 - r2
point = p1 + (r1 * (p2 - p1)) + (r2 * (p3 - p1))
# match bscript and model_id
height_offset = self.get_height_offset(obj)
point[2] += height_offset
if not self.is_valid_position(parsed, obj, point):
#print('invalid position')
shufflable_objects.append(obj)
else:
obj.set(self.rom, 'position', tuple([int(p) for p in list(point)]))
parsed.level_geometry.add_debug_marker(point, obj, color=(255, 100, 100))
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pyenv
short_description: Run pyenv command
options:
always_copy:
description:
- the "--always-copy" option of pyenv virtualenv
required: false
type: bool
default: false
bare:
description:
- the "--bare" option of "versions" and "virtualenvs" subcommand
required: false
type: bool
default: true
clear:
description:
- the "--clear" option of pyenv virtualenv
required: false
type: bool
default: false
copies:
description:
- the "--copies" option of pyenv virtualenv
required: false
type: bool
default: false
expanduser:
description:
- whether the environment variable PYENV_ROOT and "pyenv_root" option are filtered by os.path.expanduser
required: false
type: bool
default: true
force:
description:
- the "-f/--force" option of pyenv install
required: false
type: bool
default: false
list:
description:
- -l/--list option of pyenv install command
required: false
type: bool
default: false
no_pip:
description:
- the "--no-pip" option of pyenv virtualenv
required: false
type: bool
default: false
no_setuptools:
description:
- the "--no-setuptools" option of pyenv virtualenv
required: false
type: bool
default: false
no_wheel:
description:
- the "--no-wheel" option of pyenv virtualenv
required: false
type: bool
default: false
pyenv_root:
description:
- PYENV_ROOT
required: false
type: str
default: null
skip_aliases:
description:
- the "-s/--skip-aliases" option of pyenv virtualenvs
required: false
type: bool
default: true
skip_existing:
description:
- the "-s/--skip-existing" option of pyenv install
required: false
type: bool
default: true
subcommand:
description:
- pyenv subcommand
choices: ["install", "uninstall", "versions", "global", "virtualenv", "virtualenvs"]
required: false
default: install
symlinks:
description:
- the "--symlinks" option of pyenv virtualenv
required: false
type: bool
default: false
version:
description:
- A python version name
type: str
required: false
default: null
versions:
description:
- python version names
type: list
required: false
default: null
virtualenv_name:
description:
- A virtualenv name
type: str
required: false
default: null
without_pip:
description:
- the "--without_pip" option of pyenv virtualenv
required: false
type: bool
default: false
requirements:
- pyenv
author: "Suzuki Shunsuke"
'''
EXAMPLES = '''
- name: pyenv install -s 3.6.1
pyenv:
version: 3.6.1
pyenv_root: "~/.pyenv"
- name: pyenv install -f 3.6.1
pyenv:
version: 3.6.1
pyenv_root: "~/.pyenv"
force: yes
- name: pyenv uninstall -f 2.6.9
pyenv:
subcommand: uninstall
version: 2.6.9
pyenv_root: "~/.pyenv"
- name: pyenv global 3.6.1
pyenv:
subcommand: global
versions:
- 3.6.1
pyenv_root: "~/.pyenv"
- name: pyenv global
pyenv:
subcommand: global
pyenv_root: "~/.pyenv"
register: result
- debug:
var: result.versions
- name: pyenv install -l
pyenv:
list: yes
pyenv_root: "~/.pyenv"
register: result
- debug:
var: result.versions
- name: pyenv versions --bare
pyenv:
subcommand: versions
pyenv_root: "~/.pyenv"
register: result
- debug:
var: result.versions
- name: pyenv virtualenvs --skip-aliases --bare
pyenv:
subcommand: virtualenvs
pyenv_root: "~/.pyenv"
register: result
- debug:
var: result.virtualenvs
- name: pyenv virtualenv --force 2.7.13 ansible
pyenv:
subcommand: virtualenv
pyenv_root: "~/.pyenv"
version: 2.7.13
virtualenv_name: ansible
force: yes
'''
RETURNS = '''
virtualenvs:
description: the return value of `pyenv virtualenvs`
returned: success
type: list
sample:
- 3.6.1/envs/neovim
- neovim
versions:
description: the return value of `pyenv install --list` or `pyenv global` or `pyenv versions`
returned: success
type: list
sample:
- 2.7.13
- 3.6.1
'''
import os # noqa E402
from ansible.module_utils.basic import AnsibleModule # noqa E402
def wrap_get_func(func):
def wrap(module, *args, **kwargs):
result, data = func(module, *args, **kwargs)
if result:
module.exit_json(**data)
else:
module.fail_json(**data)
return wrap
def get_install_list(module, cmd_path, **kwargs):
""" pyenv install --list
"""
rc, out, err = module.run_command([cmd_path, "install", "-l"], **kwargs)
if rc:
return (False, dict(msg=err, stdout=out))
else:
# slice: remove header and last newline
versions = [line.strip() for line in out.split("\n")[1:-1]]
return (True, dict(
changed=False, failed=False, stdout=out, stderr=err,
versions=versions))
cmd_install_list = wrap_get_func(get_install_list)
def get_versions(module, cmd_path, bare, **kwargs):
""" pyenv versions [--bare]
"""
cmd = [cmd_path, "versions"]
if bare:
cmd.append("--bare")
rc, out, err = module.run_command(cmd, **kwargs)
if rc:
return (False, dict(msg=err, stdout=out))
else:
# slice: remove last newline
versions = [line.strip() for line in out.split("\n")[:-1]]
return (True, dict(
changed=False, failed=False, stdout=out, stderr=err,
versions=versions))
cmd_versions = wrap_get_func(get_versions)
def cmd_uninstall(module, cmd_path, version, **kwargs):
""" pyenv uninstall --force <version>
"""
result, data = get_versions(module, cmd_path, True, **kwargs)
if not result:
return module.fail_json(**data)
if version not in data["versions"]:
return module.exit_json(
changed=False, failed=False, stdout="", stderr="")
cmd = [cmd_path, "uninstall", "-f", version]
rc, out, err = module.run_command(cmd, **kwargs)
if rc:
module.fail_json(msg=err, stdout=out)
else:
module.exit_json(changed=True, failed=False, stdout=out, stderr=err)
def get_global(module, cmd_path, **kwargs):
""" pyenv global
"""
rc, out, err = module.run_command([cmd_path, "global"], **kwargs)
if rc:
return (False, dict(msg=err, stdout=out))
else:
# slice: remove last newline
versions = [line.strip() for line in out.split("\n")[:-1]]
return (True, dict(
changed=False, failed=False, stdout=out, stderr=err,
versions=versions))
cmd_get_global = wrap_get_func(get_global)
def cmd_set_global(module, cmd_path, versions, **kwargs):
""" pyenv global <version> [<version> ...]
"""
result, data = get_global(module, cmd_path, **kwargs)
if not result:
return module.fail_json(**data)
if set(data["versions"]) == set(versions):
return module.exit_json(
changed=False, failed=False, stdout="", stderr="",
versions=versions)
rc, out, err = module.run_command(
[cmd_path, "global"] + versions, **kwargs)
if rc:
module.fail_json(msg=err, stdout=out)
else:
module.exit_json(
changed=True, failed=False, stdout=out, stderr=err,
versions=versions)
def cmd_install(module, params, cmd_path, **kwargs):
""" pyenv install [--skip-existing] [--force] <version>
"""
    cmd = [cmd_path, "install"]
    # default force to False so "changed" below is defined even when neither
    # skip_existing nor force is set
    force = False
    if params["skip_existing"] is not False:
        cmd.append("--skip-existing")
    elif params["force"] is True:
        force = True
        cmd.append("--force")
cmd.append(params["version"])
rc, out, err = module.run_command(cmd, **kwargs)
if rc:
return module.fail_json(msg=err, stdout=out)
else:
changed = force or out
return module.exit_json(
changed=changed, failed=False, stdout=out, stderr=err)
def get_virtualenvs(module, cmd_path, skip_aliases, bare, **kwargs):
""" pyenv virtualenvs [--skip-aliases] [--bare]
"""
cmd = [cmd_path, "virtualenvs"]
if skip_aliases:
cmd.append("--skip-aliases")
if bare:
cmd.append("--bare")
rc, out, err = module.run_command(cmd, **kwargs)
if rc:
return (False, dict(msg=err, stdout=out))
else:
# slice: remove last newline
virtualenvs = [line.strip() for line in out.split("\n")[:-1]]
return (True, dict(
changed=False, failed=False, stdout=out, stderr=err,
virtualenvs=virtualenvs))
cmd_virtualenvs = wrap_get_func(get_virtualenvs)
def cmd_virtualenv(
module, cmd_path, version, virtualenv_name, options, **kwargs):
""" pyenv virtualenv [--force] <version> <virtualenv name>
"""
cmd = [cmd_path, "virtualenv"]
for key in [
"force", "no_pip", "no_setuptools", "no_wheel", "symlinks",
"copies", "clear", "without_pip"]:
if options[key]:
cmd.append("--{}".format(key.replace("_", "-")))
if options["force"]:
# pyenv virtualenv --force not working as expected?
# https://github.com/pyenv/pyenv-virtualenv/issues/161
cmd.append("--force")
cmd.append(version)
cmd.append(virtualenv_name)
rc, out, err = module.run_command(cmd, **kwargs)
if rc:
return module.fail_json(msg=err, stdout=out)
else:
return module.exit_json(
changed=True, failed=False, stdout=out, stderr=err)
if options["clear"]:
# pyenv virtualenv --clear not working as expected?
cmd.append(version)
cmd.append(virtualenv_name)
rc, out, err = module.run_command(cmd, **kwargs)
if rc:
return module.fail_json(msg=err, stdout=out)
else:
return module.exit_json(
changed=True, failed=False, stdout=out, stderr=err)
result, data = get_virtualenvs(module, cmd_path, False, True, **kwargs)
if not result:
return module.fail_json(**data)
virtualenvs = set(data["virtualenvs"])
if virtualenv_name in virtualenvs:
if "{}/envs/{}".format(version, virtualenv_name) in virtualenvs:
return module.exit_json(
changed=False, failed=False,
stdout="{} already exists".format(virtualenv_name), stderr="")
else:
return module.fail_json(
msg="{} already exists but version differs".format(
virtualenv_name))
cmd.append(version)
cmd.append(virtualenv_name)
rc, out, err = module.run_command(cmd, **kwargs)
if rc:
return module.fail_json(msg=err, stdout=out)
else:
return module.exit_json(
changed=True, failed=False, stdout=out, stderr=err)
MSGS = {
"required_pyenv_root": (
"Either the environment variable 'PYENV_ROOT' "
"or 'pyenv_root' option is required")
}
def get_pyenv_root(params):
if params["pyenv_root"]:
if params["expanduser"]:
return os.path.expanduser(params["pyenv_root"])
else:
return params["pyenv_root"]
else:
if "PYENV_ROOT" not in os.environ:
return None
if params["expanduser"]:
return os.path.expanduser(os.environ["PYENV_ROOT"])
else:
return os.environ["PYENV_ROOT"]
def main():
module = AnsibleModule(argument_spec={
"bare": {"required": False, "type": "bool", "default": True},
"copies": {"required": False, "type": "bool", "default": False},
"clear": {"required": False, "type": "bool", "default": False},
"force": {"required": False, "type": "bool", "default": None},
"expanduser": {"required": False, "type": "bool", "default": True},
"list": {"required": False, "type": "bool", "default": False},
"no_pip": {"required": False, "type": "bool", "default": False},
"no_setuptools": {"required": False, "type": "bool", "default": False},
"no_wheel": {"required": False, "type": "bool", "default": False},
"pyenv_root": {"required": False, "default": None},
"skip_aliases": {"required": False, "type": "bool", "default": True},
"skip_existing": {"required": False, "type": "bool", "default": None},
"subcommand": {
"required": False, "default": "install",
"choices": [
"install", "uninstall", "versions", "global",
"virtualenv", "virtualenvs"]
},
"symlinks": {"required": False, "type": "bool", "default": False},
"version": {"required": False, "type": "str", "default": None},
"versions": {"required": False, "type": "list", "default": None},
"virtualenv_name": {"required": False, "type": "str", "default": None},
"without_pip": {"required": False, "type": "bool", "default": False},
})
params = module.params
environ_update = {}
pyenv_root = get_pyenv_root(params)
if pyenv_root is None:
return module.fail_json(
msg=MSGS["required_pyenv_root"])
environ_update["PYENV_ROOT"] = pyenv_root
cmd_path = os.path.join(pyenv_root, "bin", "pyenv")
if params["subcommand"] == "install":
if params["list"]:
return cmd_install_list(
module, cmd_path, environ_update=environ_update)
return cmd_install(
module, params, cmd_path, environ_update=environ_update)
elif params["subcommand"] == "uninstall":
if not params["version"]:
return module.fail_json(
msg="uninstall subcommand requires the 'version' parameter")
return cmd_uninstall(
module, cmd_path, params["version"], environ_update=environ_update)
elif params["subcommand"] == "versions":
return cmd_versions(
module, cmd_path, params["bare"], environ_update=environ_update)
elif params["subcommand"] == "global":
if params["versions"]:
return cmd_set_global(
module, cmd_path, params["versions"],
environ_update=environ_update)
else:
return cmd_get_global(
module, cmd_path, environ_update=environ_update)
elif params["subcommand"] == "virtualenvs":
return cmd_virtualenvs(
module, cmd_path, params["skip_aliases"], params["bare"],
environ_update=environ_update)
elif params["subcommand"] == "virtualenv":
if not params["version"]:
return module.fail_json(
msg="virtualenv subcommand requires the 'version' parameter")
if not params["virtualenv_name"]:
return module.fail_json(
msg=(
"virtualenv subcommand requires the 'virtualenv_name' "
"parameter"))
options = dict((key, params[key]) for key in [
"force", "no_pip", "no_setuptools", "no_wheel", "symlinks",
"copies", "clear", "without_pip"])
return cmd_virtualenv(
module, cmd_path, params["version"], params["virtualenv_name"],
options, environ_update=environ_update)
if __name__ == '__main__':
main()
|
python
|
#!/usr/bin/env python
#
# Copyright (C) 2016-2020 Wason Technology, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#Example client to change the light source color
#For use with rip_sensor_world.world Gazebo world
#Warning: Color changes do not currently show in gzclient. They
#will be applied to the camera sensors.
import sys
from RobotRaconteur.Client import *
import time
import cv2
import numpy as np
server=RRN.ConnectService('rr+tcp://localhost:11346/?service=GazeboServer')
w=server.get_worlds('default')
print(w.light_names)
sun=w.get_lights('sun')
color=sun.diffuse_color
print(str(color[0]["a"]) + " " + str(color[0]["r"]) + " " + str(color[0]["g"]) + " " + str(color[0]["b"]))
color_dtype=RRN.GetNamedArrayDType('com.robotraconteur.color.ColorRGBAf',server)
color2=np.zeros((1,),dtype=color_dtype)
color2["a"]=1.0
color2["r"]=0.0
color2["g"]=1.0
color2["b"]=0.0
sun.diffuse_color=color2
|
python
|
from uuid import uuid4
from sqlalchemy import Column, String, Boolean, ForeignKey
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import relationship, backref
from .db import Base
class User(Base):
__tablename__ = "user"
id = Column(UUID(as_uuid=True), primary_key=True, index=True, default=uuid4)
name = Column(String)
lastname = Column(String)
email = Column(String, unique=True, index=True)
dogs = relationship("Dog", cascade="all,delete")
class Dog(Base):
__tablename__ = "dog"
id = Column(UUID(as_uuid=True), primary_key=True, index=True, default=uuid4)
name = Column(String)
picture = Column(String)
create_date = Column(String)
is_adopted = Column(Boolean)
user_id = Column(UUID(as_uuid=True), ForeignKey("user.id"))
user = relationship("User", backref=backref("dogs_user", cascade="all,delete"))
|
python
|
# encoding: utf-8
"""
keepalive.py
Created by Thomas Mangin on 2009-11-05.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
from exabgp.bgp.message import Message
# =================================================================== KeepAlive
#
class KeepAlive (Message):
ID = Message.CODE.KEEPALIVE
TYPE = chr(Message.CODE.KEEPALIVE)
def message (self):
return self._message('')
def __str__ (self):
return "KEEPALIVE"
@classmethod
def unpack_message (cls, data, negotiated): # pylint: disable=W0613
# XXX: FIXME: raise Notify if data has something
return cls()
|
python
|
import pandas as pd
import os
from IGTD_Functions import min_max_transform, table_to_image
num_row = 30 # Number of pixel rows in image representation
num_col = 30 # Number of pixel columns in image representation
num = num_row * num_col # Number of features to be included for analysis, which is also the total number of pixels in image representation
save_image_size = 3 # Size of pictures (in inches) saved during the execution of IGTD algorithm.
max_step = 10000 # The maximum number of iterations to run the IGTD algorithm, if it does not converge.
val_step = 300 # The number of iterations for determining algorithm convergence. If the error reduction rate
# is smaller than a pre-set threshold for val_step iterations, the algorithm converges.
# Import the example data and linearly scale each feature so that its minimum and maximum values are 0 and 1, respectively.
data = pd.read_csv('../Data/Data.txt', low_memory=False, sep='\t', engine='c', na_values=['na', '-', ''],
header=0, index_col=0)
data = data.iloc[:, :num]
norm_data = min_max_transform(data.values)
norm_data = pd.DataFrame(norm_data, columns=data.columns, index=data.index)
# Run the IGTD algorithm using (1) the Euclidean distance for calculating pairwise feature distances and pairwise pixel
# distances and (2) the absolute function for evaluating the difference between the feature distance ranking matrix and
# the pixel distance ranking matrix. Save the result in Test_1 folder.
fea_dist_method = 'Euclidean'
image_dist_method = 'Euclidean'
error = 'abs'
result_dir = '../Results/Test_1'
os.makedirs(name=result_dir, exist_ok=True)
table_to_image(norm_data, [num_row, num_col], fea_dist_method, image_dist_method, save_image_size,
max_step, val_step, result_dir, error)
# Run the IGTD algorithm using (1) the Pearson correlation coefficient for calculating pairwise feature distances,
# (2) the Manhattan distance for calculating pairwise pixel distances, and (3) the square function for evaluating
# the difference between the feature distance ranking matrix and the pixel distance ranking matrix.
# Save the result in Test_2 folder.
fea_dist_method = 'Pearson'
image_dist_method = 'Manhattan'
error = 'squared'
result_dir = '../Results/Test_2'
os.makedirs(name=result_dir, exist_ok=True)
table_to_image(norm_data, [num_row, num_col], fea_dist_method, image_dist_method, save_image_size,
max_step, val_step, result_dir, error)
|
python
|
import json
import os
from typing import List, Dict, Any
from .._types import TEST_SCHEMA
class TestSchema:
_endpoint_url: str
_paths: Dict[str, List[Any]]
def __init__(self, endpoint_url: str) -> None:
self._endpoint_url = endpoint_url
self._paths = {}
def add_tests(self, path: str, tests: List[Any]) -> None:
self._paths[path] = tests
def to_json(self) -> TEST_SCHEMA:
return {"endpoint_url": self._endpoint_url, "paths": self._paths}
def save(self, path: str) -> None:
with open(path, "w") as fp:
json.dump(self.to_json(), fp)
@staticmethod
def load(path: str) -> "TestSchema":
assert os.path.exists(path), f"Test schema not found at {path}"
with open(path) as fp:
data = json.load(fp)
TestSchema.validate_test_schema(data)
schema = TestSchema(data.get("endpoint_url"))
schema._paths = data.get("paths")
return schema
@staticmethod
def validate_test_schema(test_schema: TEST_SCHEMA) -> bool:
try:
assert type(test_schema) is dict
assert type(test_schema.get("endpoint_url")) is str
assert type(test_schema.get("paths")) is dict
except AssertionError:
raise ValueError("Invalid Test Schema Provided")
return True
@property
def endpoint_url(self) -> str:
return self._endpoint_url
@property
def paths(self) -> Dict[str, List[Any]]:
return self._paths
|
python
|
# Copyright (c) 2019 ISciences, LLC.
# All rights reserved.
#
# WSIM is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from typing import List
from wsim_workflow.step import Step
from forcing.leaky_bucket import LeakyBucket
from forcing.nmme import NMMEForecast
def get_producing_step(target: str, steps: List[Step]) -> Step:
return [s for s in steps if target in s.targets][0]
class TestNMMEConfig(unittest.TestCase):
source = '/tmp/source'
derived = '/tmp/derived'
def test_model_iteration_correct(self):
# WSIM's "yearmon" variable is based on the last month of observed data available.
# In other words, the "201901" model iteration is run in February 2019 using observed
# data through the end of January 2019. This is different from the "reference time" used
# in NMME files, which refers to the month in which the forecast was generated. A
# confusing result of this offset is that we use the "201902" NMME data to produce
# the "201901" WSIM run. This offset is handled by the NMME path generator, since
# other parts of the code have no reason to know about this.
observed = LeakyBucket(self.source)
nmme = NMMEForecast(self.source, self.derived, observed, 'Model3', 1969, 2008)
params = {
'yearmon': '201901',
'target': '201904',
'member': '8'
}
raw_fcst = nmme.forecast_raw(**params).split('::')[0]
# the raw forecast file uses the WSIM month, 201901
self.assertTrue(raw_fcst.endswith('model3_201901_trgt201904_fcst8.nc'))
# and its dependencies use the NMME month, 201902
anom_to_raw = get_producing_step(raw_fcst, nmme.prep_steps(**params))
self.assertIn(os.path.join(nmme.model_dir(), 'clim', 'Model3.prate.02.mon.clim.nc'), anom_to_raw.dependencies)
self.assertIn(os.path.join(nmme.model_dir(), 'clim', 'Model3.tmp2m.02.mon.clim.nc'), anom_to_raw.dependencies)
self.assertIn(os.path.join(nmme.model_dir(), 'raw_anom', 'nmme_201902', 'Model3.tmp2m.201902.anom.nc'),
anom_to_raw.dependencies)
self.assertIn(os.path.join(nmme.model_dir(), 'raw_anom', 'nmme_201902', 'Model3.prate.201902.anom.nc'),
anom_to_raw.dependencies)
def test_hindcast_lead(self):
# This test checks another consequence of the offset between WSIM data version and
# NMME forecast reference times.
observed = LeakyBucket(self.source)
nmme = NMMEForecast(self.source, self.derived, observed, 'Model3', 1969, 2008)
fit_command = nmme.compute_fit_hindcast(varname='Pr', month=9, lead=4)[0].commands[0]
lead_arg = fit_command.index('--lead') + 1
self.assertEqual(fit_command[lead_arg], '3')
|
python
|
import torch
import torch.nn as nn
import torch.functional as tf
from torch.nn.modules.activation import ReLU
from models.m1layers_warpgan.conv2d import CustomConv2d
class StyleController(nn.Module):
"""
Style Controller network.
"""
def __init__(self, args):
"""
Style Controller Network
:param batch_size : number of examples in a batch
:param input_size : dimension of the style vectors
"""
super().__init__()
# unpack input parameters from args
self.batch_size = args.in_batch
self.k = args.k
self.style_size = args.style_size
self.device = args.device
# inp: (in_batch, input_size)
# out: (in_batch, 128)
self.linears = nn.Sequential(
# inp: (in_batch, input_size)
# out: (in_batch, 128)
nn.Linear(self.style_size, 128),
# inp: (in_batch, 128)
# out: (in_batch, 128)
nn.LayerNorm(128),
# inp: (in_batch, 128)
# out: (in_batch, 128)
nn.ReLU(),
# inp: (in_batch, 128)
# out: (in_batch, 128)
nn.Linear(128, 128),
# inp: (in_batch, 128)
# out: (in_batch, 128)
nn.LayerNorm(128),
# inp: (in_batch, 128)
# out: (in_batch, 128)
nn.ReLU(),
)
# inp: (in_batch, 128)
# out: (in_batch, 4 * k)
self.linear_gamma = nn.Linear(128, 4 * self.k, bias = True)
# inp: (in_batch, 128)
# out: (in_batch, 4 * k)
self.linear_beta = nn.Linear(128, 4 * self.k, bias = True)
# initialize all weights for module
self.initialize_weights()
def forward(self, x) -> tuple:
"""
Forward function for Style Controller.
        Returns two (batch_size, 4 * k, 1, 1) shaped tensors, the beta and gamma coefficients
:param x: style encodings
:shape: (batch_size, style_size)
        :return : (beta, gamma)
        :shape: each of shape (batch_size, 4 * k, 1, 1)
"""
if x is None:
x = torch.randn((self.batch_size, self.style_size)).to(self.device)
# inp: (batch_size, style_size)
# out: (batch_size, 128)
out = self.linears(x)
# inp: (batch_size, 128)
# out: (batch_size, 4 * k)
gamma = self.linear_gamma(out)
# inp: (batch_size, 4 * k)
# out: (batch_size, 4 * k, 1, 1)
gamma = gamma.view([-1, 4 * self.k, 1, 1])
# inp: (batch_size, 128)
        # out: (batch_size, 4 * k)
beta = self.linear_beta(out)
# inp: (batch_size, 4 * k)
# out: (batch_size, 4 * k, 1, 1)
beta = beta.view([-1, 4 * self.k, 1, 1])
return beta, gamma
def initialize_weights(self) -> None:
"""
Initialize weights of modules.
"""
for module in self.modules():
if isinstance(module, nn.Linear):
nn.init.kaiming_uniform_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
|
python
|
from multiprocessing import Process
import multiprocessing as mp
import time
class Worker(Process):
def __init__(self, worker_idx, task_queue, result_queue, debug_prints=False):
# call the Process constructor
Process.__init__(self)
self.worker_idx = worker_idx
# the queues for work to be done and work done
self.task_queue = task_queue
self.result_queue = result_queue
self.debug_prints = debug_prints
def run_task(self, task):
return task()
def run(self):
if self.debug_prints:
worker_process = mp.current_process()
print("Worker process started as name: {}; PID: {}\n".format(worker_process.name,
worker_process.pid))
while True:
# get the next task
task_idx, next_task = self.task_queue.get()
            # check for the poison pill which is the signal to stop
if next_task is None:
if self.debug_prints:
print('Worker: {}; received {} {}: FINISHED'.format(
self.name, task_idx, next_task))
# mark the poison pill task as done
self.task_queue.task_done()
# and exit the loop
break
if self.debug_prints:
print('Worker: {}; task_idx : {}; args : {} '.format(
self.name, task_idx, next_task.args))
# run the task
start = time.time()
answer = self.run_task(next_task)
end = time.time()
task_time = end - start
if self.debug_prints:
print('Worker: {}; task_idx : {}; COMPLETED in {} s'.format(
self.name, task_idx, task_time))
# (for joinable queue) tell the queue that the formerly
            # enqueued task is complete
self.task_queue.task_done()
            # put the results into the results queue with its task
# index so we can sort them later
self.result_queue.put((task_idx, self.worker_idx, task_time, answer))
class Task(object):
def __init__(self, func, *args):
self.args = args
self.func = func
def __call__(self, **kwargs):
# run the function passing in the args for running it and any
# worker information in the kwargs
return self.func(*self.args, **kwargs)
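
# --- Usage sketch (illustration only, not part of the original module) ---
# Shows the protocol the Worker expects: (task_idx, Task) pairs on a
# JoinableQueue, one (None, None) "poison pill" per worker to shut down, and
# (task_idx, worker_idx, task_time, answer) tuples on the result queue that
# can be re-sorted by task index. The two-worker setup and operator.add tasks
# below are assumptions made for the example.
if __name__ == "__main__":
    import operator

    task_queue = mp.JoinableQueue()
    result_queue = mp.Queue()

    # start two workers that consume from the shared task queue
    workers = [Worker(i, task_queue, result_queue) for i in range(2)]
    for worker in workers:
        worker.start()

    # enqueue a few simple tasks, tagged with their index
    tasks = [Task(operator.add, i, i) for i in range(4)]
    for task_idx, task in enumerate(tasks):
        task_queue.put((task_idx, task))

    # one poison pill per worker signals shutdown
    for _ in workers:
        task_queue.put((None, None))

    # wait until every task (including the pills) has been marked done
    task_queue.join()

    # drain the result queue (get() blocks until each result arrives) and
    # restore the original task order
    results = sorted((result_queue.get() for _ in tasks), key=operator.itemgetter(0))
    print(results)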
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/3/25 1:39
# @Author : WieAngeal
# @File : ycyl_hander.py
# @Software: PyCharm
from flask import Blueprint, flash, render_template, session, redirect, request
from ..common import (ConsoleLogger, make_response, HttpError,
relative_path, multi_dict_parser2dict)
from flask import request
from ..models import Hosinfo
from ..services import DBService
from ..common import email, auth
import ast
from flask_login import login_required
logger = ConsoleLogger(relative_path(__file__))
ycyl_service = DBService(model=Hosinfo)
ycyl = Blueprint('ycyl', __name__, url_prefix='/ycyl')
@ycyl.route('/', methods=["GET", "POST"])
@login_required
def home():
token = request.args.get('token')
user = auth.verify_auth_token(token)['username']
return render_template("ctyxy.html", user=user)
@ycyl.route('/api/register', methods=["POST"])
def register():
method = request.method
if method == 'POST':
data = request.form.get('data')
hosinfo = ast.literal_eval(data)
Attachments = ['工作日报记录表.xlsx']
email.send_mail(title='第一份flask_email测试邮件',
to='[email protected]',
msg_html='''<h2>这是我的个人博客</h2>
<hr />
<h3>东风破</h3>
<h5><font color="blue" size="18px">周杰伦</font></h5>
<p>一盏离愁 孤灯伫立在窗口</p>
<p>我在门后 假装你人还没走</p>
<p>旧地如重游月 圆更寂寞
<p>夜半清醒的烛火 不忍苛责我</p>''',
attachfiles=None
)
id = ycyl_service.max(Hosinfo.id)
if (id is None):
id = 0
hosinfo['id'] = id + 1
obj = ycyl_service.save(Hosinfo(**hosinfo))
return make_response(data=obj.json())
@ycyl.route('/api/count', methods=["POST", "GET"])
def count():
method = request.method
if method == 'GET':
max_num = ycyl_service.max(Hosinfo.id)
return make_response(data=max_num, e="查询总数成功。")
|
python
|
from django.shortcuts import render, redirect
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
def index(request):
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36"}
options = webdriver.ChromeOptions()
driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
data = []
for b in range(1, 5):
URL = "https://etherscan.io/accounts/" + str(b)
driver.get(URL)
soup = BeautifulSoup(driver.page_source, "lxml")
i = 0
a = 4
for td in soup.find_all("td"):
if(i == a):
data.append(td.get_text())
a = a + 6
i = i + 1
context = {
"data": data
}
return render(request, 'chartapp/index.html', context)
|
python
|
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class CartItem(models.Model):
cart = models.ForeignKey('carts.Cart', verbose_name=_('cart'), related_name='cartitems')
variant = models.ForeignKey('products.Variant', verbose_name=_('variant'))
quantity = models.PositiveIntegerField(_('quantity'), default=1)
created = models.DateTimeField(auto_now_add=True, editable=False)
updated = models.DateTimeField(auto_now=True, editable=False)
def __str__(self):
return '%s - %s' % (self.cart, self.variant)
class Meta:
app_label = 'carts'
verbose_name = _('cart item')
verbose_name_plural = _('cart items')
ordering = ('-created',)
|
python
|
# Copyright 2014, Doug Wiegley, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import six
import time
from acos_client import errors as acos_errors
from acos_client.v21 import base
class Action(base.BaseV21):
def write_memory(self):
try:
self._get("system.action.write_memory")
except acos_errors.InvalidPartitionParameter:
pass
def reboot(self, **kwargs):
raise NotImplementedError
# return self._post("system.action.reboot", **kwargs)
def reload(self, write_memory=False, **kwargs):
# write_memory param is required but no matter what value is passed
# it will ALWAYS save pending changes
write_memory = 1 if write_memory else 0
return self._post("system.action.reload",
params={"write_memory": write_memory}, **kwargs)
def activate_and_write(self, partition, **kwargs):
write_cmd = "write memory\r\n"
if partition is not None:
write_cmd = "active-partition {0}\r\n{1}".format(partition, write_cmd)
last_e = None
for i in six.moves.range(0, 5):
# Request raises an exception when the "maybe error" is returned.
try:
return self._request("POST", "cli.deploy", params=None, payload=write_cmd, **kwargs)
except acos_errors.ACOSException as e:
last_e = e
# Catch 'might fail error'
if e.msg.startswith("write memory") or '2039 ' in e.msg:
time.sleep(1)
continue
raise e
if last_e is not None:
raise last_e
|
python
|
import json
import os
import pathlib
import re
import datetime
job_log_search_dict = {
'temp_dir1': r"Starting plotting progress into temporary dirs: (.+) and .+\n",
'temp_dir2': r"Starting plotting progress into temporary dirs: .+ and (.+)\n",
'final_dir': r"Final Directory is: (.+)\n",
'plot_id': r"ID: (.+)\n",
'process_id': r"Process ID is: (.+)\n",
'phase1_time': r"Time for phase 1 = (\d+\.\d+) seconds\.",
'phase2_time': r"Time for phase 2 = (\d+\.\d+) seconds\.",
'phase3_time': r"Time for phase 3 = (\d+\.\d+) seconds\.",
'phase4_time': r"Time for phase 4 = (\d+\.\d+) seconds\.",
'total_time': r"Total time = (\d+\.\d+) seconds\.",
'copy_time': r"Copy time = (\d+\.\d+) seconds\.",
'plot_size': r"Plot size is: (\d+)\n",
'buffer_size': r"Buffer size is: (.+)\n",
'n_buckets': r"Using (\d+) buckets\n",
'n_threads': r"Using (\d+) threads of stripe size \d+\n",
'stripe_size': r"Using \d+ threads of stripe size (\d+)\n",
}
config_fn = pathlib.Path(__file__).parent / ".." / "config.json"
with open(config_fn) as f:
config_dict = json.load(f)
job_log_dir = pathlib.Path(config_dict['job_log_dir'])
def get_all_job_files(job_dir=None):
if job_dir is None:
job_dir = job_log_dir
job_files = {}
for job_log_file in job_dir.glob("*.log"):
        match = re.search(r"(\d{4})-(\d{2})-\d{2}_\d{2}_\d{2}_\d{2}", job_log_file.name)
if not match:
print(f"JOB NAME READ ERROR: {job_log_file}")
continue
file_time = datetime.datetime.strptime(match.group(), "%Y-%m-%d_%H_%M_%S")
job_files[job_log_file] = file_time
return job_files
def read_job_log(path):
with open(path) as f:
job_log = f.read()
job_data = {}
for key, regex in job_log_search_dict.items():
match = re.search(regex, job_log)
if match:
job_data[key] = match.group(1)
# status
match = re.search("Created a total of 1 new plots", job_log)
if match:
job_data['status'] = "complete"
else:
match = re.search("error", job_log, flags=re.IGNORECASE)
if match:
job_data['status'] = "error"
else:
job_data['status'] = "in_progress"
return job_data
if __name__ == "__main__":
for file, t in get_all_job_files().items():
data = read_job_log(file)
data['time'] = t
print(t, data)
|
python
|
from ddt import ddt, data
from rest_framework.test import APITestCase
@ddt
class TestCookieRequest(APITestCase):
@data('put', 'patch', 'post', 'delete')
def test_generate_csrf_token_for_each_not_safe_method_request(self, http_verb):
request_method = getattr(self.client, http_verb)
first_response_csrf = request_method('/').cookies['csrftoken']._value
second_response_csrf = request_method('/').cookies['csrftoken']._value
        self.assertNotEqual(first_response_csrf, second_response_csrf)
@data('get', 'head', 'options', 'trace')
def test_not_generate_csrf_token_for_safe_method_request(self, http_verb):
request_method = getattr(self.client, http_verb)
self.assertNotIn('csrftoken', request_method('/').cookies)
|
python
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'error.ui'
#
# Created by: PyQt5 UI code generator 5.12
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_errorwin(object):
def setupUi(self, errorwin):
errorwin.setObjectName("errorwin")
errorwin.resize(248, 164)
self.centralwidget = QtWidgets.QWidget(errorwin)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.text_error = QtWidgets.QTextBrowser(self.centralwidget)
self.text_error.setObjectName("text_error")
self.gridLayout.addWidget(self.text_error, 0, 0, 1, 1)
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setObjectName("pushButton")
self.gridLayout.addWidget(self.pushButton, 1, 0, 1, 1)
errorwin.setCentralWidget(self.centralwidget)
self.retranslateUi(errorwin)
QtCore.QMetaObject.connectSlotsByName(errorwin)
def retranslateUi(self, errorwin):
_translate = QtCore.QCoreApplication.translate
errorwin.setWindowTitle(_translate("errorwin", "ошибка"))
self.text_error.setHtml(_translate("errorwin", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"center\" style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p></body></html>"))
self.pushButton.setText(_translate("errorwin", "ok"))
|
python
|
import sys
import ops
import iopc
TARBALL_FILE="clutter-1.26.0.tar.xz"
TARBALL_DIR="clutter-1.26.0"
INSTALL_DIR="clutter-bin"
pkg_path = ""
output_dir = ""
tarball_pkg = ""
tarball_dir = ""
install_dir = ""
install_tmp_dir = ""
cc_host = ""
tmp_include_dir = ""
dst_include_dir = ""
dst_lib_dir = ""
def set_global(args):
global pkg_path
global output_dir
global tarball_pkg
global install_dir
global install_tmp_dir
global tarball_dir
global cc_host
global tmp_include_dir
global dst_include_dir
global dst_lib_dir
global dst_pkgconfig_dir
pkg_path = args["pkg_path"]
output_dir = args["output_path"]
tarball_pkg = ops.path_join(pkg_path, TARBALL_FILE)
install_dir = ops.path_join(output_dir, INSTALL_DIR)
install_tmp_dir = ops.path_join(output_dir, INSTALL_DIR + "-tmp")
tarball_dir = ops.path_join(output_dir, TARBALL_DIR)
cc_host_str = ops.getEnv("CROSS_COMPILE")
cc_host = cc_host_str[:len(cc_host_str) - 1]
tmp_include_dir = ops.path_join(output_dir, ops.path_join("include",args["pkg_name"]))
dst_include_dir = ops.path_join("include",args["pkg_name"])
dst_lib_dir = ops.path_join(install_dir, "lib")
dst_pkgconfig_dir = ops.path_join(ops.path_join(output_dir, "pkgconfig"), "pkgconfig")
def MAIN_ENV(args):
set_global(args)
ops.exportEnv(ops.setEnv("CC", ops.getEnv("CROSS_COMPILE") + "gcc"))
ops.exportEnv(ops.setEnv("CXX", ops.getEnv("CROSS_COMPILE") + "g++"))
ops.exportEnv(ops.setEnv("CROSS", ops.getEnv("CROSS_COMPILE")))
ops.exportEnv(ops.setEnv("DESTDIR", install_tmp_dir))
ops.exportEnv(ops.setEnv("PKG_CONFIG_LIBDIR", ops.path_join(iopc.getSdkPath(), "pkgconfig")))
ops.exportEnv(ops.setEnv("PKG_CONFIG_SYSROOT_DIR", iopc.getSdkPath()))
ops.exportEnv(ops.addEnv("PATH", ops.path_join(pkg_path, "host_utils")))
cc_sysroot = ops.getEnv("CC_SYSROOT")
cflags = ""
cflags += " -I" + ops.path_join(cc_sysroot, 'usr/include')
#cflags += " -I" + ops.path_join(iopc.getSdkPath(), 'usr/include/libexpat')
ldflags = ""
ldflags += " -L" + ops.path_join(cc_sysroot, 'lib')
ldflags += " -L" + ops.path_join(cc_sysroot, 'usr/lib')
ldflags += " -L" + ops.path_join(iopc.getSdkPath(), 'lib')
#libs = ""
#libs += " -lffi -lxml2 -lexpat"
ops.exportEnv(ops.setEnv("LDFLAGS", ldflags))
ops.exportEnv(ops.setEnv("CFLAGS", cflags))
#ops.exportEnv(ops.setEnv("LIBS", libs))
return False
def MAIN_EXTRACT(args):
set_global(args)
ops.unTarXz(tarball_pkg, output_dir)
#ops.copyto(ops.path_join(pkg_path, "finit.conf"), output_dir)
return True
def MAIN_PATCH(args, patch_group_name):
set_global(args)
for patch in iopc.get_patch_list(pkg_path, patch_group_name):
if iopc.apply_patch(tarball_dir, patch):
continue
else:
sys.exit(1)
return True
def MAIN_CONFIGURE(args):
set_global(args)
print ops.getEnv("PKG_CONFIG_PATH")
extra_conf = []
extra_conf.append("--host=" + cc_host)
extra_conf.append("--enable-egl-backend=yes")
extra_conf.append("--enable-wayland-compositor=yes")
extra_conf.append("--disable-glibtest")
'''
includes = '-I' + ops.path_join(iopc.getSdkPath(), 'usr/include/libglib/glib-2.0')
includes += ' -I' + ops.path_join(iopc.getSdkPath(), 'usr/include/libglib')
extra_conf.append('CFLAGS=' + includes)
extra_conf.append('GLIB_CFLAGS=' + includes)
libs = ' -lglib-2.0 -lgobject-2.0 -lgio-2.0 -lgthread-2.0 -lgmodule-2.0 -lpthread -lz -lffi -lpcre'
extra_conf.append('LIBS=-L' + ops.path_join(iopc.getSdkPath(), 'lib') + libs)
extra_conf.append('GLIB_LIBS=-L' + ops.path_join(iopc.getSdkPath(), 'lib') + libs)
extra_conf.append("--disable-documentation")
extra_conf.append('FFI_CFLAGS="-I' + ops.path_join(iopc.getSdkPath(), 'usr/include/libffi') + '"')
extra_conf.append('FFI_LIBS="-L' + ops.path_join(iopc.getSdkPath(), 'lib') + ' -lffi"')
extra_conf.append('EXPAT_CFLAGS="-I' + ops.path_join(iopc.getSdkPath(), 'usr/include/libexpat') + '"')
extra_conf.append('EXPAT_LIBS="-L' + ops.path_join(iopc.getSdkPath(), 'lib') + ' -lexpat"')
extra_conf.append('LIBXML_CFLAGS="-I' + ops.path_join(iopc.getSdkPath(), 'usr/include/libxml2') + '"')
extra_conf.append('LIBXML_LIBS="-L' + ops.path_join(iopc.getSdkPath(), 'lib') + ' -lxml2"')
'''
iopc.configure(tarball_dir, extra_conf)
return True
def MAIN_BUILD(args):
set_global(args)
print "AAAA" + ops.getEnv("PATH")
ops.mkdir(install_dir)
ops.mkdir(install_tmp_dir)
iopc.make(tarball_dir)
iopc.make_install(tarball_dir)
ops.mkdir(install_dir)
ops.mkdir(dst_lib_dir)
libwayland_client = "libwayland-client.so.0.3.0"
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/lib/" + libwayland_client), dst_lib_dir)
ops.ln(dst_lib_dir, libwayland_client, "libwayland-client.so.0.3")
ops.ln(dst_lib_dir, libwayland_client, "libwayland-client.so.0")
ops.ln(dst_lib_dir, libwayland_client, "libwayland-client.so")
libwayland_cursor = "libwayland-cursor.so.0.0.0"
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/lib/" + libwayland_cursor), dst_lib_dir)
ops.ln(dst_lib_dir, libwayland_cursor, "libwayland-cursor.so.0.0")
ops.ln(dst_lib_dir, libwayland_cursor, "libwayland-cursor.so.0")
ops.ln(dst_lib_dir, libwayland_cursor, "libwayland-cursor.so")
libwayland_egl = "libwayland-egl.so.1.0.0"
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/lib/" + libwayland_egl), dst_lib_dir)
ops.ln(dst_lib_dir, libwayland_egl, "libwayland-egl.so.1.0")
ops.ln(dst_lib_dir, libwayland_egl, "libwayland-egl.so.1")
ops.ln(dst_lib_dir, libwayland_egl, "libwayland-egl.so")
libwayland_server = "libwayland-server.so.0.1.0"
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/lib/" + libwayland_server), dst_lib_dir)
ops.ln(dst_lib_dir, libwayland_server, "libwayland-server.so.0.1")
ops.ln(dst_lib_dir, libwayland_server, "libwayland-server.so.0")
ops.ln(dst_lib_dir, libwayland_server, "libwayland-server.so")
ops.mkdir(tmp_include_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/include/."), tmp_include_dir)
ops.mkdir(dst_pkgconfig_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/lib/pkgconfig/wayland-scanner.pc"), dst_pkgconfig_dir)
return False
def MAIN_INSTALL(args):
set_global(args)
iopc.installBin(args["pkg_name"], ops.path_join(ops.path_join(install_dir, "lib"), "."), "lib")
iopc.installBin(args["pkg_name"], ops.path_join(tmp_include_dir, "."), dst_include_dir)
iopc.installBin(args["pkg_name"], ops.path_join(dst_pkgconfig_dir, '.'), "pkgconfig")
return False
def MAIN_SDKENV(args):
set_global(args)
cflags = ""
cflags += " -I" + ops.path_join(iopc.getSdkPath(), 'usr/include/' + args["pkg_name"])
iopc.add_includes(cflags)
#libs = ""
#libs += " -lcap"
#iopc.add_libs(libs)
return False
def MAIN_CLEAN_BUILD(args):
set_global(args)
return False
def MAIN(args):
set_global(args)
|
python
|
import numpy as np
def normalize_features(features):
raise NotImplementedError()
|
python
|
"""
Produces Fig. 11 of Johnson & Weinberg (2019), a 2-column by 2-row plot
showing the slow burst models. Star formation histories are shown in the top
left, [O/Fe]-[Fe/H] tracks in the top right, [O/Fe] and [Fe/H] against time
in the bottom left, and [O/Fe] against time in the bottom right.
"""
import visuals # visuals.py -> matplotlib subroutines in this directory
import matplotlib.pyplot as plt
import vice
import sys
import warnings
warnings.filterwarnings("ignore")
def setup_axes():
"""
    Sets up the 2x2 axis grid with the proper axis labels and ranges.
    Returns
    =======
    axes :: list
        The axes, indexable via axes[row number][column number]
"""
axes = visuals.subplots(2, 2, figsize = (14, 14))
xlabels = [["Time [Gyr]", "[Fe/H]"], ["Time [Gyr]", "Time [Gyr]"]]
ylabels = [[r"$\dot{M}_*$ [M$_\odot$ yr$^{-1}$]", "[O/Fe]"],
["[X/H]", "[O/Fe]"]]
xlims = [[[-1, 16], [-1.7, 0.2]], [[-1, 16], [-1, 16]]]
ylims = [[[-1, 13], [0.0, 0.5]], [[-0.34, 0.14], [-0.1, 0.5]]]
for i in range(2):
for j in range(2):
axes[i][j].set_xlabel(xlabels[i][j])
axes[i][j].set_ylabel(ylabels[i][j])
axes[i][j].set_xlim(xlims[i][j])
axes[i][j].set_ylim(ylims[i][j])
axes[1][0].yaxis.set_ticks([-0.3, -0.2, -0.1, 0.0, 0.1])
return axes
def plot_history(axes, name, color, linestyle = '-'):
"""
Plots the relevant information for a given history on the 2x2 axis grid
Parameters
==========
axes :: list
The 2x2 list of matplotlib axis objects to plot on
name :: str
The name of the model to plot
color :: str
The name of the color to use in plotting the model
"""
hist = vice.history(name)
# axes[0][0].plot(hist["time"], hist["ifr"], linestyle = '--',
# c = visuals.colors()[color])
axes[0][0].plot(hist["time"], hist["sfr"], c = visuals.colors()[color],
linestyle = linestyle)
if linestyle == '-':
axes[0][1].plot(hist["[Fe/H]"], hist["[O/Fe]"],
c = visuals.colors()[color], linestyle = linestyle)
axes[1][0].plot(hist["time"], hist["[O/H]"], linestyle = '--',
c = visuals.colors()[color])
axes[1][0].plot(hist["time"], hist["[Fe/H]"], linestyle = '-',
c = visuals.colors()[color])
else:
axes[1][0].plot(hist["time"], hist["[O/H]"], linestyle = linestyle,
c = visuals.colors()[color])
axes[1][0].plot(hist["time"], hist["[Fe/H]"], linestyle = linestyle,
c = visuals.colors()[color])
axes[1][1].plot(hist["time"], hist["[O/Fe]"], c = visuals.colors()[color],
linestyle = linestyle)
def draw_ofe_legend(ax):
"""
Draws the legend differentiating between oxygen and iron in the plot of
[X/H] against time.
Parameters
==========
ax :: subplot
The matplotlib axis object to put the legend on
"""
lines = 2 * [None]
for i in range(2):
lines[i] = ax.plot([1, 2], [1, 2], c = visuals.colors()["black"],
label = ["O", "Fe"][i], linestyle = ['--', '-'][i])[0]
ax.legend(loc = visuals.mpl_loc()["upper left"], frameon = False,
bbox_to_anchor = (0.01, 0.99))
for i in range(2):
lines[i].remove()
def main():
"""
Produces the figure and saves it as a PDF.
"""
plt.clf()
axes = setup_axes()
plot_history(axes, "../../simulations/episodic_infall", "black",
linestyle = ':')
plot_history(axes, "../../simulations/constant", "black",
linestyle = ':')
plot_history(axes, "../../simulations/slowburst_episodic_infall",
"crimson")
plot_history(axes, "../../simulations/slowburst_constant", "deepskyblue")
draw_ofe_legend(axes[1][0])
plt.tight_layout()
plt.savefig(sys.argv[1])
plt.clf()
if __name__ == "__main__":
main()
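# Usage note: the output path is read from the command line (sys.argv[1]),
# so the script is invoked as, e.g. (filename assumed):
#
#     python fig11.py fig11.pdf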
|
python
|
# -*- coding: utf-8 -*-
import numpy as np
import cv2
img = cv2.imread('images/cameraman.tif', 0)  # 0 -> read as grayscale
if img is None:
    raise FileNotFoundError("Could not read 'images/cameraman.tif'")
cv2.imshow("Image read in Python", img)
k = cv2.waitKey(0) & 0xFF
if k == 27:  # ESC key closes the window
    cv2.destroyAllWindows()
|
python
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines the base aperture classes.
"""
import abc
from copy import deepcopy
import numpy as np
from astropy.coordinates import SkyCoord
import astropy.units as u
from .bounding_box import BoundingBox
from ._photometry_utils import (_handle_units, _prepare_photometry_data,
_validate_inputs)
from ..utils._wcs_helpers import _pixel_scale_angle_at_skycoord
__all__ = ['Aperture', 'SkyAperture', 'PixelAperture']
class Aperture(metaclass=abc.ABCMeta):
"""
Abstract base class for all apertures.
"""
_params = ()
positions = np.array(())
theta = None
def __len__(self):
if self.isscalar:
raise TypeError(f'A scalar {self.__class__.__name__!r} object '
'has no len()')
return self.shape[0]
def __getitem__(self, index):
if self.isscalar:
raise TypeError(f'A scalar {self.__class__.__name__!r} object '
'cannot be indexed')
kwargs = dict()
for param in self._params:
if param == 'positions':
# slice the positions array
kwargs[param] = getattr(self, param)[index]
else:
kwargs[param] = getattr(self, param)
return self.__class__(**kwargs)
def __iter__(self):
for i in range(len(self)):
yield self.__getitem__(i)
def _positions_str(self, prefix=None):
if isinstance(self, PixelAperture):
return np.array2string(self.positions, separator=', ',
prefix=prefix)
elif isinstance(self, SkyAperture):
return repr(self.positions)
else:
raise TypeError('Aperture must be a subclass of PixelAperture '
'or SkyAperture')
def __repr__(self):
prefix = f'{self.__class__.__name__}'
cls_info = []
for param in self._params:
if param == 'positions':
cls_info.append(self._positions_str(prefix))
else:
cls_info.append(f'{param}={getattr(self, param)}')
cls_info = ', '.join(cls_info)
return f'<{prefix}({cls_info})>'
def __str__(self):
cls_info = [('Aperture', self.__class__.__name__)]
for param in self._params:
if param == 'positions':
prefix = 'positions'
cls_info.append((prefix, self._positions_str(prefix + ': ')))
else:
cls_info.append((param, getattr(self, param)))
fmt = [f'{key}: {val}' for key, val in cls_info]
return '\n'.join(fmt)
def __eq__(self, other):
"""
Equality operator for `Aperture`.
All Aperture properties are compared for strict equality except
for Quantity parameters, which allow for different units if they
are directly convertible.
"""
if not isinstance(other, self.__class__):
return False
self_params = list(self._params)
other_params = list(other._params)
# check that both have identical parameters
if self_params != other_params:
return False
# now check the parameter values
# Note that Quantity comparisons allow for different units
        # if they are directly convertible (e.g., 1. * u.deg == 60. * u.arcmin)
try:
for param in self_params:
# np.any is used for SkyCoord array comparisons
if np.any(getattr(self, param) != getattr(other, param)):
return False
except TypeError:
# TypeError is raised from SkyCoord comparison when they do
# not have equivalent frames. Here return False instead of
# the TypeError.
return False
return True
def __ne__(self, other):
"""
Inequality operator for `Aperture`.
"""
return not (self == other)
def copy(self):
"""
Make an independent (deep) copy.
"""
params_copy = {}
for param in list(self._params):
params_copy[param] = deepcopy(getattr(self, param))
return self.__class__(**params_copy)
@property
def shape(self):
"""
The shape of the instance.
"""
if isinstance(self.positions, SkyCoord):
return self.positions.shape
else:
return self.positions.shape[:-1]
@property
def isscalar(self):
"""
Whether the instance is scalar (i.e., a single position).
"""
return self.shape == ()
class PixelAperture(Aperture):
"""
Abstract base class for apertures defined in pixel coordinates.
"""
@property
def _default_patch_properties(self):
"""
A dictionary of default matplotlib.patches.Patch properties.
"""
mpl_params = dict()
# matplotlib.patches.Patch default is ``fill=True``
mpl_params['fill'] = False
return mpl_params
@staticmethod
def _translate_mask_mode(mode, subpixels, rectangle=False):
if mode not in ('center', 'subpixel', 'exact'):
raise ValueError(f'Invalid mask mode: {mode}')
if rectangle and mode == 'exact':
mode = 'subpixel'
subpixels = 32
        if mode == 'subpixel':
if not isinstance(subpixels, int) or subpixels <= 0:
raise ValueError('subpixels must be a strictly positive '
'integer')
if mode == 'center':
use_exact = 0
subpixels = 1
elif mode == 'subpixel':
use_exact = 0
elif mode == 'exact':
use_exact = 1
subpixels = 1
return use_exact, subpixels
@property
@abc.abstractmethod
def _xy_extents(self):
"""
The (x, y) extents of the aperture measured from the center
position.
In other words, the (x, y) extents are half of the aperture
minimal bounding box size in each dimension.
"""
raise NotImplementedError('Needs to be implemented in a subclass.')
@property
def bbox(self):
"""
The minimal bounding box for the aperture.
If the aperture is scalar then a single
`~photutils.aperture.BoundingBox` is returned, otherwise a list
of `~photutils.aperture.BoundingBox` is returned.
"""
positions = np.atleast_2d(self.positions)
x_delta, y_delta = self._xy_extents
xmin = positions[:, 0] - x_delta
xmax = positions[:, 0] + x_delta
ymin = positions[:, 1] - y_delta
ymax = positions[:, 1] + y_delta
bboxes = [BoundingBox.from_float(x0, x1, y0, y1)
for x0, x1, y0, y1 in zip(xmin, xmax, ymin, ymax)]
if self.isscalar:
return bboxes[0]
else:
return bboxes
@property
def _centered_edges(self):
"""
A list of ``(xmin, xmax, ymin, ymax)`` tuples, one for each
position, of the pixel edges after recentering the aperture at
the origin.
These pixel edges are used by the low-level `photutils.geometry`
functions.
"""
edges = []
for position, bbox in zip(np.atleast_2d(self.positions),
np.atleast_1d(self.bbox)):
xmin = bbox.ixmin - 0.5 - position[0]
xmax = bbox.ixmax - 0.5 - position[0]
ymin = bbox.iymin - 0.5 - position[1]
ymax = bbox.iymax - 0.5 - position[1]
edges.append((xmin, xmax, ymin, ymax))
return edges
@property
def area(self):
"""
The exact analytical area of the aperture shape.
Returns
-------
area : float
The aperture area.
"""
raise NotImplementedError('Needs to be implemented in a subclass.')
@abc.abstractmethod
def to_mask(self, method='exact', subpixels=5):
"""
Return a mask for the aperture.
Parameters
----------
method : {'exact', 'center', 'subpixel'}, optional
The method used to determine the overlap of the aperture on
the pixel grid. Not all options are available for all
aperture types. Note that the more precise methods are
generally slower. The following methods are available:
* ``'exact'`` (default):
                The exact fractional overlap of the aperture and
each pixel is calculated. The aperture weights will
contain values between 0 and 1.
* ``'center'``:
A pixel is considered to be entirely in or out of the
aperture depending on whether its center is in or out
of the aperture. The aperture weights will contain
values only of 0 (out) and 1 (in).
* ``'subpixel'``:
A pixel is divided into subpixels (see the
``subpixels`` keyword), each of which are considered
to be entirely in or out of the aperture depending
on whether its center is in or out of the aperture.
If ``subpixels=1``, this method is equivalent to
``'center'``. The aperture weights will contain values
between 0 and 1.
subpixels : int, optional
For the ``'subpixel'`` method, resample pixels by this
factor in each dimension. That is, each pixel is divided
into ``subpixels ** 2`` subpixels. This keyword is ignored
unless ``method='subpixel'``.
Returns
-------
mask : `~photutils.aperture.ApertureMask` or list of `~photutils.aperture.ApertureMask`
A mask for the aperture. If the aperture is scalar then a
single `~photutils.aperture.ApertureMask` is returned,
otherwise a list of `~photutils.aperture.ApertureMask` is
returned.
"""
raise NotImplementedError('Needs to be implemented in a subclass.')
def area_overlap(self, data, *, mask=None, method='exact', subpixels=5):
"""
Return the areas of the aperture masks that overlap with the
data, i.e., how many pixels are actually used to calculate each
sum.
Parameters
----------
data : array_like or `~astropy.units.Quantity`
The 2D array to multiply with the aperture mask.
mask : array_like (bool), optional
A boolean mask with the same shape as ``data`` where a
`True` value indicates the corresponding element of ``data``
is masked. Masked data are excluded from the area overlap.
method : {'exact', 'center', 'subpixel'}, optional
The method used to determine the overlap of the aperture on
the pixel grid. Not all options are available for all
aperture types. Note that the more precise methods are
generally slower. The following methods are available:
* ``'exact'`` (default):
                The exact fractional overlap of the aperture and
each pixel is calculated. The aperture weights will
contain values between 0 and 1.
* ``'center'``:
A pixel is considered to be entirely in or out of the
aperture depending on whether its center is in or out
of the aperture. The aperture weights will contain
values only of 0 (out) and 1 (in).
* ``'subpixel'``:
A pixel is divided into subpixels (see the
``subpixels`` keyword), each of which are considered
to be entirely in or out of the aperture depending
on whether its center is in or out of the aperture.
If ``subpixels=1``, this method is equivalent to
``'center'``. The aperture weights will contain values
between 0 and 1.
subpixels : int, optional
For the ``'subpixel'`` method, resample pixels by this
factor in each dimension. That is, each pixel is divided
into ``subpixels ** 2`` subpixels. This keyword is ignored
unless ``method='subpixel'``.
Returns
-------
areas : float or array_like
The overlapping areas between the aperture masks and the data.
"""
apermasks = self.to_mask(method=method, subpixels=subpixels)
if self.isscalar:
apermasks = (apermasks,)
if mask is not None:
mask = np.asarray(mask)
if mask.shape != data.shape:
raise ValueError('mask and data must have the same shape')
data = np.ones_like(data)
vals = [apermask.get_values(data, mask=mask) for apermask in apermasks]
# if the aperture does not overlap the data return np.nan
areas = [val.sum() if val.shape != (0,) else np.nan for val in vals]
if self.isscalar:
return areas[0]
else:
return areas
def _do_photometry(self, data, variance, method='exact', subpixels=5,
unit=None):
aperture_sums = []
aperture_sum_errs = []
masks = self.to_mask(method=method, subpixels=subpixels)
if self.isscalar:
masks = (masks,)
for apermask in masks:
values = apermask.get_values(data)
# if the aperture does not overlap the data return np.nan
aper_sum = values.sum() if values.shape != (0,) else np.nan
aperture_sums.append(aper_sum)
if variance is not None:
values = apermask.get_values(variance)
# if the aperture does not overlap the data return np.nan
aper_var = values.sum() if values.shape != (0,) else np.nan
aperture_sum_errs.append(np.sqrt(aper_var))
aperture_sums = np.array(aperture_sums)
aperture_sum_errs = np.array(aperture_sum_errs)
# apply units
if unit is not None:
aperture_sums = aperture_sums * unit # can't use *= w/old numpy
aperture_sum_errs = aperture_sum_errs * unit
return aperture_sums, aperture_sum_errs
def do_photometry(self, data, error=None, mask=None, method='exact',
subpixels=5):
"""
Perform aperture photometry on the input data.
Parameters
----------
data : array_like or `~astropy.units.Quantity` instance
The 2D array on which to perform photometry. ``data``
should be background subtracted.
error : array_like or `~astropy.units.Quantity`, optional
The pixel-wise Gaussian 1-sigma errors of the input
``data``. ``error`` is assumed to include *all* sources of
error, including the Poisson error of the sources (see
`~photutils.utils.calc_total_error`) . ``error`` must have
the same shape as the input ``data``.
mask : array_like (bool), optional
A boolean mask with the same shape as ``data`` where a
`True` value indicates the corresponding element of ``data``
is masked. Masked data are excluded from all calculations.
method : {'exact', 'center', 'subpixel'}, optional
The method used to determine the overlap of the aperture on
the pixel grid. Not all options are available for all
aperture types. Note that the more precise methods are
generally slower. The following methods are available:
* ``'exact'`` (default):
                The exact fractional overlap of the aperture and
each pixel is calculated. The aperture weights will
contain values between 0 and 1.
* ``'center'``:
A pixel is considered to be entirely in or out of the
aperture depending on whether its center is in or out
of the aperture. The aperture weights will contain
values only of 0 (out) and 1 (in).
* ``'subpixel'``:
A pixel is divided into subpixels (see the
``subpixels`` keyword), each of which are considered
to be entirely in or out of the aperture depending
on whether its center is in or out of the aperture.
If ``subpixels=1``, this method is equivalent to
``'center'``. The aperture weights will contain values
between 0 and 1.
subpixels : int, optional
For the ``'subpixel'`` method, resample pixels by this
factor in each dimension. That is, each pixel is divided
into ``subpixels ** 2`` subpixels. This keyword is ignored
unless ``method='subpixel'``.
Returns
-------
aperture_sums : `~numpy.ndarray` or `~astropy.units.Quantity`
The sums within each aperture.
aperture_sum_errs : `~numpy.ndarray` or `~astropy.units.Quantity`
The errors on the sums within each aperture.
Notes
-----
`RectangularAperture` and `RectangularAnnulus` photometry with
the "exact" method uses a subpixel approximation by subdividing
each data pixel by a factor of 1024 (``subpixels = 32``). For
rectangular aperture widths and heights in the range from
2 to 100 pixels, this subpixel approximation gives results
typically within 0.001 percent or better of the exact value.
The differences can be larger for smaller apertures (e.g.,
aperture sizes of one pixel or smaller). For such small sizes,
        it is recommended to set ``method='subpixel'`` with a larger
``subpixels`` size.
"""
# validate inputs
data, error = _validate_inputs(data, error)
# handle data, error, and unit inputs
# output data and error are ndarray without units
data, error, unit = _handle_units(data, error)
# compute variance and apply input mask
data, variance = _prepare_photometry_data(data, error, mask)
return self._do_photometry(data, variance, method=method,
subpixels=subpixels, unit=unit)
@staticmethod
def _make_annulus_path(patch_inner, patch_outer):
"""
Define a matplotlib annulus path from two patches.
This preserves the cubic Bezier curves (CURVE4) of the aperture
paths.
"""
import matplotlib.path as mpath
path_inner = patch_inner.get_path()
transform_inner = patch_inner.get_transform()
path_inner = transform_inner.transform_path(path_inner)
path_outer = patch_outer.get_path()
transform_outer = patch_outer.get_transform()
path_outer = transform_outer.transform_path(path_outer)
verts_inner = path_inner.vertices[:-1][::-1]
verts_inner = np.concatenate((verts_inner, [verts_inner[-1]]))
verts = np.vstack((path_outer.vertices, verts_inner))
codes = np.hstack((path_outer.codes, path_inner.codes))
return mpath.Path(verts, codes)
def _define_patch_params(self, origin=(0, 0), **kwargs):
"""
Define the aperture patch position and set any default
matplotlib patch keywords (e.g., ``fill=False``).
Parameters
----------
origin : array_like, optional
The ``(x, y)`` position of the origin of the displayed
image.
**kwargs : `dict`
Any keyword arguments accepted by
`matplotlib.patches.Patch`.
Returns
-------
xy_positions : `~numpy.ndarray`
The aperture patch positions.
patch_params : `dict`
Any keyword arguments accepted by
`matplotlib.patches.Patch`.
"""
xy_positions = deepcopy(np.atleast_2d(self.positions))
xy_positions[:, 0] -= origin[0]
xy_positions[:, 1] -= origin[1]
patch_params = self._default_patch_properties
patch_params.update(kwargs)
return xy_positions, patch_params
@abc.abstractmethod
def _to_patch(self, origin=(0, 0), **kwargs):
"""
Return a `~matplotlib.patches.patch` for the aperture.
Parameters
----------
origin : array_like, optional
The ``(x, y)`` position of the origin of the displayed
image.
**kwargs : `dict`
Any keyword arguments accepted by
`matplotlib.patches.Patch`.
Returns
-------
patch : `~matplotlib.patches.patch` or list of `~matplotlib.patches.patch`
A patch for the aperture. If the aperture is scalar then a
single `~matplotlib.patches.patch` is returned, otherwise a
list of `~matplotlib.patches.patch` is returned.
"""
raise NotImplementedError('Needs to be implemented in a subclass.')
def plot(self, axes=None, origin=(0, 0), **kwargs):
"""
Plot the aperture on a matplotlib `~matplotlib.axes.Axes`
instance.
Parameters
----------
axes : `matplotlib.axes.Axes` or `None`, optional
The matplotlib axes on which to plot. If `None`, then the
current `~matplotlib.axes.Axes` instance is used.
origin : array_like, optional
The ``(x, y)`` position of the origin of the displayed
image.
**kwargs : `dict`
Any keyword arguments accepted by
`matplotlib.patches.Patch`.
Returns
-------
patch : list of `~matplotlib.patches.Patch`
A list of matplotlib patches for the plotted aperture. The
patches can be used, for example, when adding a plot legend.
"""
import matplotlib.pyplot as plt
if axes is None:
axes = plt.gca()
patches = self._to_patch(origin=origin, **kwargs)
if self.isscalar:
patches = (patches,)
for patch in patches:
axes.add_patch(patch)
return patches
def _to_sky_params(self, wcs):
"""
Convert the pixel aperture parameters to those for a sky
aperture.
Parameters
----------
wcs : WCS object
A world coordinate system (WCS) transformation that
supports the `astropy shared interface for WCS
<https://docs.astropy.org/en/stable/wcs/wcsapi.html>`_
(e.g., `astropy.wcs.WCS`, `gwcs.wcs.WCS`).
Returns
-------
sky_params : `dict`
A dictionary of parameters for an equivalent sky aperture.
"""
sky_params = {}
xpos, ypos = np.transpose(self.positions)
sky_params['positions'] = skypos = wcs.pixel_to_world(xpos, ypos)
# Aperture objects require scalar shape parameters (e.g.,
# radius, a, b, theta, etc.), therefore we must calculate the
# pixel scale and angle at only a single sky position, which
# we take as the first aperture position. For apertures with
# multiple positions used with a WCS that contains distortions
# (e.g., a spatially-dependent pixel scale), this may lead to
        # unexpected results (e.g., results that are dependent on the
# order of the positions). There is no good way to fix this with
# the current Aperture API allowing multiple positions.
if not self.isscalar:
skypos = skypos[0]
_, pixscale, angle = _pixel_scale_angle_at_skycoord(skypos, wcs)
for param in self._params:
value = getattr(self, param)
if param == 'positions':
continue
elif param == 'theta':
# photutils aperture sky angles are defined as the PA of
# the semimajor axis (i.e., relative to the WCS latitude
# axis). region sky angles are defined relative to the WCS
# longitude axis.
value = (value * u.rad) - angle.to(u.rad)
else:
value = (value * u.pix * pixscale).to(u.arcsec)
sky_params[param] = value
return sky_params
@abc.abstractmethod
def to_sky(self, wcs):
"""
Convert the aperture to a `SkyAperture` object defined in
celestial coordinates.
Parameters
----------
wcs : WCS object
A world coordinate system (WCS) transformation that
supports the `astropy shared interface for WCS
<https://docs.astropy.org/en/stable/wcs/wcsapi.html>`_
(e.g., `astropy.wcs.WCS`, `gwcs.wcs.WCS`).
Returns
-------
aperture : `SkyAperture` object
A `SkyAperture` object.
"""
raise NotImplementedError('Needs to be implemented in a subclass.')
class SkyAperture(Aperture):
"""
Abstract base class for all apertures defined in celestial
coordinates.
"""
def _to_pixel_params(self, wcs):
"""
Convert the sky aperture parameters to those for a pixel
aperture.
Parameters
----------
wcs : WCS object
A world coordinate system (WCS) transformation that
supports the `astropy shared interface for WCS
<https://docs.astropy.org/en/stable/wcs/wcsapi.html>`_
(e.g., `astropy.wcs.WCS`, `gwcs.wcs.WCS`).
Returns
-------
pixel_params : `dict`
A dictionary of parameters for an equivalent pixel aperture.
"""
pixel_params = {}
xpos, ypos = wcs.world_to_pixel(self.positions)
pixel_params['positions'] = np.transpose((xpos, ypos))
# Aperture objects require scalar shape parameters (e.g.,
# radius, a, b, theta, etc.), therefore we must calculate the
# pixel scale and angle at only a single sky position, which
# we take as the first aperture position. For apertures with
# multiple positions used with a WCS that contains distortions
# (e.g., a spatially-dependent pixel scale), this may lead to
        # unexpected results (e.g., results that are dependent on the
# order of the positions). There is no good way to fix this with
# the current Aperture API allowing multiple positions.
if self.isscalar:
skypos = self.positions
else:
skypos = self.positions[0]
_, pixscale, angle = _pixel_scale_angle_at_skycoord(skypos, wcs)
for param in self._params:
value = getattr(self, param)
if param == 'positions':
continue
elif param == 'theta':
# photutils aperture sky angles are defined as the PA of
# the semimajor axis (i.e., relative to the WCS latitude
# axis). region sky angles are defined relative to the WCS
# longitude axis.
value = (value + angle).to(u.radian).value
else:
if value.unit.physical_type == 'angle':
value = (value / pixscale).to(u.pixel).value
else:
value = value.value
pixel_params[param] = value
return pixel_params
@abc.abstractmethod
def to_pixel(self, wcs):
"""
Convert the aperture to a `PixelAperture` object defined in
pixel coordinates.
Parameters
----------
wcs : WCS object
A world coordinate system (WCS) transformation that
supports the `astropy shared interface for WCS
<https://docs.astropy.org/en/stable/wcs/wcsapi.html>`_
(e.g., `astropy.wcs.WCS`, `gwcs.wcs.WCS`).
Returns
-------
aperture : `PixelAperture` object
A `PixelAperture` object.
"""
raise NotImplementedError('Needs to be implemented in a subclass.')
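# --- Illustrative usage (not part of this module) ---
# A minimal sketch of how a concrete PixelAperture subclass is typically used,
# assuming photutils.aperture.CircularAperture is available:
#
#     import numpy as np
#     from photutils.aperture import CircularAperture
#
#     data = np.ones((50, 50))
#     aper = CircularAperture([(25.0, 25.0), (10.0, 10.0)], r=3.0)
#     sums, _ = aper.do_photometry(data)        # 'exact' overlap, no error array supplied
#     masks = aper.to_mask(method='center')     # one ApertureMask per position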
|
python
|
# Copyright 2008-2018 pydicom authors. See LICENSE file for details.
"""Many point of entry for pydicom read and write functions"""
from pydicom.filereader import (dcmread, read_file, read_dicomdir)
from pydicom.filewriter import dcmwrite, write_file
|
python
|
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
class RecordSearch(BaseModel):
"""
Dao search
Attributes:
-----------
query:
The elasticsearch search query portion
aggregations:
The elasticsearch search aggregations
"""
query: Optional[Dict[str, Any]]
aggregations: Optional[Dict[str, Any]]
class RecordSearchResults(BaseModel):
"""
Dao search results
Attributes:
-----------
total: int
The total of query results
        records: List[Dict[str, Any]]
            List of records retrieved for the pagination configuration
aggregations: Optional[Dict[str, Dict[str, Any]]]
The query aggregations grouped by task. Optional
words: Optional[Dict[str, int]]
The words cloud aggregations
metadata: Optional[Dict[str, int]]
Metadata fields aggregations
"""
total: int
records: List[Dict[str, Any]]
aggregations: Optional[Dict[str, Dict[str, Any]]] = Field(default_factory=dict)
words: Optional[Dict[str, int]] = None
metadata: Optional[Dict[str, int]] = None
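if __name__ == "__main__":
    # Illustrative usage (a sketch, not part of the original module)
    search = RecordSearch(query={"match_all": {}})
    results = RecordSearchResults(total=1, records=[{"text": "hello"}])
    print(search.query)
    print(results.total, results.records)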
|
python
|
from typing import Tuple, Callable
from .template import Processor
from .concat import BatchConcat, BatchPool
from .denoise import Dada2SingleEnd, Dada2PairedEnd
from .importing import ImportSingleEndFastq, ImportPairedEndFastq
from .trimming import BatchTrimGalorePairedEnd, BatchTrimGaloreSingleEnd
class GenerateASVPairedEnd(Processor):
fq_dir: str
fq1_suffix: str
fq2_suffix: str
clip_r1_5_prime: int
clip_r2_5_prime: int
trimmed_fq_dir: str
feature_sequence_qza: str
feature_table_qza: str
def main(
self,
fq_dir: str,
fq1_suffix: str,
fq2_suffix: str,
clip_r1_5_prime: int,
clip_r2_5_prime: int) -> Tuple[str, str]:
        # Overridden by the concrete subclasses below; this base class only
        # defines the shared signature and the trimming() helper.
        return self.feature_table_qza, self.feature_sequence_qza
def trimming(self):
self.trimmed_fq_dir = BatchTrimGalorePairedEnd(self.settings).main(
fq_dir=self.fq_dir,
fq1_suffix=self.fq1_suffix,
fq2_suffix=self.fq2_suffix,
clip_r1_5_prime=self.clip_r1_5_prime,
clip_r2_5_prime=self.clip_r2_5_prime)
class GenerateASVConcatPairedEnd(GenerateASVPairedEnd):
concat_fq_dir: str
fq_suffix: str
single_end_seq_qza: str
def main(
self,
fq_dir: str,
fq1_suffix: str,
fq2_suffix: str,
clip_r1_5_prime: int,
clip_r2_5_prime: int) -> Tuple[str, str]:
self.fq_dir = fq_dir
self.fq1_suffix = fq1_suffix
self.fq2_suffix = fq2_suffix
self.clip_r1_5_prime = clip_r1_5_prime
self.clip_r2_5_prime = clip_r2_5_prime
self.trimming()
self.concat()
self.importing()
self.denoise()
return self.feature_table_qza, self.feature_sequence_qza
def concat(self):
self.concat_fq_dir, self.fq_suffix = BatchConcat(self.settings).main(
fq_dir=self.trimmed_fq_dir,
fq1_suffix=self.fq1_suffix,
fq2_suffix=self.fq2_suffix)
def importing(self):
self.single_end_seq_qza = ImportSingleEndFastq(self.settings).main(
fq_dir=self.concat_fq_dir,
fq_suffix=self.fq_suffix)
def denoise(self):
self.feature_table_qza, self.feature_sequence_qza = Dada2SingleEnd(self.settings).main(
demultiplexed_seq_qza=self.single_end_seq_qza)
class GenerateASVMergePairedEnd(GenerateASVPairedEnd):
paired_end_seq_qza: str
def main(
self,
fq_dir: str,
fq1_suffix: str,
fq2_suffix: str,
clip_r1_5_prime: int,
clip_r2_5_prime: int) -> Tuple[str, str]:
self.fq_dir = fq_dir
self.fq1_suffix = fq1_suffix
self.fq2_suffix = fq2_suffix
self.clip_r1_5_prime = clip_r1_5_prime
self.clip_r2_5_prime = clip_r2_5_prime
self.trimming()
self.importing()
self.denoise()
return self.feature_table_qza, self.feature_sequence_qza
def importing(self):
self.paired_end_seq_qza = ImportPairedEndFastq(self.settings).main(
fq_dir=self.trimmed_fq_dir,
fq1_suffix=self.fq1_suffix,
fq2_suffix=self.fq2_suffix)
def denoise(self):
self.feature_table_qza, self.feature_sequence_qza = Dada2PairedEnd(self.settings).main(
demultiplexed_seq_qza=self.paired_end_seq_qza)
class GenerateASVPoolPairedEnd(GenerateASVPairedEnd):
pooled_fq_dir: str
fq_suffix: str
single_end_seq_qza: str
def main(
self,
fq_dir: str,
fq1_suffix: str,
fq2_suffix: str,
clip_r1_5_prime: int,
clip_r2_5_prime: int) -> Tuple[str, str]:
self.fq_dir = fq_dir
self.fq1_suffix = fq1_suffix
self.fq2_suffix = fq2_suffix
self.clip_r1_5_prime = clip_r1_5_prime
self.clip_r2_5_prime = clip_r2_5_prime
self.trimming()
self.pool()
self.importing()
self.denoise()
return self.feature_table_qza, self.feature_sequence_qza
def pool(self):
self.pooled_fq_dir, self.fq_suffix = BatchPool(self.settings).main(
fq_dir=self.trimmed_fq_dir,
fq1_suffix=self.fq1_suffix,
fq2_suffix=self.fq2_suffix)
def importing(self):
self.single_end_seq_qza = ImportSingleEndFastq(self.settings).main(
fq_dir=self.pooled_fq_dir,
fq_suffix=self.fq_suffix)
def denoise(self):
self.feature_table_qza, self.feature_sequence_qza = Dada2SingleEnd(self.settings).main(
demultiplexed_seq_qza=self.single_end_seq_qza)
class FactoryGenerateASVPairedEnd(Processor):
MODE_TO_CLASS = {
'concat': GenerateASVConcatPairedEnd,
'merge': GenerateASVMergePairedEnd,
'pool': GenerateASVPoolPairedEnd
}
def main(self, paired_end_mode: str) -> Callable:
assert paired_end_mode in self.MODE_TO_CLASS.keys(), \
f'"{paired_end_mode}" is not a valid mode for GenerateASV'
_Class = self.MODE_TO_CLASS[paired_end_mode]
return _Class(self.settings).main
class GenerateASVSingleEnd(Processor):
fq_dir: str
fq_suffix: str
clip_5_prime: int
trimmed_fq_dir: str
single_end_seq_qza: str
feature_sequence_qza: str
feature_table_qza: str
def main(
self,
fq_dir: str,
fq_suffix: str,
clip_5_prime: int) -> Tuple[str, str]:
self.fq_dir = fq_dir
self.fq_suffix = fq_suffix
self.clip_5_prime = clip_5_prime
self.trimming()
self.importing()
self.denoise()
return self.feature_table_qza, self.feature_sequence_qza
def trimming(self):
self.trimmed_fq_dir = BatchTrimGaloreSingleEnd(self.settings).main(
fq_dir=self.fq_dir,
fq_suffix=self.fq_suffix,
clip_5_prime=self.clip_5_prime)
def importing(self):
self.single_end_seq_qza = ImportSingleEndFastq(self.settings).main(
fq_dir=self.trimmed_fq_dir,
fq_suffix=self.fq_suffix)
def denoise(self):
self.feature_table_qza, self.feature_sequence_qza = Dada2SingleEnd(self.settings).main(
demultiplexed_seq_qza=self.single_end_seq_qza)
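# Illustrative usage (a sketch; `settings` is whatever the Processor base
# class expects, and the paths/suffixes below are placeholders):
#
#     asv_main = FactoryGenerateASVPairedEnd(settings).main(paired_end_mode='merge')
#     feature_table_qza, feature_sequence_qza = asv_main(
#         fq_dir='fastq/',
#         fq1_suffix='_R1.fastq.gz',
#         fq2_suffix='_R2.fastq.gz',
#         clip_r1_5_prime=17,
#         clip_r2_5_prime=20)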
|
python
|
import personalnames.titles as titles
import bisect
# noinspection PyTypeChecker
def gen_initials(lastname, firstname, formats, title=None, post_nominal=None, no_ws=False):
"""
Generate the name formats with initials.
:param lastname: person's lastname
:param firstname: person's firstname
:param title: person's title
:param post_nominal: suffix, e.g. 'junior', or 'esq.'
:param formats: list of formats ['firstnamelastname', 'lastnamefirstname']
:param no_ws: add a form with no whitespace.
:return: de-duplicated list of names with initials.
"""
# Normalise whitespace to single space
lastname = normalise_whitespace(lastname)
parts = normalise_whitespace(firstname).split()
forms = []
for x in range(1, len(parts) + 1):
initials = [part[0:1] + "." for part in parts[0:x]]
initials += parts[x:]
if "firstnamelastname" in formats:
forms.append(" ".join([" ".join(initials), lastname]))
if title:
forms.append(" ".join([title, " ".join(initials), lastname]))
if "lastnamefirstname" in formats:
forms.append(", ".join([lastname, " ".join(initials)]))
if title:
forms.append(", ".join([lastname, title + " " + " ".join(initials)]))
for x in range(1, len(parts) + 1):
initials = [part[0:1] + "." for part in parts[1:x]]
initials += parts[x:]
if "firstnamelastname" in formats:
forms.append(" ".join([parts[0], " ".join(initials), lastname]))
if title:
forms.append(" ".join([title, parts[0], " ".join(initials), lastname]))
if "lastnamefirstname" in formats:
forms.append(lastname + ", " + " ".join([parts[0], " ".join(initials)]))
if title:
forms.append(
lastname + ", " + " ".join([title, parts[0], " ".join(initials)])
)
if post_nominal:
forms.extend([x + ", " + post_nominal for x in forms[:]])
if no_ws:
forms.extend([removewhitespace(x) for x in forms[:]])
return list(set(forms))
def parse_titles(parts):
title_parts = []
suffix_parts = []
nominal_parts = []
for part in parts:
if part.lower() in titles.prefixes:
title_parts.append(part)
elif part.lower() in titles.suffixes:
suffix_parts.append(part)
else:
nominal_parts.append(part)
return title_parts, nominal_parts, suffix_parts
def name_split(name, split_char=","):
"""
Split a name into a list of name parts (not categorised, just an ordered list).
Retain commas for later use in splitting the list into surname and forename parts.
:param name: string for personal name
:param split_char: character to split on (default to comma)
:return: list of strings, including commas.
"""
name_list = []
split_split = name.split(split_char)
    for split_item in split_split[:-1]:
        for x in split_item.split():
            name_list.append(normalise_whitespace(x))
        name_list.append(split_char)
    for x in split_split[-1].split():
        name_list.append(normalise_whitespace(x))
return name_list
def name_parts(name, split_c=","):
"""
TO DO: handle case with multiple commas (if this is a genuine case)
:param name:
:param split_c:
:return:
"""
n_parts = name_split(name, split_char=split_c)
title, personal_name, suffix = parse_titles(n_parts)
if personal_name[-1] == split_c: # case where multiple commas in name, or only comma is a post-nominal, e.g. Esq.
n = personal_name[:-1]
else:
n = personal_name
if split_c in n:
lastname = whitespace_list(
n[: bisect.bisect(n, split_c) - 1]
)
firstname = whitespace_list(
n[bisect.bisect(n, split_c):]
)
else:
firstname = whitespace_list(n[:-1])
lastname = whitespace_list([n[-1]])
title = whitespace_list(title)
suffix = whitespace_list(suffix)
return title, firstname, lastname, suffix
def name_initials(name, name_formats=None, non_ws=False):
"""
Generate a set of initials from a name provided as a string.
:param name: string, e.g. Dr. Martin Luther King
:param name_formats: list of formats for the name.
:param non_ws: no whitespace form
:return: list of formats including initials
"""
if name_formats is None:
name_formats = ["firstnamelastname", "lastnamefirstname"]
honorific, forename, surname, suffix = name_parts(name)
initials = gen_initials(
lastname=surname, firstname=forename, title=honorific, post_nominal=suffix, formats=name_formats, no_ws=non_ws
)
return [normalise_whitespace(x) for x in initials]
def whitespace_list(text_list):
return normalise_whitespace(" ".join(text_list))
def normalise_whitespace(text):
"""
Normalise the whitespace in the string
:param text: string
:return: string with whitespace normalised to single space
"""
return " ".join(text.strip().split())
def removewhitespace(text):
"""
Remove the whitespace in the string
:param text: string
:return: string with no whitespace
"""
return "".join(text.strip().split())
|
python
|
from typing import List, Union
def is_valid(sides: List[Union[float,int]]) -> bool:
[x, y, z] = sides
return x > 0 and y > 0 and z > 0 and x + y > z
def equilateral(sides: List[Union[float,int]]) -> bool:
sides.sort()
return is_valid(sides) and sides.count(sides[0]) == 3
def isosceles(sides: List[Union[float,int]]) -> bool:
sides.sort()
    return is_valid(sides) and (sides[0] == sides[1] or sides[1] == sides[2])
def scalene(sides: List[Union[float,int]]) -> bool:
sides.sort()
return is_valid(sides) and sides[0] != sides[1] and sides[1] != sides[2]
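if __name__ == "__main__":
    # Illustrative checks (a sketch, not part of the original module)
    assert equilateral([2, 2, 2])
    assert isosceles([3, 3, 4])
    assert scalene([3, 4, 5])
    assert not equilateral([0, 0, 0])   # fails the is_valid check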
|
python
|
import numpy as np
import scipy.sparse as sparse
import scipy.sparse.linalg as linalg
from .mesh import UniformMesh
from .laminate_model import LaminateModel
from .laminate_dof import LaminateDOF
class LaminateFEM(object):
def __init__(self, material, cantilever):
self.cantilever = cantilever
self.mesh = UniformMesh(cantilever.topology)
self.dof = LaminateDOF(self.mesh)
self.model = LaminateModel(material, cantilever.a, cantilever.b)
self.a = cantilever.a
self.b = cantilever.b
self.assemble()
def get_mass_matrix(self, free=False):
muu = self.muu.tocsr()
if free is False:
return muu
return muu[self.dof.free_dofs, :][:, self.dof.free_dofs]
def get_stiffness_matrix(self, free=False):
kuu = self.kuu.tocsr()
if free is False:
return kuu
return kuu[self.dof.free_dofs, :][:, self.dof.free_dofs]
def get_piezoelectric_matrix(self, free=False):
kuv = self.kuv.tocsr()
if free is False:
return kuv
return kuv[self.dof.free_dofs, :]
def get_capacitance_matrix(self):
return self.kvv
def modal_analysis(self, n_modes):
"""The return value (w) are the eigenvalues and the return value (v)
are the eigenvectors.
"""
m = self.muu.tocsc()[self.dof.free_dofs, :][:, self.dof.free_dofs]
k = self.kuu.tocsc()[self.dof.free_dofs, :][:, self.dof.free_dofs]
w, v = linalg.eigsh(k, k=n_modes, M=m, sigma=0, which='LM')
vall = np.zeros((self.dof.n_mdof, n_modes))
vall[self.dof.free_dofs, :] = v
return w, v, vall
def assemble(self):
"""The mass, stiffness, piezoelectric, and capacitance matricies are
assembled in this function.
"""
muue = self.model.get_mass_element()
kuue = self.model.get_stiffness_element()
kuve = self.model.get_piezoelectric_element()
kvve = self.model.get_capacitance_element()
nm, ne = kuve.shape
k_num = nm * nm * self.mesh.n_elem
p_num = nm * ne * self.mesh.n_elem
c_num = ne * ne * self.mesh.n_elem
k_index = list(np.ndindex(nm, nm))
p_index = list(np.ndindex(nm, ne))
c_index = list(np.ndindex(ne, ne))
k_row = np.zeros(k_num)
k_col = np.zeros(k_num)
k_val = np.zeros(k_num)
m_val = np.zeros(k_num)
p_row = np.zeros(p_num)
p_col = np.zeros(p_num)
p_val = np.zeros(p_num)
c_row = np.zeros(c_num)
c_col = np.zeros(c_num)
c_val = np.zeros(c_num)
k_ntriplet = 0
p_ntriplet = 0
c_ntriplet = 0
for ni, e in enumerate(self.dof.dof_elements):
for ii, jj in k_index:
k_row[k_ntriplet] = e.mechanical_dof[ii]
k_col[k_ntriplet] = e.mechanical_dof[jj]
k_val[k_ntriplet] = kuue[ii, jj]
m_val[k_ntriplet] = muue[ii, jj]
k_ntriplet += 1
for ii, jj in p_index:
p_row[p_ntriplet] = e.mechanical_dof[ii]
p_col[p_ntriplet] = e.electrical_dof[jj]
p_val[p_ntriplet] = kuve[ii, jj]
p_ntriplet += 1
for ii, jj in c_index:
c_row[c_ntriplet] = e.electrical_dof[ii]
c_col[c_ntriplet] = e.electrical_dof[jj]
c_val[c_ntriplet] = kvve[ii, jj]
c_ntriplet += 1
muu_shape = (self.dof.n_mdof, self.dof.n_mdof)
kuu_shape = (self.dof.n_mdof, self.dof.n_mdof)
kuv_shape = (self.dof.n_mdof, self.dof.n_edof)
kvv_shape = (self.dof.n_edof, self.dof.n_edof)
self.muu = sparse.coo_matrix((m_val, (k_row, k_col)), shape=muu_shape)
self.kuu = sparse.coo_matrix((k_val, (k_row, k_col)), shape=kuu_shape)
self.kuv = sparse.coo_matrix((p_val, (p_row, p_col)), shape=kuv_shape)
self.kvv = sparse.coo_matrix((c_val, (c_row, c_col)), shape=kvv_shape)
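# Note on modal_analysis: the eigenproblem solved is K v = w M v, so each
# eigenvalue w is a squared angular frequency. A sketch of converting the
# results to natural frequencies in Hz (material/cantilever objects assumed):
#
#     # fem = LaminateFEM(material, cantilever)
#     # w, v, vall = fem.modal_analysis(n_modes=4)
#     # freqs_hz = np.sqrt(w) / (2.0 * np.pi)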
|
python
|
"""Falcon benchmarks"""
from bench import main # NOQA
|
python
|
import os
from pathlib import Path
import pyspark.sql.types as st
from pyspark.sql.types import Row
from pyspark.ml.regression import GBTRegressor
from pyspark.sql import DataFrame, SparkSession
spark = SparkSession.builder \
.appName("karl02") \
.getOrCreate()
datadir: str = os.getenv("DATADIR")
if datadir is None:
raise ValueError("Environment variable DATADIR must be defined")
print(f"datadir = '{datadir}'")
schema = st.StructType([
st.StructField('year', st.IntegerType(), True),
st.StructField('month', st.IntegerType(), True),
st.StructField('dn', st.IntegerType(), True),
st.StructField('wday', st.IntegerType(), True),
st.StructField('snap', st.IntegerType(), True),
st.StructField('dept_id', st.StringType(), True),
st.StructField('item_id', st.StringType(), True),
st.StructField('store_id', st.StringType(), True),
st.StructField('sales', st.DoubleType(), True),
st.StructField('flag_ram', st.IntegerType(), True),
st.StructField('Sales_Pred', st.DoubleType(), True)
])
p = str(Path(datadir, "Sales5_Ab2011_InklPred.csv"))
print(f"Reading: '{p}'")
train: DataFrame = spark.read.csv(p, header='true', schema=schema)
rows = train.rdd.take(5)
for r in rows:
dn = r["sales"]
d = r.asDict()
v = list(d.values())
print(v)
print(type(v))
print("------------------------- R E A D Y --------------------------------")
def train(df: DataFrame):
def astraining(row: Row) -> Row:
df = row.asDict()
del df['Sales_Pred']
del df['sales']
sales = row.asDict()['sales']
return Row(label=sales, features=list(df.values()))
    t3 = df.rdd \
        .filter(lambda r: r["sales"] is not None) \
        .map(astraining)
gbt = GBTRegressor(maxIter=10)
df = spark.createDataFrame(t3)
df.show()
gbt.fit(df)
print("----------- after fit ------------")
|
python
|
###############################################################################
# Imports
###############################################################################
from layer import Layer
import numpy as np
class HiddenLayer(Layer):
def setDownstreamSum(self, w, delta):
"""Sum the product of w and delta for the next layer
Needed for calculating delta for this layer
Parameters
----------
w : np.ndarray
Matrix of weight values for the next layer
delta : np.ndarray
Matrix of delta values for the next layer
"""
self.downstream_sum = np.matmul(w[:,:-1].transpose(), delta)
def setDelta(self):
"""Calculate delta for the hidden layer
"""
# Derivative of sigmoid using last forward pass
output_der = self.y * (1 - self.y)
self.delta = output_der * self.downstream_sum
if __name__ == '__main__':
print('Warning: Tests for this file are deprecated')
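# Illustrative check of the delta computation with plain numpy (the Layer
# base class is defined elsewhere, so this only mirrors the math used above):
#
#     w_next = np.array([[0.1, 0.2, 0.5],
#                        [0.3, 0.4, 0.6]])       # 2 downstream units, last column = bias
#     delta_next = np.array([[0.05], [0.10]])
#     downstream_sum = np.matmul(w_next[:, :-1].T, delta_next)
#     y = np.array([[0.7], [0.3]])               # this layer's sigmoid outputs
#     delta = y * (1 - y) * downstream_sum       # same formula as setDelta()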
|
python
|
# -*- coding: utf-8 -*-
'''
@Time : 2021/8/30
@Author : Yanyuxiang
@Email : [email protected]
@FileName: send_message.py
@Software: PyCharm
'''
import itchat
def main():
itchat.auto_login()
friends = itchat.get_friends(update=True)
    # itchat.send('这是来自python程序的一条消息', toUserName='filehelper')  # "This is a message from the Python program"
return
if __name__ == '__main__':
main()
|
python
|
from . import scheduler
from app.utils.refresh_mat_views import refresh_all_mat_views
from app.utils.constants import COUNTRIES
# 5/9 = 5am, 2pm, and 11pm
# https://cron.help/#0_5/9_*_*_*
@scheduler.task("cron", minute="0", hour="5")
def run_task_ALL():
with scheduler.app.app_context():
from app.service.routes import call_loader
for country in COUNTRIES:
call_loader(
country=country,
search=dict(ad_reached_countries=[country], ad_active_status="ALL"),
)
@scheduler.task("cron", minute="35", hour="*")
def refresh_views():
with scheduler.app.app_context():
refresh_all_mat_views(False)
|
python
|
import torch
from torch.utils.data import DataLoader
from torch import optim
from torch.optim.lr_scheduler import StepLR
from snf.layers.flowsequential import FlowSequential
from snf.layers.selfnorm import SelfNormConv, SelfNormFC
from snf.train.losses import NegativeGaussianLoss
from snf.train.experiment import Experiment
def create_model(data_size, layer='conv'):
layers = []
c_in = data_size[0]
h = data_size[1]
w = data_size[2]
if layer == 'fc':
size = c_in * h * w
layers.append(SelfNormFC(size, size, bias=True,
sym_recon_grad=False,
only_R_recon=False))
model = FlowSequential(NegativeGaussianLoss(size=(size,)), *layers)
elif layer == 'conv':
layers.append(SelfNormConv(c_in, c_in, (3,3), bias=True,
stride=1, padding=1,
sym_recon_grad=False,
only_R_recon=False))
model = FlowSequential(NegativeGaussianLoss(size=data_size), *layers)
return model
def load_data(batch_size=100, im_size=(1,28,28), n_train=60_000, n_val=10_000, n_test=10_000):
trainx = torch.randn(n_train, *im_size)
testx = torch.randn(n_test, *im_size)
trainy = torch.zeros(n_train)
testy = torch.zeros(n_test)
trainvalset = torch.utils.data.TensorDataset(trainx, trainy)
testset = torch.utils.data.TensorDataset(testx, testy)
trainset = torch.utils.data.Subset(trainvalset, range(0, n_train - n_val))
valset = torch.utils.data.Subset(trainvalset, range(n_train - n_val, n_train))
train_loader = DataLoader(trainset, batch_size=batch_size)
val_loader = DataLoader(valset, batch_size=batch_size)
test_loader = DataLoader(testset, batch_size=batch_size)
return train_loader, val_loader, test_loader
def run_timing_experiment(name, snf_name, config, sz, m, results):
train_loader, val_loader, test_loader = load_data(batch_size=config['batch_size'], im_size=sz,
n_train=50_000, n_val=100, n_test=100)
model = create_model(data_size=sz, layer=m).to('cuda')
optimizer = optim.Adam(model.parameters(), lr=config['lr'], betas=(0.9, 0.999))
scheduler = StepLR(optimizer, step_size=1, gamma=1.0)
experiment = Experiment(model, train_loader, val_loader, test_loader,
optimizer, scheduler, **config)
experiment.run()
mean_time = experiment.summary['Batch Time Mean']
std_time = experiment.summary['Batch Time Std']
print(f"{name}: {mean_time} +/- {std_time}")
results[f'{m} {snf_name}']['n_params'].append(sz[0] * sz[1] * sz[2])
results[f'{m} {snf_name}']['mean'].append(mean_time)
results[f'{m} {snf_name}']['std'].append(std_time)
return results
def main():
image_sizes = [(1, x*32, 1) for x in range(1, 130, 3)]
model_type = ['fc', 'conv']
self_normalized = [True, False]
name = 'Timing Experiment '
results = {}
for m in model_type:
for snf in self_normalized:
if snf:
snf_name = 'SNF'
else:
snf_name = 'Reg'
results[f'{m} {snf_name}'] = {
'n_params': [],
'mean': [],
'std': []
}
for sz in image_sizes:
name = f'Timing Experiment {m} {snf_name} {sz}'
config = {
'name': name,
'eval_epochs': 1,
'sample_epochs': 1000,
'log_interval': 10000,
'lr': 1e-4,
'batch_size': 128,
'modified_grad': snf,
'add_recon_grad': snf,
'sym_recon_grad': False,
'only_R_recon': False,
'actnorm': False,
'split_prior': False,
'activation': 'None',
'log_timing': True,
'epochs': 10
}
results = run_timing_experiment(name, snf_name, config, sz, m, results)
print(results[f'{m} {snf_name}'])
print(results)
print(results)
print(results)
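# Assumed entry point (not present in the original script): the timing sweep
# is launched by calling main(), e.g.
#
#     # if __name__ == '__main__':
#     #     main()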
|
python
|
from VcfNormalize import VcfNormalize
import argparse
import os
#get command line arguments
parser = argparse.ArgumentParser(description='Script to run GATK VariantsToAllelicPrimitives in order to decompose MNPs into more basic/primitive alleles')
parser.add_argument('--gatk_folder', type=str, required=True, help='Folder containing GATK jar file' )
parser.add_argument('--bgzip_folder', type=str, required=True, help='Folder containing bgzip' )
parser.add_argument('--vcf', type=str, required=True, help='Path to the VCF file that will be analysed' )
parser.add_argument('--outprefix', type=str, required=True, help='Prefix for output file' )
parser.add_argument('--reference', type=str, required=True, help='Path to the reference Fasta file' )
parser.add_argument('--compress', type=str, required=False, help='Compress the output file' )
args = parser.parse_args()
if __name__ == '__main__':
vcfallprim = VcfNormalize(vcf=args.vcf,gatk_folder=args.gatk_folder,bgzip_folder=args.bgzip_folder)
vcfallprim.run_gatk_VariantsToAllelicPrimitives(outprefix=args.outprefix,reference=args.reference,compress=args.compress)
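# Example invocation (script name and paths are placeholders):
#
#   python run_variants_to_allelic_primitives.py \
#       --gatk_folder /path/to/gatk/ \
#       --bgzip_folder /path/to/bgzip/ \
#       --vcf input.vcf.gz \
#       --outprefix decomposed \
#       --reference reference.fa \
#       --compress True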
|
python
|
# This file was automatically created by FeynRules 2.3.36
# Mathematica version: 11.3.0 for Linux x86 (64-bit) (March 7, 2018)
# Date: Wed 24 Feb 2021 15:52:48
from object_library import all_couplings, Coupling
from function_library import complexconjugate, re, im, csc, sec, acsc, asec, cot
GC_1 = Coupling(name = 'GC_1',
value = '-(ee*complex(0,1))/3.',
order = {'QED':1})
GC_2 = Coupling(name = 'GC_2',
value = '(2*ee*complex(0,1))/3.',
order = {'QED':1})
GC_3 = Coupling(name = 'GC_3',
value = '-(ee*complex(0,1))',
order = {'QED':1})
GC_4 = Coupling(name = 'GC_4',
value = 'ee*complex(0,1)',
order = {'QED':1})
GC_5 = Coupling(name = 'GC_5',
value = 'ee**2*complex(0,1)',
order = {'QED':2})
GC_6 = Coupling(name = 'GC_6',
value = '2*ee**2*complex(0,1)',
order = {'QED':2})
GC_7 = Coupling(name = 'GC_7',
value = '-ee**2/(2.*cw)',
order = {'QED':2})
GC_8 = Coupling(name = 'GC_8',
value = 'ee**2/(2.*cw)',
order = {'QED':2})
GC_9 = Coupling(name = 'GC_9',
value = '-(cab*ee**2*complex(0,1))/(2.*cw)',
order = {'QED':2})
GC_10 = Coupling(name = 'GC_10',
value = '-(cphi*ee**2)/(2.*cw)',
order = {'QED':2})
GC_11 = Coupling(name = 'GC_11',
value = '(cphi*ee**2)/(2.*cw)',
order = {'QED':2})
GC_12 = Coupling(name = 'GC_12',
value = '-(cab*cphi*ee**2*complex(0,1))/(2.*cw)',
order = {'QED':2})
GC_13 = Coupling(name = 'GC_13',
value = 'cphi*fl1x2*complex(0,1) - cphi*fl2x1*complex(0,1)',
order = {'ZEE':1})
GC_14 = Coupling(name = 'GC_14',
value = '-(cphi*fl1x2*complex(0,1)) + cphi*fl2x1*complex(0,1)',
order = {'ZEE':1})
GC_15 = Coupling(name = 'GC_15',
value = 'cphi*fl1x3*complex(0,1) - cphi*fl3x1*complex(0,1)',
order = {'ZEE':1})
GC_16 = Coupling(name = 'GC_16',
value = '-(cphi*fl1x3*complex(0,1)) + cphi*fl3x1*complex(0,1)',
order = {'ZEE':1})
GC_17 = Coupling(name = 'GC_17',
value = 'cphi*fl2x3*complex(0,1) - cphi*fl3x2*complex(0,1)',
order = {'ZEE':1})
GC_18 = Coupling(name = 'GC_18',
value = '-(cphi*fl2x3*complex(0,1)) + cphi*fl3x2*complex(0,1)',
order = {'ZEE':1})
GC_19 = Coupling(name = 'GC_19',
value = '-G',
order = {'QCD':1})
GC_20 = Coupling(name = 'GC_20',
value = 'complex(0,1)*G',
order = {'QCD':1})
GC_21 = Coupling(name = 'GC_21',
value = 'complex(0,1)*G**2',
order = {'QCD':2})
GC_22 = Coupling(name = 'GC_22',
value = '-(complex(0,1)*I1a11)',
order = {'QED':1})
GC_23 = Coupling(name = 'GC_23',
value = '-(complex(0,1)*I1a12)',
order = {'QED':1})
GC_24 = Coupling(name = 'GC_24',
value = '-(complex(0,1)*I1a13)',
order = {'QED':1})
GC_25 = Coupling(name = 'GC_25',
value = '-(complex(0,1)*I1a21)',
order = {'QED':1})
GC_26 = Coupling(name = 'GC_26',
value = '-(complex(0,1)*I1a22)',
order = {'QED':1})
GC_27 = Coupling(name = 'GC_27',
value = '-(complex(0,1)*I1a23)',
order = {'QED':1})
GC_28 = Coupling(name = 'GC_28',
value = '-(complex(0,1)*I1a31)',
order = {'QED':1})
GC_29 = Coupling(name = 'GC_29',
value = '-(complex(0,1)*I1a32)',
order = {'QED':1})
GC_30 = Coupling(name = 'GC_30',
value = '-(complex(0,1)*I1a33)',
order = {'QED':1})
GC_31 = Coupling(name = 'GC_31',
value = 'complex(0,1)*I2a11',
order = {'QED':1})
GC_32 = Coupling(name = 'GC_32',
value = 'complex(0,1)*I2a12',
order = {'QED':1})
GC_33 = Coupling(name = 'GC_33',
value = 'complex(0,1)*I2a13',
order = {'QED':1})
GC_34 = Coupling(name = 'GC_34',
value = 'complex(0,1)*I2a21',
order = {'QED':1})
GC_35 = Coupling(name = 'GC_35',
value = 'complex(0,1)*I2a22',
order = {'QED':1})
GC_36 = Coupling(name = 'GC_36',
value = 'complex(0,1)*I2a23',
order = {'QED':1})
GC_37 = Coupling(name = 'GC_37',
value = 'complex(0,1)*I2a31',
order = {'QED':1})
GC_38 = Coupling(name = 'GC_38',
value = 'complex(0,1)*I2a32',
order = {'QED':1})
GC_39 = Coupling(name = 'GC_39',
value = 'complex(0,1)*I2a33',
order = {'QED':1})
GC_40 = Coupling(name = 'GC_40',
value = 'complex(0,1)*I3a11',
order = {'QED':1})
GC_41 = Coupling(name = 'GC_41',
value = 'complex(0,1)*I3a12',
order = {'QED':1})
GC_42 = Coupling(name = 'GC_42',
value = 'complex(0,1)*I3a13',
order = {'QED':1})
GC_43 = Coupling(name = 'GC_43',
value = 'complex(0,1)*I3a21',
order = {'QED':1})
GC_44 = Coupling(name = 'GC_44',
value = 'complex(0,1)*I3a22',
order = {'QED':1})
GC_45 = Coupling(name = 'GC_45',
value = 'complex(0,1)*I3a23',
order = {'QED':1})
GC_46 = Coupling(name = 'GC_46',
value = 'complex(0,1)*I3a31',
order = {'QED':1})
GC_47 = Coupling(name = 'GC_47',
value = 'complex(0,1)*I3a32',
order = {'QED':1})
GC_48 = Coupling(name = 'GC_48',
value = 'complex(0,1)*I3a33',
order = {'QED':1})
GC_49 = Coupling(name = 'GC_49',
value = '-(complex(0,1)*I4a11)',
order = {'QED':1})
GC_50 = Coupling(name = 'GC_50',
value = '-(complex(0,1)*I4a12)',
order = {'QED':1})
GC_51 = Coupling(name = 'GC_51',
value = '-(complex(0,1)*I4a13)',
order = {'QED':1})
GC_52 = Coupling(name = 'GC_52',
value = '-(complex(0,1)*I4a21)',
order = {'QED':1})
GC_53 = Coupling(name = 'GC_53',
value = '-(complex(0,1)*I4a22)',
order = {'QED':1})
GC_54 = Coupling(name = 'GC_54',
value = '-(complex(0,1)*I4a23)',
order = {'QED':1})
GC_55 = Coupling(name = 'GC_55',
value = '-(complex(0,1)*I4a31)',
order = {'QED':1})
GC_56 = Coupling(name = 'GC_56',
value = '-(complex(0,1)*I4a32)',
order = {'QED':1})
GC_57 = Coupling(name = 'GC_57',
value = '-(complex(0,1)*I4a33)',
order = {'QED':1})
GC_58 = Coupling(name = 'GC_58',
value = '-(complex(0,1)*lam1)',
order = {'QED':2})
GC_59 = Coupling(name = 'GC_59',
value = '-2*complex(0,1)*lam1',
order = {'QED':2})
GC_60 = Coupling(name = 'GC_60',
value = '-3*complex(0,1)*lam1',
order = {'QED':2})
GC_61 = Coupling(name = 'GC_61',
value = '-3*complex(0,1)*lam2',
order = {'QED':2})
GC_62 = Coupling(name = 'GC_62',
value = '-(complex(0,1)*lam3)',
order = {'QED':2})
GC_63 = Coupling(name = 'GC_63',
value = '-(complex(0,1)*lam3) - complex(0,1)*lam4 - complex(0,1)*lam5',
order = {'QED':2})
GC_64 = Coupling(name = 'GC_64',
value = '-2*cphi**2*complex(0,1)*lam5',
order = {'QED':2})
GC_65 = Coupling(name = 'GC_65',
value = '-(cphi*complex(0,1)*lam4)/2. - (cphi*complex(0,1)*lam5)/2.',
order = {'QED':2})
GC_66 = Coupling(name = 'GC_66',
value = '(cab*cphi*lam4)/2. - (cab*cphi*lam5)/2.',
order = {'QED':2})
GC_67 = Coupling(name = 'GC_67',
value = '-(cab*cphi*lam4)/2. + (cab*cphi*lam5)/2.',
order = {'QED':2})
GC_68 = Coupling(name = 'GC_68',
value = '-(complex(0,1)*lam6)',
order = {'QED':2})
GC_69 = Coupling(name = 'GC_69',
value = '-3*complex(0,1)*lam6',
order = {'QED':2})
GC_70 = Coupling(name = 'GC_70',
value = '-(cphi*complex(0,1)*lam6)',
order = {'QED':2})
GC_71 = Coupling(name = 'GC_71',
value = '-2*cphi*complex(0,1)*lam6',
order = {'QED':2})
GC_72 = Coupling(name = 'GC_72',
value = '-3*complex(0,1)*lam7',
order = {'QED':2})
GC_73 = Coupling(name = 'GC_73',
value = '-(cphi*complex(0,1)*lam7)',
order = {'QED':2})
GC_74 = Coupling(name = 'GC_74',
value = '(ee**2*complex(0,1)*sab)/(2.*cw)',
order = {'QED':2})
GC_75 = Coupling(name = 'GC_75',
value = '-(cphi*ee**2*complex(0,1)*sab)/(2.*cw)',
order = {'QED':2})
GC_76 = Coupling(name = 'GC_76',
value = '(cphi*lam4*sab)/2. - (cphi*lam5*sab)/2.',
order = {'QED':2})
GC_77 = Coupling(name = 'GC_77',
value = '-(cphi*lam4*sab)/2. + (cphi*lam5*sab)/2.',
order = {'QED':2})
GC_78 = Coupling(name = 'GC_78',
value = '-(cab**2*complex(0,1)*lam3) + 2*cab*complex(0,1)*lam6*sab - complex(0,1)*lam1*sab**2',
order = {'QED':2})
GC_79 = Coupling(name = 'GC_79',
value = '-(cab**2*complex(0,1)*lam3) - cab**2*complex(0,1)*lam4 + cab**2*complex(0,1)*lam5 + 2*cab*complex(0,1)*lam6*sab - complex(0,1)*lam1*sab**2',
order = {'QED':2})
GC_80 = Coupling(name = 'GC_80',
value = '-(cab**2*complex(0,1)*lam3) - cab**2*complex(0,1)*lam4 + cab**2*complex(0,1)*lam5 - 2*cab*complex(0,1)*lam7*sab - complex(0,1)*lam2*sab**2',
order = {'QED':2})
GC_81 = Coupling(name = 'GC_81',
value = '-(cab**2*complex(0,1)*lam1) - 2*cab*complex(0,1)*lam6*sab - complex(0,1)*lam3*sab**2',
order = {'QED':2})
GC_82 = Coupling(name = 'GC_82',
value = '-(cab**2*complex(0,1)*lam5) + cab*complex(0,1)*lam6*sab - cab*complex(0,1)*lam7*sab + complex(0,1)*lam5*sab**2',
order = {'QED':2})
GC_83 = Coupling(name = 'GC_83',
value = '-(cab**2*complex(0,1)*lam1) - 2*cab*complex(0,1)*lam6*sab - complex(0,1)*lam3*sab**2 - complex(0,1)*lam4*sab**2 + complex(0,1)*lam5*sab**2',
order = {'QED':2})
GC_84 = Coupling(name = 'GC_84',
value = '-(cab**2*complex(0,1)*lam2) + 2*cab*complex(0,1)*lam7*sab - complex(0,1)*lam3*sab**2 - complex(0,1)*lam4*sab**2 + complex(0,1)*lam5*sab**2',
order = {'QED':2})
GC_85 = Coupling(name = 'GC_85',
value = '-(cab**2*cphi*complex(0,1)*lam4)/2. - (cab**2*cphi*complex(0,1)*lam5)/2. + cab*cphi*complex(0,1)*lam6*sab - cab*cphi*complex(0,1)*lam7*sab + (cphi*complex(0,1)*lam4*sab**2)/2. + (cphi*complex(0,1)*lam5*sab**2)/2.',
order = {'QED':2})
GC_86 = Coupling(name = 'GC_86',
value = '-(cab**2*complex(0,1)*lam7) + 2*cab*complex(0,1)*lam5*sab - complex(0,1)*lam6*sab**2',
order = {'QED':2})
GC_87 = Coupling(name = 'GC_87',
value = '-(cab**2*complex(0,1)*lam6) + cab*complex(0,1)*lam1*sab - cab*complex(0,1)*lam3*sab + complex(0,1)*lam6*sab**2',
order = {'QED':2})
GC_88 = Coupling(name = 'GC_88',
value = '-(cab**2*complex(0,1)*lam6) + cab*complex(0,1)*lam1*sab - cab*complex(0,1)*lam3*sab - cab*complex(0,1)*lam4*sab + cab*complex(0,1)*lam5*sab + complex(0,1)*lam6*sab**2',
order = {'QED':2})
GC_89 = Coupling(name = 'GC_89',
value = '-(cab**2*cphi*complex(0,1)*lam7) + cab*cphi*complex(0,1)*lam4*sab + cab*cphi*complex(0,1)*lam5*sab - cphi*complex(0,1)*lam6*sab**2',
order = {'QED':2})
GC_90 = Coupling(name = 'GC_90',
value = '-(cab**2*complex(0,1)*lam6) - 2*cab*complex(0,1)*lam5*sab - complex(0,1)*lam7*sab**2',
order = {'QED':2})
GC_91 = Coupling(name = 'GC_91',
value = '-(cab**2*complex(0,1)*lam7) - cab*complex(0,1)*lam2*sab + cab*complex(0,1)*lam3*sab + cab*complex(0,1)*lam4*sab - cab*complex(0,1)*lam5*sab + complex(0,1)*lam7*sab**2',
order = {'QED':2})
GC_92 = Coupling(name = 'GC_92',
value = '-(cab**2*cphi*complex(0,1)*lam6) - cab*cphi*complex(0,1)*lam4*sab - cab*cphi*complex(0,1)*lam5*sab - cphi*complex(0,1)*lam7*sab**2',
order = {'QED':2})
GC_93 = Coupling(name = 'GC_93',
value = '-3*cab**4*complex(0,1)*lam2 + 12*cab**3*complex(0,1)*lam7*sab - 6*cab**2*complex(0,1)*lam3*sab**2 - 6*cab**2*complex(0,1)*lam4*sab**2 - 6*cab**2*complex(0,1)*lam5*sab**2 + 12*cab*complex(0,1)*lam6*sab**3 - 3*complex(0,1)*lam1*sab**4',
order = {'QED':2})
GC_94 = Coupling(name = 'GC_94',
value = '-3*cab**4*complex(0,1)*lam1 - 12*cab**3*complex(0,1)*lam6*sab - 6*cab**2*complex(0,1)*lam3*sab**2 - 6*cab**2*complex(0,1)*lam4*sab**2 - 6*cab**2*complex(0,1)*lam5*sab**2 - 12*cab*complex(0,1)*lam7*sab**3 - 3*complex(0,1)*lam2*sab**4',
order = {'QED':2})
GC_95 = Coupling(name = 'GC_95',
value = '-(cab**4*complex(0,1)*lam3) - cab**4*complex(0,1)*lam4 - cab**4*complex(0,1)*lam5 + 6*cab**3*complex(0,1)*lam6*sab - 6*cab**3*complex(0,1)*lam7*sab - 3*cab**2*complex(0,1)*lam1*sab**2 - 3*cab**2*complex(0,1)*lam2*sab**2 + 4*cab**2*complex(0,1)*lam3*sab**2 + 4*cab**2*complex(0,1)*lam4*sab**2 + 4*cab**2*complex(0,1)*lam5*sab**2 - 6*cab*complex(0,1)*lam6*sab**3 + 6*cab*complex(0,1)*lam7*sab**3 - complex(0,1)*lam3*sab**4 - complex(0,1)*lam4*sab**4 - complex(0,1)*lam5*sab**4',
order = {'QED':2})
GC_96 = Coupling(name = 'GC_96',
value = '-3*cab**4*complex(0,1)*lam7 - 3*cab**3*complex(0,1)*lam2*sab + 3*cab**3*complex(0,1)*lam3*sab + 3*cab**3*complex(0,1)*lam4*sab + 3*cab**3*complex(0,1)*lam5*sab - 9*cab**2*complex(0,1)*lam6*sab**2 + 9*cab**2*complex(0,1)*lam7*sab**2 + 3*cab*complex(0,1)*lam1*sab**3 - 3*cab*complex(0,1)*lam3*sab**3 - 3*cab*complex(0,1)*lam4*sab**3 - 3*cab*complex(0,1)*lam5*sab**3 + 3*complex(0,1)*lam6*sab**4',
order = {'QED':2})
GC_97 = Coupling(name = 'GC_97',
value = '-3*cab**4*complex(0,1)*lam6 + 3*cab**3*complex(0,1)*lam1*sab - 3*cab**3*complex(0,1)*lam3*sab - 3*cab**3*complex(0,1)*lam4*sab - 3*cab**3*complex(0,1)*lam5*sab + 9*cab**2*complex(0,1)*lam6*sab**2 - 9*cab**2*complex(0,1)*lam7*sab**2 - 3*cab*complex(0,1)*lam2*sab**3 + 3*cab*complex(0,1)*lam3*sab**3 + 3*cab*complex(0,1)*lam4*sab**3 + 3*cab*complex(0,1)*lam5*sab**3 + 3*complex(0,1)*lam7*sab**4',
order = {'QED':2})
GC_98 = Coupling(name = 'GC_98',
value = '-(ee**2*sphi)/(2.*cw)',
order = {'QED':2})
GC_99 = Coupling(name = 'GC_99',
value = '(ee**2*sphi)/(2.*cw)',
order = {'QED':2})
GC_100 = Coupling(name = 'GC_100',
value = '-(cab*ee**2*complex(0,1)*sphi)/(2.*cw)',
order = {'QED':2})
GC_101 = Coupling(name = 'GC_101',
value = '-2*cphi*complex(0,1)*lam5*sphi',
order = {'QED':2})
GC_102 = Coupling(name = 'GC_102',
value = '-(complex(0,1)*lam6*sphi)',
order = {'QED':2})
GC_103 = Coupling(name = 'GC_103',
value = '-2*complex(0,1)*lam6*sphi',
order = {'QED':2})
GC_104 = Coupling(name = 'GC_104',
value = '-(complex(0,1)*lam7*sphi)',
order = {'QED':2})
GC_105 = Coupling(name = 'GC_105',
value = '-(ee**2*complex(0,1)*sab*sphi)/(2.*cw)',
order = {'QED':2})
GC_106 = Coupling(name = 'GC_106',
value = '-2*complex(0,1)*lam5*sphi**2',
order = {'QED':2})
GC_107 = Coupling(name = 'GC_107',
value = 'fl1x2*complex(0,1)*sphi - fl2x1*complex(0,1)*sphi',
order = {'ZEE':1})
GC_108 = Coupling(name = 'GC_108',
value = '-(fl1x2*complex(0,1)*sphi) + fl2x1*complex(0,1)*sphi',
order = {'ZEE':1})
GC_109 = Coupling(name = 'GC_109',
value = 'fl1x3*complex(0,1)*sphi - fl3x1*complex(0,1)*sphi',
order = {'ZEE':1})
GC_110 = Coupling(name = 'GC_110',
value = '-(fl1x3*complex(0,1)*sphi) + fl3x1*complex(0,1)*sphi',
order = {'ZEE':1})
GC_111 = Coupling(name = 'GC_111',
value = 'fl2x3*complex(0,1)*sphi - fl3x2*complex(0,1)*sphi',
order = {'ZEE':1})
GC_112 = Coupling(name = 'GC_112',
value = '-(fl2x3*complex(0,1)*sphi) + fl3x2*complex(0,1)*sphi',
order = {'ZEE':1})
GC_113 = Coupling(name = 'GC_113',
value = '-(complex(0,1)*lam4*sphi)/2. - (complex(0,1)*lam5*sphi)/2.',
order = {'QED':2})
GC_114 = Coupling(name = 'GC_114',
value = '(cab*lam4*sphi)/2. - (cab*lam5*sphi)/2.',
order = {'QED':2})
GC_115 = Coupling(name = 'GC_115',
value = '-(cab*lam4*sphi)/2. + (cab*lam5*sphi)/2.',
order = {'QED':2})
GC_116 = Coupling(name = 'GC_116',
value = 'cphi*complex(0,1)*lam10*sphi - cphi*complex(0,1)*lam7*sphi',
order = {'QED':2})
GC_117 = Coupling(name = 'GC_117',
value = '2*cphi**2*complex(0,1)*lam10*sphi - 2*cphi**2*complex(0,1)*lam7*sphi',
order = {'QED':2})
GC_118 = Coupling(name = 'GC_118',
value = '-(cphi*complex(0,1)*lam3*sphi) + cphi*complex(0,1)*lam8*sphi',
order = {'QED':2})
GC_119 = Coupling(name = 'GC_119',
value = '-(cphi*complex(0,1)*lam3*sphi) - cphi*complex(0,1)*lam4*sphi + cphi*complex(0,1)*lam8*sphi',
order = {'QED':2})
GC_120 = Coupling(name = 'GC_120',
value = '-(cphi*complex(0,1)*lam2*sphi) + cphi*complex(0,1)*lam9*sphi',
order = {'QED':2})
GC_121 = Coupling(name = 'GC_121',
value = '(lam4*sab*sphi)/2. - (lam5*sab*sphi)/2.',
order = {'QED':2})
GC_122 = Coupling(name = 'GC_122',
value = '-(lam4*sab*sphi)/2. + (lam5*sab*sphi)/2.',
order = {'QED':2})
GC_123 = Coupling(name = 'GC_123',
value = '-(cab**2*complex(0,1)*lam4*sphi)/2. - (cab**2*complex(0,1)*lam5*sphi)/2. + cab*complex(0,1)*lam6*sab*sphi - cab*complex(0,1)*lam7*sab*sphi + (complex(0,1)*lam4*sab**2*sphi)/2. + (complex(0,1)*lam5*sab**2*sphi)/2.',
order = {'QED':2})
GC_124 = Coupling(name = 'GC_124',
value = '-(cab**2*complex(0,1)*lam7*sphi) + cab*complex(0,1)*lam4*sab*sphi + cab*complex(0,1)*lam5*sab*sphi - complex(0,1)*lam6*sab**2*sphi',
order = {'QED':2})
GC_125 = Coupling(name = 'GC_125',
value = '-(cab**2*complex(0,1)*lam6*sphi) - cab*complex(0,1)*lam4*sab*sphi - cab*complex(0,1)*lam5*sab*sphi - complex(0,1)*lam7*sab**2*sphi',
order = {'QED':2})
GC_126 = Coupling(name = 'GC_126',
value = 'cab**2*cphi*complex(0,1)*lam10*sphi - cab**2*cphi*complex(0,1)*lam7*sphi - cab*cphi*complex(0,1)*lam2*sab*sphi + cab*cphi*complex(0,1)*lam3*sab*sphi - cab*cphi*complex(0,1)*lam8*sab*sphi + cab*cphi*complex(0,1)*lam9*sab*sphi - cphi*complex(0,1)*lam10*sab**2*sphi + cphi*complex(0,1)*lam7*sab**2*sphi',
order = {'QED':2})
GC_127 = Coupling(name = 'GC_127',
value = '-(cab**2*cphi*complex(0,1)*lam2*sphi) + cab**2*cphi*complex(0,1)*lam9*sphi - 2*cab*cphi*complex(0,1)*lam10*sab*sphi + 2*cab*cphi*complex(0,1)*lam7*sab*sphi - cphi*complex(0,1)*lam3*sab**2*sphi + cphi*complex(0,1)*lam8*sab**2*sphi',
order = {'QED':2})
GC_128 = Coupling(name = 'GC_128',
value = '-(cab**2*cphi*complex(0,1)*lam3*sphi) + cab**2*cphi*complex(0,1)*lam8*sphi + 2*cab*cphi*complex(0,1)*lam10*sab*sphi - 2*cab*cphi*complex(0,1)*lam7*sab*sphi - cphi*complex(0,1)*lam2*sab**2*sphi + cphi*complex(0,1)*lam9*sab**2*sphi',
order = {'QED':2})
GC_129 = Coupling(name = 'GC_129',
value = '-(cphi**2*ee*complex(0,1)) - ee*complex(0,1)*sphi**2',
order = {'QED':1})
GC_130 = Coupling(name = 'GC_130',
value = '2*cphi**2*ee**2*complex(0,1) + 2*ee**2*complex(0,1)*sphi**2',
order = {'QED':2})
GC_131 = Coupling(name = 'GC_131',
value = '-(cphi**2*complex(0,1)*lam7) - complex(0,1)*lam10*sphi**2',
order = {'QED':2})
GC_132 = Coupling(name = 'GC_132',
value = '-2*cphi**3*complex(0,1)*lam7 - 2*cphi*complex(0,1)*lam10*sphi**2',
order = {'QED':2})
GC_133 = Coupling(name = 'GC_133',
value = '-(cphi**2*complex(0,1)*lam9) - complex(0,1)*lam2*sphi**2',
order = {'QED':2})
GC_134 = Coupling(name = 'GC_134',
value = '-(cphi**2*complex(0,1)*lam8) - complex(0,1)*lam3*sphi**2',
order = {'QED':2})
GC_135 = Coupling(name = 'GC_135',
value = '-(cphi**2*complex(0,1)*lam8) - complex(0,1)*lam3*sphi**2 - complex(0,1)*lam4*sphi**2',
order = {'QED':2})
GC_136 = Coupling(name = 'GC_136',
value = '-(cphi**2*complex(0,1)*lam10) - complex(0,1)*lam7*sphi**2',
order = {'QED':2})
GC_137 = Coupling(name = 'GC_137',
value = '-(cphi**3*complex(0,1)*lam10) + cphi*complex(0,1)*lam10*sphi**2 - 2*cphi*complex(0,1)*lam7*sphi**2',
order = {'QED':2})
GC_138 = Coupling(name = 'GC_138',
value = '2*cphi*complex(0,1)*lam10*sphi**2 - 2*cphi*complex(0,1)*lam7*sphi**2',
order = {'QED':2})
GC_139 = Coupling(name = 'GC_139',
value = '-(cphi**2*complex(0,1)*lam3) - complex(0,1)*lam8*sphi**2',
order = {'QED':2})
GC_140 = Coupling(name = 'GC_140',
value = '-(cphi**2*complex(0,1)*lam3) - cphi**2*complex(0,1)*lam4 - complex(0,1)*lam8*sphi**2',
order = {'QED':2})
GC_141 = Coupling(name = 'GC_141',
value = '-(cphi**2*complex(0,1)*lam2) - complex(0,1)*lam9*sphi**2',
order = {'QED':2})
GC_142 = Coupling(name = 'GC_142',
value = '-2*cphi**2*complex(0,1)*lam2*sphi**2 + 4*cphi**2*complex(0,1)*lam9*sphi**2 - 4*cphi**2*complex(0,1)*lameta*sphi**2',
order = {'QED':2})
GC_143 = Coupling(name = 'GC_143',
value = '-((cphi**2*muzee)/cmath.sqrt(2)) - (muzee*sphi**2)/cmath.sqrt(2)',
order = {'QED':1})
GC_144 = Coupling(name = 'GC_144',
value = '(cphi**2*muzee)/cmath.sqrt(2) + (muzee*sphi**2)/cmath.sqrt(2)',
order = {'QED':1})
GC_145 = Coupling(name = 'GC_145',
value = '-(cab**2*cphi**2*complex(0,1)*lam7) - cab*cphi**2*complex(0,1)*lam2*sab + cab*cphi**2*complex(0,1)*lam3*sab + cphi**2*complex(0,1)*lam7*sab**2 - cab**2*complex(0,1)*lam10*sphi**2 + cab*complex(0,1)*lam8*sab*sphi**2 - cab*complex(0,1)*lam9*sab*sphi**2 + complex(0,1)*lam10*sab**2*sphi**2',
order = {'QED':2})
GC_146 = Coupling(name = 'GC_146',
value = '-(cab**2*cphi**2*complex(0,1)*lam8) - 2*cab*cphi**2*complex(0,1)*lam10*sab - cphi**2*complex(0,1)*lam9*sab**2 - cab**2*complex(0,1)*lam3*sphi**2 - 2*cab*complex(0,1)*lam7*sab*sphi**2 - complex(0,1)*lam2*sab**2*sphi**2',
order = {'QED':2})
GC_147 = Coupling(name = 'GC_147',
value = '-(cab**2*cphi**2*complex(0,1)*lam9) + 2*cab*cphi**2*complex(0,1)*lam10*sab - cphi**2*complex(0,1)*lam8*sab**2 - cab**2*complex(0,1)*lam2*sphi**2 + 2*cab*complex(0,1)*lam7*sab*sphi**2 - complex(0,1)*lam3*sab**2*sphi**2',
order = {'QED':2})
GC_148 = Coupling(name = 'GC_148',
value = '-(cab**2*cphi**2*complex(0,1)*lam10) + cab*cphi**2*complex(0,1)*lam8*sab - cab*cphi**2*complex(0,1)*lam9*sab + cphi**2*complex(0,1)*lam10*sab**2 - cab**2*complex(0,1)*lam7*sphi**2 - cab*complex(0,1)*lam2*sab*sphi**2 + cab*complex(0,1)*lam3*sab*sphi**2 + complex(0,1)*lam7*sab**2*sphi**2',
order = {'QED':2})
GC_149 = Coupling(name = 'GC_149',
value = '-(cab**2*cphi**2*complex(0,1)*lam2) + 2*cab*cphi**2*complex(0,1)*lam7*sab - cphi**2*complex(0,1)*lam3*sab**2 - cab**2*complex(0,1)*lam9*sphi**2 + 2*cab*complex(0,1)*lam10*sab*sphi**2 - complex(0,1)*lam8*sab**2*sphi**2',
order = {'QED':2})
GC_150 = Coupling(name = 'GC_150',
value = '-(cab**2*cphi**2*complex(0,1)*lam3) - 2*cab*cphi**2*complex(0,1)*lam7*sab - cphi**2*complex(0,1)*lam2*sab**2 - cab**2*complex(0,1)*lam8*sphi**2 - 2*cab*complex(0,1)*lam10*sab*sphi**2 - complex(0,1)*lam9*sab**2*sphi**2',
order = {'QED':2})
GC_151 = Coupling(name = 'GC_151',
value = 'cphi**2*complex(0,1)*lam10*sphi - 2*cphi**2*complex(0,1)*lam7*sphi - complex(0,1)*lam10*sphi**3',
order = {'QED':2})
GC_152 = Coupling(name = 'GC_152',
value = '-2*cphi**2*complex(0,1)*lam10*sphi - 2*complex(0,1)*lam7*sphi**3',
order = {'QED':2})
GC_153 = Coupling(name = 'GC_153',
value = '-2*cphi**3*complex(0,1)*lam9*sphi + 4*cphi**3*complex(0,1)*lameta*sphi - 2*cphi*complex(0,1)*lam2*sphi**3 + 2*cphi*complex(0,1)*lam9*sphi**3',
order = {'QED':2})
GC_154 = Coupling(name = 'GC_154',
value = '-2*cphi**3*complex(0,1)*lam2*sphi + 2*cphi**3*complex(0,1)*lam9*sphi - 2*cphi*complex(0,1)*lam9*sphi**3 + 4*cphi*complex(0,1)*lameta*sphi**3',
order = {'QED':2})
GC_155 = Coupling(name = 'GC_155',
value = '-4*cphi**4*complex(0,1)*lameta - 4*cphi**2*complex(0,1)*lam9*sphi**2 - 2*complex(0,1)*lam2*sphi**4',
order = {'QED':2})
GC_156 = Coupling(name = 'GC_156',
value = '-(cphi**4*complex(0,1)*lam9) - 2*cphi**2*complex(0,1)*lam2*sphi**2 + 2*cphi**2*complex(0,1)*lam9*sphi**2 - 4*cphi**2*complex(0,1)*lameta*sphi**2 - complex(0,1)*lam9*sphi**4',
order = {'QED':2})
GC_157 = Coupling(name = 'GC_157',
value = '-2*cphi**4*complex(0,1)*lam2 - 4*cphi**2*complex(0,1)*lam9*sphi**2 - 4*complex(0,1)*lameta*sphi**4',
order = {'QED':2})
GC_158 = Coupling(name = 'GC_158',
value = '(cab**2*ee**2*complex(0,1))/(2.*sw**2) + (ee**2*complex(0,1)*sab**2)/(2.*sw**2)',
order = {'QED':2})
GC_159 = Coupling(name = 'GC_159',
value = '(ee**2*complex(0,1))/(2.*sw**2)',
order = {'QED':2})
GC_160 = Coupling(name = 'GC_160',
value = '-((ee**2*complex(0,1))/sw**2)',
order = {'QED':2})
GC_161 = Coupling(name = 'GC_161',
value = '(cphi**2*ee**2*complex(0,1))/(2.*sw**2)',
order = {'QED':2})
GC_162 = Coupling(name = 'GC_162',
value = '(cw**2*ee**2*complex(0,1))/sw**2',
order = {'QED':2})
GC_163 = Coupling(name = 'GC_163',
value = '(cphi*ee**2*complex(0,1)*sphi)/(2.*sw**2)',
order = {'QED':2})
GC_164 = Coupling(name = 'GC_164',
value = '(ee**2*complex(0,1)*sphi**2)/(2.*sw**2)',
order = {'QED':2})
GC_165 = Coupling(name = 'GC_165',
value = 'ee/(2.*sw)',
order = {'QED':1})
GC_166 = Coupling(name = 'GC_166',
value = '(ee*complex(0,1))/(sw*cmath.sqrt(2))',
order = {'QED':1})
GC_167 = Coupling(name = 'GC_167',
value = '-(cab*ee*complex(0,1))/(2.*sw)',
order = {'QED':1})
GC_168 = Coupling(name = 'GC_168',
value = '(cab*ee*complex(0,1))/(2.*sw)',
order = {'QED':1})
GC_169 = Coupling(name = 'GC_169',
value = '(CKM1x1*ee*complex(0,1))/(sw*cmath.sqrt(2))',
order = {'QED':1})
GC_170 = Coupling(name = 'GC_170',
value = '(CKM1x2*ee*complex(0,1))/(sw*cmath.sqrt(2))',
order = {'QED':1})
GC_171 = Coupling(name = 'GC_171',
value = '(CKM1x3*ee*complex(0,1))/(sw*cmath.sqrt(2))',
order = {'QED':1})
GC_172 = Coupling(name = 'GC_172',
value = '(CKM2x1*ee*complex(0,1))/(sw*cmath.sqrt(2))',
order = {'QED':1})
GC_173 = Coupling(name = 'GC_173',
value = '(CKM2x2*ee*complex(0,1))/(sw*cmath.sqrt(2))',
order = {'QED':1})
GC_174 = Coupling(name = 'GC_174',
value = '(CKM2x3*ee*complex(0,1))/(sw*cmath.sqrt(2))',
order = {'QED':1})
GC_175 = Coupling(name = 'GC_175',
value = '(CKM3x1*ee*complex(0,1))/(sw*cmath.sqrt(2))',
order = {'QED':1})
GC_176 = Coupling(name = 'GC_176',
value = '(CKM3x2*ee*complex(0,1))/(sw*cmath.sqrt(2))',
order = {'QED':1})
GC_177 = Coupling(name = 'GC_177',
value = '(CKM3x3*ee*complex(0,1))/(sw*cmath.sqrt(2))',
order = {'QED':1})
GC_178 = Coupling(name = 'GC_178',
value = '(cphi*ee)/(2.*sw)',
order = {'QED':1})
GC_179 = Coupling(name = 'GC_179',
value = '-(cab*cphi*ee*complex(0,1))/(2.*sw)',
order = {'QED':1})
GC_180 = Coupling(name = 'GC_180',
value = '(cab*cphi*ee*complex(0,1))/(2.*sw)',
order = {'QED':1})
GC_181 = Coupling(name = 'GC_181',
value = '-((cw*ee*complex(0,1))/sw)',
order = {'QED':1})
GC_182 = Coupling(name = 'GC_182',
value = '(cw*ee*complex(0,1))/sw',
order = {'QED':1})
GC_183 = Coupling(name = 'GC_183',
value = '-ee**2/(2.*sw)',
order = {'QED':2})
GC_184 = Coupling(name = 'GC_184',
value = 'ee**2/(2.*sw)',
order = {'QED':2})
GC_185 = Coupling(name = 'GC_185',
value = '(cab*ee**2*complex(0,1))/(2.*sw)',
order = {'QED':2})
GC_186 = Coupling(name = 'GC_186',
value = '-(cphi*ee**2)/(2.*sw)',
order = {'QED':2})
GC_187 = Coupling(name = 'GC_187',
value = '(cphi*ee**2)/(2.*sw)',
order = {'QED':2})
GC_188 = Coupling(name = 'GC_188',
value = '(cab*cphi*ee**2*complex(0,1))/(2.*sw)',
order = {'QED':2})
GC_189 = Coupling(name = 'GC_189',
value = '(-2*cw*ee**2*complex(0,1))/sw',
order = {'QED':2})
GC_190 = Coupling(name = 'GC_190',
value = '-(ee*complex(0,1)*sab)/(2.*sw)',
order = {'QED':1})
GC_191 = Coupling(name = 'GC_191',
value = '(ee*complex(0,1)*sab)/(2.*sw)',
order = {'QED':1})
GC_192 = Coupling(name = 'GC_192',
value = '-(cphi*ee*complex(0,1)*sab)/(2.*sw)',
order = {'QED':1})
GC_193 = Coupling(name = 'GC_193',
value = '(cphi*ee*complex(0,1)*sab)/(2.*sw)',
order = {'QED':1})
GC_194 = Coupling(name = 'GC_194',
value = '-(ee**2*complex(0,1)*sab)/(2.*sw)',
order = {'QED':2})
GC_195 = Coupling(name = 'GC_195',
value = '(cphi*ee**2*complex(0,1)*sab)/(2.*sw)',
order = {'QED':2})
GC_196 = Coupling(name = 'GC_196',
value = '(ee*sphi)/(2.*sw)',
order = {'QED':1})
GC_197 = Coupling(name = 'GC_197',
value = '-(cab*ee*complex(0,1)*sphi)/(2.*sw)',
order = {'QED':1})
GC_198 = Coupling(name = 'GC_198',
value = '(cab*ee*complex(0,1)*sphi)/(2.*sw)',
order = {'QED':1})
GC_199 = Coupling(name = 'GC_199',
value = '-(ee**2*sphi)/(2.*sw)',
order = {'QED':2})
GC_200 = Coupling(name = 'GC_200',
value = '(ee**2*sphi)/(2.*sw)',
order = {'QED':2})
GC_201 = Coupling(name = 'GC_201',
value = '(cab*ee**2*complex(0,1)*sphi)/(2.*sw)',
order = {'QED':2})
GC_202 = Coupling(name = 'GC_202',
value = '-(ee*complex(0,1)*sab*sphi)/(2.*sw)',
order = {'QED':1})
GC_203 = Coupling(name = 'GC_203',
value = '(ee*complex(0,1)*sab*sphi)/(2.*sw)',
order = {'QED':1})
GC_204 = Coupling(name = 'GC_204',
value = '(ee**2*complex(0,1)*sab*sphi)/(2.*sw)',
order = {'QED':2})
GC_205 = Coupling(name = 'GC_205',
value = '(ee*complex(0,1)*sw)/(3.*cw)',
order = {'QED':1})
GC_206 = Coupling(name = 'GC_206',
value = '(-2*ee*complex(0,1)*sw)/(3.*cw)',
order = {'QED':1})
GC_207 = Coupling(name = 'GC_207',
value = '(ee*complex(0,1)*sw)/cw',
order = {'QED':1})
GC_208 = Coupling(name = 'GC_208',
value = '-(cw*ee*complex(0,1))/(2.*sw) - (ee*complex(0,1)*sw)/(6.*cw)',
order = {'QED':1})
GC_209 = Coupling(name = 'GC_209',
value = '(cw*ee*complex(0,1))/(2.*sw) - (ee*complex(0,1)*sw)/(6.*cw)',
order = {'QED':1})
GC_210 = Coupling(name = 'GC_210',
value = '-(cw*ee*complex(0,1))/(2.*sw) + (ee*complex(0,1)*sw)/(2.*cw)',
order = {'QED':1})
GC_211 = Coupling(name = 'GC_211',
value = '(cw*ee*complex(0,1))/(2.*sw) + (ee*complex(0,1)*sw)/(2.*cw)',
order = {'QED':1})
GC_212 = Coupling(name = 'GC_212',
value = '-(cab*cw*ee)/(2.*sw) - (cab*ee*sw)/(2.*cw)',
order = {'QED':1})
GC_213 = Coupling(name = 'GC_213',
value = '(cw*ee**2*complex(0,1))/sw - (ee**2*complex(0,1)*sw)/cw',
order = {'QED':2})
GC_214 = Coupling(name = 'GC_214',
value = '-(cw*ee*sab)/(2.*sw) - (ee*sab*sw)/(2.*cw)',
order = {'QED':1})
GC_215 = Coupling(name = 'GC_215',
value = '(cw*ee*sab)/(2.*sw) + (ee*sab*sw)/(2.*cw)',
order = {'QED':1})
GC_216 = Coupling(name = 'GC_216',
value = '-(cphi*cw*ee*complex(0,1)*sphi)/(2.*sw) - (cphi*ee*complex(0,1)*sphi*sw)/(2.*cw)',
order = {'QED':1})
GC_217 = Coupling(name = 'GC_217',
value = '(cphi*cw*ee*complex(0,1)*sphi)/(2.*sw) + (cphi*ee*complex(0,1)*sphi*sw)/(2.*cw)',
order = {'QED':1})
GC_218 = Coupling(name = 'GC_218',
value = '(cphi*cw*ee**2*complex(0,1)*sphi)/sw + (cphi*ee**2*complex(0,1)*sphi*sw)/cw',
order = {'QED':2})
GC_219 = Coupling(name = 'GC_219',
value = '-(cw*ee*complex(0,1)*sphi**2)/(2.*sw) + (cphi**2*ee*complex(0,1)*sw)/cw + (ee*complex(0,1)*sphi**2*sw)/(2.*cw)',
order = {'QED':1})
GC_220 = Coupling(name = 'GC_220',
value = '-(cphi**2*cw*ee*complex(0,1))/(2.*sw) + (cphi**2*ee*complex(0,1)*sw)/(2.*cw) + (ee*complex(0,1)*sphi**2*sw)/cw',
order = {'QED':1})
GC_221 = Coupling(name = 'GC_221',
value = '(cw*ee**2*complex(0,1)*sphi**2)/sw - (2*cphi**2*ee**2*complex(0,1)*sw)/cw - (ee**2*complex(0,1)*sphi**2*sw)/cw',
order = {'QED':2})
GC_222 = Coupling(name = 'GC_222',
value = '(cphi**2*cw*ee**2*complex(0,1))/sw - (cphi**2*ee**2*complex(0,1)*sw)/cw - (2*ee**2*complex(0,1)*sphi**2*sw)/cw',
order = {'QED':2})
GC_223 = Coupling(name = 'GC_223',
value = '-(ee**2*complex(0,1)) + (cw**2*ee**2*complex(0,1))/(2.*sw**2) + (ee**2*complex(0,1)*sw**2)/(2.*cw**2)',
order = {'QED':2})
GC_224 = Coupling(name = 'GC_224',
value = 'ee**2*complex(0,1) + (cw**2*ee**2*complex(0,1))/(2.*sw**2) + (ee**2*complex(0,1)*sw**2)/(2.*cw**2)',
order = {'QED':2})
GC_225 = Coupling(name = 'GC_225',
value = 'cab**2*ee**2*complex(0,1) + ee**2*complex(0,1)*sab**2 + (cab**2*cw**2*ee**2*complex(0,1))/(2.*sw**2) + (cw**2*ee**2*complex(0,1)*sab**2)/(2.*sw**2) + (cab**2*ee**2*complex(0,1)*sw**2)/(2.*cw**2) + (ee**2*complex(0,1)*sab**2*sw**2)/(2.*cw**2)',
order = {'QED':2})
GC_226 = Coupling(name = 'GC_226',
value = '-(cphi*ee**2*complex(0,1)*sphi) + (cphi*cw**2*ee**2*complex(0,1)*sphi)/(2.*sw**2) - (3*cphi*ee**2*complex(0,1)*sphi*sw**2)/(2.*cw**2)',
order = {'QED':2})
GC_227 = Coupling(name = 'GC_227',
value = '-(ee**2*complex(0,1)*sphi**2) + (cw**2*ee**2*complex(0,1)*sphi**2)/(2.*sw**2) + (2*cphi**2*ee**2*complex(0,1)*sw**2)/cw**2 + (ee**2*complex(0,1)*sphi**2*sw**2)/(2.*cw**2)',
order = {'QED':2})
GC_228 = Coupling(name = 'GC_228',
value = '-(cphi**2*ee**2*complex(0,1)) + (cphi**2*cw**2*ee**2*complex(0,1))/(2.*sw**2) + (cphi**2*ee**2*complex(0,1)*sw**2)/(2.*cw**2) + (2*ee**2*complex(0,1)*sphi**2*sw**2)/cw**2',
order = {'QED':2})
GC_229 = Coupling(name = 'GC_229',
value = '-(ee**2*complex(0,1)*vev)/(2.*cw)',
order = {'QED':1})
GC_230 = Coupling(name = 'GC_230',
value = '(cab*ee**2*complex(0,1)*vev)/(2.*sw**2)',
order = {'QED':1})
GC_231 = Coupling(name = 'GC_231',
value = '-(ee**2*complex(0,1)*sab*vev)/(2.*sw**2)',
order = {'QED':1})
GC_232 = Coupling(name = 'GC_232',
value = '-(ee**2*vev)/(2.*sw)',
order = {'QED':1})
GC_233 = Coupling(name = 'GC_233',
value = '(ee**2*complex(0,1)*vev)/(2.*sw)',
order = {'QED':1})
GC_234 = Coupling(name = 'GC_234',
value = '(ee**2*vev)/(2.*sw)',
order = {'QED':1})
GC_235 = Coupling(name = 'GC_235',
value = '(cphi*lam4*vev)/2. - (cphi*lam5*vev)/2. - (muzee*sphi)/cmath.sqrt(2)',
order = {'QED':1})
GC_236 = Coupling(name = 'GC_236',
value = '-(cphi*lam4*vev)/2. + (cphi*lam5*vev)/2. + (muzee*sphi)/cmath.sqrt(2)',
order = {'QED':1})
GC_237 = Coupling(name = 'GC_237',
value = '-(cab*complex(0,1)*lam6*vev) + complex(0,1)*lam1*sab*vev',
order = {'QED':1})
GC_238 = Coupling(name = 'GC_238',
value = '-(cab*complex(0,1)*lam6*vev) - complex(0,1)*lam5*sab*vev',
order = {'QED':1})
GC_239 = Coupling(name = 'GC_239',
value = '-(cab*complex(0,1)*lam7*vev) + complex(0,1)*lam3*sab*vev + complex(0,1)*lam4*sab*vev - complex(0,1)*lam5*sab*vev',
order = {'QED':1})
GC_240 = Coupling(name = 'GC_240',
value = '-(cab*cphi*complex(0,1)*lam6*vev) - (cphi*complex(0,1)*lam4*sab*vev)/2. - (cphi*complex(0,1)*lam5*sab*vev)/2. + (complex(0,1)*muzee*sab*sphi)/cmath.sqrt(2)',
order = {'QED':1})
GC_241 = Coupling(name = 'GC_241',
value = '-(cab*complex(0,1)*lam1*vev) - complex(0,1)*lam6*sab*vev',
order = {'QED':1})
GC_242 = Coupling(name = 'GC_242',
value = '-(cab*complex(0,1)*lam5*vev) + complex(0,1)*lam6*sab*vev',
order = {'QED':1})
GC_243 = Coupling(name = 'GC_243',
value = '-(cab*cphi*complex(0,1)*lam4*vev)/2. - (cab*cphi*complex(0,1)*lam5*vev)/2. + cphi*complex(0,1)*lam6*sab*vev + (cab*complex(0,1)*muzee*sphi)/cmath.sqrt(2)',
order = {'QED':1})
GC_244 = Coupling(name = 'GC_244',
value = '-(cab*complex(0,1)*lam3*vev) - cab*complex(0,1)*lam4*vev + cab*complex(0,1)*lam5*vev - complex(0,1)*lam7*sab*vev',
order = {'QED':1})
GC_245 = Coupling(name = 'GC_245',
value = '-3*cab**3*complex(0,1)*lam7*vev + 3*cab**2*complex(0,1)*lam3*sab*vev + 3*cab**2*complex(0,1)*lam4*sab*vev + 3*cab**2*complex(0,1)*lam5*sab*vev - 9*cab*complex(0,1)*lam6*sab**2*vev + 3*complex(0,1)*lam1*sab**3*vev',
order = {'QED':1})
GC_246 = Coupling(name = 'GC_246',
value = '-3*cab**3*complex(0,1)*lam6*vev + 3*cab**2*complex(0,1)*lam1*sab*vev - 2*cab**2*complex(0,1)*lam3*sab*vev - 2*cab**2*complex(0,1)*lam4*sab*vev - 2*cab**2*complex(0,1)*lam5*sab*vev + 6*cab*complex(0,1)*lam6*sab**2*vev - 3*cab*complex(0,1)*lam7*sab**2*vev + complex(0,1)*lam3*sab**3*vev + complex(0,1)*lam4*sab**3*vev + complex(0,1)*lam5*sab**3*vev',
order = {'QED':1})
GC_247 = Coupling(name = 'GC_247',
value = '-(cab**3*complex(0,1)*lam3*vev) - cab**3*complex(0,1)*lam4*vev - cab**3*complex(0,1)*lam5*vev + 6*cab**2*complex(0,1)*lam6*sab*vev - 3*cab**2*complex(0,1)*lam7*sab*vev - 3*cab*complex(0,1)*lam1*sab**2*vev + 2*cab*complex(0,1)*lam3*sab**2*vev + 2*cab*complex(0,1)*lam4*sab**2*vev + 2*cab*complex(0,1)*lam5*sab**2*vev - 3*complex(0,1)*lam6*sab**3*vev',
order = {'QED':1})
GC_248 = Coupling(name = 'GC_248',
value = '-3*cab**3*complex(0,1)*lam1*vev - 9*cab**2*complex(0,1)*lam6*sab*vev - 3*cab*complex(0,1)*lam3*sab**2*vev - 3*cab*complex(0,1)*lam4*sab**2*vev - 3*cab*complex(0,1)*lam5*sab**2*vev - 3*complex(0,1)*lam7*sab**3*vev',
order = {'QED':1})
GC_249 = Coupling(name = 'GC_249',
value = '(lam4*sphi*vev)/2. - (lam5*sphi*vev)/2. + (cphi*muzee)/cmath.sqrt(2)',
order = {'QED':1})
GC_250 = Coupling(name = 'GC_250',
value = '-(lam4*sphi*vev)/2. + (lam5*sphi*vev)/2. - (cphi*muzee)/cmath.sqrt(2)',
order = {'QED':1})
GC_251 = Coupling(name = 'GC_251',
value = '-(cab*complex(0,1)*lam6*sphi*vev) - (complex(0,1)*lam4*sab*sphi*vev)/2. - (complex(0,1)*lam5*sab*sphi*vev)/2. - (cphi*complex(0,1)*muzee*sab)/cmath.sqrt(2)',
order = {'QED':1})
GC_252 = Coupling(name = 'GC_252',
value = '-(cab*complex(0,1)*lam4*sphi*vev)/2. - (cab*complex(0,1)*lam5*sphi*vev)/2. + complex(0,1)*lam6*sab*sphi*vev - (cab*cphi*complex(0,1)*muzee)/cmath.sqrt(2)',
order = {'QED':1})
GC_253 = Coupling(name = 'GC_253',
value = '-(cab*cphi*complex(0,1)*lam3*sphi*vev) + cab*cphi*complex(0,1)*lam8*sphi*vev + cphi*complex(0,1)*lam10*sab*sphi*vev - cphi*complex(0,1)*lam7*sab*sphi*vev + (cab*cphi**2*complex(0,1)*muzee)/cmath.sqrt(2) - (cab*complex(0,1)*muzee*sphi**2)/cmath.sqrt(2)',
order = {'QED':1})
GC_254 = Coupling(name = 'GC_254',
value = 'cab*cphi*complex(0,1)*lam10*sphi*vev - cab*cphi*complex(0,1)*lam7*sphi*vev + cphi*complex(0,1)*lam3*sab*sphi*vev - cphi*complex(0,1)*lam8*sab*sphi*vev - (cphi**2*complex(0,1)*muzee*sab)/cmath.sqrt(2) + (complex(0,1)*muzee*sab*sphi**2)/cmath.sqrt(2)',
order = {'QED':1})
GC_255 = Coupling(name = 'GC_255',
value = '-(cab*cphi**2*complex(0,1)*lam3*vev) - cphi**2*complex(0,1)*lam7*sab*vev - cab*complex(0,1)*lam8*sphi**2*vev - complex(0,1)*lam10*sab*sphi**2*vev - cab*cphi*complex(0,1)*muzee*sphi*cmath.sqrt(2)',
order = {'QED':1})
GC_256 = Coupling(name = 'GC_256',
value = '-(cab*cphi**2*complex(0,1)*lam10*vev) + cphi**2*complex(0,1)*lam8*sab*vev - cab*complex(0,1)*lam7*sphi**2*vev + complex(0,1)*lam3*sab*sphi**2*vev - cphi*complex(0,1)*muzee*sab*sphi*cmath.sqrt(2)',
order = {'QED':1})
GC_257 = Coupling(name = 'GC_257',
value = '-(cab*cphi**2*complex(0,1)*lam8*vev) - cphi**2*complex(0,1)*lam10*sab*vev - cab*complex(0,1)*lam3*sphi**2*vev - complex(0,1)*lam7*sab*sphi**2*vev + cab*cphi*complex(0,1)*muzee*sphi*cmath.sqrt(2)',
order = {'QED':1})
GC_258 = Coupling(name = 'GC_258',
value = '-(cab*cphi**2*complex(0,1)*lam7*vev) + cphi**2*complex(0,1)*lam3*sab*vev - cab*complex(0,1)*lam10*sphi**2*vev + complex(0,1)*lam8*sab*sphi**2*vev + cphi*complex(0,1)*muzee*sab*sphi*cmath.sqrt(2)',
order = {'QED':1})
GC_259 = Coupling(name = 'GC_259',
value = '-(ee**2*vev)/(4.*cw) - (cw*ee**2*vev)/(4.*sw**2)',
order = {'QED':1})
GC_260 = Coupling(name = 'GC_260',
value = '(ee**2*vev)/(4.*cw) - (cw*ee**2*vev)/(4.*sw**2)',
order = {'QED':1})
GC_261 = Coupling(name = 'GC_261',
value = '-(ee**2*vev)/(4.*cw) + (cw*ee**2*vev)/(4.*sw**2)',
order = {'QED':1})
GC_262 = Coupling(name = 'GC_262',
value = '(ee**2*vev)/(4.*cw) + (cw*ee**2*vev)/(4.*sw**2)',
order = {'QED':1})
GC_263 = Coupling(name = 'GC_263',
value = 'cab*ee**2*complex(0,1)*vev + (cab*cw**2*ee**2*complex(0,1)*vev)/(2.*sw**2) + (cab*ee**2*complex(0,1)*sw**2*vev)/(2.*cw**2)',
order = {'QED':1})
GC_264 = Coupling(name = 'GC_264',
value = '-(ee**2*complex(0,1)*sab*vev) - (cw**2*ee**2*complex(0,1)*sab*vev)/(2.*sw**2) - (ee**2*complex(0,1)*sab*sw**2*vev)/(2.*cw**2)',
order = {'QED':1})
GC_265 = Coupling(name = 'GC_265',
value = '-(yb/cmath.sqrt(2))',
order = {'QED':1})
GC_266 = Coupling(name = 'GC_266',
value = '-((cab*complex(0,1)*yb)/cmath.sqrt(2))',
order = {'QED':1})
GC_267 = Coupling(name = 'GC_267',
value = '(complex(0,1)*sab*yb)/cmath.sqrt(2)',
order = {'QED':1})
GC_268 = Coupling(name = 'GC_268',
value = '-(yc/cmath.sqrt(2))',
order = {'QED':1})
GC_269 = Coupling(name = 'GC_269',
value = 'yc/cmath.sqrt(2)',
order = {'QED':1})
GC_270 = Coupling(name = 'GC_270',
value = '-((cab*complex(0,1)*yc)/cmath.sqrt(2))',
order = {'QED':1})
GC_271 = Coupling(name = 'GC_271',
value = '(complex(0,1)*sab*yc)/cmath.sqrt(2)',
order = {'QED':1})
GC_272 = Coupling(name = 'GC_272',
value = '-(ydo/cmath.sqrt(2))',
order = {'QED':1})
GC_273 = Coupling(name = 'GC_273',
value = '-((cab*complex(0,1)*ydo)/cmath.sqrt(2))',
order = {'QED':1})
GC_274 = Coupling(name = 'GC_274',
value = '(complex(0,1)*sab*ydo)/cmath.sqrt(2)',
order = {'QED':1})
GC_275 = Coupling(name = 'GC_275',
value = '-(complex(0,1)*ye)',
order = {'QED':1})
GC_276 = Coupling(name = 'GC_276',
value = '-(ye/cmath.sqrt(2))',
order = {'QED':1})
GC_277 = Coupling(name = 'GC_277',
value = 'ye/cmath.sqrt(2)',
order = {'QED':1})
GC_278 = Coupling(name = 'GC_278',
value = '-((cab*complex(0,1)*ye)/cmath.sqrt(2))',
order = {'QED':1})
GC_279 = Coupling(name = 'GC_279',
value = '(complex(0,1)*sab*ye)/cmath.sqrt(2)',
order = {'QED':1})
GC_280 = Coupling(name = 'GC_280',
value = '-(complex(0,1)*ym)',
order = {'QED':1})
GC_281 = Coupling(name = 'GC_281',
value = '-(ym/cmath.sqrt(2))',
order = {'QED':1})
GC_282 = Coupling(name = 'GC_282',
value = 'ym/cmath.sqrt(2)',
order = {'QED':1})
GC_283 = Coupling(name = 'GC_283',
value = '-((cab*complex(0,1)*ym)/cmath.sqrt(2))',
order = {'QED':1})
GC_284 = Coupling(name = 'GC_284',
value = '(complex(0,1)*sab*ym)/cmath.sqrt(2)',
order = {'QED':1})
GC_285 = Coupling(name = 'GC_285',
value = '-(ys/cmath.sqrt(2))',
order = {'QED':1})
GC_286 = Coupling(name = 'GC_286',
value = '-((cab*complex(0,1)*ys)/cmath.sqrt(2))',
order = {'QED':1})
GC_287 = Coupling(name = 'GC_287',
value = '(complex(0,1)*sab*ys)/cmath.sqrt(2)',
order = {'QED':1})
GC_288 = Coupling(name = 'GC_288',
value = '-(yt/cmath.sqrt(2))',
order = {'QED':1})
GC_289 = Coupling(name = 'GC_289',
value = 'yt/cmath.sqrt(2)',
order = {'QED':1})
GC_290 = Coupling(name = 'GC_290',
value = '-((cab*complex(0,1)*yt)/cmath.sqrt(2))',
order = {'QED':1})
GC_291 = Coupling(name = 'GC_291',
value = '(complex(0,1)*sab*yt)/cmath.sqrt(2)',
order = {'QED':1})
GC_292 = Coupling(name = 'GC_292',
value = '-(complex(0,1)*ytau)',
order = {'QED':1})
GC_293 = Coupling(name = 'GC_293',
value = '-(ytau/cmath.sqrt(2))',
order = {'QED':1})
GC_294 = Coupling(name = 'GC_294',
value = 'ytau/cmath.sqrt(2)',
order = {'QED':1})
GC_295 = Coupling(name = 'GC_295',
value = '-((cab*complex(0,1)*ytau)/cmath.sqrt(2))',
order = {'QED':1})
GC_296 = Coupling(name = 'GC_296',
value = '(complex(0,1)*sab*ytau)/cmath.sqrt(2)',
order = {'QED':1})
GC_297 = Coupling(name = 'GC_297',
value = '-(yup/cmath.sqrt(2))',
order = {'QED':1})
GC_298 = Coupling(name = 'GC_298',
value = 'yup/cmath.sqrt(2)',
order = {'QED':1})
GC_299 = Coupling(name = 'GC_299',
value = '-((cab*complex(0,1)*yup)/cmath.sqrt(2))',
order = {'QED':1})
GC_300 = Coupling(name = 'GC_300',
value = '(complex(0,1)*sab*yup)/cmath.sqrt(2)',
order = {'QED':1})
GC_301 = Coupling(name = 'GC_301',
value = '-(yzee/cmath.sqrt(2))',
order = {'ZEE':1})
GC_302 = Coupling(name = 'GC_302',
value = 'yzee/cmath.sqrt(2)',
order = {'ZEE':1})
GC_303 = Coupling(name = 'GC_303',
value = '-((cab*complex(0,1)*yzee)/cmath.sqrt(2))',
order = {'ZEE':1})
GC_304 = Coupling(name = 'GC_304',
value = '-(cphi*complex(0,1)*yzee)',
order = {'ZEE':1})
GC_305 = Coupling(name = 'GC_305',
value = '-((complex(0,1)*sab*yzee)/cmath.sqrt(2))',
order = {'ZEE':1})
GC_306 = Coupling(name = 'GC_306',
value = '-(complex(0,1)*sphi*yzee)',
order = {'ZEE':1})
GC_307 = Coupling(name = 'GC_307',
value = '-(yzem/cmath.sqrt(2))',
order = {'ZEE':1})
GC_308 = Coupling(name = 'GC_308',
value = 'yzem/cmath.sqrt(2)',
order = {'ZEE':1})
GC_309 = Coupling(name = 'GC_309',
value = '-((cab*complex(0,1)*yzem)/cmath.sqrt(2))',
order = {'ZEE':1})
GC_310 = Coupling(name = 'GC_310',
value = '-(cphi*complex(0,1)*yzem)',
order = {'ZEE':1})
GC_311 = Coupling(name = 'GC_311',
value = '-((complex(0,1)*sab*yzem)/cmath.sqrt(2))',
order = {'ZEE':1})
GC_312 = Coupling(name = 'GC_312',
value = '-(complex(0,1)*sphi*yzem)',
order = {'ZEE':1})
GC_313 = Coupling(name = 'GC_313',
value = '-(yzet/cmath.sqrt(2))',
order = {'ZEE':1})
GC_314 = Coupling(name = 'GC_314',
value = 'yzet/cmath.sqrt(2)',
order = {'ZEE':1})
GC_315 = Coupling(name = 'GC_315',
value = '-((cab*complex(0,1)*yzet)/cmath.sqrt(2))',
order = {'ZEE':1})
GC_316 = Coupling(name = 'GC_316',
value = '-(cphi*complex(0,1)*yzet)',
order = {'ZEE':1})
GC_317 = Coupling(name = 'GC_317',
value = '-((complex(0,1)*sab*yzet)/cmath.sqrt(2))',
order = {'ZEE':1})
GC_318 = Coupling(name = 'GC_318',
value = '-(complex(0,1)*sphi*yzet)',
order = {'ZEE':1})
GC_319 = Coupling(name = 'GC_319',
value = '-(yzme/cmath.sqrt(2))',
order = {'ZEE':1})
GC_320 = Coupling(name = 'GC_320',
value = 'yzme/cmath.sqrt(2)',
order = {'ZEE':1})
GC_321 = Coupling(name = 'GC_321',
value = '-((cab*complex(0,1)*yzme)/cmath.sqrt(2))',
order = {'ZEE':1})
GC_322 = Coupling(name = 'GC_322',
value = '-(cphi*complex(0,1)*yzme)',
order = {'ZEE':1})
GC_323 = Coupling(name = 'GC_323',
value = '-((complex(0,1)*sab*yzme)/cmath.sqrt(2))',
order = {'ZEE':1})
GC_324 = Coupling(name = 'GC_324',
value = '-(complex(0,1)*sphi*yzme)',
order = {'ZEE':1})
GC_325 = Coupling(name = 'GC_325',
value = '-(yzmm/cmath.sqrt(2))',
order = {'ZEE':1})
GC_326 = Coupling(name = 'GC_326',
value = 'yzmm/cmath.sqrt(2)',
order = {'ZEE':1})
GC_327 = Coupling(name = 'GC_327',
value = '-((cab*complex(0,1)*yzmm)/cmath.sqrt(2))',
order = {'ZEE':1})
GC_328 = Coupling(name = 'GC_328',
value = '-(cphi*complex(0,1)*yzmm)',
order = {'ZEE':1})
GC_329 = Coupling(name = 'GC_329',
value = '-((complex(0,1)*sab*yzmm)/cmath.sqrt(2))',
order = {'ZEE':1})
GC_330 = Coupling(name = 'GC_330',
value = '-(complex(0,1)*sphi*yzmm)',
order = {'ZEE':1})
GC_331 = Coupling(name = 'GC_331',
value = '-(yzmt/cmath.sqrt(2))',
order = {'ZEE':1})
GC_332 = Coupling(name = 'GC_332',
value = 'yzmt/cmath.sqrt(2)',
order = {'ZEE':1})
GC_333 = Coupling(name = 'GC_333',
value = '-((cab*complex(0,1)*yzmt)/cmath.sqrt(2))',
order = {'ZEE':1})
GC_334 = Coupling(name = 'GC_334',
value = '-(cphi*complex(0,1)*yzmt)',
order = {'ZEE':1})
GC_335 = Coupling(name = 'GC_335',
value = '-((complex(0,1)*sab*yzmt)/cmath.sqrt(2))',
order = {'ZEE':1})
GC_336 = Coupling(name = 'GC_336',
value = '-(complex(0,1)*sphi*yzmt)',
order = {'ZEE':1})
GC_337 = Coupling(name = 'GC_337',
value = '-(yzte/cmath.sqrt(2))',
order = {'ZEE':1})
GC_338 = Coupling(name = 'GC_338',
value = 'yzte/cmath.sqrt(2)',
order = {'ZEE':1})
GC_339 = Coupling(name = 'GC_339',
value = '-((cab*complex(0,1)*yzte)/cmath.sqrt(2))',
order = {'ZEE':1})
GC_340 = Coupling(name = 'GC_340',
value = '-(cphi*complex(0,1)*yzte)',
order = {'ZEE':1})
GC_341 = Coupling(name = 'GC_341',
value = '-((complex(0,1)*sab*yzte)/cmath.sqrt(2))',
order = {'ZEE':1})
GC_342 = Coupling(name = 'GC_342',
value = '-(complex(0,1)*sphi*yzte)',
order = {'ZEE':1})
GC_343 = Coupling(name = 'GC_343',
value = '-(yztm/cmath.sqrt(2))',
order = {'ZEE':1})
GC_344 = Coupling(name = 'GC_344',
value = 'yztm/cmath.sqrt(2)',
order = {'ZEE':1})
GC_345 = Coupling(name = 'GC_345',
value = '-((cab*complex(0,1)*yztm)/cmath.sqrt(2))',
order = {'ZEE':1})
GC_346 = Coupling(name = 'GC_346',
value = '-(cphi*complex(0,1)*yztm)',
order = {'ZEE':1})
GC_347 = Coupling(name = 'GC_347',
value = '-((complex(0,1)*sab*yztm)/cmath.sqrt(2))',
order = {'ZEE':1})
GC_348 = Coupling(name = 'GC_348',
value = '-(complex(0,1)*sphi*yztm)',
order = {'ZEE':1})
GC_349 = Coupling(name = 'GC_349',
value = '-(yztt/cmath.sqrt(2))',
order = {'ZEE':1})
GC_350 = Coupling(name = 'GC_350',
value = 'yztt/cmath.sqrt(2)',
order = {'ZEE':1})
GC_351 = Coupling(name = 'GC_351',
value = '-((cab*complex(0,1)*yztt)/cmath.sqrt(2))',
order = {'ZEE':1})
GC_352 = Coupling(name = 'GC_352',
value = '-(cphi*complex(0,1)*yztt)',
order = {'ZEE':1})
GC_353 = Coupling(name = 'GC_353',
value = '-((complex(0,1)*sab*yztt)/cmath.sqrt(2))',
order = {'ZEE':1})
GC_354 = Coupling(name = 'GC_354',
value = '-(complex(0,1)*sphi*yztt)',
order = {'ZEE':1})
GC_355 = Coupling(name = 'GC_355',
value = '(ee*complex(0,1)*complexconjugate(CKM1x1))/(sw*cmath.sqrt(2))',
order = {'QED':1})
GC_356 = Coupling(name = 'GC_356',
value = '(ee*complex(0,1)*complexconjugate(CKM1x2))/(sw*cmath.sqrt(2))',
order = {'QED':1})
GC_357 = Coupling(name = 'GC_357',
value = '(ee*complex(0,1)*complexconjugate(CKM1x3))/(sw*cmath.sqrt(2))',
order = {'QED':1})
GC_358 = Coupling(name = 'GC_358',
value = '(ee*complex(0,1)*complexconjugate(CKM2x1))/(sw*cmath.sqrt(2))',
order = {'QED':1})
GC_359 = Coupling(name = 'GC_359',
value = '(ee*complex(0,1)*complexconjugate(CKM2x2))/(sw*cmath.sqrt(2))',
order = {'QED':1})
GC_360 = Coupling(name = 'GC_360',
value = '(ee*complex(0,1)*complexconjugate(CKM2x3))/(sw*cmath.sqrt(2))',
order = {'QED':1})
GC_361 = Coupling(name = 'GC_361',
value = '(ee*complex(0,1)*complexconjugate(CKM3x1))/(sw*cmath.sqrt(2))',
order = {'QED':1})
GC_362 = Coupling(name = 'GC_362',
value = '(ee*complex(0,1)*complexconjugate(CKM3x2))/(sw*cmath.sqrt(2))',
order = {'QED':1})
GC_363 = Coupling(name = 'GC_363',
value = '(ee*complex(0,1)*complexconjugate(CKM3x3))/(sw*cmath.sqrt(2))',
order = {'QED':1})
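# Note (sketch, not part of the generated model): in the standard UFO layout, the
# Coupling class from object_library also registers every instance defined above in
# object_library.all_couplings, so the Zee-specific couplings can be listed with
# something like:
#   from object_library import all_couplings
#   zee_couplings = [c.name for c in all_couplings if 'ZEE' in c.order]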
|
python
|
import AnimatedProp
from direct.actor import Actor
from direct.interval.IntervalGlobal import *
class HQPeriscopeAnimatedProp(AnimatedProp.AnimatedProp):
def __init__(self, node):
AnimatedProp.AnimatedProp.__init__(self, node)
parent = node.getParent()
self.periscope = Actor.Actor(node, copy=0)
self.periscope.reparentTo(parent)
self.periscope.loadAnims({'anim': 'phase_3.5/models/props/HQ_periscope-chan'})
self.periscope.pose('anim', 0)
self.node = self.periscope
        self.track = Sequence(
            Wait(2.0),
            self.periscope.actorInterval('anim', startFrame=0, endFrame=40),
            Wait(0.7),
            self.periscope.actorInterval('anim', startFrame=40, endFrame=90),
            Wait(0.7),
            self.periscope.actorInterval('anim', startFrame=91, endFrame=121),
            Wait(0.7),
            self.periscope.actorInterval('anim', startFrame=121, endFrame=91),
            Wait(0.7),
            self.periscope.actorInterval('anim', startFrame=90, endFrame=40),
            Wait(0.7),
            self.periscope.actorInterval('anim', startFrame=40, endFrame=90),
            Wait(0.7),
            self.periscope.actorInterval('anim', startFrame=91, endFrame=121),
            Wait(0.5),
            self.periscope.actorInterval('anim', startFrame=121, endFrame=148),
            Wait(3.0),
            name=self.uniqueName('HQPeriscope'))
def delete(self):
AnimatedProp.AnimatedProp.delete(self)
self.node.cleanup()
del self.node
del self.periscope
del self.track
def enter(self):
AnimatedProp.AnimatedProp.enter(self)
self.track.loop()
def exit(self):
AnimatedProp.AnimatedProp.exit(self)
self.track.finish()
|
python
|
from threading import Lock, Thread
from time import sleep
class Ingresso:
def __init__(self, estoque):
self.estoque = estoque
self.lock = Lock()
def comprar(self, quantidade):
self.lock.acquire()
if self.estoque < quantidade:
print("-Não temos ingresso suficientes.")
self.lock.release()
return
sleep(1)
self.estoque -= quantidade
print(
f"-Você comprou {quantidade} de ingresso(s), restando {self.estoque} no estoque."
)
self.lock.release()
if __name__ == "__main__":
ingresso = Ingresso(10)
for i in range(1, 20):
t = Thread(target=ingresso.comprar, args=(i,))
t.start()
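# A minimal variant (sketch): using the lock as a context manager releases it even if
# an exception is raised inside the critical section, avoiding a stuck lock:
#   def comprar(self, quantidade):
#       with self.lock:
#           ...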
|
python
|
from functools import partial, wraps
from slm_lab import ROOT_DIR
from slm_lab.lib import logger, util
import os
import pydash as ps
import torch
import torch.nn as nn
logger = logger.get_logger(__name__)
class NoOpLRScheduler:
'''Symbolic LRScheduler class for API consistency'''
def __init__(self, optim):
self.optim = optim
def step(self, epoch=None):
pass
def get_lr(self):
return self.optim.defaults['lr']
def build_fc_model(dims, activation=None):
    '''Build a fully-connected model by interleaving nn.Linear and activation_fn layers'''
assert len(dims) >= 2, 'dims need to at least contain input, output'
# shift dims and make pairs of (in, out) dims per layer
dim_pairs = list(zip(dims[:-1], dims[1:]))
layers = []
for in_d, out_d in dim_pairs:
layers.append(nn.Linear(in_d, out_d))
if activation is not None:
layers.append(get_activation_fn(activation))
model = nn.Sequential(*layers)
return model
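# Example (sketch): build_fc_model([4, 64, 64, 2], activation='relu') returns an
# nn.Sequential of Linear(4, 64), ReLU(), Linear(64, 64), ReLU(), Linear(64, 2), ReLU().
# Note that the activation is appended after every Linear layer, including the last one.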
def get_nn_name(uncased_name):
'''Helper to get the proper name in PyTorch nn given a case-insensitive name'''
for nn_name in nn.__dict__:
if uncased_name.lower() == nn_name.lower():
return nn_name
raise ValueError(f'Name {uncased_name} not found in {nn.__dict__}')
def get_activation_fn(activation):
'''Helper to generate activation function layers for net'''
activation = activation or 'relu'
ActivationClass = getattr(nn, get_nn_name(activation))
return ActivationClass()
def get_loss_fn(cls, loss_spec):
'''Helper to parse loss param and construct loss_fn for net'''
LossClass = getattr(nn, get_nn_name(loss_spec['name']))
loss_spec = ps.omit(loss_spec, 'name')
loss_fn = LossClass(**loss_spec)
return loss_fn
def get_lr_scheduler(cls, lr_scheduler_spec):
'''Helper to parse lr_scheduler param and construct Pytorch optim.lr_scheduler'''
if ps.is_empty(lr_scheduler_spec):
lr_scheduler = NoOpLRScheduler(cls.optim)
elif lr_scheduler_spec['name'] == 'LinearToZero':
LRSchedulerClass = getattr(torch.optim.lr_scheduler, 'LambdaLR')
total_t = float(lr_scheduler_spec['total_t'])
lr_scheduler = LRSchedulerClass(cls.optim, lr_lambda=lambda x: 1 - x / total_t)
else:
LRSchedulerClass = getattr(torch.optim.lr_scheduler, lr_scheduler_spec['name'])
lr_scheduler_spec = ps.omit(lr_scheduler_spec, 'name')
lr_scheduler = LRSchedulerClass(cls.optim, **lr_scheduler_spec)
return lr_scheduler
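# Example (sketch): a spec like {'name': 'StepLR', 'step_size': 1000, 'gamma': 0.9} builds
# torch.optim.lr_scheduler.StepLR(optim, step_size=1000, gamma=0.9); an empty spec yields
# the NoOpLRScheduler defined above.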
def get_optim(cls, optim_spec):
'''Helper to parse optim param and construct optim for net'''
OptimClass = getattr(torch.optim, optim_spec['name'])
optim_spec = ps.omit(optim_spec, 'name')
optim = OptimClass(cls.parameters(), **optim_spec)
return optim
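# Example (sketch, assuming the net exposes parameters() like an nn.Module):
#   optim = get_optim(net, {'name': 'Adam', 'lr': 0.001})
# 'name' selects the torch.optim class and the remaining keys are passed to its constructor.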
def get_policy_out_dim(body):
'''Helper method to construct the policy network out_dim for a body according to is_discrete, action_type'''
action_dim = body.action_dim
if body.is_discrete:
if body.action_type == 'multi_discrete':
assert ps.is_list(action_dim), action_dim
policy_out_dim = action_dim
else:
assert ps.is_integer(action_dim), action_dim
policy_out_dim = action_dim
else:
if body.action_type == 'multi_continuous':
assert ps.is_list(action_dim), action_dim
raise NotImplementedError('multi_continuous not supported yet')
else:
assert ps.is_integer(action_dim), action_dim
if action_dim == 1:
policy_out_dim = 2 # singleton stay as int
else:
# TODO change this to one slicable layer for efficiency
policy_out_dim = action_dim * [2]
return policy_out_dim
def get_out_dim(body, add_critic=False):
'''Construct the NetClass out_dim for a body according to is_discrete, action_type, and whether to add a critic unit'''
policy_out_dim = get_policy_out_dim(body)
if add_critic:
if ps.is_list(policy_out_dim):
out_dim = policy_out_dim + [1]
else:
out_dim = [policy_out_dim, 1]
else:
out_dim = policy_out_dim
return out_dim
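# Example (sketch): for a discrete body with action_dim=4, get_out_dim(body) returns 4,
# and get_out_dim(body, add_critic=True) returns [4, 1] (an extra unit for the value head).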
def init_layers(net, init_fn):
if init_fn is None:
return
nonlinearity = get_nn_name(net.hid_layers_activation).lower()
if nonlinearity == 'leakyrelu':
nonlinearity = 'leaky_relu'
if init_fn == 'xavier_uniform_':
try:
gain = nn.init.calculate_gain(nonlinearity)
except ValueError:
gain = 1
init_fn = partial(nn.init.xavier_uniform_, gain=gain)
elif 'kaiming' in init_fn:
assert nonlinearity in ['relu', 'leaky_relu'], f'Kaiming initialization not supported for {nonlinearity}'
init_fn = nn.init.__dict__[init_fn]
init_fn = partial(init_fn, nonlinearity=nonlinearity)
else:
init_fn = nn.init.__dict__[init_fn]
net.apply(partial(init_parameters, init_fn=init_fn))
def init_parameters(module, init_fn):
'''
    Initializes module's weights using init_fn, which is the name of a function from nn.init
    Initializes module's biases to 0.0
    Linear, Conv, BatchNorm and GRU modules are handled; other modules are left untouched
'''
bias_init = 0.0
classname = util.get_class_name(module)
if 'BatchNorm' in classname:
init_fn(module.weight)
nn.init.constant_(module.bias, bias_init)
elif 'GRU' in classname:
for name, param in module.named_parameters():
if 'weight' in name:
init_fn(param)
elif 'bias' in name:
nn.init.constant_(param, 0.0)
elif 'Linear' in classname or ('Conv' in classname and 'Net' not in classname):
init_fn(module.weight)
nn.init.constant_(module.bias, bias_init)
# params methods
def save(net, model_path):
'''Save model weights to path'''
torch.save(net.state_dict(), util.smart_path(model_path))
logger.info(f'Saved model to {model_path}')
def save_algorithm(algorithm, ckpt=None):
'''Save all the nets for an algorithm'''
agent = algorithm.agent
net_names = algorithm.net_names
prepath = util.get_prepath(agent.spec, agent.info_space, unit='session')
if ckpt is not None:
prepath = f'{prepath}_ckpt-{ckpt}'
logger.info(f'Saving algorithm {util.get_class_name(algorithm)} nets {net_names}')
for net_name in net_names:
net = getattr(algorithm, net_name)
model_path = f'{prepath}_{net_name}_model.pth'
save(net, model_path)
optim_path = f'{prepath}_{net_name}_optim.pth'
save(net.optim, optim_path)
def load(net, model_path):
    '''Load model weights from a path into a net module'''
device = None if torch.cuda.is_available() else 'cpu'
net.load_state_dict(torch.load(util.smart_path(model_path), map_location=device))
logger.info(f'Loaded model from {model_path}')
def load_algorithm(algorithm):
    '''Load all the nets for an algorithm'''
agent = algorithm.agent
net_names = algorithm.net_names
if util.in_eval_lab_modes():
# load specific model in eval mode
prepath = agent.info_space.eval_model_prepath
else:
prepath = util.get_prepath(agent.spec, agent.info_space, unit='session')
logger.info(f'Loading algorithm {util.get_class_name(algorithm)} nets {net_names}')
for net_name in net_names:
net = getattr(algorithm, net_name)
model_path = f'{prepath}_{net_name}_model.pth'
load(net, model_path)
optim_path = f'{prepath}_{net_name}_optim.pth'
load(net.optim, optim_path)
def copy(src_net, tar_net):
'''Copy model weights from src to target'''
tar_net.load_state_dict(src_net.state_dict())
def polyak_update(src_net, tar_net, old_ratio=0.5):
'''
    Polyak (soft) update of a target net tar_net: blend in the source by old_ratio and keep (1 - old_ratio) of the target, i.e.
target <- old_ratio * source + (1 - old_ratio) * target
'''
for src_param, tar_param in zip(src_net.parameters(), tar_net.parameters()):
tar_param.data.copy_(old_ratio * src_param.data + (1.0 - old_ratio) * tar_param.data)
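# Example (sketch): blend 0.5% of the online network into the target on each update:
#   polyak_update(online_net, target_net, old_ratio=0.005)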
def to_check_training_step():
    '''Condition for running the training-step sanity check in dev_check_training_step'''
return os.environ.get('PY_ENV') == 'test' or util.get_lab_mode() == 'dev'
def dev_check_training_step(fn):
'''
Decorator to check if net.training_step actually updates the network weights properly
Triggers only if to_check_training_step is True (dev/test mode)
@example
@net_util.dev_check_training_step
def training_step(self, ...):
...
'''
@wraps(fn)
def check_fn(*args, **kwargs):
if not to_check_training_step():
return fn(*args, **kwargs)
net = args[0] # first arg self
# get pre-update parameters to compare
pre_params = [param.clone() for param in net.parameters()]
# run training_step, get loss
loss = fn(*args, **kwargs)
# get post-update parameters to compare
post_params = [param.clone() for param in net.parameters()]
if loss == 0.0:
# if loss is 0, there should be no updates
# TODO if without momentum, parameters should not change too
for p_name, param in net.named_parameters():
assert param.grad.norm() == 0
else:
# check parameter updates
try:
assert not all(torch.equal(w1, w2) for w1, w2 in zip(pre_params, post_params)), f'Model parameter is not updated in training_step(), check if your tensor is detached from graph. Loss: {loss:g}'
logger.info(f'Model parameter is updated in training_step(). Loss: {loss: g}')
except Exception as e:
logger.error(e)
if os.environ.get('PY_ENV') == 'test':
# raise error if in unit test
raise(e)
# check grad norms
min_norm, max_norm = 0.0, 1e5
for p_name, param in net.named_parameters():
try:
grad_norm = param.grad.norm()
assert min_norm < grad_norm < max_norm, f'Gradient norm for {p_name} is {grad_norm:g}, fails the extreme value check {min_norm} < grad_norm < {max_norm}. Loss: {loss:g}. Check your network and loss computation.'
logger.info(f'Gradient norm for {p_name} is {grad_norm:g}; passes value check.')
except Exception as e:
logger.warn(e)
logger.debug('Passed network parameter update check.')
# store grad norms for debugging
net.store_grad_norms()
return loss
return check_fn
def get_grad_norms(algorithm):
'''Gather all the net's grad norms of an algorithm for debugging'''
grad_norms = []
for net_name in algorithm.net_names:
net = getattr(algorithm, net_name)
if net.grad_norms is not None:
grad_norms.extend(net.grad_norms)
return grad_norms
|
python
|
"""
Minimize the Himmelblau function.
http://en.wikipedia.org/wiki/Himmelblau%27s_function
"""
import numpy
import minhelper
def himmelblau(X):
"""
This R^2 -> R^1 function should be compatible with algopy.
http://en.wikipedia.org/wiki/Himmelblau%27s_function
This function has four local minima where the value of the function is 0.
"""
x = X[0]
y = X[1]
a = x*x + y - 11
b = x + y*y - 7
return a*a + b*b
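# Quick sanity check (sketch): (3, 2) is one of the four minima, where the function value is 0.
#   assert himmelblau([3.0, 2.0]) == 0.0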
def main():
target = [3, 2]
easy_init = [3.1, 2.1]
hard_init = [-0.27, -0.9]
minhelper.show_minimization_results(
himmelblau, target, easy_init, hard_init)
if __name__ == '__main__':
main()
|
python
|
import os
import tempfile
from unittest import TestCase
from pubmed_bpe_tokeniser import PubmedBPETokenisor
class TestPubmedBPETokenisor(TestCase):
def test_train(self):
# Arrange
data_file = os.path.join(os.path.dirname(__file__), "data", "sample_pubmed.json")
sut = PubmedBPETokenisor(vocab_size=300)
tempdir = tempfile.mkdtemp()
output_file_json = os.path.join(tempdir, "vocab.json")
# Act
sut.train([data_file], output_file_json)
# Assert
        vocab_file_size = os.path.getsize(output_file_json)
        self.assertTrue(vocab_file_size > 100,
                        "Expected the vocab file size {} to be greater than 100".format(vocab_file_size))
|
python
|
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from common.base_model_init import BaseModelInitializer, set_env_var
class ModelInitializer(BaseModelInitializer):
# SSD-MobileNet BFloat16 inference model initialization
args = None
custom_args = []
def __init__(self, args, custom_args=[], platform_util=None):
super(ModelInitializer, self).__init__(args, custom_args, platform_util)
# Set the num_inter_threads and num_intra_threads
# if user did not provide then default value based on platform will be set
self.set_num_inter_intra_threads(self.args.num_inter_threads,
self.args.num_intra_threads)
# Set KMP env vars, if they haven't already been set
config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
self.set_kmp_vars(config_file_path)
benchmark_script = os.path.join(self.args.intelai_models, self.args.mode,
"infer_detections.py")
self.command_prefix = self.get_command_prefix(self.args.socket_id) \
+ "{} {}".format(self.python_exe, benchmark_script)
set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads)
self.command_prefix += " -g {0}".format(self.args.input_graph)
self.command_prefix += " -i 1000"
self.command_prefix += " -w 200"
self.command_prefix += " -a {0}".format(self.args.num_intra_threads)
self.command_prefix += " -e {0}".format(self.args.num_inter_threads)
self.command_prefix += " -p {0}".format(self.args.precision)
if self.args.data_location:
self.command_prefix += " -d {0}".format(self.args.data_location)
if self.args.accuracy_only:
self.command_prefix += " -r"
            assert self.args.data_location, "accuracy_only mode requires data_location to be set."
else:
            # Multi-batch accuracy checking is not supported; batch size only applies to benchmarking.
self.command_prefix += " -b {0}".format(self.args.batch_size)
def run(self):
# Run script from the tensorflow models research directory
self.run_command(self.command_prefix)
|
python
|
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
def generate_launch_description():
    # # launch the turtlesim node (plain version)
# turtlesim_world_1 = IncludeLaunchDescription(
# PythonLaunchDescriptionSource([os.path.join(
# get_package_share_directory('launch_tutorial'), 'launch'),
# '/turtlesim_world_1.launch.py'])
# )
    # # launch the turtlesim node (parameters from YAML)
# turtlesim_world_2 = IncludeLaunchDescription(
# PythonLaunchDescriptionSource([os.path.join(
# get_package_share_directory('launch_tutorial'), 'launch'),
# '/turtlesim_world_2.launch.py'])
# )
    # launch the turtlesim node (YAML parameters using the /**: wildcard)
turtlesim_world_3 = IncludeLaunchDescription(
PythonLaunchDescriptionSource([os.path.join(
get_package_share_directory('launch_tutorial'), 'launch'),
'/turtlesim_world_3.launch.py'])
)
# broadcaster_listener_nodes = IncludeLaunchDescription(
# PythonLaunchDescriptionSource([os.path.join(
# get_package_share_directory('launch_tutorial'), 'launch'),
# '/broadcaster_listener.launch.py']),
# launch_arguments={'target_frame': 'carrot1'}.items(),
# )
# mimic_node = IncludeLaunchDescription(
# PythonLaunchDescriptionSource([os.path.join(
# get_package_share_directory('launch_tutorial'), 'launch'),
# '/mimic.launch.py'])
# )
# fixed_frame_node = IncludeLaunchDescription(
# PythonLaunchDescriptionSource([os.path.join(
# get_package_share_directory('launch_tutorial'), 'launch'),
# '/fixed_broadcaster.launch.py'])
# )
# rviz_node = IncludeLaunchDescription(
# PythonLaunchDescriptionSource([os.path.join(
# get_package_share_directory('launch_tutorial'), 'launch'),
# '/turtlesim_rviz.launch.py'])
# )
return LaunchDescription([
# turtlesim_world_1,
# turtlesim_world_2,
turtlesim_world_3,
# broadcaster_listener_nodes,
# mimic_node,
# fixed_frame_node,
# rviz_node
])
|
python
|
from setuptools import setup,find_packages
import lixtools
setup(
name='lixtools',
description="""software tools for data collection/processing at LiX""",
version=lixtools.__version__,
author='Lin Yang',
author_email='[email protected]',
license="BSD-3-Clause",
url="https://github.com/NSLS-II-LIX/lixtools",
packages=find_packages(),
package_data={'': ['plate_label_template.html', 'template_report.ipynb']},
include_package_data=True,
install_requires=['py4xs', 'numpy', 'pandas',
'python-barcode', 'matplotlib', 'pillow',
'openpyxl>=3', 'qrcode'],
python_requires='>=3.6',
classifiers=[
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Information Analysis",
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3.6",
],
keywords='x-ray scattering',
)
|
python
|
'''
* 'show system status'
'''
import re
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Optional
# ===========================================
# Schema for 'show system status'
# ===========================================
class ShowSystemStatusSchema(MetaParser):
""" Schema for "show system status" """
schema = {
'boot_loader_version': str,
'build': str,
'chassis_serial_number': str,
'commit_pending': str,
'configuration_template': str,
Optional('engineering_signed'): bool,
Optional('controller_compatibility'): str,
Optional('cpu_allocation'): {
Optional('total'): int,
Optional('control'): int,
Optional('data'): int
},
'cpu_reported_reboot': str,
'cpu_states': {
'idle': float,
'system': float,
'user': float
},
'current_time': str,
'disk_usage': {
'avail_mega': int,
'filesystem': str,
'mounted_on': str,
'size_mega': int,
'use_pc': int,
'used_mega': int
},
Optional('vmanage_storage_usage'): {
Optional('filesystem'): str,
Optional('size_mega'): int,
Optional('used_mega'): int,
Optional('avail_mega'): int,
Optional('use_pc'): int,
Optional('mounted_on'): str
},
'last_reboot': str,
Optional('load_average'): {
Optional('minute_1'): float,
Optional('minute_15'): float,
Optional('minute_5'): float
},
'memory_usage': {
'buffers_kilo': int,
'cache_kilo': int,
'free_kilo': int,
'total_kilo': int,
'used_kilo': int
},
Optional('hypervisor_type'):str,
Optional('cloud_hosted_instance'):str,
'model_name': str,
'personality': str,
'processes': int,
'services': str,
'system_fips_state': str,
'system_logging_disk': str,
'system_logging_host': str,
'system_state': str,
'system_uptime': str,
Optional('device_role'): str,
Optional('testbed_mode'): str,
'version': str,
'vmanaged': str
}
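# Usage of the parser defined below (sketch, assuming a connected pyATS/Genie device object):
#   parsed = ShowSystemStatus(device=device).parse()
#   parsed['cpu_states']['idle']   # e.g. 93.48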
# ===========================================
# Parser for 'show system status'
# ===========================================
class ShowSystemStatus(ShowSystemStatusSchema):
""" Parser for "show system status" """
cli_command = "show system status"
def cli(self, output=None):
if output is None:
output = self.device.execute(self.cli_command)
parsed_dict = {}
# System logging to host is disabled
# System logging to disk is enabled
p1 = re.compile(r'^System\s+logging\s+to\s+(?P<type>\w+)\s+is\s+(?P<value>enabled|disabled)$')
# CPU allocation: 4 total, 1 control, 3 data
# CPU allocation: 16 total
p2 = re.compile(r'^CPU\s+allocation:\s+(?P<total>\d+)\s+total(,\s+(?P<control>\d+)\s+control)?(,\s+(?P<data>\d+)\s+data)?$')
# CPU states: 1.25% user, 5.26% system, 93.48% idle
p3 = re.compile(r'^CPU\s+states:\s+(?P<user>[\d\.]+)\%\s+user,\s+(?P<system>[\d\.]+)\%\s+system,\s+(?P<idle>[\d\.]+)\%\s+idle$')
# Load average: 1 minute: 3.20, 5 minutes: 3.13, 15 minutes: 3.10
p4 = re.compile(r'^Load\s+average:\s+1\s+minute:\s+(?P<minute_1>[\d\.]+),\s+5\s+minutes:\s+(?P<minute_5>[\d\.]+),\s+15\s+minutes:\s+(?P<minute_15>[\d\.]+)$')
# Engineering Signed True
p5 = re.compile(r'^Engineering +Signed +(?P<value>True|False)$')
# Memory usage: 1907024K total, 1462908K used, 444116K free
p6 = re.compile(r'^Memory\s+usage:\s+(?P<total_kilo>\d+)K\s+total,\s+(?P<used_kilo>\d+)K\s+used,\s+(?P<free_kilo>\d+)K\s+free$')
# 0K buffers, 0K cache
p7 = re.compile(r'^(?P<buffers_kilo>\d+)K\s+buffers,\s+(?P<cache_kilo>\d+)K\s+cache$')
# Disk usage: Filesystem Size Used Avail Use % Mounted on
# vManage storage usage: Filesystem Size Used Avail Use% Mounted on
p8 = re.compile(r'^(?P<usage_dict>.+usage):\s+Filesystem\s+Size\s+Used\s+Avail\s+Use\s*%\s+Mounted\s+on$')
# /dev/root 7615M 447M 6741M 6% /
# /dev/disk/by-label/fs-bootflash 11039M 1240M 9219M 12% /bootflash
# /dev/bootflash1 28748M 2031M 25257M 7% /bootflash
p9 = re.compile(r'^(?P<filesystem>.+\S)\s+(?P<size_mega>\d+)M\s+(?P<used_mega>\d+)M\s+(?P<avail_mega>\d+)M\s+(?P<use_pc>\d+)\%\s+(?P<mounted_on>.+)$')
# Controller Compatibility: 20.3
# Version: 99.99.999-4567
# Build: 4567
# System state: GREEN. All daemons up
# System FIPS state: Enabled
# Testbed mode: Enabled
# Hypervisor Type: None
# Cloud Hosted Instance: false
# Last reboot: Initiated by user - activate 99.99.999-4567.
# CPU-reported reboot: Not Applicable
# Boot loader version: Not applicable
# System uptime: 0 days 21 hrs 35 min 28 sec
# Current time: Thu Aug 06 02:49:25 PDT 2020
# Processes: 250 total
# Personality: vedge
# Model name: vedge-cloud
# Services: None
# vManaged: true
# Commit pending: false
# Configuration template: CLItemplate_srp_vedge
# Chassis serial number: None
p10 = re.compile(r'^(?P<key>.*):\s+(?P<value>.*)$')
for line in output.splitlines():
line = line.strip()
# System logging to host is disabled
# System logging to disk is enabled
m1 = p1.match(line)
if m1:
group = m1.groupdict()
parsed_dict['system_logging_' + group['type']] = group['value']
continue
# CPU allocation: 4 total, 1 control, 3 data
# CPU allocation: 16 total
m2 = p2.match(line)
if m2:
group = m2.groupdict()
group = {key: int(group[key]) for key in group if group[key]}
parsed_dict.update({'cpu_allocation': group})
continue
# CPU states: 1.25% user, 5.26% system, 93.48% idle
m3 = p3.match(line)
if m3:
group = m3.groupdict()
for keys in group:
group[keys] = float(group[keys])
parsed_dict.update({'cpu_states': group})
continue
# Load average: 1 minute: 3.20, 5 minutes: 3.13, 15 minutes: 3.10
m4 = p4.match(line)
if m4:
group = m4.groupdict()
for keys in group:
group[keys] = float(group[keys])
parsed_dict.update({'load_average': group})
continue
# Engineering Signed True
m5 = p5.match(line)
if m5:
group = m5.groupdict()
                group = group['value'] == 'True'  # note: bool('False') would be True
parsed_dict.update({'engineering_signed': group})
continue
# Memory usage: 1907024K total, 1462908K used, 444116K free
m6 = p6.match(line)
if m6:
group = m6.groupdict()
parsed_dict.update({'memory_usage': {
key:int(group[key]) for key in group
}})
continue
# 0K buffers, 0K cache
m7 = p7.match(line)
if m7:
group = m7.groupdict()
parsed_dict['memory_usage'].update({
key:int(group[key]) for key in group
})
continue
# Disk usage: Filesystem Size Used Avail Use % Mounted on
# vManage storage usage: Filesystem Size Used Avail Use% Mounted on
m8 = p8.match(line)
if m8:
group = m8.groupdict()
usage_dict_name = group['usage_dict'].replace(' ', '_').lower()
usage_dict = parsed_dict.setdefault(usage_dict_name, {})
continue
# /dev/sda 503966M 6162M 472203M 1% /opt/data
# /dev/bootflash1 28748M 2031M 25257M 7% /bootflash
m9 = p9.match(line)
if m9:
group = m9.groupdict()
usage_dict.update({'filesystem': group.pop('filesystem')})
usage_dict.update({'mounted_on': group.pop('mounted_on')})
usage_dict.update({
key: int(group[key]) for key in group
})
continue
# Controller Compatibility: 20.3
# Version: 99.99.999-4567
# Build: 4567
# System state: GREEN. All daemons up
# System FIPS state: Enabled
# Testbed mode: Enabled
# Hypervisor Type: None
# Cloud Hosted Instance: false
# Last reboot: Initiated by user - activate 99.99.999-4567.
# CPU-reported reboot: Not Applicable
# Boot loader version: Not applicable
# System uptime: 0 days 21 hrs 35 min 28 sec
# Current time: Thu Aug 06 02:49:25 PDT 2020
# Processes: 250 total
# Personality: vedge
# Model name: vedge-cloud
# Services: None
# vManaged: true
# Commit pending: false
# Configuration template: CLItemplate_srp_vedge
# Chassis serial number: None
m10 = p10.match(line)
if m10:
group = m10.groupdict()
key = group['key'].replace('-', '_').replace(' ','_').replace(':','').lower()
if key == 'processes':
group['value'] = int(group['value'].replace('total',''))
parsed_dict.update({key: (group['value'])})
continue
return parsed_dict
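# Hedged usage sketch (not part of the original parser file): with a connected
# pyATS/Genie device the parser is normally reached through the generic parse call,
#   parsed = device.parse('show system status')
# or, when the raw CLI text is already captured,
#   parsed = ShowSystemStatus(device=device).cli(output=raw_show_output)
#   print(parsed['system_state'], parsed['memory_usage']['total_kilo'])
# `device` and `raw_show_output` are placeholders, not objects defined in this file.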
|
python
|
from django.conf.urls import patterns, include, url
from tastypie.api import Api
from tastypie_evostream.api import StreamResource
tastypie_evostream_api = Api()
tastypie_evostream_api.register(StreamResource())
urlpatterns = patterns(
'',
url(r'', include(tastypie_evostream_api.urls)),
)
|
python
|
# -*- coding: UTF-8 -*-
from django.db import models
from django.contrib.contenttypes import fields
from django.contrib.contenttypes.models import ContentType
# from south.modelsinspector import add_introspection_rules
# from tagging.models import Tag
# from tagging_autocomplete.models import TagAutocompleteField
from taggit_autosuggest.managers import TaggableManager
from django.contrib.auth.models import User
#from contrapartes.models import Usuarios
# from thumbs import ImageWithThumbsField
from sorl.thumbnail import ImageField
from utils import *
import datetime
# from south.modelsinspector import add_introspection_rules
from ckeditor_uploader.fields import RichTextUploadingField
# add_introspection_rules ([], ["^ckeditor\.fields\.RichTextField"])
# add_introspection_rules ([], ["^tagging_autocomplete\.models\.TagAutocompleteField"])
# Create your models here.
class Imagen(models.Model):
    ''' Generic model for uploading images in all the other apps :)'''
content_type = models.ForeignKey(ContentType,on_delete=models.DO_NOTHING)
object_id = models.IntegerField(db_index=True)
content_object = fields.GenericForeignKey('content_type', 'object_id')
nombre_img = models.CharField("Nombre",max_length=200, null=True, blank=True)
foto = ImageField("Foto",upload_to=get_file_path,null=True, blank=True)
tags_img = TaggableManager("Tags",help_text='Separar elementos con "," ', blank=True)
fileDir = 'fotos/'
class Meta:
verbose_name_plural = "Imágenes"
def __str__(self):
return self.nombre_img
class Documentos(models.Model):
    ''' Generic model for uploading documents in the various apps'''
content_type = models.ForeignKey(ContentType,on_delete=models.DO_NOTHING)
object_id = models.IntegerField(db_index=True)
content_object = fields.GenericForeignKey('content_type', 'object_id')
nombre_doc = models.CharField("Nombre",max_length=200, null=True, blank=True)
adjunto = models.FileField("Adjunto",upload_to=get_file_path, null=True, blank=True)
tags_doc = TaggableManager("Tags",help_text='Separar elementos con "," ', blank=True)
fileDir = 'documentos/'
class Meta:
verbose_name_plural = "Documentos"
def __str__(self):
return self.nombre_doc
class Videos(models.Model):
    ''' Generic model for uploading videos in all the apps'''
content_type = models.ForeignKey(ContentType,on_delete=models.DO_NOTHING)
object_id = models.IntegerField(db_index=True)
content_object = fields.GenericForeignKey('content_type', 'object_id')
nombre_video = models.CharField(max_length=200, null=True, blank=True)
url = models.URLField(null=True, blank=True)
tags_vid = TaggableManager(help_text='Separar elementos con "," ', blank=True)
class Meta:
verbose_name_plural = "Videos"
def __str__(self):
return self.nombre_video
class Audios(models.Model):
    ''' Generic model for uploading audio files in all the other apps '''
content_type = models.ForeignKey(ContentType,on_delete=models.DO_NOTHING)
object_id = models.IntegerField(db_index=True)
content_object = fields.GenericForeignKey('content_type', 'object_id')
nombre_audio = models.CharField(max_length=200, null=True, blank=True)
audio = models.FileField(upload_to=get_file_path, null=True, blank=True)
tags_aud = TaggableManager(help_text='Separar elementos con "," ', blank=True)
fileDir = 'audios/'
class Meta:
verbose_name_plural = "Audios"
def __str__(self):
return self.nombre_audio
class Foros(models.Model):
nombre = models.CharField(max_length=200)
creacion = models.DateField(auto_now_add=True)
apertura = models.DateField('Apertura y recepción de aportes')
cierre = models.DateField('Cierre de aportes')
fecha_skype = models.DateField('Propuesta de reunión skype')
memoria = models.DateField('Propuesta entrega de memoria')
contenido = RichTextUploadingField()
contraparte = models.ForeignKey(User,on_delete=models.DO_NOTHING)
#documentos = fields.GenericRelation(Documentos)
#fotos = fields.GenericRelation(Imagen)
#video = fields.GenericRelation(Videos)
#audio = fields.GenericRelation(Audios)
correo_enviado = models.BooleanField(editable=False)
class Meta:
verbose_name_plural = "Foros"
ordering = ['-creacion']
def __str__(self):
return self.nombre
def __documento__(self):
lista = []
for obj in self.documentos.all():
lista.append(obj)
return lista
def __fotos__(self):
lista = []
for obj in self.fotos.all():
lista.append(obj)
return lista
def __video__(self):
lista = []
for obj in self.video.all():
lista.append(obj)
return lista
def __audio__(self):
lista = []
for obj in self.audio.all():
lista.append(obj)
return lista
def get_absolute_url(self):
return "/foros/ver/%d" % (self.id)
class Aportes(models.Model):
foro = models.ForeignKey(Foros,on_delete=models.CASCADE)
fecha = models.DateField(auto_now_add=True)
contenido = RichTextUploadingField()
user = models.ForeignKey(User,on_delete=models.DO_NOTHING)
adjuntos = fields.GenericRelation(Documentos)
fotos = fields.GenericRelation(Imagen)
video = fields.GenericRelation(Videos)
audio = fields.GenericRelation(Audios)
class Meta:
verbose_name_plural = "Aportes"
def __str__(self):
return self.foro.nombre
def __documento__(self):
lista = []
for obj in self.adjuntos.all():
lista.append(obj)
return lista
def __fotos__(self):
lista = []
for obj in self.fotos.all():
lista.append(obj)
return lista
def __video__(self):
lista = []
for obj in self.video.all():
lista.append(obj)
return lista
def __audio__(self):
lista = []
for obj in self.audio.all():
lista.append(obj)
return lista
class Comentarios(models.Model):
fecha = models.DateField(auto_now_add=True)
usuario = models.ForeignKey(User,on_delete=models.DO_NOTHING)
comentario = RichTextUploadingField()
aporte = models.ForeignKey(Aportes,on_delete=models.CASCADE)
class Meta:
verbose_name_plural = "Comentarios"
def __str__(self):
return self.usuario.username
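# Hedged usage sketch (object values are made up, not from this project): thanks to the
# generic relations above, any model instance can own attachments through
# (content_type, object_id), e.g.
#   foro = Foros.objects.get(pk=1)
#   Imagen.objects.create(content_object=foro, nombre_img="portada")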
|
python
|
# coding: utf-8
'''
------------------------------------------------------------------------------
Copyright 2015 - 2017 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------------------------------------------------------------------------------
UnitTestUtilities.py
--------------------------------------------------
requirements:
* ArcGIS Desktop 10.X+ or ArcGIS Pro 1.X+
* Python 2.7 or Python 3.4
author: ArcGIS Solutions
company: Esri
==================================================
description:
Basic methods used in unit tests
==================================================
history:
10/06/2015 - JH - original coding
10/23/2015 - MF - mods for tests
==================================================
'''
import arcpy
import os
import sys
import traceback
import platform
import logging
import Configuration
import datetime
def getLoggerName():
''' get unique log file name '''
if Configuration.DEBUG == True:
print("UnitTestUtilities - getLoggerName")
seq = 0
name = nameFromDate(seq)
#add +=1 to seq until name doesn't exist as a path
while os.path.exists(os.path.join(Configuration.logPath, name)):
seq += 1
name = nameFromDate(seq)
#logFilePath = os.path.join(Configuration.logPath, name)
return name
def getCurrentDateTimeForLogFile():
''' Get current date/time string as: YYYY-MM-DD_HH-MM-SS '''
return datetime.datetime.now().strftime("%Y-%B-%d_%H-%M-%S")
def getCurrentDateTime():
''' Get current date/time string as: DD/MM/YYYY HH:MM:SS '''
return datetime.datetime.now().strftime("%d/%B/%Y %H:%M:%S")
def nameFromDate(seq):
''' Make log file name'''
return 'SGT_' + str(getCurrentDateTimeForLogFile()) + '_seq' + str(seq) + '.log'
def makeFileFromPath(filePath):
''' make a file object from a path to that
file if it doesn't already exist '''
if not checkExists(filePath):
try:
fd = open(filePath, 'a')
fd.close()
except:
print("Can't make file for some reason.")
return filePath
def makeFolderFromPath(folderPath):
''' make a folder(s) from a path if it doesn't
already exist '''
if not checkExists(folderPath):
try:
os.makedirs(folderPath)
except:
print("Can't make the folder for some reason.")
return folderPath
def initializeLogger(name, logLevel = logging.DEBUG):
''' get and return named logger '''
if Configuration.DEBUG == True:
print("UnitTestUtilities - initializeLogger")
# Check if the path to the log files exists, and create if not
if not os.path.exists(Configuration.logPath):
dummy = makeFolderFromPath(Configuration.logPath)
# get a unique log file name if we don't have a name already
    if name is None or name == "":
name = getLoggerName()
logFile = os.path.join(Configuration.logPath, name)
Configuration.LoggerFile = logFile
# if the log file does NOT exist, create it
if not os.path.exists(logFile):
logFile = makeFileFromPath(logFile)
logger = logging.getLogger(name)
logger.setLevel(logLevel)
logFormatter = logging.Formatter('%(levelname)s: %(asctime)s %(message)s')
fileHandler = logging.FileHandler(logFile)
fileHandler.setFormatter(logFormatter)
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
return logger
def setUpLogFileHeader():
''' Add a header to log file when initialized '''
Configuration.Logger.debug("UnitTestUtilities - setUpLogFileHeader")
Configuration.Logger.info("------------ Begin Tests ------------------")
Configuration.Logger.info("Platform: {0}".format(platform.platform()))
Configuration.Logger.info("Python Version {0}".format(sys.version))
agsInstallInfo = arcpy.GetInstallInfo()
Configuration.Logger.info("Product: {0}, Version: {1}, Installed on: {2}, Build: {3}.".format(agsInstallInfo['ProductName'], \
agsInstallInfo['Version'], agsInstallInfo['InstallDate'], agsInstallInfo['BuildNumber']))
Configuration.Logger.info("-------------------------------------------")
def checkArcPy():
''' sanity check that ArcPy is working '''
if Configuration.DEBUG == True: print("UnitTestUtilities - checkArcPy")
arcpy.AddMessage("ArcPy works")
def checkExists(p):
''' Python check for existence '''
if Configuration.DEBUG == True: print("UnitTestUtilities - checkExists")
return os.path.exists(p)
def createScratch(scratchPath):
''' create scratch geodatabase '''
if Configuration.DEBUG == True: print("UnitTestUtilities - createScratch")
scratchName = 'scratch.gdb'
scratchGDB = os.path.join(scratchPath, scratchName)
if checkExists(scratchGDB):
print("Scratch already exists")
return scratchGDB
try:
if Configuration.DEBUG == True: print("Creating scratch geodatabase...")
arcpy.CreateFileGDB_management(scratchPath, scratchName)
if Configuration.DEBUG == True: print("Created scratch gdb.")
except:
print("Problem creating scratch.gdb")
return scratchGDB
def deleteScratch(scratchPath):
''' delete scratch geodatabase '''
if Configuration.DEBUG == True: print("UnitTestUtilities - deleteScratch")
try:
arcpy.Delete_management(scratchPath)
if Configuration.DEBUG == True: print("Deleted scratch gdb.")
except:
print("scratch.gdb delete failed")
return
def checkFilePaths(paths):
''' check file/folder paths exist '''
if Configuration.DEBUG == True: print("UnitTestUtilities - checkFilePaths")
for path2check in paths:
if os.path.exists(path2check):
if Configuration.DEBUG == True: print("Valid Path: " + path2check)
else:
raise Exception('Bad Path: ' + str(path2check))
def checkGeoObjects(objects):
''' check geospatial stuff exists '''
if Configuration.DEBUG == True: print("UnitTestUtilities - checkGeoObjects")
for object2Check in objects:
#TODO: Shouldn't we be using arcpy.Exists()?
desc = arcpy.Describe(object2Check)
        if desc is None:
print("--> Invalid Object: " + str(object2Check))
arcpy.AddError("Bad Input")
raise Exception('Bad Input')
else:
if Configuration.DEBUG == True: print("Valid Object: " + desc.Name)
def handleArcPyError():
''' Basic GP error handling, errors printed to console and logger '''
if Configuration.DEBUG == True: print("UnitTestUtilities - handleArcPyError")
# Get the arcpy error messages
msgs = arcpy.GetMessages()
arcpy.AddError(msgs)
print(msgs)
Configuration.Logger.error(msgs)
raise Exception('ArcPy Error')
def handleGeneralError(exception = None):
''' Basic error handler, errors printed to console and logger '''
if Configuration.DEBUG == True: print("UnitTestUtilities - handleGeneralError")
if isinstance(exception, Exception):
print(str(exception))
Configuration.Logger.error(str(exception))
# Get the traceback object
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error into a message string
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
# Print Python error messages for use in Python / Python Window
print(pymsg + "\n")
Configuration.Logger.error(pymsg)
print(msgs)
Configuration.Logger.error(msgs)
if isinstance(exception, Exception):
raise exception
else:
raise Exception('General Error')
def geoObjectsExist(objects):
''' Return true if all of the input list of geo-objects exist, false otherwise '''
allExist = True
for obj in objects:
if not arcpy.Exists(obj):
allExist = False
return allExist
def folderPathsExist(paths):
''' Return true if all input paths exist, false otherwise '''
allExist = True
for p in paths:
if not os.path.exists(p):
allExist = False
return allExist
def deleteIfExists(dataset):
''' Delete the input dataset if it exists '''
if (arcpy.Exists(dataset)):
arcpy.Delete_management(dataset)
arcpy.AddMessage("deleted dataset: " + dataset)
|
python
|
from flask_restful import Resource, reqparse, request
from flask_restful import fields, marshal_with, marshal
from sqlalchemy.exc import IntegrityError
from sqlalchemy import or_, and_, text
from flask_jwt_extended import jwt_required
from models.keyword import Keyword
from app import db
from utils.util import max_res
from helpers.keywords_resource_helper import *
class KeywordsResource(Resource):
@jwt_required
def get(self, keyword_id=None):
if keyword_id:
keyword = Keyword.find_by_id(keyword_id)
return max_res(marshal(keyword, keyword_fields))
else:
conditions = []
args = keyword_query_parser.parse_args()
page = args['page']
per_page = args['pagesize']
if args['orderby'] not in sortable_fields:
                return max_res('', code=500, errmsg='invalid sort field')
sort = args['orderby']
if args['desc']>0:
sort = args['orderby'] + ' desc'
conditions = make_conditions(conditions,args)
            # More query conditions can be added here, for example:
# if args['name'] is not None:
# conditions.append(Keyword.name.like('%'+args['name']+'%'))
            if not conditions:
pagination = Keyword.query.order_by(text(sort)).paginate(page, per_page, error_out=False)
else:
pagination = Keyword.query.filter(*conditions).order_by(text(sort)).paginate(page, per_page, error_out=False)
paginate = {
'total':pagination.total,
'pageSize': pagination.per_page,
'current': pagination.page
}
print(pagination.items)
return max_res(marshal({
'pagination': paginate,
'list': [marshal(u, keyword_fields) for u in pagination.items]
}, keyword_list_fields))
@jwt_required
def post(self):
args = keyword_post_parser.parse_args()
keyword = Keyword(**args)
try:
keyword.add()
except IntegrityError:
return max_res('', code=401, errmsg='名称重复')
return max_res(marshal(keyword, keyword_fields))
def put(self, keyword_id=None):
keyword = Keyword.find_by_id(keyword_id)
args = keyword_update_parser.parse_args()
keyword = update_all_fields(args, keyword)
        # More fields that need updating can be added here, e.g.
# if args['name']:
# o.name = args['name']
#
db.session.commit()
try:
keyword.update()
except Exception as e:
return max_res('',500, 'Failed to modify.')
return max_res(marshal(keyword, keyword_fields))
def delete(self, keyword_id=None):
keyword = Keyword.find_by_id(keyword_id)
try:
keyword.delete()
except Exception as e:
return max_res('',500, 'The record has already deleted.')
return max_res('The keyword has been deleted.')
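# Hedged wiring sketch (the Api instance and URL paths are assumptions, not defined in
# this file): a flask_restful resource like this is typically registered as
#   api.add_resource(KeywordsResource, '/keywords', '/keywords/<int:keyword_id>')
# so GET/POST hit /keywords and GET/PUT/DELETE with an id hit /keywords/<keyword_id>.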
|
python
|
# -*- coding: utf-8 -*-
import unittest
from iemlav.lib.log_monitor.server_log.parser.apache import ApacheParser
from iemlav.lib.log_monitor.server_log.server_logger import ServerLogger
try:
# if python 3.x.x
from unittest.mock import patch
except ImportError: # python 2.x.x
from mock import patch
class TestApacheParser(unittest.TestCase):
"""
Test class for SecureTea Server Log Apache Log Parser.
"""
def setUp(self):
"""
Setup class for TestApacheParser.
"""
# Initialize Apache object
self.apache_obj = ApacheParser(window=30, path="random_path")
# Mock log data
self.data = ['83.149.9.216 - - [14/Jun/2019:10:30:00 +0000] ' \
'"GET /presentations/logstash-monitorama-2013/images/kibana-dashboard3.png HTTP/1.1" ' \
'400 171717 "http://semicomplete.com/presentations/logstash-monitorama-2013/" ' \
'"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) '\
'Chrome/32.0.1700.77 Safari/537.36']
# Mock parsed log data
self.parsed_dict = {'83.149.9.216': {
'ep_time': [1560508200],
'get': ['/presentations/logstash-monitorama-2013/images/kibana-dashboard3.png HTTP/1.1'],
'status_code': [400],
'ua': ['Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) ' \
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36'],
'count': 1,
'unique_get': ['/presentations/logstash-monitorama-2013/images/kibana-dashboard3.png HTTP/1.1']
}
}
@patch.object(ApacheParser, "check_within_window")
@patch.object(ServerLogger, "log")
@patch("iemlav.lib.log_monitor.server_log.parser.apache.utils")
def test_parse(self, mck_utils, mock_log, mck_window):
"""
Test parse.
"""
mck_utils.open_file.return_value = self.data
mck_window.return_value = True
mck_utils.get_epoch_time.return_value = 1560508200
# Check if the parsing is correct
self.assertEqual(self.apache_obj.parse(),
self.parsed_dict)
@patch("iemlav.lib.log_monitor.server_log.parser.apache.time")
def test_check_within_window(self, mock_time):
"""
Test check_within_window.
"""
# Case 1: When time difference is less than window
mock_time.time.return_value = 1560508200
res = self.apache_obj.check_within_window(1560508200)
self.assertTrue(res)
# Case 2: When time difference is greater than window
res = self.apache_obj.check_within_window(1557916100)
self.assertFalse(res)
def test_update_dict(self):
"""
Test update_dict.
"""
self.apache_obj.update_dict(
ip="1.1.1.1",
ep_time=1500,
get="/random/get/req",
status_code=200,
user_agent="random-user-agent"
)
temp_dict = {'ep_time': [1500],
'get': ['/random/get/req'],
'status_code': [200],
'ua': ['random-user-agent'],
'count': 1,
'unique_get': ['/random/get/req']}
# Check if the key exists
self.assertTrue(self.apache_obj.apache_dict.get("1.1.1.1"))
# Check if the updated dict is correct
self.assertEqual(self.apache_obj.apache_dict["1.1.1.1"], temp_dict)
|
python
|
# Copyright (c) 2019 fortiss GmbH
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
from bark.world.agent import *
from bark.models.behavior import *
from bark.world import *
from bark.world.map import *
from modules.runtime.commons.parameters import ParameterServer
from modules.runtime.commons.xodr_parser import XodrParser
import copy
class Scenario:
def __init__(self,
agent_list=None,
eval_agent_ids=None,
map_file_name=None,
json_params=None,
map_interface=None):
self._agent_list = agent_list or []
self._eval_agent_ids = eval_agent_ids or []
self._map_file_name = map_file_name
self._json_params = json_params
self._map_interface = map_interface
def get_world_state(self):
"""get initial world state of scenario to start simulation from here
Returns:
[bark.world.World]
"""
return self._build_world_state()
def copy(self):
return Scenario(agent_list=copy.deepcopy(self._agent_list),
eval_agent_ids=self._eval_agent_ids.copy(),
map_file_name=self._map_file_name,
json_params=self._json_params.copy(),
map_interface=self._map_interface)
def _build_world_state(self):
param_server = ParameterServer(json=self._json_params)
world = World(param_server)
if self._map_interface is None:
world = self.setup_map(world, self._map_file_name)
else:
world.set_map(self._map_interface)
for agent in self._agent_list:
world.add_agent(agent)
return world
def __getstate__(self):
odict = self.__dict__.copy()
print(odict['_map_interface'])
del odict['_map_interface']
print(odict)
return odict
def __setstate__(self, sdict):
sdict['_map_interface'] = None
self.__dict__.update(sdict)
def setup_map(self, world, _map_file_name):
if not _map_file_name:
return world
xodr_parser = XodrParser(_map_file_name )
map_interface = MapInterface()
map_interface.set_open_drive_map(xodr_parser.map)
self._map_interface = map_interface
world.set_map(map_interface)
return world
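# Hedged note (the pickle round-trip below is an illustration, not original code):
# __getstate__ drops '_map_interface', so a restored Scenario rebuilds the map from the
# xodr file on its next get_world_state() call.
#   import pickle
#   restored = pickle.loads(pickle.dumps(scenario))   # restored._map_interface is None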
|
python
|
import os
import pytest
from h2_conf import HttpdConf
def setup_data(env):
s100 = "012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678\n"
with open(os.path.join(env.gen_dir, "data-1k"), 'w') as f:
for i in range(10):
f.write(s100)
# The trailer tests depend on "nghttp" as no other client seems to be able to send those
# rare things.
class TestStore:
@pytest.fixture(autouse=True, scope='class')
def _class_scope(self, env):
setup_data(env)
HttpdConf(env).add_vhost_cgi(h2proxy_self=True).install()
assert env.apache_restart() == 0
# check if the server survives a trailer or two
def test_202_01(self, env):
url = env.mkurl("https", "cgi", "/echo.py")
fpath = os.path.join(env.gen_dir, "data-1k")
r = env.nghttp().upload(url, fpath, options=["--trailer", "test: 1"])
assert 300 > r.response["status"]
assert 1000 == len(r.response["body"])
r = env.nghttp().upload(url, fpath, options=["--trailer", "test: 1b", "--trailer", "XXX: test"])
assert 300 > r.response["status"]
assert 1000 == len(r.response["body"])
# check if the server survives a trailer without content-length
def test_202_02(self, env):
url = env.mkurl("https", "cgi", "/echo.py")
fpath = os.path.join(env.gen_dir, "data-1k")
r = env.nghttp().upload(url, fpath, options=["--trailer", "test: 2", "--no-content-length"])
assert 300 > r.response["status"]
assert 1000 == len(r.response["body"])
# check if echoing request headers in response from GET works
def test_202_03(self, env):
url = env.mkurl("https", "cgi", "/echohd.py?name=X")
r = env.nghttp().get(url, options=["--header", "X: 3"])
assert 300 > r.response["status"]
assert b"X: 3\n" == r.response["body"]
# check if echoing request headers in response from POST works
def test_202_03b(self, env):
url = env.mkurl("https", "cgi", "/echohd.py?name=X")
r = env.nghttp().post_name(url, "Y", options=["--header", "X: 3b"])
assert 300 > r.response["status"]
assert b"X: 3b\n" == r.response["body"]
# check if echoing request headers in response from POST works, but trailers are not seen
# This is the way CGI invocation works.
def test_202_04(self, env):
url = env.mkurl("https", "cgi", "/echohd.py?name=X")
r = env.nghttp().post_name(url, "Y", options=["--header", "X: 4a", "--trailer", "X: 4b"])
assert 300 > r.response["status"]
assert b"X: 4a\n" == r.response["body"]
|
python
|
# -*- coding: utf-8 -*-
from mimetypes import MimeTypes
from hashlib import md5
def list_of(_list, _class):
"""
    Check that the list _list only contains elements of one type, given in _class.
    Args:
        - _list:
            - list().
            - List of elements to work on.
            - The argument only accepts objects of class list.
        - _class:
            - Class expected for the elements of the list.
            - Any type can be checked, including NoneType.
    Returns:
        - bool().
            - True: every element of the list is of class _class
            - False: at least one element is not of class _class
"""
if not isinstance(_list, list):
        raise TypeError('list_of() only accepts type(_list)==list')
    return all(isinstance(element, _class) for element in _list)
def get_mimetype(_filename=None):
"""
    Return the MIME type of a file (_filename).
    Args:
    ====
        - _filename: Str(). Path to the file to check.
    Require:
    =======
        - Python builtin lib: mimetypes.
    Returns:
    =======
        - Str(). MimeType. Success.
        - None: Failure.
"""
try:
mime = MimeTypes()
return mime.guess_type(_filename)[0]
except TypeError:
pass
except IOError:
pass
def build_hash(_filename):
"""
    Build the hash of a resource.
    Args:
    ====
        - _filename: Str(). File for which the HASH should be computed.
    Return:
    ======
        - Success: Str() MD5-Hash.
        - Failure: None.
"""
hash_md5 = md5()
try:
with open(_filename, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
except IOError:
pass
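# Hedged, minimal usage demo (the file names below are made up and nothing here comes
# from the original project):
if __name__ == "__main__":
    print(list_of([1, 2, 3], int))           # True
    print(get_mimetype("example.txt"))       # text/plain
    print(build_hash("missing-file.bin"))    # None (the IOError is swallowed)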
|
python
|
import unittest
import random
from hypothesis import given, settings, assume, Verbosity, strategies as st
from src.poker.app.card import Deck, Card, Suit, Value
from src.poker.app.hand import Hand, Play, Result, calculate_play_hand
DeckStrategy = st.builds(Deck)
NaiveHandStrategy = st.builds(Hand, st.sets(
st.builds(Card,
st.sampled_from(Suit),
st.sampled_from(Value))
, max_size = 5
, min_size = 5))
@st.composite
def three_of_a_kind_in_hand(draw) -> Hand:
d = draw(DeckStrategy)
r = draw(st.randoms())
#1
sample = r.choice(list(d.deck))
cards = set([sample])
#2
sample = Card(sample.suit.next(), sample.val.next())
cards.add(sample)
#3
sample = Card(sample.suit.next(), sample.val.next())
cards.add(sample)
#4
sample = Card(sample.suit.next(), sample.val)
cards.add(sample)
#5
sample = Card(sample.suit.next(), sample.val)
cards.add(sample)
return Hand(cards)
@st.composite
def full_house_in_hand(draw) -> Hand:
d = draw(DeckStrategy)
r = draw(st.randoms())
#1
sample = r.choice(list(d.deck))
cards = set([sample])
#2
sample = Card(sample.suit.next(), sample.val)
cards.add(sample)
#3
sample = Card(sample.suit.next(), sample.val.next())
cards.add(sample)
#4
sample = Card(sample.suit.next(), sample.val)
cards.add(sample)
#5
sample = Card(sample.suit.next(), sample.val)
cards.add(sample)
return Hand(cards)
@st.composite
def straight_in_hand(draw) -> Hand:
blacklist = {Value.JACK, Value.QUEEN, Value.KING}
d = draw(DeckStrategy)
r = draw(st.randoms())
sample = r.choice(list(d.deck))
assume(not sample.val in blacklist)
# while v in blacklist:
# v = random.choice(list(Value))
cards = set([sample])
for _ in range(4):
sample = Card(sample.suit.next(), sample.val.next())
cards.add(sample)
return Hand(cards)
class PokerTest(unittest.TestCase):
@given(d=DeckStrategy,
n_gets=st.integers(min_value=0, max_value=55),
m_sets=st.integers(min_value=0, max_value=55))
#@settings(verbosity=Verbosity.verbose)
def test_deck_gets_and_sets(self, d: Deck, n_gets, m_sets) -> None:
"""
Tests if the deck class takes and returns properly cards
"""
withdraws = list()
for _ in range(n_gets+1):
card = d.get_random_card()
if card:
withdraws.append(card)
for _ in range(m_sets+1):
if withdraws:
card = random.choice(withdraws)
withdraws.remove(card)
d.set_card(card)
self.assertEqual(len(withdraws) + len(d.deck), Deck.TOTAL_CARDS)
@given(hand=NaiveHandStrategy)
@settings(max_examples=150)
def test_hand_plays_value(self, hand: Hand) -> None:
calculate_play_hand(hand)
assert hand.value > 0 and len(hand.cards) == 5
@given(hand=three_of_a_kind_in_hand())
def test_three_of_a_kind(self, hand: Hand) -> None:
calculate_play_hand(hand)
self.assertEqual(hand.play, Play.THREE_OF_A_KIND)
@given(hand=full_house_in_hand())
def test_full_house(self, hand: Hand) -> None:
calculate_play_hand(hand)
self.assertEqual(hand.play, Play.FULL_HOUSE)
@given(hand=straight_in_hand())
def test_straight(self, hand: Hand) -> None:
calculate_play_hand(hand)
self.assertEqual(hand.play, Play.STRAIGHT)
@given(hand1=st.one_of(full_house_in_hand(), straight_in_hand()),
hand2=st.one_of(three_of_a_kind_in_hand()))
#@settings(verbosity=Verbosity.verbose)
def test_two_hands(self, hand1: Hand, hand2: Hand) -> None:
calculate_play_hand(hand1)
calculate_play_hand(hand2)
self.assertEqual(Result.WIN, hand1.compare(hand2))
if __name__ == "__main__":
unittest.main()
|
python
|
import numpy as np
# Reshaping arrays:
# Reshaping means changing the shape of an array.
# The shape of an array is the number of elements in each dimension.
# By reshaping we can add or remove dimensions or change number of elements in each dimension.
# **Note: The product of the number of elements inside the Reshape must be equal to the number of elements of the array
arr = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5])
print(arr.reshape(4, 4, 2))
print(arr.reshape(2, 2, 2, 4))
copy_or_view = arr.reshape(4, 8)
print(copy_or_view.base)
print(arr.reshape(2, 4, -1))
arr = np.array([
[[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]],
[[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 5]]
])
print(arr.reshape(-1))
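# Hedged extra example (not in the original file): asking for a shape whose element
# count differs from the array's 32 elements raises a ValueError.
try:
    arr.reshape(3, 3, 3)   # 27 elements requested, 32 available
except ValueError as err:
    print("reshape failed:", err)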
|
python
|
import re
j_format = {
"j": "000010",
}
i_format = {
'beq': "000100",
'bne': None,
'addi': "001000",
'addiu': None,
'andi': None,
'ori': None,
'slti': None,
'sltiu': None,
'lui': None,
'lw': "100011",
'sw': "101011",
}
r_format = {
'add': "100000",
'addu': None,
'sub': "100010",
'subu': None,
'and': "100100",
'or': "100101",
'xor': "100110",
'slt': "101010",
'sltu': None,
'sll': "101001",
'srl': None,
'jr': None
}
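# Each entry below maps a register prefix to how many numbered registers reg_init()
# generates for it; '$tt' stands for the second $t bank ($t8/$t9) so the numbering
# stays in MIPS order (comment added for clarity, inferred from reg_init() below).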
reg_name = {
'$zero': 1,
'$at': 1,
'$v': 2,
'$a': 4,
'$t': 8,
'$s': 8,
'$tt': 2,
'$k': 2,
'$gp': 1,
'$sp': 1,
'$fp': 1,
'$ra': 1
}
def reg_init():
registers = {}
n = 0
for key, value in reg_name.items():
for i in range(value):
b = bin(n)[2:].zfill(5)
if key == "$zero":
registers[f"{key}"] = b
elif key == "$tt":
registers[f"$t{i + 8}"] = b
else:
registers[f"{key}{i}"] = b
n += 1
return registers
def check_dependency(ins, regs):
if ins[1] in regs:
return True
return False
def twos_comp(binary):
binary = binary[::-1]
new_b = ""
flag = False
for i in range(len(binary)):
if flag:
new_b += '0' if binary[i] == '1' else '1'
else:
if binary[i] == '1':
flag = True
new_b += binary[i]
return new_b[::-1]
def compile_asm(lines, registers):
# for beq, if R -> NOP, if LW -> NOP*2
instructions = []
labels = {}
add = 0
for line in lines:
if line[-1] == ':':
labels[line[:-1]] = add
continue
ins = re.findall("^[a-z]+", line)
if ins[0] != 'j':
regs = re.findall(" [a-zA-Z0-9]+|\$[a-z]+[0-9]|[0-9]+|\$zero|-[0-9]+", line)
else:
regs = [line.split(" ")[1]]
if ins[0] == 'beq' and check_dependency(instructions[-1], regs):
if instructions[-1][0] == 'lw':
instructions.append(['nop'])
instructions.append(['nop'])
add += 2
elif r_format.get(instructions[-1][0]):
instructions.append(['nop'])
add += 1
elif instructions[-1][0] in list(i_format.keys())[2:9] and i_format.get(instructions[-1][0]):
instructions.append(['nop'])
add += 1
add += 1
instructions.append(ins + regs)
binary = []
for add, ins in enumerate(instructions):
b = []
if ins[0] == 'nop':
b.append('0' * 32)
elif ins[0] in i_format:
b.append(i_format[ins[0]])
im, reg = (ins[2], ins[3]) if ins[0] in ['lw', 'sw'] else (ins[3], ins[2])
im = im.strip()
b.append(registers[reg])
b.append(registers[ins[1]])
if im.isnumeric() or (im[0] == '-' and im[1:].isnumeric()):
immediate = int(im)
while ins[0] == "beq" and instructions[immediate][0] == "nop":
immediate += 1
if immediate < 0:
b.append(twos_comp(bin(immediate)[2:].zfill(16)))
else:
b.append(bin(immediate)[2:].zfill(16))
else:
r_ad = labels[im.strip()] - add - 1
while instructions[add + 1 + r_ad][0] == "nop":
r_ad += 1
if r_ad < 0:
r_ad_bin = twos_comp(bin(r_ad)[2:].zfill(16))
else:
r_ad_bin = bin(r_ad)[2:].zfill(16)
b.append(r_ad_bin)
elif ins[0] in r_format:
b.append("000000") # OPCODE
if ins[0] == "sll" or ins[0] == "srl":
b.append(registers[ins[2]]) # RT
b.append("00000") # RS
b.append(registers[ins[1]]) # RD
shamt = bin(int(ins[3]))[2:].zfill(5)
b.append(shamt) # SHAMT
else:
b.append(registers[ins[2]]) # RS
b.append(registers[ins[3]]) # RT
b.append(registers[ins[1]]) # RD
b.append("00000") # SHAMT
b.append(r_format[ins[0]]) # FUNCT
elif ins[0] in j_format:
b.append(j_format[ins[0]])
if ins[1].isnumeric():
ad = int(ins[1])
while instructions[ad][0] == "nop":
ad += 1
b.append(bin(ad)[2:].zfill(26))
else:
ad = labels[ins[1]]
while instructions[ad][0] == "nop":
ad += 1
b.append(bin(ad)[2:].zfill(26))
binary.append("".join(b))
return binary
def compiler(file_name):
registers = reg_init()
lines = open(file_name).read().split('\n')
return compile_asm(lines, registers)
# compiler("p.art")
|
python
|
import random
from collections import defaultdict
import numpy as np
from maddpg.common.utils_common import zip_map
class ReplayBuffer(object):
def __init__(self, size):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
"""
self._storage = []
self._maxsize = int(size)
self._next_idx = 0
def __len__(self):
return len(self._storage)
def clear(self):
self._storage = []
self._next_idx = 0
def add(self, obs_t, action, reward, obs_tp1, done):
data = (obs_t, action, reward, obs_tp1, done)
if self._next_idx >= len(self._storage):
self._storage.append(data)
else:
self._storage[self._next_idx] = data
self._next_idx = (self._next_idx + 1) % self._maxsize
def _encode_sample(self, idxes):
obses_t = defaultdict(list)
actions = defaultdict(list)
rewards = defaultdict(list)
obses_tp1 = defaultdict(list)
dones = defaultdict(list)
for i in idxes:
data = self._storage[i]
obs_t, action, reward, obs_tp1, done = data
for key, (obs_t, action, reward, obs_tp1, done) in zip_map(*data):
obses_t[key].append(obs_t)
actions[key].append(action)
rewards[key].append(reward)
obses_tp1[key].append(obs_tp1)
dones[key].append(done)
return obses_t, actions, rewards, obses_tp1, dones
def make_index(self, batch_size):
return [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)]
def make_latest_index(self, batch_size):
idx = [(self._next_idx - 1 - i) %
self._maxsize for i in range(batch_size)]
np.random.shuffle(idx)
return idx
def sample_index(self, idxes):
return self._encode_sample(idxes)
def sample(self, batch_size):
"""Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
"""
if batch_size > 0:
idxes = self.make_index(batch_size)
else:
idxes = range(0, len(self._storage))
return self._encode_sample(idxes)
def collect(self):
return self.sample(-1)
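# Hedged usage sketch (agent keys and value shapes are assumptions; it also relies on
# zip_map iterating the per-agent dicts by shared key, which is defined elsewhere):
#   buf = ReplayBuffer(1e6)
#   buf.add({'agent_0': obs}, {'agent_0': act}, {'agent_0': rew},
#           {'agent_0': next_obs}, {'agent_0': done})
#   obs_b, act_b, rew_b, next_obs_b, done_b = buf.sample(batch_size=32)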
|
python
|
from arbre_binaire_jeu import *
#-------------------------------------------------------------------------------
# HOMEWORK (DM) - MISSION
#
# Goal: build a game from a pre-written text
#
# Constraint: use a binary tree
#-------------------------------------------------------------------------------
# Pre-written phrases
phrases = [None] * 18
phrases[0] = """21/10/2024 - 03h30, New York, un quartier mal famé et mal éclairé. Vous êtes au pied de l'immeuble auquel vous a mené votre enquête.
25 étages à vue de nez. Même au coeur de la nuit, on voit qu'il aurait besoin au minimum d'un sacré coup de peinture ; on le donnerait pour
abandonné si on ne percevait pas ça et là de faibles rayons de lumière. Tout est calme alentour, un silence à couper au couteau.
D'après mon informateur, le fils du Président, retenu prisonnier par le Gang des Ignares, est situé tout en haut au dernier étage.
Il est probablement sous surveillance. Pour le libérer, il va falloir être discret..."""
phrases[1] = "Damnation, je ne peux plus avancer! Il va falloir tenter la voie des airs, ça va être compliqué ..."
phrases[2] = "La porte d'entrée n'offre pas de résistance, je traverse le hall vers les accès aux étages."
phrases[3] = "Quelle malchance, l'ascenseur n' a plus d 'électricité. je vois un boîtier avec des fils qui dépassent."
phrases[4] = "L'escalier est fermé par une énorme grille. Il y a un boîtier avec un code à taper."
phrases[5] = "Ca y est, l'ascenseur fonctionne. Voilà, je suis dedans et je monte."
phrases[6] = "Ca y est, la grille s'ouvre. Prenons l' escalier. A moi les 25 étages...pfff !"
phrases[7] = """Ascension terminée, me voici à pied d'oeuvre! Je découvre un couloir. Il y a une porte latérale ouverte, ce doit être celle du gardien.
La porte fermée au fond doit être celle du prisonnier"""
phrases[8] = "Sacrebleu ! J 'ai foutu en l'air cette boîte rouillée qui s'est appelée un jour ascenseur. Voyons l'escalier..."
phrases[9] = "Enfer, le code ne marche pas, la grille est bloquée l'escalier est inaccessible. Voyons l'ascenseur..."
phrases[10] = "C'est la catastrophe, je ne peux pas monter, j' abandonne la mission en attendant de trouver un autre moyen"
phrases[11] = "Malédiction, le couloir est allumé, je vais me faire repérer, à moins qu' il dorme"
phrases[12] = "Le couloir est dans l'obscurité, pas de lumière, je vais me glisser dans l'ombre, il ne me verra pas"
phrases[13] = "Pas de bruit sauf une légère respiration, le surveillant doit dormir, je tente ma chance"
phrases[14] = "Des bruits de table et de chaise, le surveillant est apparemment bien éveillé."
phrases[15] = """Ouf, j'ai pu passer le couloir sans encombre. J'ouvre la porte au fond. Le prisonnier tourne la tête lentement
vers moi et me lance un regard ébahi. Je prend la pose et je lui lance un « Salut fiston, ton sauveur est arrivé! »"""
phrases[16] = """Un jet de lumière, le gardien braque sur moi un gros flingue, un sourire mauvais éclaire son visage ;
manifestement il m'attendait c'est un piège !"""
phrases[17] = "C'est trop risqué pour l' instant, je reviendrai dans quelques heures."
questions = [None] * 6
questions[0] = "La porte de l'immeuble est-elle ouverte (taper 1) ou verrouillée (taper 0) ? "
questions[1] = "Choisissez vous de prendre l'ascenseur (taper 1) ou l'escalier (taper 0)? "
questions[2] = "Branchez vous le fil vert avec noir (taper 1) ou le rouge avec le noir (taper 0)? "
questions[3] = "Choisissez vous le code 1111 (taper 1) ou le code 9999 (taper 0)? "
questions[4] = "Le couloir en face est-il éclairé (taper 1) ou dans le noir (taper 0)? "
questions[5] = "Entendez vous quelqu'un qui s'agite dans la pièce de surveillance (taper 1) ou est elle silencieuse (taper 0) ? "
# Story flow (hint)
'''
door open or locked?
door open: choose the elevator or the stairs
door locked: end of the story
elevator: green wire or red wire?
elevator works: arrival at the top, question about the corridor light
elevator broken: try the stairs
corridor lit: any noise?
too risky
rescued
corridor dark: any noise?
rescued
trap
stairs: code 1111 or 9999?
stairs work: arrival at the top, question about the corridor light ...
stairs broken: try the elevator
stairs blocked: mission postponed
'''
# Rebuilt game tree: the original construction did not compile (missing commas, a bare
# `Noeud`, a stray "Fin Bis" placeholder, unbalanced parentheses) and several branches
# were left unfinished.  The branches below follow the story hints given above.
# Convention used here: Noeud(text, child_for_answer_0, child_for_answer_1).
def sous_arbre_couloir(texte_arrivee):
    """Common sub-tree: top floor reached, corridor lit (1) or dark (0), then the guard."""
    return Noeud((texte_arrivee + phrases[7] + questions[4]),
                 Noeud((phrases[12] + phrases[15]), None, None),          # 0: dark corridor -> rescued
                 Noeud((phrases[11] + questions[5]),                      # 1: lit corridor -> any noise?
                       Noeud((phrases[13] + phrases[16]), None, None),    # 0: silence -> it is a trap
                       Noeud((phrases[14] + phrases[17]), None, None)))   # 1: noise -> too risky, come back later
arbre_jeu = Noeud((phrases[0] + questions[0]),
                  # 0: the door is locked -> end of the mission
                  Noeud(phrases[1], None, None),
                  # 1: the door is open -> elevator (1) or stairs (0)?
                  Noeud((phrases[2] + questions[1]),
                        # 0: the stairs -> code 1111 (1) or 9999 (0)?
                        Noeud((phrases[4] + questions[3]),
                              # 0: the code fails -> try the elevator instead
                              Noeud((phrases[9] + phrases[3] + questions[2]),
                                    Noeud(phrases[10], None, None),        # 0: wrong wire -> give up
                                    sous_arbre_couloir(phrases[5])),       # 1: the elevator works
                              # 1: the grid opens -> climb the stairs
                              sous_arbre_couloir(phrases[6])),
                        # 1: the elevator -> green wire (1) or red wire (0)?
                        Noeud((phrases[3] + questions[2]),
                              # 0: wrong wire -> try the stairs instead
                              Noeud((phrases[8] + phrases[4] + questions[3]),
                                    Noeud(phrases[10], None, None),        # 0: wrong code -> give up
                                    sous_arbre_couloir(phrases[6])),       # 1: the grid opens
                              # 1: the elevator works
                              sous_arbre_couloir(phrases[5]))))
affichage(arbre_jeu)
print("....!! FIN !!...")
#-------------------------------------------------------------------------------
# QUESTIONS
#
# 1. a) On paper, build the tree, using as each node's label the phrases and, where relevant, the associated question.
#    b) Work out the size and the height of the tree. How many leaves does it have?
# 2. Using the imported arbre_binaire module, build the tree above in Python.
# 3. Write a function that walks the tree, printing the text to the console and asking each question
#    as an input() to which the player answers 0 or 1 (a hedged sketch is given below).
# 4. Test the game.
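# Hedged sketch for question 3 (not part of the original file).  The attribute names
# `etiquette`, `gauche` and `droit` are assumptions about the Noeud class exposed by
# arbre_binaire_jeu; adapt them to the real module before using this.
def jouer(noeud):
    while noeud is not None:
        print(noeud.etiquette)                      # assumed: the node's text
        if noeud.gauche is None and noeud.droit is None:
            break                                   # leaf reached: the story ends here
        reponse = input("Votre choix (0 ou 1) : ")
        noeud = noeud.droit if reponse == "1" else noeud.gauche
# jouer(arbre_jeu)   # uncomment to play interactively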
|
python
|
# coding: utf-8
from olo import funcs
from olo.funcs import COUNT, SUM, AVG, MAX, DISTINCT
from .base import TestCase, Foo, Bar, Dummy
from .fixture import is_pg
from .utils import (
patched_execute, no_pk
)
attrs = dict(
name='foo',
tags=['a', 'b', 'c'],
password='password',
payload={
'abc': ['1', 2, 3],
'def': [4, '5', 6]
}
)
class TestCachedQuery(TestCase):
def test_fallback(self):
bar = Bar.create(name='a', xixi='a', age=1)
with patched_execute as execute:
bar = Bar.cq.filter(age=MAX(Bar.cq('age'))).first()
self.assertIsNotNone(bar)
self.assertTrue(execute.called)
with patched_execute as execute:
bar = Bar.cq.filter(age=MAX(Bar.cq('age'))).first()
self.assertIsNotNone(bar)
self.assertTrue(execute.called)
with patched_execute as execute:
bar = Bar.cq.filter(Bar.age > 0).first()
self.assertIsNotNone(bar)
self.assertTrue(execute.called)
with patched_execute as execute:
bar = Bar.cq.filter(Bar.age > 0).first()
self.assertIsNotNone(bar)
self.assertTrue(execute.called)
with patched_execute as execute:
bar = Bar.cq('age').filter(Bar.age > 0).first()
self.assertIsNotNone(bar)
self.assertTrue(execute.called)
with patched_execute as execute:
bar = Bar.cq('age').filter(Bar.age > 0).first()
self.assertIsNotNone(bar)
self.assertTrue(execute.called)
def test_first(self):
with patched_execute as execute:
bar = Bar.cq.filter(xixi='a', age=1).first()
self.assertIsNone(bar)
self.assertTrue(execute.called)
with patched_execute as execute:
bar = Bar.cq.filter(xixi='a', age=1).first()
self.assertIsNone(bar)
self.assertFalse(execute.called)
bar = Bar.create(name='a', xixi='a', age=1)
with patched_execute as execute:
bar = Bar.cq.filter(xixi='a', age=1).first()
self.assertIsNotNone(bar)
self.assertTrue(execute.called)
with patched_execute as execute:
bar = Bar.cq.filter(xixi='a', age=1).first()
self.assertIsNotNone(bar)
self.assertFalse(execute.called)
def test_all(self):
with patched_execute as execute:
bars = Bar.cq.filter(xixi='a', age=1).all()
self.assertEqual(bars, [])
self.assertTrue(execute.called)
with patched_execute as execute:
bars = Bar.cq.filter(xixi='a', age=1).all()
self.assertEqual(bars, [])
self.assertFalse(execute.called)
with patched_execute as execute:
bars = Bar.cq.filter(xixi='a', age=1).limit(10).all()
self.assertEqual(bars, [])
self.assertFalse(execute.called)
with patched_execute as execute:
bars = Bar.cq.filter(xixi='a', age=1).limit(11).all()
self.assertEqual(bars, [])
self.assertFalse(execute.called)
with patched_execute as execute:
bars = Bar.cq.limit(10).all()
self.assertEqual(bars, [])
self.assertTrue(execute.called)
with patched_execute as execute:
bars = Bar.cq.limit(11).all()
self.assertEqual(bars, [])
self.assertFalse(execute.called)
bar = Bar.create(name='a', xixi='a', age=1)
with patched_execute as execute:
bars = Bar.cq.filter(xixi='a', age=1).limit(11).all()
self.assertEqual(len(bars), 1)
self.assertTrue(execute.called)
self.assertEqual(execute.call_count, 2)
with patched_execute as execute:
bars = Bar.cq.filter(xixi='a', age=1).limit(11).all()
self.assertEqual(len(bars), 1)
self.assertFalse(execute.called)
with patched_execute as execute:
bars = Bar.cq.limit(10).all()
self.assertEqual(len(bars), 1)
self.assertTrue(execute.called)
bar.update(name='a+')
with patched_execute as execute:
bars = Bar.cq.filter(xixi='a', age=1).limit(11).all()
self.assertEqual(len(bars), 1)
self.assertTrue(execute.called)
self.assertEqual(execute.call_count, 2)
with patched_execute as execute:
bars = Bar.cq.filter(xixi='a', age=1).limit(11).all()
self.assertEqual(len(bars), 1)
self.assertFalse(execute.called)
bar.update(name='a')
with patched_execute as execute:
bars = Bar.cq.filter(xixi='a', age=1).limit(11).all()
self.assertEqual(len(bars), 1)
self.assertTrue(execute.called)
self.assertEqual(execute.call_count, 2)
with patched_execute as execute:
bars = Bar.cq.filter(xixi='a', age=1).limit(11).all()
self.assertEqual(len(bars), 1)
self.assertFalse(execute.called)
bar.update(word='1')
with patched_execute as execute:
bars = Bar.cq.filter(xixi='a', age=1).limit(11).all()
self.assertEqual(len(bars), 1)
self.assertTrue(execute.called)
self.assertEqual(execute.call_count, 1)
self.assertEqual(bars[0].word, bar.word)
bar.update(word='2')
Bar.cache.get(bar.name)
with patched_execute as execute:
bars = Bar.cq.filter(xixi='a', age=1).limit(11).all()
self.assertEqual(len(bars), 1)
self.assertFalse(execute.called)
self.assertEqual(bars[0].word, bar.word)
bar.update(xixi='b')
with patched_execute as execute:
bars = Bar.cq.filter(xixi='a', age=1).limit(11).all()
self.assertEqual(len(bars), 0)
self.assertTrue(execute.called)
with patched_execute as execute:
bars = Bar.cq.filter(xixi='a', age=1).limit(11).all()
self.assertEqual(len(bars), 0)
self.assertFalse(execute.called)
with patched_execute as execute:
bars = Bar.cq.filter(xixi='b', age=1).limit(11).all()
self.assertEqual(len(bars), 1)
self.assertTrue(execute.called)
with patched_execute as execute:
bars = Bar.cq.filter(xixi='b', age=1).limit(11).all()
self.assertEqual(len(bars), 1)
self.assertFalse(execute.called)
bar.update(word='a')
bar = Bar.create(name='b', xixi='b', age=1, word='b')
bar = Bar.create(name='c', xixi='b', age=1, word='c')
bar = Bar.create(name='d', xixi='b', age=1, word='d')
with patched_execute as execute:
bars = Bar.cq.filter(xixi='b', age=1).limit(11).all()
self.assertEqual(len(bars), 4)
self.assertTrue(execute.called)
with patched_execute as execute:
bars = Bar.cq.filter(xixi='b', age=1).limit(11).all()
self.assertEqual(len(bars), 4)
self.assertFalse(execute.called)
with patched_execute as execute:
bars = Bar.cq.filter(xixi='b', age=1).limit(2).all()
self.assertEqual(len(bars), 2)
self.assertFalse(execute.called)
with patched_execute as execute:
bars = Bar.cache.gets_by(xixi='b', age=1, start=3,
limit=2)
self.assertEqual(len(bars), 1)
self.assertFalse(execute.called)
with patched_execute as execute:
bars = Bar.cq.filter(xixi='b', age=1).order_by(
'-name'
).limit(3).all()
self.assertEqual(len(bars), 3)
self.assertEqual(['d', 'c', 'b'], list(map(lambda x: x.name, bars)))
self.assertTrue(execute.called)
with patched_execute as execute:
bars = Bar.cq.filter(xixi='b', age=1).order_by(
'-name'
).limit(3).all()
self.assertEqual(len(bars), 3)
self.assertEqual(['d', 'c', 'b'], list(map(lambda x: x.name, bars)))
self.assertFalse(execute.called)
with patched_execute as execute:
bars = Bar.cq.filter(xixi='b', age=1).order_by(
'name'
).limit(3).all()
self.assertEqual(len(bars), 3)
self.assertTrue(execute.called)
with patched_execute as execute:
bars = Bar.cq.filter(xixi='b', age=1).order_by(
'name'
).limit(3).all()
self.assertEqual(len(bars), 3)
self.assertEqual(['a', 'b', 'c'], list(map(lambda x: x.name, bars)))
self.assertFalse(execute.called)
with patched_execute as execute:
bars = Bar.cq.filter(xixi='b', age=1).order_by(
'-age', 'word'
).offset(3).limit(2).all()
self.assertEqual(len(bars), 1)
self.assertTrue(execute.called)
with patched_execute as execute:
bars = Bar.cq.filter(xixi='b', age=1).order_by(
'-age', 'word'
).offset(3).limit(2).all()
self.assertEqual(len(bars), 1)
self.assertFalse(execute.called)
_bar = bars[0]
_bar.update(xixi='c')
with patched_execute as execute:
bars = Bar.cq.filter(xixi='b', age=1).order_by(
'-age', 'word'
).offset(2).limit(2).all()
self.assertEqual(len(bars), 1)
self.assertTrue(execute.called)
_bar.update(xixi='b')
with patched_execute as execute:
bars = Bar.cq.filter(xixi='b', age=1).order_by(
'word', 'age'
).offset(3).limit(2).all()
self.assertEqual(len(bars), 1)
self.assertTrue(execute.called)
with patched_execute as execute:
bars = Bar.cq.filter(xixi='b', age=1).order_by(
'word', 'age'
).offset(3).limit(2).all()
self.assertEqual(len(bars), 1)
self.assertFalse(execute.called)
Bar.create(name='e', xixi='b', age=1, word='e')
Bar.create(name='f', xixi='b', age=1, word='f')
with patched_execute as execute:
bars = Bar.cq.filter(xixi='b', age=1).order_by(
'word', 'age'
).offset(3).limit(2).all()
self.assertEqual(len(bars), 2)
self.assertTrue(execute.called)
with patched_execute as execute:
bars = Bar.cq.filter(xixi='b', age=1).order_by(
'word', 'age'
).offset(3).limit(2).all()
self.assertEqual(len(bars), 2)
self.assertFalse(execute.called)
with patched_execute as execute:
bars = Bar.cq.filter(name='e').all()
self.assertEqual(len(bars), 1)
self.assertFalse(execute.called)
Foo.create(name='1', age=1)
Foo.create(name='2', age=1)
Foo.create(name='3', age=2)
with no_pk(Foo):
Foo.cq.filter(age=1).limit(3).all()
foos = Foo.cq.filter(age=3).limit(3).all()
self.assertEqual(foos, [])
def test_count_by(self):
with patched_execute as execute:
c = Bar.cq.filter(xixi='a', age=1).count()
self.assertEqual(c, 0)
self.assertTrue(execute.called)
with patched_execute as execute:
c = Bar.cq.filter(xixi='a', age=1).count()
self.assertEqual(c, 0)
self.assertFalse(execute.called)
with patched_execute as execute:
c = Bar.cq.filter(xixi='b', age=1).count()
self.assertEqual(c, 0)
self.assertTrue(execute.called)
with patched_execute as execute:
c = Bar.cq.filter(xixi='b', age=1).count()
self.assertEqual(c, 0)
self.assertFalse(execute.called)
with patched_execute as execute:
c = Bar.cq.filter().count()
self.assertEqual(c, 0)
self.assertTrue(execute.called)
with patched_execute as execute:
c = Bar.cq.filter().count()
self.assertEqual(c, 0)
self.assertFalse(execute.called)
with patched_execute as execute:
c = Bar.cq.filter(name='a').count()
self.assertEqual(c, 0)
self.assertTrue(execute.called)
with patched_execute as execute:
c = Bar.cq.filter(name='a').count()
self.assertEqual(c, 0)
self.assertFalse(execute.called)
with patched_execute as execute:
c = Bar.cq.filter(word='a').count()
self.assertEqual(c, 0)
self.assertTrue(execute.called)
with patched_execute as execute:
c = Bar.cq.filter(word='a').count()
self.assertEqual(c, 0)
self.assertTrue(execute.called)
Bar.create(name='a', xixi='b', age=1)
with patched_execute as execute:
c = Bar.cq.filter(xixi='a', age=1).count()
self.assertEqual(c, 0)
self.assertFalse(execute.called)
with patched_execute as execute:
c = Bar.cq.filter(xixi='b', age=1).count()
self.assertEqual(c, 1)
self.assertTrue(execute.called)
with patched_execute as execute:
c = Bar.cq.filter().count()
self.assertEqual(c, 1)
self.assertTrue(execute.called)
with patched_execute as execute:
c = Bar.cq.filter(name='a').count()
self.assertEqual(c, 1)
self.assertTrue(execute.called)
Bar.create(name='b', xixi='a', age=1)
with patched_execute as execute:
c = Bar.cq.filter(xixi='a', age=1).count()
self.assertEqual(c, 1)
self.assertTrue(execute.called)
with patched_execute as execute:
c = Bar.cq.filter(xixi='b', age=1).count()
self.assertEqual(c, 1)
self.assertFalse(execute.called)
bar = Bar.create(name='c', xixi='b', age=1)
with patched_execute as execute:
c = Bar.cq.filter(xixi='b', age=1).count()
self.assertEqual(c, 2)
self.assertTrue(execute.called)
with patched_execute as execute:
c = Bar.cq.filter(xixi='b', age=1).count()
self.assertEqual(c, 2)
self.assertFalse(execute.called)
bar.update(xixi='c')
with patched_execute as execute:
c = Bar.cq.filter(xixi='b', age=1).count()
self.assertEqual(c, 1)
self.assertTrue(execute.called)
with patched_execute as execute:
c = Bar.cq.filter(xixi='b', age=1).count()
self.assertEqual(c, 1)
self.assertFalse(execute.called)
def test_order_by(self):
Dummy.create(name='foo0', age=3)
Dummy.create(name='foo2', age=6)
Dummy.create(name='foo2', age=7)
Dummy.create(name='foo3', age=4)
Dummy.create(name='foo4', age=2)
rv = Dummy.cq('age').order_by('age').all()
self.assertEqual(rv, [2, 3, 4, 6, 7])
rv = Dummy.cq('age').order_by(Dummy.age).all()
self.assertEqual(rv, [2, 3, 4, 6, 7])
rv = Dummy.cq('age').order_by(Dummy.age.desc()).all()
self.assertEqual(rv, [7, 6, 4, 3, 2])
age = Dummy.age.alias('a')
rv = Dummy.cq(age).order_by(age).all()
self.assertEqual(rv, [2, 3, 4, 6, 7])
rv = Dummy.cq(age).order_by(age.desc()).all()
self.assertEqual(rv, [7, 6, 4, 3, 2])
rv = Dummy.cq(age).order_by(Dummy.id.asc(), Dummy.age.desc()).all()
self.assertEqual(rv, [3, 6, 7, 4, 2])
rv = Dummy.cq(age).order_by(Dummy.age.in_([2, 4]).desc(), Dummy.id.desc()).all() # noqa
self.assertEqual(rv, [2, 4, 7, 6, 3])
rv = Dummy.cq(age).order_by(Dummy.age.in_([2, 4]).desc()).order_by(Dummy.id.desc()).all() # noqa
self.assertEqual(rv, [2, 4, 7, 6, 3])
def test_group_by(self):
Dummy.create(name='foo0', age=1)
Dummy.create(name='foo2', age=2)
Dummy.create(name='foo2', age=2)
Dummy.create(name='foo3', age=3)
Dummy.create(name='foo4', age=3)
rv = Dummy.cq('age', funcs.COUNT(1)).group_by('age').order_by('age').all()
self.assertEqual(rv, [(1, 1), (2, 2), (3, 2)])
rv = Dummy.cq('name', 'age').group_by('name', 'age').order_by('age').all()
self.assertEqual(rv, [('foo0', 1), ('foo2', 2),
('foo3', 3), ('foo4', 3)])
rv = Dummy.cq('name', 'age').group_by('name').group_by('age').order_by('age').all()
self.assertEqual(rv, [('foo0', 1), ('foo2', 2),
('foo3', 3), ('foo4', 3)])
def test_having(self):
# FIXME(PG)
if is_pg:
return
Dummy.create(name='foo0', age=1)
Dummy.create(name='foo2', age=2)
Dummy.create(name='foo2', age=2)
Dummy.create(name='foo3', age=3)
Dummy.create(name='foo4', age=3)
Dummy.create(name='foo5', age=3)
c = COUNT(1).alias('c')
rv = Dummy.cq('age', c).group_by(
'age'
).having(c > 2).all()
self.assertEqual(rv, [(3, 3)])
def test_join(self):
Dummy.create(name='dummy0', age=3)
Dummy.create(name='dummy1', age=6)
Dummy.create(name='dummy2', age=9)
Foo.create(name='foo0', age=1)
Foo.create(name='foo1', age=2)
Foo.create(name='foo2', age=3)
Foo.create(name='foo3', age=3)
Foo.create(name='foo4', age=6)
Foo.create(name='foo5', age=6)
Foo.create(name='foo6', age=6)
q = Foo.cq.join(Dummy).on(Foo.age == Dummy.age)
res = q.all()
self.assertEqual(len(res), 5)
self.assertEqual({x.name for x in res}, {
'foo2', 'foo3', 'foo4', 'foo5', 'foo6'
})
q = Dummy.cq.join(Foo).on(Foo.age == Dummy.age)
res = q.all()
self.assertEqual(len(res), 5)
self.assertEqual({x.name for x in res}, {
'dummy0', 'dummy0', 'dummy1', 'dummy1', 'dummy1'
})
q = Dummy.cq.join(Foo).on(Foo.age == Dummy.age,
Dummy.age == 6)
res = q.all()
self.assertEqual(len(res), 3)
self.assertEqual({x.name for x in res}, {
'dummy1', 'dummy1', 'dummy1'
})
q = Dummy.cq(DISTINCT(Dummy.id)).join(Foo).on(
Foo.age == Dummy.age
).order_by(
Foo.id.desc(), Dummy.age.desc()
)
res = q.all()
self.assertEqual(res, [2, 1])
q = Dummy.cq(DISTINCT(Dummy.id)).left_join(Foo).on(
Foo.age == Dummy.age
).order_by(
Foo.id.desc(), Dummy.age.desc()
)
res = q.all()
if is_pg:
self.assertEqual(res, [3, 2, 1])
else:
self.assertEqual(res, [2, 1, 3])
q = Dummy.cq(DISTINCT(Dummy.id)).right_join(Foo).on(
Foo.age == Dummy.age
).order_by(
Foo.id.desc(), Dummy.age.desc()
)
res = q.all()
self.assertEqual(res, [2, 1, None])
def test_sum(self):
Dummy.create(name='foo0', age=1)
Dummy.create(name='foo2', age=2)
Dummy.create(name='foo3', age=3)
rv = Dummy.cq(SUM(Dummy.age)).first()
self.assertEqual(rv, 6)
def test_avg(self):
Dummy.create(name='foo0', age=1)
Dummy.create(name='foo2', age=2)
Dummy.create(name='foo3', age=3)
rv = Dummy.cq(AVG(Dummy.age)).first()
self.assertEqual(rv, 2)
|
python
|
from setuptools import setup
setup(
name='german_transliterate',
version='0.1.3',
author='repodiac',
author_email='[email protected]',
packages=['german_transliterate'],
url='http://github.com/repodiac/german_transliterate',
license='CC-BY-4.0 License',
description='german_transliterate can clean and transliterate (i.e. normalize) German text including abbreviations, numbers, timestamps etc.',
long_description=open('README.md', encoding="UTF-8").read(),
install_requires=[
"num2words",
],
)
|
python
|
# This file is part of Sequana software
#
# Copyright (c) 2016-2021 - Sequana Development Team
#
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
import os
import sys
import shutil
from easydev import execute, TempFile, md5
from sequana.lazy import pandas as pd
from sequana.lazy import pylab
from sequana.lazy import numpy as np
from sequana.misc import wget
from sequana import sequana_config_path
from colormap import Colormap
import colorlog
logger = colorlog.getLogger(__name__)
__all__ = [
"KrakenResults",
"KrakenPipeline",
"KrakenAnalysis",
"KrakenDownload",
"KrakenSequential",
"KrakenDB",
]
class KrakenDB:
"""Class to handle a kraken DB"""
def __init__(self, filename):
if isinstance(filename, KrakenDB):
filename = filename.path
if os.path.exists(filename) is False:
possible_path = sequana_config_path + "/kraken2_dbs/" + filename
if os.path.exists(possible_path) is True:
self.path = possible_path
else:
msg = f"{filename} not found locally or in {sequana_config_path}."
raise IOError(msg)
else:
self.path = os.path.abspath(filename)
self.name = os.path.basename(self.path)
def _get_database_version(self):
if os.path.exists(self.path + os.sep + "hash.k2d"):
return "kraken2"
else: # pragma: no cover
logger.error(
"Sequana supports kraken2 only. Looks like an invalid kraken database directory"
)
version = property(_get_database_version)
def __repr__(self):
return self.name
class KrakenResults(object):
"""Translate Kraken results into a Krona-compatible file
If you run a kraken analysis with :class:`KrakenAnalysis`, you will end up
with a file e.g. named kraken.out (by default).
You could use kraken-translate but then you need extra parsing to convert
into a Krona-compatible file. Here, we take the output from kraken and
directly transform it to a krona-compatible file.
    kraken2 uses the --use-names option, which needs extra parsing.
::
k = KrakenResults("kraken.out")
k.kraken_to_krona()
    The expected format then looks like::
C HISEQ:426:C5T65ACXX:5:2301:18719:16377 1 203 1:71 A:31 1:71
C HISEQ:426:C5T65ACXX:5:2301:21238:16397 1 202 1:71 A:31 1:71
Where each row corresponds to one read.
::
"562:13 561:4 A:31 0:1 562:3" would indicate that:
the first 13 k-mers mapped to taxonomy ID #562
the next 4 k-mers mapped to taxonomy ID #561
the next 31 k-mers contained an ambiguous nucleotide
the next k-mer was not in the database
the last 3 k-mers mapped to taxonomy ID #562
    For kraken2, the format is slightly different since it depends on whether the input is paired or not.
If paired, ::
C read1 2697049 151|151 2697049:117 |:| 0:1 2697049:116
See kraken documentation for details.
    .. note:: a taxon ID of 1 (root) means that the read is classified but matches
        different domains. https://github.com/DerrickWood/kraken/issues/100
.. note:: This takes care of fetching taxons and the corresponding lineages
from online web services.
"""
def __init__(self, filename="kraken.out", verbose=True):
""".. rubric:: **constructor**
:param filename: the input from KrakenAnalysis class
"""
self.filename = filename
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if on_rtd is False:
from sequana.taxonomy import Taxonomy
self.tax = Taxonomy(verbose=verbose)
self.tax.download_taxonomic_file() # make sure it is available locally
else: # pragma: no cover
class Taxonomy(object): # pragma: no cover
from sequana import sequana_data # must be local
df = pd.read_csv(sequana_data("test_taxon_rtd.csv"), index_col=0)
def get_lineage_and_rank(self, x):
# Note that we add the name as well here
ranks = [
"kingdom",
"phylum",
"class",
"order",
"family",
"genus",
"species",
"name",
]
return [(self.df.loc[x][rank], rank) for rank in ranks]
self.tax = Taxonomy()
if filename:
# This initialise the data
self._parse_data()
self._data_created = False
def get_taxonomy_db(self, ids):
"""Retrieve taxons given a list of taxons
:param list ids: list of taxons as strings or integers. Could also
be a single string or a single integer
:return: a dataframe
.. note:: the first call first loads all taxons in memory and takes a
few seconds but subsequent calls are much faster
"""
# filter the lineage to keep only information from one of the main rank
# that is superkingdom, kingdom, phylum, class, order, family, genus and
# species
ranks = ("kingdom", "phylum", "class", "order", "family", "genus", "species")
if isinstance(ids, int):
ids = [ids]
if len(ids) == 0:
return pd.DataFrame()
if isinstance(ids, list) is False:
ids = [ids]
lineage = [self.tax.get_lineage_and_rank(x) for x in ids]
# Now, we filter each lineage to keep only relevant ranks
# There are a few caveats though as explained hereafter
# we merge the kingdom and superkingdom and subkingdom
results = []
for i, this in enumerate(lineage):
default = dict.fromkeys(ranks, " ")
for entry in this:
if entry[1] in ranks:
default[entry[1]] = entry[0]
# if there is a superkingdom, overwrite the kingdom
for entry in this:
if entry[1] == "superkingdom":
default["kingdom"] = entry[0]
if default["kingdom"] == " ":
for entry in this:
if entry[1] == "subkingdom":
default["kingdom"] = entry[0]
# in theory, we have now populated all ranks;
# Yet, there are several special cases (need examples):
# 1. all ranks are filled: perfect
# 2. some ranks are empty: we fill them with a space.
# 3. all ranks are empty:
# a. this is the root
            # b. this may be expected, e.g. for an artificial sequence
# c. all ranks below species are empty --> this is probably
# what we will get e.g. for plasmids
# case 3.b
if set([x[1] for x in this]) == {"no rank", "species"}:
# we can ignore the root and keep the others
# if we end up with more than 6 entries, this is annoying
# let us put a warning for now.
count = 0
for x in this:
if x[1] == "no rank" and x[0] != "root":
default[ranks[count]] = x[0]
count += 1
if count > 6:
logger.warning("too many no_rank in taxon{}".format(ids[i]))
break
            # for the name, we take the last entry, which is supposed to be the
            # scientific name found, i.e. the scientific name of the taxon itself.
            # Note that this is not always the species rank name
# For instance for the taxon 2509511, the ID correspond to
# a subgenus of Sarbecovirus and has no species entry.
last_name, last_rank = this[-1]
if last_rank not in ["species", "no rank"]:
default["name"] = f"{last_rank}:{last_name}"
else:
default["name"] = ""
results.append(default)
df = pd.DataFrame.from_records(results)
df.index = ids
df = df[list(ranks) + ["name"]]
df.index = df.index.astype(int)
return df
def _parse_data(self):
taxonomy = {}
logger.info("Reading kraken data from {}".format(self.filename))
columns = ["status", "taxon", "length"]
# we select only col 0,2,3 to save memory, which is required on very
# large files
try:
# each call to concat in the for loop below
# will take time and increase with chunk position.
# for 15M reads, this has a big cost. So chunksize set to 1M
# is better than 1000 and still reasonable in memory
reader = pd.read_csv(
self.filename,
sep="\t",
header=None,
usecols=[0, 2, 3],
chunksize=1000000,
)
except pd.errors.EmptyDataError: # pragma: no cover
logger.warning("Empty files. 100%% unclassified ?")
self.unclassified = "?" # size of the input data set
self.classified = 0
self._df = pd.DataFrame([], columns=columns)
self._taxons = self._df.taxon
return
except pd.errors.ParserError:
# raise NotImplementedError # this section is for the case
# #only_classified_output when there is no found classified read
raise NotImplementedError
for chunk in reader:
try:
self._df
self._df = pd.concat([self._df, chunk])
except AttributeError:
self._df = chunk
self._df.columns = columns
count = sum(self._df.taxon == 1)
percentage = count / len(self._df) * 100
if percentage >= 1:
logger.warning(
"Found {} taxons of classified reads with root ID (1) ({} %)".format(
count, round(percentage, 2)
)
)
# This gives the list of taxons as index and their amount
# above, we select only columns 0, 2, 3 the column are still labelled
# 0, 2, 3 in the df
self._taxons = self._df.groupby("taxon").size()
try:
self._taxons.drop(0, inplace=True)
except:
pass # 0 may not be there
self._taxons.sort_values(ascending=False, inplace=True)
category = self.df.groupby("status").size()
if "C" in category.index:
self.classified = category["C"]
else:
self.classified = 0
if "U" in category.index:
self.unclassified = category["U"]
else:
self.unclassified = 0
logger.debug(self.taxons.iloc[0:10])
def _get_taxons(self):
try:
return self._taxons
except:
self._parse_data()
return self._taxons
taxons = property(_get_taxons)
def _get_df(self):
try:
return self._df
except:
self._parse_data()
return self._df
df = property(_get_df)
def _get_df_with_taxon(self, dbname):
df = self.get_taxonomy_db([int(x) for x in self.taxons.index])
df["count"] = self.taxons.values
df.reset_index(inplace=True)
newrow = len(df)
df.loc[newrow] = "Unclassified"
df.loc[newrow, "count"] = self.unclassified
df.loc[newrow, "index"] = -1
df.rename(columns={"index": "taxon"}, inplace=True)
df["percentage"] = df["count"] / df["count"].sum() * 100
starter = ["taxon", "count", "percentage"]
df = df[starter + [x for x in df.columns if x not in starter]]
df.sort_values(by="percentage", inplace=True, ascending=False)
return df
def kraken_to_csv(self, filename, dbname):
df = self._get_df_with_taxon(dbname)
df.to_csv(filename, index=False)
return df
def kraken_to_json(self, filename, dbname):
df = self._get_df_with_taxon(dbname)
try:
df.to_json(filename, indent=4, orient="records")
except:
df.to_json(filename, orient="records")
return df
def kraken_to_krona(self, output_filename=None, nofile=False):
"""
:return: status: True is everything went fine otherwise False
"""
if output_filename is None:
output_filename = self.filename + ".summary"
taxon_to_find = list(self.taxons.index)
if len(taxon_to_find) == 0:
logger.warning(
"No reads were identified. You will need a more complete database"
)
self.output_filename = output_filename
with open(output_filename, "w") as fout:
fout.write("%s\t%s" % (self.unclassified, "Unclassified"))
return False
if len(taxon_to_find) == 0:
return False
df = self.get_taxonomy_db(taxon_to_find)
self.lineage = [";".join(this) for this in df[df.columns[0:-1]].values]
self.scnames = list(df["name"].values) # do we need a cast ?
# Now save the file
self.output_filename = output_filename
with open(output_filename, "w") as fout:
for i, this in enumerate(self.lineage):
taxon = taxon_to_find[i]
count = self.taxons.loc[taxon]
line = str(count) + "\t" + "\t".join(this.split(";"))
line += " " + self.scnames[i]
fout.write(line + "\n")
try:
fout.write("%s\t%s" % (self.unclassified, "Unclassified"))
except:
pass # unclassified may not exists if all classified
self._data_created = True
return True
def plot2(self, kind="pie", fontsize=12):
"""This is the simplified static krona-like plot included in HTML reports"""
import matplotlib.pyplot as plt
taxons = self.taxons.copy()
if len(self.taxons.index) == 0:
return None
df = self.get_taxonomy_db(list(self.taxons.index))
self.dd = df
if self.unclassified > 0:
df.loc[-1] = ["Unclassified"] * 8
taxons[-1] = self.unclassified
df["ratio"] = taxons / taxons.sum() * 100
data_class = df.groupby(["kingdom", "class"]).sum()
data_species = df.groupby(["kingdom", "species"]).sum()
X = []
Y = []
Z = []
labels = []
zlabels, ztaxons = [], []
kingdom_colors = []
inner_colors = []
inner_labels = []
species_colors = []
taxons = df["species"].reset_index().set_index("species")
for kingdom in data_class.index.levels[0]:
# kingdom info
X.append(data_class.loc[kingdom].ratio.sum())
# class info
y = list(data_class.loc[kingdom].ratio.values)
temp = data_class.loc[kingdom]
y1 = temp.query("ratio>=0.5")
y2 = temp.query("ratio<0.5")
y = list(y1.ratio.values) + list(y2.ratio.values)
inner_labels += list(y1.ratio.index) + [""] * len(y2.ratio)
Y.extend(y)
# species info
temp = data_species.loc[kingdom]
z1 = temp.query("ratio>=0.5")
z2 = temp.query("ratio<0.5")
z = list(z1.ratio.values) + list(z2.ratio.values)
zlabels += list(z1.ratio.index) + [""] * len(z2.ratio)
Z.extend(z)
if kingdom.strip():
labels.append(kingdom)
else:
labels.append("undefined/unknown taxon")
if kingdom == "Eukaryota":
this_cmap = plt.cm.Purples
elif kingdom == "Unclassified":
this_cmap = plt.cm.Greys
elif kingdom == "Bacteria":
this_cmap = plt.cm.Reds
elif kingdom == "Viruses":
this_cmap = plt.cm.Greens
elif kingdom == "Archaea":
this_cmap = Colormap().cmap_linear("yellow", "yellow", "orange")
else:
this_cmap = Colormap().cmap_linear(
"light gray", "gray(w3c)", "dark gray"
)
kingdom_colors.append(this_cmap(0.8))
inner_colors.extend(this_cmap(np.linspace(0.6, 0.2, len(y))))
species_colors.extend(this_cmap(np.linspace(0.6, 0.2, len(z))))
fig, ax = pylab.subplots(figsize=(9.5, 7))
size = 0.2
pct_distance = 0
w1, l1 = ax.pie(
X,
radius=1 - 2 * size,
colors=kingdom_colors,
wedgeprops=dict(width=size, edgecolor="w"),
labels=labels,
labeldistance=0.4,
)
w2, l2 = ax.pie(
Y,
radius=1 - size,
colors=inner_colors,
labels=[x.replace("Unclassified", "") for x in inner_labels],
wedgeprops=dict(width=size, edgecolor="w"),
labeldistance=0.65,
)
# labels can be long. Let us cut them
zlabels2 = []
for this in zlabels:
if len(this) > 30:
zlabels2.append(this[0:30] + "...")
else:
zlabels2.append(this)
w3, l3 = ax.pie(
Z,
radius=1,
colors=species_colors,
labels=[x.replace("Unclassified", "") for x in zlabels2],
wedgeprops=dict(width=size, edgecolor="w"),
labeldistance=0.9,
)
ax.set(aspect="equal")
pylab.subplots_adjust(right=1, left=0, bottom=0, top=1)
pylab.legend(labels, title="kingdom", loc="upper right", fontsize=fontsize)
import webbrowser
mapper = {k: v for k, v in zip(zlabels, Z)}
def on_pick(event):
wedge = event.artist
label = wedge.get_label()
if mapper[label] > 1:
taxon = taxons.loc[label, "index"]
webbrowser.open(
"https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?id={}".format(
taxon
)
)
else:
wedge.set_color("white")
for wedge in w3:
wedge.set_picker(True)
fig.canvas.mpl_connect("pick_event", on_pick)
# this is used to check that everything was okay in the rules
return df
def plot(
self,
kind="pie",
cmap="tab20c",
threshold=1,
radius=0.9,
textcolor="red",
delete_krona_file=False,
**kargs,
):
"""A simple non-interactive plot of taxons
:return: None if no taxon were found and a dataframe otherwise
A Krona Javascript output is also available in :meth:`kraken_to_krona`
.. plot::
:include-source:
from sequana import KrakenResults, sequana_data
test_file = sequana_data("kraken.out", "doc")
k = KrakenResults(test_file)
df = k.plot(kind='pie')
.. seealso:: to generate the data see :class:`KrakenPipeline`
or the standalone application **sequana_taxonomy**.
.. todo:: For a future release, we could use this kind of plot
https://stackoverflow.com/questions/57720935/how-to-use-correct-cmap-colors-in-nested-pie-chart-in-matplotlib
"""
if len(self._df) == 0:
return
if self._data_created == False:
status = self.kraken_to_krona()
if kind not in ["barh", "pie"]:
logger.error("kind parameter: Only barh and pie are supported")
return
# This may have already been called but maybe not. This is not time
# consuming, so we call it again here
if len(self.taxons.index) == 0:
return None
df = self.get_taxonomy_db(list(self.taxons.index))
if self.unclassified > 0:
df.loc[-1] = ["Unclassified"] * 8
data = self.taxons.copy()
# we add the unclassified only if needed
if self.unclassified > 0:
data.loc[-1] = self.unclassified
data = data / data.sum() * 100
assert threshold > 0 and threshold < 100
# everything below the threshold (1) is gather together and summarised
# into 'others'
others = data[data < threshold].sum()
data = data[data >= threshold]
names = df.loc[data.index]["name"]
data.index = names.values
if others > 0:
data.loc["others"] = others
try:
data.sort_values(inplace=True)
except:
data.sort(inplace=True)
pylab.figure(figsize=(10, 8))
pylab.clf()
self.dd = data
if kind == "pie":
ax = data.plot(
kind=kind, cmap=cmap, autopct="%1.1f%%", radius=radius, **kargs
)
pylab.ylabel(" ")
for text in ax.texts:
# large, x-small, small, None, x-large, medium, xx-small,
# smaller, xx-large, larger
text.set_size("small")
text.set_color(textcolor)
for wedge in ax.patches:
wedge.set_linewidth(1)
wedge.set_edgecolor("k")
self.ax = ax
elif kind == "barh":
ax = data.plot(kind=kind, **kargs)
pylab.xlabel(" percentage ")
if delete_krona_file:
os.remove(self.filename + ".summary")
return data
def to_js(self, output="krona.html"):
if self._data_created == False:
status = self.kraken_to_krona()
execute("ktImportText %s -o %s" % (self.output_filename, output))
def boxplot_classified_vs_read_length(self):
"""Show distribution of the read length grouped by classified or not"""
# if paired and kraken2, there are | in length to separate both reads.
# to simplify, if this is the case, we will just take the first read
# length for now.
df = self.df.copy()
try: # kraken2
df.length = df.length.apply(lambda x: int(x.split("|")[0]))
except:
pass
df[["status", "length"]].groupby("status").boxplot()
return df
def histo_classified_vs_read_length(self):
"""Show distribution of the read length grouped by classified or not"""
# if paired and kraken2, there are | in length to separate both reads.
# to simplify, if this is the case, we will just take the first read
# length for now.
df = self.df.copy()
if "|" in str(df.length.values[0]):
df.length = df.length.apply(lambda x: int(x.split("|")[0]))
df = df[["status", "length"]]
M = df["length"].max()
df.hist(by="status", sharey=True, bins=pylab.linspace(0, M, int(M / 5)))
axes = pylab.gcf().get_axes()
axes[0].set_xlabel("read length")
axes[1].set_xlabel("read length")
axes[1].grid(True)
axes[0].grid(True)
return df
class KrakenPipeline(object):
"""Used by the standalone application sequana_taxonomy
This runs Kraken on a set of FastQ files, transform the results
in a format compatible for Krona, and creates a Krona HTML report.
::
from sequana import KrakenPipeline
kt = KrakenPipeline(["R1.fastq.gz", "R2.fastq.gz"], database="krakendb")
kt.run()
kt.show()
.. warning:: We do not provide Kraken database within sequana. You may
either download a database from https://ccb.jhu.edu/software/kraken/
or use this class to download a toy example that will
be stored in e.g .config/sequana under Unix platforms.
See :class:`KrakenDownload`.
.. seealso:: We provide a standalone application of this class, which is
called sequana_taxonomy and can be used within a command shell.
"""
def __init__(
self,
fastq,
database,
threads=4,
output_directory="kraken",
dbname=None,
confidence=0,
):
""".. rubric:: Constructor
:param fastq: either a fastq filename or a list of 2 fastq filenames
:param database: the path to a valid Kraken database
:param threads: number of threads to be used by Kraken
:param output_directory: output filename of the Krona HTML page
:param dbname:
Description: internally, once Kraken has performed an analysis, reads
        are associated to a taxon (or not). We then find the corresponding
lineage and scientific names to be stored within a Krona formatted file.
        ktImportText is then used to create the Krona page.
"""
# Set and create output directory
self.output_directory = output_directory
try:
os.makedirs(output_directory)
except FileExistsError:
pass
self.database = database
self.ka = KrakenAnalysis(fastq, database, threads, confidence=confidence)
if dbname is None:
self.dbname = os.path.basename(database)
else:
self.dbname = dbname
def run(
self,
output_filename_classified=None,
output_filename_unclassified=None,
only_classified_output=False,
):
"""Run the analysis using Kraken and create the Krona output
.. todo:: reuse the KrakenResults code to simplify this method.
"""
# Run Kraken (KrakenAnalysis)
kraken_results = self.output_directory + os.sep + "kraken.out"
self.ka.run(
output_filename=kraken_results,
output_filename_unclassified=output_filename_unclassified,
output_filename_classified=output_filename_classified,
only_classified_output=only_classified_output,
)
# Translate kraken output to a format understood by Krona and save png
# image
self.kr = KrakenResults(kraken_results, verbose=False)
# we save the pie chart
try:
self.kr.plot2(kind="pie")
except Exception as err:
logger.warning(err)
self.kr.plot(kind="pie")
pylab.savefig(self.output_directory + os.sep + "kraken.png")
# we save information about the unclassified reads (length)
try:
self.kr.boxplot_classified_vs_read_length()
pylab.savefig(self.output_directory + os.sep + "boxplot_read_length.png")
except Exception as err:
logger.warning("boxplot read length could not be computed")
try:
self.kr.histo_classified_vs_read_length()
pylab.savefig(self.output_directory + os.sep + "hist_read_length.png")
except Exception as err:
logger.warning("hist read length could not be computed")
prefix = self.output_directory + os.sep
self.kr.kraken_to_json(prefix + "kraken.json", self.dbname)
self.kr.kraken_to_csv(prefix + "kraken.csv", self.dbname)
# Transform to Krona HTML
from snakemake import shell
kraken_html = self.output_directory + os.sep + "kraken.html"
status = self.kr.kraken_to_krona(output_filename=prefix + "kraken.out.summary")
if status is True:
shell(
"ktImportText %s -o %s" % (prefix + "kraken.out.summary", kraken_html)
)
else:
shell("touch {}".format(kraken_html))
# finally a summary
database = KrakenDB(self.database)
summary = {"database": [database.name]}
summary[database.name] = {"C": int(self.kr.classified)}
summary["U"] = int(self.kr.unclassified)
summary["total"] = int(self.kr.unclassified + self.kr.classified)
# redundant but useful and compatible with sequential approach
summary["unclassified"] = int(self.kr.unclassified)
summary["classified"] = int(self.kr.classified)
return summary
def show(self):
"""Opens the filename defined in the constructor"""
from easydev import onweb
onweb(self.output)
class KrakenAnalysis(object):
"""Run kraken on a set of FastQ files
    In order to run a Kraken analysis, we first need a local database.
    We provide a toy example. The ToyDB is downloadable as follows (you will
    need to run the following code only once)::
from sequana import KrakenDownload
kd = KrakenDownload()
kd.download_kraken_toydb()
.. seealso:: :class:`KrakenDownload` for more databases
The path to the database is required to run the analysis. It has been
stored in the directory ./config/sequana/kraken_toydb under Linux platforms
The following code should be platform independent::
import os
from sequana import sequana_config_path
        database = sequana_config_path + os.sep + "kraken_toydb"
Finally, we can run the analysis on the toy data set::
from sequana import sequana_data
data = sequana_data("Hm2_GTGAAA_L005_R1_001.fastq.gz", "data")
ka = KrakenAnalysis(data, database=database)
ka.run()
This creates a file named *kraken.out*. It can be interpreted with
:class:`KrakenResults`
"""
def __init__(self, fastq, database, threads=4, confidence=0):
""".. rubric:: Constructor
:param fastq: either a fastq filename or a list of 2 fastq filenames
:param database: the path to a valid Kraken database
:param threads: number of threads to be used by Kraken
:param confidence: parameter used by kraken2
"""
self.database = KrakenDB(database)
self.threads = threads
self.confidence = confidence
# Fastq input
if isinstance(fastq, str):
self.paired = False
self.fastq = [fastq]
elif isinstance(fastq, list):
if len(fastq) == 2:
self.paired = True
elif len(fastq) == 1:
self.paired = False
else:
raise IOError(("You must provide 1 or 2 files"))
self.fastq = fastq
else:
raise ValueError("Expected a fastq filename or list of 2 fastq filenames")
def run(
self,
output_filename=None,
output_filename_classified=None,
output_filename_unclassified=None,
only_classified_output=False,
):
"""Performs the kraken analysis
:param str output_filename: if not provided, a temporary file is used
and stored in :attr:`kraken_output`.
:param str output_filename_classified: not compressed
:param str output_filename_unclassified: not compressed
"""
if self.database.version != "kraken2":
logger.error(f"input database is not valid kraken2 database")
sys.exit(1)
if output_filename is None:
self.kraken_output = TempFile().name
else:
self.kraken_output = output_filename
            dirname = os.path.dirname(output_filename)
            # guard against a bare filename (empty dirname) before creating directories
            if dirname and os.path.exists(dirname) is False:
                os.makedirs(dirname)
# make sure the required output directories exist:
# and that the output filenames ends in .fastq
if output_filename_classified:
assert output_filename_classified.endswith(".fastq")
dirname = os.path.dirname(output_filename_classified)
if os.path.exists(dirname) is False:
os.makedirs(dirname)
if output_filename_unclassified:
assert output_filename_unclassified.endswith(".fastq")
dirname = os.path.dirname(output_filename_unclassified)
if os.path.exists(dirname) is False:
os.makedirs(dirname)
params = {
"database": self.database.path,
"thread": self.threads,
"file1": self.fastq[0],
"kraken_output": self.kraken_output,
"output_filename_unclassified": output_filename_unclassified,
"output_filename_classified": output_filename_classified,
}
if self.paired:
params["file2"] = self.fastq[1]
command = f"kraken2 --confidence {self.confidence}"
command += f" {params['file1']}"
if self.paired:
command += f" {params['file2']} --paired"
command += f" --db {params['database']} "
command += f" --threads {params['thread']} "
command += f" --output {params['kraken_output']} "
# If N is number of reads unclassified 3 cases depending on out-fmt
# choice
# case1 --paired and out-fmt legacy saved fasta R1 and R2 together on N lines
# case2 --paired and out-fmt interleaved saved fasta R1 and R2 alternatively on 2N lines
# case3 --paired and out-fmt paired saved R1 on N lines. Where is R2 ????
# Note, that there is always one single file. So, the only way for
# kraken to know that this new files (used as input) is paired, is to
# use --paired.
# In any case, this new file looks like an R1-only file. Indeed, if
# interleaved, all data inside the file, if legacy, The R1 and R2 are
# separated by N but a unique sequence. If --out-fmt is paired, this is
# annoying. Indeed, half of the data is lost.
# So, if now input is
# case1, we cannot provide --paired
# case2 we cannot either, so how are R1 and R2 taken care of ?
# besides, if provided, the interleaved input is seen as single ended.
        # Indeed, if provided, --out-fmt cannot be interleaved since kraken1
# complains that input is not paired.
# case3, only R1 so we cannot use --paired
# if kraken2, there is no --out-fmt option, so output is always a fastq
# with either R1 only or two output files.
# If we omit the --paired options, the 2 input R1 and R2 are considered
# as 2 different unrelated samples
# if we use --paired we now must have # in the file name, and then
# the two files are created
if self.database.version == "kraken2":
if output_filename_unclassified:
command += " --unclassified-out %(output_filename_unclassified)s "
if output_filename_classified:
command += " --classified-out %(output_filename_classified)s "
command = command % params
logger.debug(command)
from snakemake import shell
shell(command)
if only_classified_output:
# kraken2 has no classified_output option. we mimic it here below
# just to get a temporary filename
fout = TempFile()
outname = fout.name
newfile = open(outname, "w")
with open(output_filename, "r") as fin:
for line in fin.readlines():
if line.startswith("C"):
newfile.write(line)
newfile.close()
shutil.move(outname, output_filename)
# a simple utility function
try:
from itertools import izip_longest
except:
from itertools import zip_longest as izip_longest
def grouper(iterable):
args = [iter(iterable)] * 8
return izip_longest(*args)
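# Example: list(grouper("ABCDEFGHI")) yields 8-tuples, the last one padded with None:
#   ('A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'), ('I', None, None, None, None, None, None, None)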
class KrakenSequential(object):
"""Kraken Sequential Analysis
    This runs Kraken on a FastQ file with multiple k-mer databases in a
    sequential way. Unclassified sequences from the first database are input
    for the second, and so on.
The input may be a single FastQ file or paired, gzipped or not. FastA are
also accepted.
"""
def __init__(
self,
filename_fastq,
fof_databases,
threads=1,
output_directory="./kraken_sequential/",
keep_temp_files=False,
output_filename_unclassified=None,
output_filename_classified=None,
force=False,
confidence=0,
):
""".. rubric:: **constructor**
:param filename_fastq: FastQ file to analyse
:param fof_databases: file that contains a list of databases paths
(one per line). The order is important. Note that you may also
            provide a list of database paths.
:param threads: number of threads to be used by Kraken
:param output_directory: name of the output directory
:param keep_temp_files: bool, if True, will keep intermediate files
from each Kraken analysis, and save html report at each step
:param bool force: if the output directory already exists, the
            instantiation fails so that the existing data is not overwritten.
            If you wish to overwrite the existing directory, set this
            parameter to True.
"""
self.filename_fastq = filename_fastq
self.confidence = confidence
# input databases may be stored in a file
if isinstance(fof_databases, str) and os.path.exists(fof_databases):
with open(fof_databases, "r") as fof:
self.databases = [
absolute_path.split("\n")[0] for absolute_path in fof.readlines()
]
# or simply provided as a list
elif isinstance(fof_databases, list):
self.databases = fof_databases[:]
else:
raise TypeError(
"input databases must be a list of valid kraken2 "
"databases or a file (see documebntation)"
)
self.databases = [KrakenDB(x) for x in self.databases]
for d in self.databases:
if d.version != "kraken2":
logger.error(f"input database {d} is not valid kraken2 ")
sys.exit(1)
self.threads = threads
self.output_directory = output_directory
self.keep_temp_files = keep_temp_files
# check if the output directory already exist
try:
os.mkdir(output_directory)
except OSError:
if os.path.isdir(output_directory) and force is False:
logger.error("Output directory %s already exists" % output_directory)
raise Exception
elif force is True:
logger.warning(
"Output directory %s already exists. You may "
"overwrite existing results" % output_directory
)
# list of input fastq files
if isinstance(filename_fastq, list) and len(filename_fastq) in [1, 2]:
self.inputs = filename_fastq[:]
elif isinstance(filename_fastq, str):
self.inputs = [filename_fastq]
else:
msg = "input file must be a string or list of 2 filenames"
msg += "\nYou provided {}".format(filename_fastq)
raise TypeError(msg)
if len(self.inputs) == 1:
self.paired = False
elif len(self.inputs) == 2:
self.paired = True
self.unclassified_output = output_filename_unclassified
self.classified_output = output_filename_classified
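    # Typical usage (sketch; file and database names are only examples):
    #
    #   ks = KrakenSequential("sample_R1.fastq.gz", ["viruses_db", "bacteria_db"],
    #                         output_directory="./kraken_sequential/", force=True)
    #   summary = ks.run()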
def _run_one_analysis(self, iteration):
"""Run one analysis"""
db = self.databases[iteration]
logger.info("Analysing data using database {}".format(db))
# a convenient alias
_pathto = lambda x: self.output_directory + x
# the output is saved in this file
if self.paired:
# if paired, kraken2 expect a # and then will create 2 files (1 and 2
# )
# Note that kraken adds a _ before the # (1,2) so no need to add one
output_filename_unclassified = _pathto("unclassified_%d#.fastq" % iteration)
file_fastq_unclass = [
_pathto("unclassified_%d_1.fastq" % iteration),
_pathto("unclassified_%d_2.fastq" % iteration),
]
else:
output_filename_unclassified = _pathto("unclassified_%d.fastq" % iteration)
file_fastq_unclass = _pathto("unclassified_%d.fastq" % iteration)
if iteration == 0:
inputs = self.inputs
else:
inputs = self._list_kraken_input[iteration - 1]
# if this is the last iteration (even if iteration is zero), save
# classified and unclassified in the final kraken results.
if iteration == len(self.databases) - 1:
only_classified_output = False
else:
only_classified_output = True
file_kraken_out = self.output_directory + "/kraken_{}.out".format(iteration)
# The analysis itself
analysis = KrakenAnalysis(inputs, db, self.threads, confidence=self.confidence)
analysis.run(
output_filename=file_kraken_out,
output_filename_unclassified=output_filename_unclassified,
only_classified_output=only_classified_output,
)
# save input/output files.
self._list_kraken_input.append(file_fastq_unclass)
self._list_kraken_output.append(file_kraken_out)
def run(self, dbname="multiple", output_prefix="kraken_final"):
"""Run the sequential analysis
:param dbname:
:param output_prefix:
        :return: dictionary summarizing the database names and
            classified/unclassified counts
        This method also creates a set of files:
- kraken_final.out
- krona_final.html
- kraken.png (pie plot of the classified/unclassified reads)
.. note:: the databases are run in the order provided in the constructor.
"""
# list of all output to merge at the end
self._list_kraken_output = []
self._list_kraken_input = []
# Iteration over the databases
for iteration in range(len(self.databases)):
# The analysis itself
status = self._run_one_analysis(iteration)
last_unclassified = self._list_kraken_input[-1]
# If everything was classified, we can stop here
if isinstance(last_unclassified, str):
stat = os.stat(last_unclassified)
if stat.st_size == 0:
break
elif isinstance(last_unclassified, list):
stat = os.stat(last_unclassified[0])
if stat.st_size == 0:
break
# concatenate all kraken output files
file_output_final = self.output_directory + os.sep + "%s.out" % output_prefix
with open(file_output_final, "w") as outfile:
for fname in self._list_kraken_output:
with open(fname) as infile:
for line in infile:
outfile.write(line)
logger.info("Analysing final results")
result = KrakenResults(file_output_final, verbose=False)
try:
result.histo_classified_vs_read_length()
pylab.savefig(self.output_directory + os.sep + "hist_read_length.png")
except Exception as err:
logger.warning("hist read length could not be computed")
try:
result.boxplot_classified_vs_read_length()
pylab.savefig(self.output_directory + os.sep + "boxplot_read_length.png")
except Exception as err:
logger.warning("hist read length could not be computed")
# TODO: this looks similar to the code in KrakenPipeline. could be factorised
result.to_js("%s%s%s.html" % (self.output_directory, os.sep, output_prefix))
try:
result.plot2(kind="pie")
except Exception as err:
logger.warning(err)
result.plot(kind="pie")
pylab.savefig(self.output_directory + os.sep + "kraken.png")
prefix = self.output_directory + os.sep
result.kraken_to_json(prefix + "kraken.json", dbname)
result.kraken_to_csv(prefix + "kraken.csv", dbname)
# remove kraken intermediate files (including unclassified files)
if self.unclassified_output:
# Just cp the last unclassified file
try:
# single-end data (one file)
shutil.copy2(self._list_kraken_input[-1], self.unclassified_output)
except:
for i, x in enumerate(self._list_kraken_input[-1]):
shutil.copy2(x, self.unclassified_output.replace("#", str(i + 1)))
if self.classified_output:
# Just cp the last classified file
shutil.copy2(self._list_kraken_input[-1], self.classified_output)
summary = {"databases": [x.name for x in self.databases]}
total = 0
classified = 0
for f_temp, db in zip(self._list_kraken_output, self.databases):
# In theory, the first N-1 DB returns only classified (C) read
# and the last one contains both
try:
df = pd.read_csv(f_temp, sep="\t", header=None, usecols=[0])
C = sum(df[0] == "C")
U = sum(df[0] == "U")
except pd.errors.EmptyDataError:
# if no read classified,
C = 0
U = 0
total += U
total += C
classified += C
summary[db.name] = {"C": C}
if U != 0: # the last one
summary["unclassified"] = U
summary["total"] = total
summary["classified"] = classified
if not self.keep_temp_files:
# kraken_0.out
for f_temp in self._list_kraken_output:
os.remove(f_temp)
# unclassified
for f_temp in self._list_kraken_input:
if isinstance(f_temp, str):
os.remove(f_temp)
elif isinstance(f_temp, list):
for this in f_temp:
os.remove(this)
return summary
class KrakenDownload(object):
"""Utility to download Kraken DB and place them in a local directory
::
from sequana import KrakenDownload
kd = KrakenDownload()
kd.download('toydb')
"""
def __init__(self, output_dir=None):
if output_dir is None:
self.output_dir = f"{sequana_config_path}{os.sep}kraken2_dbs"
else:
self.output_dir = output_dir
def download(self, name, verbose=True):
if name == "toydb":
self._download_kraken2_toydb(verbose=verbose)
else:
raise ValueError("name must be 'toydb' for now")
def _download_kraken2_toydb(self, verbose=True):
"""Download the kraken DB toy example from sequana_data into
.config/sequana directory
Checks the md5 checksums. About 32Mb of data
"""
base = f"{self.output_dir}{os.sep}toydb"
try:
os.makedirs(base)
except FileExistsError:
pass
baseurl = "https://github.com/sequana/data/raw/master/"
# download only if required
logger.info("Downloading the database into %s" % base)
md5sums = [
"31f4b20f9e5c6beb9e1444805264a6e5",
"733f7587f9c0c7339666d5906ec6fcd3",
"7bb56a0f035b27839fb5c18590b79263",
]
filenames = ["hash.k2d", "opts.k2d", "taxo.k2d"]
for filename, md5sum in zip(filenames, md5sums):
url = baseurl + f"kraken2_toydb/{filename}"
filename = base + os.sep + filename
if os.path.exists(filename) and md5(filename) == md5sum:
logger.warning(f"{filename} already present with good md5sum")
else:
logger.info(f"Downloading {url}")
wget(url, filename)
|
python
|
import planckStyle as s
g = s.getSubplotPlotter()
g.settings.legend_fontsize -= 3.5
g.settings.lineM = ['-g', '-r', '-b', '-k', '--r', '--b']
pol = ['TT', 'TE', 'EE', 'TTTEEE']
dataroots = [ getattr(s, 'defdata_' + p) for p in pol]
dataroots += [dataroots[1].replace('lowEB', 'lowTEB'), dataroots[2].replace('lowEB', 'lowTEB')]
for par, marker in zip(['', 'nnu', 'mnu', 'Alens', 'r', 'yhe', 'nrun'], [None, 3.046, 0.06, 1, None, 0.2449, 0]):
g.newPlot()
base = 'base_'
if par: base += par + '_'
roots = [base + dat for dat in dataroots]
labels = [s.datalabel[r] for r in dataroots]
g.settings.legend_frac_subplot_margin = 0.15
plotpars = [ 'zrei', 'H0', 'omegabh2', 'thetastar', 'A', 'tau', 'omegam', 'omegach2', 'ns', 'sigma8']
if par: plotpars[0] = par
g.plots_1d(roots, plotpars, nx=5, legend_ncol=len(roots), legend_labels=labels, share_y=True, markers=[marker])
g.export(tag=par)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Python KISS Module Test Constants."""
__author__ = 'Greg Albrecht W2GMD <[email protected]>' # NOQA pylint: disable=R0801
__copyright__ = 'Copyright 2017 Greg Albrecht and Contributors' # NOQA pylint: disable=R0801
__license__ = 'Apache License, Version 2.0' # NOQA pylint: disable=R0801
PANGRAM = 'the quick brown fox jumps over the lazy dog'
ALPHABET = PANGRAM.replace(' ', '')
NUMBERS = ''.join([str(x) for x in range(0, 10)])
POSITIVE_NUMBERS = NUMBERS[1:]
ALPHANUM = ''.join([ALPHABET, NUMBERS])
TEST_FRAMES = 'tests/test_frames.log'
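# TEST_FRAME below appears to be a hex-encoded APRS/AX.25 frame used by the test
# suite (explanatory note; e.g. bytes.fromhex(TEST_FRAME) would recover the raw bytes).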
TEST_FRAME = (
'82a0a4b0646860ae648e9a88406cae92888a62406303f021333734352e3735'
'4e4931323232382e303557235732474d442d3620496e6e65722053756e73657'
'42c2053462069476174652f4469676970656174657220687474703a2f2f7732'
'676d642e6f7267')
|
python
|
from toolz import get
from functools import partial
pairs = [(1, 2) for i in range(100000)]
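# ``first`` below is get(0, ...): for a pair such as (1, 2) it returns 1. The loop
# simply calls the partially-applied getter on every pair (a micro-benchmark style test).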
def test_get():
first = partial(get, 0)
for p in pairs:
first(p)
|
python
|
import torch
import torch.nn as nn
import torch.optim as optim
import pytorch_lightning as pl
from network.dla import MOC_DLA
from network.resnet import MOC_ResNet
from trainer.losses import MOCLoss
from MOC_utils.model import load_coco_pretrained_model
backbone = {
'dla': MOC_DLA,
'resnet': MOC_ResNet
}
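# Zero-initialise the bias of every Conv2d module found inside the given head.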
def fill_fc_weights(layers):
for m in layers.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class MOC_Branch(nn.Module):
def __init__(self, input_channel, arch, head_conv, branch_info, K):
super(MOC_Branch, self).__init__()
assert head_conv > 0
wh_head_conv = 64 if arch == 'resnet' else head_conv
self.hm = nn.Sequential(
nn.Conv2d(K * input_channel, head_conv,
kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(head_conv, branch_info['hm'],
kernel_size=1, stride=1,
padding=0, bias=True))
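        # the final heatmap conv bias is set to -2.19 (≈ -log((1 - 0.1) / 0.1)), a common
        # CenterNet-style initialisation for the focal-loss heatmap head (explanatory note)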
self.hm[-1].bias.data.fill_(-2.19)
self.mov = nn.Sequential(
nn.Conv2d(K * input_channel, head_conv,
kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(head_conv, branch_info['mov'],
kernel_size=1, stride=1,
padding=0, bias=True))
fill_fc_weights(self.mov)
self.wh = nn.Sequential(
nn.Conv2d(input_channel, wh_head_conv,
kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(wh_head_conv, branch_info['wh'] // K,
kernel_size=1, stride=1,
padding=0, bias=True))
fill_fc_weights(self.wh)
def forward(self, input_chunk):
output = {}
output_wh = []
for feature in input_chunk:
output_wh.append(self.wh(feature))
input_chunk = torch.cat(input_chunk, dim=1)
output_wh = torch.cat(output_wh, dim=1)
output['hm'] = self.hm(input_chunk)
output['mov'] = self.mov(input_chunk)
output['wh'] = output_wh
return output
class MOC_Net(pl.LightningModule):
def __init__(self, arch, num_classes, head_conv=256, K=7, **kwargs):
super().__init__()
self.save_hyperparameters()
num_layers = int(arch[arch.find('_') + 1:]) if '_' in arch else 0
arch = arch[:arch.find('_')] if '_' in arch else arch
branch_info = {'hm': num_classes,
'mov': 2 * K,
'wh': 2 * K}
self.K = K
self.backbone = backbone[arch](num_layers)
self.branch = MOC_Branch(self.backbone.output_channel, arch, head_conv, branch_info, K)
# Define the loss function
self.loss = MOCLoss()
def forward(self, x):
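        # x is a list of K input frames; each frame is encoded by the shared backbone
        # and the K feature maps are then fused by the branch heads.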
chunk = [self.backbone(x[i]) for i in range(self.K)]
return [self.branch(chunk)]
def configure_optimizers(self):
if self.hparams.optimizer == 'sgd':
return optim.SGD(self.parameters(), self.hparams.lr, momentum = 0.9)
elif self.hparams.optimizer == 'adam':
return optim.Adam(self.parameters(), self.hparams.lr)
elif self.hparams.optimizer == 'adamax':
return optim.Adamax(self.parameters(), self.hparams.lr)
def run_epoch(self, phase, batch, batch_idx):
assert len(batch['input']) == self.K
output = self(batch['input'])[0]
loss, loss_stats = self.loss(output, batch)
self.log(f'{phase}_loss', loss, prog_bar=True, logger=True)
self.log(f'{phase}_loss_hm', loss_stats['loss_hm'], prog_bar=True, logger=True)
self.log(f'{phase}_loss_mov', loss_stats['loss_mov'], prog_bar=True, logger=True)
self.log(f'{phase}_loss_wh', loss_stats['loss_wh'], prog_bar=True, logger=True)
return loss.mean()
def training_step(self, batch, batch_idx):
return self.run_epoch("train", batch, batch_idx)
def validation_step(self, batch, batch_idx):
self.run_epoch("val", batch, batch_idx)
def test_step(self, batch, batch_idx):
self.run_epoch("test", batch, batch_idx)
if __name__ == '__main__':
num_classes = 24
K = 7
arch = 'resnet_18'
head_conv = 256
model = MOC_Net(arch, num_classes, head_conv, K, lr=0.001, optimizer='adam')
model = load_coco_pretrained_model(model, arch, print_log=False)
input_shape = (1, 3, 288, 288)
x = [torch.randn(input_shape)] * K
# y = model.backbone(x) #1, 64, 72, 72
y = model(x)
# print(len(y))
print(y[0].keys())
hm = y[0]['hm']
mov = y[0]['mov']
wh = y[0]['wh']
print(hm.shape)
print(mov.shape)
print(wh.shape)
print(model.hparams)
model.configure_optimizers()
|
python
|
import os
from django.http import HttpResponse
from django.template import Context, RequestContext, loader
def ajax_aware_render(request, template_list, context=None, **kwargs):
"""
Render a template, using a different one automatically for AJAX requests.
:param template_list: Either a template name or a list of template names.
:param context: Optional extra context to pass to the template.
For AJAX requests, the template list is altered to look for alternate
templates first and the ``is_ajax`` context variable is set to ``True``.
For example, if ``template_list`` was set to
``['custom/login.html', 'login.html']``, then an AJAX request will change
this to::
['custom/login.ajax.html', 'login.ajax.html',
'custom/login.html', 'login.html']
"""
if not isinstance(context, Context):
context = RequestContext(request, context)
if isinstance(template_list, basestring):
template_list = [template_list]
if request.is_ajax():
ajax_template_list = []
for name in template_list:
ajax_template_list.append('%s.ajax%s' % os.path.splitext(name))
template_list = ajax_template_list + list(template_list)
context['is_ajax'] = True
context['current_url'] = request.get_full_path()
template = loader.select_template(template_list)
return HttpResponse(template.render(context), **kwargs)
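# Example usage in a view (sketch; template names follow the docstring above and are
# only illustrative):
#
#     def login(request):
#         return ajax_aware_render(request, ['custom/login.html', 'login.html'])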
|
python
|
import logging
import collections
import time
import six
from six.moves import http_client
from flask import url_for, g, jsonify
from flask.views import MethodView
import marshmallow as ma
from flask_restx import reqparse
from flask_smorest import Blueprint, abort
from drift.core.extensions.urlregistry import Endpoints
from driftbase.models.db import CorePlayer, Counter, CounterEntry
from driftbase.utils import get_all_counters, get_counter
from driftbase.players import get_playergroup_ids
log = logging.getLogger(__name__)
bp = Blueprint("counters", __name__, url_prefix="/counters", description="Counters")
endpoints = Endpoints()
NUM_RESULTS = 100
def drift_init_extension(app, api, **kwargs):
api.register_blueprint(bp)
endpoints.init_app(app)
@bp.route('/', endpoint='list')
class CountersApi(MethodView):
get_args = reqparse.RequestParser()
def get(self):
"""
Get a list of all 'leaderboards'
"""
all_counters = g.db.query(Counter).order_by(Counter.name).distinct()
ret = []
for s in all_counters:
ret.append({
"name": s.name,
"label": s.label,
"counter_id": s.counter_id,
"url": url_for("counters.entry", counter_id=s.counter_id, _external=True)
})
return jsonify(ret), http_client.OK, {'Cache-Control': "max_age=60"}
@bp.route('/<int:counter_id>', endpoint='entry')
class CounterApi(MethodView):
get_args = reqparse.RequestParser()
get_args.add_argument("num", type=int, default=NUM_RESULTS)
get_args.add_argument("include", type=int, action='append')
# TODO: Sunset this in favour of player_group
get_args.add_argument("player_id", type=int, action='append')
get_args.add_argument("player_group", type=str)
get_args.add_argument("reverse", type=bool)
#@namespace.expect(get_args)
def get(self, counter_id):
start_time = time.time()
args = self.get_args.parse_args()
num = args.get("num") or NUM_RESULTS
counter = get_counter(counter_id)
if not counter:
abort(404)
filter_player_ids = []
        reverse = bool(args.reverse)
if args.player_id:
filter_player_ids = args.player_id
query = g.db.query(CounterEntry, CorePlayer)
query = query.filter(CounterEntry.counter_id == counter_id,
CounterEntry.period == "total",
CounterEntry.player_id == CorePlayer.player_id,
CorePlayer.status == "active",
CorePlayer.player_name != u"",)
if filter_player_ids:
query = query.filter(CounterEntry.player_id.in_(filter_player_ids))
if args.player_group:
filter_player_ids = get_playergroup_ids(args.player_group)
query = query.filter(CounterEntry.player_id.in_(filter_player_ids))
if reverse:
query = query.order_by(CounterEntry.value)
else:
query = query.order_by(-CounterEntry.value)
query = query.limit(num)
rows = query.all()
counter_totals = collections.defaultdict(list)
counter_names = {}
if args.include:
all_counters = get_all_counters()
# inline other counters for the players
player_ids = [r[0].player_id for r in rows]
counter_rows = g.db.query(CounterEntry.player_id,
CounterEntry.counter_id,
CounterEntry.value) \
.filter(CounterEntry.period == "total",
CounterEntry.player_id.in_(player_ids),
CounterEntry.counter_id.in_(args.include)) \
.all()
for r in counter_rows:
this_player_id = r[0]
this_counter_id = r[1]
this_value = r[2]
# find the name of this counter. We cache this locally for performance
try:
counter_name = counter_names[this_counter_id]
except KeyError:
c = all_counters.get(six.text_type(this_counter_id), {})
name = c.get("name", this_counter_id)
counter_names[this_counter_id] = name
counter_name = name
entry = {
"name": counter_name,
"counter_id": this_counter_id,
"counter_url": url_for("player_counters.entry",
player_id=this_player_id,
counter_id=this_counter_id,
_external=True),
"total": this_value
}
counter_totals[r.player_id].append(entry)
ret = []
for i, row in enumerate(rows):
player_id = row[0].player_id
entry = {
"name": counter["name"],
"counter_id": counter_id,
"player_id": player_id,
"player_name": row[1].player_name,
"player_url": url_for("players.entry", player_id=player_id, _external=True),
"counter_url": url_for("player_counters.entry",
player_id=player_id,
counter_id=row[0].counter_id,
_external=True),
"total": row[0].value,
"position": i + 1,
"include": counter_totals.get(player_id, {})
}
ret.append(entry)
log.info("Returning counters in %.2fsec", time.time() - start_time)
return jsonify(ret), http_client.OK, {'Cache-Control': "max_age=60"}
@endpoints.register
def endpoint_info(current_user):
ret = {}
ret["counters"] = url_for("counters.list", _external=True)
return ret
|
python
|
#!/usr/bin/env python
"""
Recursively find and replace text in files under a specific folder with preview of changed data in dry-run mode
============
Example Usage
---------------
**See what is going to change (dry run):**
> flip all dates from 2017-12-31 to 31-12-2017
find_replace.py --dir project/myfolder --search-regex "(\d{4})-(\d{2})-(\d{2})" --replace-regex "\3-\2-\1" --dry-run
**Do actual replacement:**
find_replace.py --dir project/myfolder --search-regex "(\d{4})-(\d{2})-(\d{2})" --replace-regex "\3-\2-\1"
**Do actual replacement and create backup files:**
find_replace.py --dir project/myfolder --search-regex "(\d{4})-(\d{2})-(\d{2})" --replace-regex "\3-\2-\1" --create-backup
**Same action as previous command with short-hand syntax:**
find_replace.py -d project/myfolder -s "(\d{4})-(\d{2})-(\d{2})" -r "\3-\2-\1" -b
Output of `find_replace.py -h`:
usage: find-replace-in-files-regex.py [-h] [--dir DIR] --search-regex
SEARCH_REGEX --replace-regex
REPLACE_REGEX [--glob GLOB] [--dry-run]
[--create-backup] [--verbose]
[--print-parent-folder]
USAGE:
find-replace-in-files-regex.py -d [my_folder] -s <search_regex> -r <replace_regex> -g [glob_pattern]
"""
from __future__ import print_function
import os
import fnmatch
import sys
import shutil
import re
import argparse
class Colors:
Default = "\033[39m"
Black = "\033[30m"
Red = "\033[31m"
Green = "\033[32m"
Yellow = "\033[33m"
Blue = "\033[34m"
Magenta = "\033[35m"
Cyan = "\033[36m"
LightGray = "\033[37m"
DarkGray = "\033[90m"
LightRed = "\033[91m"
LightGreen = "\033[92m"
LightYellow = "\033[93m"
LightBlue = "\033[94m"
LightMagenta = "\033[95m"
LightCyan = "\033[96m"
White = "\033[97m"
NoColor = "\033[0m"
def find_replace(cfg):
search_pattern = re.compile(cfg.search_regex)
if cfg.dry_run:
print('THIS IS A DRY RUN -- NO FILES WILL BE CHANGED!')
for path, dirs, files in os.walk(os.path.abspath(cfg.dir)):
for filename in fnmatch.filter(files, cfg.glob):
if cfg.print_parent_folder:
pardir = os.path.normpath(os.path.join(path, '..'))
pardir = os.path.split(pardir)[-1]
print('[%s]' % pardir)
full_path = os.path.join(path, filename)
# backup original file
if cfg.create_backup:
backup_path = full_path + '.bak'
while os.path.exists(backup_path):
backup_path += '.bak'
print('DBG: creating backup', backup_path)
shutil.copyfile(full_path, backup_path)
if os.path.islink(full_path):
print("{}File {} is a symlink. Skipping{}".format(Colors.Red, full_path, Colors.NoColor))
continue
with open(full_path) as f:
old_text = f.read()
all_matches = search_pattern.findall(old_text)
if all_matches:
print('{}Found {} match(es) in file {}{}'.format(Colors.LightMagenta, len(all_matches), filename, Colors.NoColor))
new_text = search_pattern.sub(cfg.replace_regex, old_text)
if not cfg.dry_run:
with open(full_path, "w") as f:
print('DBG: replacing in file', full_path)
f.write(new_text)
# else:
# for idx, matches in enumerate(all_matches):
# print("Match #{}: {}".format(idx, matches))
if cfg.verbose or cfg.dry_run:
colorized_old = search_pattern.sub(Colors.LightBlue + r"\g<0>" + Colors.NoColor, old_text)
colorized_old = '\n'.join(['\t' + line.strip() for line in colorized_old.split('\n') if Colors.LightBlue in line])
colorized = search_pattern.sub(Colors.Green + cfg.replace_regex + Colors.NoColor, old_text)
colorized = '\n'.join(['\t' + line.strip() for line in colorized.split('\n') if Colors.Green in line])
print("{}BEFORE:{}\n{}".format(Colors.White, Colors.NoColor, colorized_old))
print("{}AFTER :{}\n{}".format(Colors.Yellow, Colors.NoColor, colorized))
elif cfg.list_non_matching:
print('File {} does not contain search regex "{}"'.format(filename, cfg.search_regex))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='''DESCRIPTION:
Find and replace recursively from the given folder using regular expressions''',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog='''USAGE:
{0} -d [my_folder] -s <search_regex> -r <replace_regex> -g [glob_pattern]
'''.format(os.path.basename(sys.argv[0])))
parser.add_argument('--dir', '-d',
help='folder to search in; by default current folder',
default='.')
parser.add_argument('--search-regex', '-s',
help='search regex',
required=True)
parser.add_argument('--replace-regex', '-r',
help='replacement regex',
required=True)
parser.add_argument('--glob', '-g',
help='glob pattern, i.e. *.html',
default="*.*")
parser.add_argument('--dry-run', '-dr',
action='store_true',
help="don't replace anything just show what is going to be done",
default=False)
parser.add_argument('--create-backup', '-b',
action='store_true',
help='Create backup files',
default=False)
parser.add_argument('--verbose', '-v',
action='store_true',
help="Show files which don't match the search regex",
default=False)
parser.add_argument('--print-parent-folder', '-p',
action='store_true',
help="Show the parent info for debug",
default=False)
parser.add_argument('--list-non-matching', '-n',
action='store_true',
help="Supress colors",
default=False)
config = parser.parse_args(sys.argv[1:])
find_replace(config)
|
python
|
# ___________________________________________________________________________
#
# EGRET: Electrical Grid Research and Engineering Tools
# Copyright 2019 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# This software is distributed under the Revised BSD License.
# ___________________________________________________________________________
"""
This is the logging configuration for EGRET.
The documentation below is primarily for EGRET developers.
Examples
========
To use the logger in your code, add the following
after your import
.. code-block:: python
import logging
logger = logging.getLogger('egret.path.to.module')
Then, you can use the standard logging functions
.. code-block:: python
logger.debug('message')
logger.info('message')
logger.warning('message')
logger.error('message')
logger.critical('message')
Note that by default, any message that has a logging level
of warning or higher (warning, error, critical) will be
logged.
To log an exception and capture the stack trace
.. code-block:: python
try:
c = a / b
except Exception as e:
logging.error("Exception occurred", exc_info=True)
"""
import sys
import logging
log_format = '%(message)s'
# configure the root logger for egret
logger = logging.getLogger('egret')
logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler(sys.stdout)
fmtr = logging.Formatter(log_format)
console_handler.setFormatter(fmtr)
logger.addHandler(console_handler)
|
python
|
import os
import cv2
import numpy as np
import random
classnames = ["no weather degradation", "fog", "rain", "snow"]
modes = ["train", "val", "test"]
for classname in classnames:
input_path = "./jhucrowd+weather dataset/{}".format(classname)
images = os.listdir(input_path)
random.shuffle(images)
N = len(images)
tot_train = int(N * 0.7)
tot_val = int(N * 0.1)
tot_test = int(N * 0.2)
r = N - (tot_train + tot_val + tot_test)
tot_train = tot_train + r
start_index_train = 0
start_index_val = tot_train
start_index_test = tot_train + tot_val
for i_img, img_name in enumerate(images):
if i_img < start_index_val:
mode = modes[0]
elif i_img < start_index_test and i_img >= start_index_val:
mode = modes[1]
else:
mode = modes[2]
output_path = "./preprocessed_data/{}/{}".format(mode, classname)
print(os.path.join(output_path, img_name))
image = cv2.imread(os.path.join(input_path, img_name))
cv2.imwrite(os.path.join(output_path, img_name), image)
|
python
|
#!/usr/bin/env python
import sys
import time
import random
mn,mx,count = map(int,sys.argv[1:4])
seed = sys.argv[4] if len(sys.argv) > 4 else time.time()
random.seed(seed)
print('x,y')
for i in range(count):
    print(','.join(map(str, [random.randint(mn, mx), random.randint(mn, mx)])))
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
# Create your models here.
@python_2_unicode_compatible
class SummaryNote(models.Model):
title = models.CharField(max_length=60)
content = models.TextField()
def __str__ (self):
return self.title
def __repr__ (self):
return '<SummaryNote %s>' % self.title
|
python
|
# Generated by Django 3.0.8 on 2020-07-29 13:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0022_uploadedimage'),
('events', '0003_auto_20200725_2158'),
]
operations = [
migrations.AddField(
model_name='eventtype',
name='list_image',
field=models.ForeignKey(blank=True, help_text='This image will be displayed above the event on the front page', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'),
),
]
|
python
|
"""A CLI utility that aggregates configuration sources into a JSON object."""
import json
import logging
import os
import typing
import cleo
import structlog
import toml
import pitstop
import pitstop.backends.base
import pitstop.strategies
import pitstop.strategies.base
import pitstop.types
__all__ = ('app', 'main')
app = cleo.Application("pitstop", pitstop.__version__, complete=True)
def load_strategy(
path: str, strategy_name: typing.Optional[str] = None
) -> pitstop.strategies.base.BaseStrategy:
"""Load a configuration strategy from a pitstop configuration file."""
filename = os.path.basename(path)
with open(path, 'r') as f:
config = toml.loads(f.read())
if filename == 'pyproject.toml':
config = config['tool']['pitstop']
return pitstop.strategies.strategy_factory(config, strategy_name)
def main() -> None:
"""``pitstop`` entrypoint."""
shared_processors = [
structlog.stdlib.add_logger_name,
structlog.stdlib.add_log_level,
structlog.stdlib.PositionalArgumentsFormatter(),
structlog.processors.TimeStamper(fmt='%Y-%m-%d %H:%M:%S'),
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
structlog.processors.UnicodeDecoder(),
]
structlog.configure(
processors=shared_processors
+ [structlog.stdlib.ProcessorFormatter.wrap_for_formatter],
context_class=dict,
logger_factory=structlog.stdlib.LoggerFactory(),
wrapper_class=structlog.stdlib.BoundLogger,
cache_logger_on_first_use=True,
)
formatter = structlog.stdlib.ProcessorFormatter(
processor=structlog.dev.ConsoleRenderer(),
foreign_pre_chain=shared_processors,
)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
root_logger = logging.getLogger()
root_logger.addHandler(handler)
app.add(ResolveCommand())
app.run()
class BaseCommand(cleo.Command):
"""Base :class:`cleo.Command`."""
def handle(self) -> None:
"""Perform shared CLI application setup.
All CLI commands should subclass :class:`BaseCommand` and call
:func:`super` when overriding this method.
"""
verbosity = self.output.get_verbosity()
if verbosity == cleo.Output.VERBOSITY_QUIET:
level = logging.FATAL
elif verbosity == cleo.Output.VERBOSITY_NORMAL:
level = logging.WARN
elif verbosity <= cleo.Output.VERBOSITY_VERBOSE:
level = logging.INFO
elif verbosity <= cleo.Output.VERBOSITY_DEBUG:
level = logging.DEBUG
root_logger = logging.getLogger()
root_logger.setLevel(level)
class ResolveCommand(BaseCommand):
"""
Resolve all backend sources and output resolved configuration.
resolve
{config? : pitstop configuration file}
{--s|strategy=v1 : pitstop strategy version}
{--c|compact : enable compact output}
"""
def handle(self) -> None: # noqa: D102
super().handle()
config = self.argument('config')
strategy = self.option('strategy')
if config is None:
config = 'pyproject.toml'
strategy = load_strategy(config, strategy_name=strategy)
config = strategy.resolve()
self.line(
json.dumps(config, indent=None if self.option('compact') else 4)
)
if __name__ == '__main__':
main()
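# Example invocation (a sketch, assuming the package installs a ``pitstop``
# console script; the config file name is just the documented default):
#   pitstop resolve pyproject.toml --strategy v1 --compact
# Without a positional argument the command falls back to ./pyproject.toml and
# prints the resolved configuration as JSON on stdout.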
|
python
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect, JsonResponse
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.template import loader, Context
from pure_pagination import Paginator, EmptyPage, PageNotAnInteger
from .decorators import valid_character_selected
from .forms import FilterJournalForm
from apps.apies.forms import ApiForm
from apps.characters.models import CharacterApi, CharacterJournal
import utils
@login_required
def characters(request):
api_form = ApiForm(request.POST or None, user=request.user)
if request.POST and api_form.is_valid():
api_form.save(request.user)
api_form = ApiForm(user=request.user)
characters = CharacterApi.objects.filter(api__user=request.user)
if request.user.groups.filter(
name="moderator"
).exists() or request.user.is_superuser:
members = CharacterApi.objects.exclude(api__user=request.user)
return render(
request,
"characters/characters.html",
{
"api_form": api_form,
"characters": characters,
"members": members
}
)
return render(
request,
"characters/characters.html",
{
"api_form": api_form,
"characters": characters
}
)
@login_required
def select_character(request, pk):
if request.user.groups.filter(
name="moderator"
).exists() or request.user.is_superuser:
character = get_object_or_404(CharacterApi, pk=pk)
request.session['moderator'] = True
else:
character = get_object_or_404(
CharacterApi,
pk=pk,
api__user=request.user
)
request.session['moderator'] = False
request.session['charpk'] = character.pk
request.session['access'] = character.api.access()
return HttpResponseRedirect(reverse("character_sheet"))
@login_required
@valid_character_selected
def character_sheet(request):
character = get_object_or_404(CharacterApi, pk=request.session['charpk'])
cache_key = character.sheet_cache_key()
result = utils.connection.get_cache(cache_key)
if not result:
        #or time to live is too long
character.sheet_set_cache_job()
#sheet, employment = character.character_sheet()
#account = character.api.account_status()
#in_training = character.skill_in_training()
# "employment": employment,
# "in_training": in_training,
# "sheet": sheet,
# "account": account,
#"character": character,
return render(
request,
"characters/sheet.html",
)
@login_required
@valid_character_selected
def character_sheet_data(request):
character = get_object_or_404(CharacterApi, pk=request.session['charpk'])
cache_key = character.sheet_cache_key()
result = utils.connection.get_cache(cache_key)
if result:
#render template
sheet, employment = character.character_sheet()
paid_until = character.api.account_status()
in_training = None #character.skill_in_training()
context = Context(
{
"employment": employment,
"in_training": in_training,
"sheet": sheet,
"paid_until": paid_until,
"character": character,
}
)
template = loader.get_template('characters/sheet_content.html')
content = template.render(context)
refresh_timer = 60 * 10 * 1000
else:
content = """<i class="fa fa-spinner fa-spin text-center"></i>"""
        refresh_timer = 0.3
return JsonResponse(
{
"content": content,
"refresh_timer": refresh_timer,
}
)
@login_required
@valid_character_selected
def character_skills(request):
character = get_object_or_404(CharacterApi, pk=request.session['charpk'])
if not character.api.access_to("CharacterSheet"):
return HttpResponseRedirect(reverse("characters"))
skills = character.trained_skills()
queue = character.skill_queue()
return render(
request,
"characters/character_skills.html",
{
"character": character,
"skills": skills,
"queue": queue,
}
)
@login_required
@valid_character_selected
def character_journal(request):
character = get_object_or_404(CharacterApi, pk=request.session['charpk'])
if not character.api.access_to("WalletJournal"):
return HttpResponseRedirect(reverse("characters"))
all_transactions = character.wallet_journal()
filter_form = FilterJournalForm(
request.POST or None, characterapi=character
)
paginator = Paginator(
all_transactions,
50, request=request
)
page = request.GET.get('page', 1)
try:
transactions = paginator.page(page)
except PageNotAnInteger:
transactions = paginator.page(1)
except EmptyPage:
transactions = paginator.page(paginator.num_pages)
chart_list = CharacterJournal.monthly_balance(character)
return render(
request,
"characters/wallet_journal.html",
{
"character": character,
"transactions": transactions,
"chart_list": chart_list,
"filter_form": filter_form,
}
)
|
python
|
import os
import unittest
from bs4 import BeautifulSoup
from parser import Parser
class ParserTestCase(unittest.TestCase):
def setUp(self):
pass
def test_item_info_images(self):
base_url = "https://www.akusherstvo.ru"
page_url = "/catalog/50666-avtokreslo-rant-star/"
page_mock_url = base_url + page_url
dump_folder = "test"
parser = Parser(base_url, dump_folder)
page = self.get_page_mock(parser, page_mock_url)
page_url = "/catalog/36172-carmela/"
item_info = parser.get_item_info(page, page_url)
more_photos = item_info["more_photos"]
color_photos = item_info["color_photos"]
self.assertEqual(len(more_photos), 4)
self.assertEqual(len(color_photos), 4)
self.assertEqual(any([ "_b." in photo_url for photo_url in color_photos]), False, "all paths should be without and postfix")
self.assertEqual(any([ "_s." in photo_url for photo_url in more_photos]), False, "all paths should be without and postfix")
def get_page_mock(self, parser, url):
normalized_url = url.replace("/", "_")
full_path = "./test_data/mock_{}.html".format(normalized_url)
if os.path.exists(full_path):
with open(full_path, "r") as f:
raw_text = f.read()
page = BeautifulSoup(raw_text, features="html5lib")
else:
page = parser.get_bs(url, codec="cp1251")
os.makedirs("./test_data", exist_ok=True)
with open(full_path, "w") as f:
f.write(str(page))
return page
if __name__ == '__main__':
unittest.main()
|
python
|
from modules import engine
from modules import out
@engine.prepare_and_clean
def execute(key = None):
out.log('These are all configuration settings.')
config_vars = engine.get_config(key)
if key is None:
for k in config_vars:
out.log(k + ' = ' + str(config_vars[k]))
else:
        out.log(key + ' = ' + str(config_vars))
def help():
out.log("This command will print all the variables, that are set in the engines environment that look like config variables.", 'help')
|
python
|
# 2D dataset loaders
import data.data_hcp as data_hcp
import data.data_abide as data_abide
import data.data_nci as data_nci
import data.data_promise as data_promise
import data.data_pirad_erc as data_pirad_erc
import data.data_mnms as data_mnms
import data.data_wmh as data_wmh
import data.data_scgm as data_scgm
# other imports
import os
import logging
import h5py
import numpy as np
import utils
import config.system_paths as sys_config
# ==================================================================
# TRAINING DATA LOADER
# ==================================================================
def load_test_data(dataset,
image_size,
target_resolution,
cv_fold_num = 1):
# ================================================================
# NCI
# ================================================================
if dataset in ['RUNMC', 'BMC']:
logging.info('Reading NCI - ' + dataset + ' images...')
logging.info('Data root directory: ' + sys_config.orig_data_root_nci)
data_pros = data_nci.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_nci,
preprocessing_folder = sys_config.preproc_folder_nci,
size = image_size,
target_resolution = target_resolution,
force_overwrite = False,
sub_dataset = dataset,
cv_fold_num = cv_fold_num)
imtr = data_pros['images_train']
gttr = data_pros['labels_train']
orig_data_res_x = data_pros['px_train'][:]
orig_data_res_y = data_pros['py_train'][:]
orig_data_res_z = data_pros['pz_train'][:]
orig_data_siz_x = data_pros['nx_train'][:]
orig_data_siz_y = data_pros['ny_train'][:]
orig_data_siz_z = data_pros['nz_train'][:]
num_train_subjects = orig_data_siz_z.shape[0]
imvl = data_pros['images_validation']
gtvl = data_pros['labels_validation']
orig_data_siz_z_val = data_pros['nz_validation'][:]
num_val_subjects = orig_data_siz_z_val.shape[0]
elif dataset in ['UCL', 'BIDMC', 'HK']:
        logging.info('Reading ' + dataset + ' images...')
logging.info('Data root directory: ' + sys_config.orig_data_root_promise)
data_pros = data_promise.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_promise,
preprocessing_folder = sys_config.preproc_folder_promise,
size = image_size,
target_resolution = target_resolution,
force_overwrite = False,
sub_dataset = dataset,
cv_fold_num = cv_fold_num)
imtr = data_pros['images_train']
gttr = data_pros['labels_train']
orig_data_res_x = data_pros['px_train'][:]
orig_data_res_y = data_pros['py_train'][:]
orig_data_res_z = data_pros['pz_train'][:]
orig_data_siz_x = data_pros['nx_train'][:]
orig_data_siz_y = data_pros['ny_train'][:]
orig_data_siz_z = data_pros['nz_train'][:]
num_train_subjects = orig_data_siz_z.shape[0]
imvl = data_pros['images_validation']
gtvl = data_pros['labels_validation']
orig_data_siz_z_val = data_pros['nz_validation'][:]
num_val_subjects = orig_data_siz_z_val.shape[0]
elif dataset in ['USZ']:
logging.info('Reading PIRAD_ERC images...')
logging.info('Data root directory: ' + sys_config.orig_data_root_pirad_erc)
data_pros_train = data_pirad_erc.load_data(input_folder = sys_config.orig_data_root_pirad_erc,
preproc_folder = sys_config.preproc_folder_pirad_erc,
idx_start = 40,
idx_end = 68,
size = image_size,
target_resolution = target_resolution,
labeller = 'ek',
force_overwrite = False)
imtr = data_pros_train['images']
gttr = data_pros_train['labels']
orig_data_res_x = data_pros_train['px'][:]
orig_data_res_y = data_pros_train['py'][:]
orig_data_res_z = data_pros_train['pz'][:]
orig_data_siz_x = data_pros_train['nx'][:]
orig_data_siz_y = data_pros_train['ny'][:]
orig_data_siz_z = data_pros_train['nz'][:]
num_train_subjects = orig_data_siz_z.shape[0]
data_pros_val = data_pirad_erc.load_data(input_folder = sys_config.orig_data_root_pirad_erc,
preproc_folder = sys_config.preproc_folder_pirad_erc,
idx_start = 20,
idx_end = 40,
size = image_size,
target_resolution = target_resolution,
labeller = 'ek',
force_overwrite = False)
imvl = data_pros_val['images']
gtvl = data_pros_val['labels']
orig_data_siz_z_val = data_pros_val['nz'][:]
num_val_subjects = orig_data_siz_z_val.shape[0]
# ================================================================
# CARDIAC (MNMS)
# ================================================================
elif dataset in ['HVHD', 'CSF', 'UHE']:
logging.info('Reading MNMS - ' + dataset + ' images...')
logging.info('Data root directory: ' + sys_config.orig_data_root_mnms)
data_cardiac = data_mnms.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_mnms,
preprocessing_folder = sys_config.preproc_folder_mnms,
size = image_size,
target_resolution = target_resolution,
force_overwrite = False,
sub_dataset = dataset)
imtr = data_cardiac['images_train']
gttr = data_cardiac['labels_train']
orig_data_res_x = data_cardiac['px_train'][:]
orig_data_res_y = data_cardiac['py_train'][:]
orig_data_res_z = data_cardiac['pz_train'][:]
orig_data_siz_x = data_cardiac['nx_train'][:]
orig_data_siz_y = data_cardiac['ny_train'][:]
orig_data_siz_z = data_cardiac['nz_train'][:]
num_train_subjects = orig_data_siz_z.shape[0]
imvl = data_cardiac['images_validation']
gtvl = data_cardiac['labels_validation']
orig_data_siz_z_val = data_cardiac['nz_validation'][:]
num_val_subjects = orig_data_siz_z_val.shape[0]
# ================================================================
# Brain lesions (WMH)
# ================================================================
elif dataset in ['UMC', 'NUHS']:
data_brain_lesions = data_wmh.load_and_maybe_process_data(sys_config.orig_data_root_wmh,
sys_config.preproc_folder_wmh,
image_size,
target_resolution,
force_overwrite=False,
sub_dataset = dataset,
cv_fold_number = cv_fold_num,
protocol = 'FLAIR')
imtr = data_brain_lesions['images_train']
gttr = data_brain_lesions['labels_train']
orig_data_res_x = data_brain_lesions['px_train'][:]
orig_data_res_y = data_brain_lesions['py_train'][:]
orig_data_res_z = data_brain_lesions['pz_train'][:]
orig_data_siz_x = data_brain_lesions['nx_train'][:]
orig_data_siz_y = data_brain_lesions['ny_train'][:]
orig_data_siz_z = data_brain_lesions['nz_train'][:]
num_train_subjects = orig_data_siz_z.shape[0]
imvl = data_brain_lesions['images_validation']
gtvl = data_brain_lesions['labels_validation']
orig_data_siz_z_val = data_brain_lesions['nz_validation'][:]
num_val_subjects = orig_data_siz_z_val.shape[0]
elif dataset in ['site1', 'site2', 'site3', 'site4']:
data_gm = data_scgm.load_and_maybe_process_data(sys_config.orig_data_root_scgm,
sys_config.preproc_folder_scgm,
image_size,
target_resolution,
force_overwrite=False,
sub_dataset = dataset,
cv_fold_number = cv_fold_num)
imtr = data_gm['images_train']
gttr = data_gm['labels_train']
orig_data_res_x = data_gm['px_train'][:]
orig_data_res_y = data_gm['py_train'][:]
orig_data_res_z = data_gm['pz_train'][:]
orig_data_siz_x = data_gm['nx_train'][:]
orig_data_siz_y = data_gm['ny_train'][:]
orig_data_siz_z = data_gm['nz_train'][:]
num_train_subjects = orig_data_siz_z.shape[0]
imvl = data_gm['images_validation']
gtvl = data_gm['labels_validation']
orig_data_siz_z_val = data_gm['nz_validation'][:]
num_val_subjects = orig_data_siz_z_val.shape[0]
# ================================================================
# HCP T1 / T2
# ================================================================
elif dataset in ['HCPT1', 'HCPT2']:
logging.info('Reading ' + str(dataset) + ' images...')
logging.info('Data root directory: ' + sys_config.orig_data_root_hcp)
data_brain_train = data_hcp.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_hcp,
preprocessing_folder = sys_config.preproc_folder_hcp,
idx_start = 0,
idx_end = 20,
protocol = dataset[-2:],
size = image_size,
depth = 256,
target_resolution = target_resolution)
imtr = data_brain_train['images']
gttr = data_brain_train['labels']
orig_data_res_x = data_brain_train['px'][:]
orig_data_res_y = data_brain_train['py'][:]
orig_data_res_z = data_brain_train['pz'][:]
orig_data_siz_x = data_brain_train['nx'][:]
orig_data_siz_y = data_brain_train['ny'][:]
orig_data_siz_z = data_brain_train['nz'][:]
num_train_subjects = orig_data_siz_z.shape[0]
data_brain_val = data_hcp.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_hcp,
preprocessing_folder = sys_config.preproc_folder_hcp,
idx_start = 20,
idx_end = 25,
protocol = dataset[-2:],
size = image_size,
depth = 256,
target_resolution = target_resolution)
imvl = data_brain_val['images']
gtvl = data_brain_val['labels']
orig_data_siz_z_val = data_brain_val['nz'][:]
num_val_subjects = orig_data_siz_z_val.shape[0]
elif dataset in ['CALTECH']:
logging.info('Reading CALTECH images...')
logging.info('Data root directory: ' + sys_config.orig_data_root_abide + 'CALTECH/')
data_brain_train = data_abide.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_abide,
preprocessing_folder = sys_config.preproc_folder_abide,
site_name = 'CALTECH',
idx_start = 0,
idx_end = 10,
protocol = 'T1',
size = image_size,
depth = 256,
target_resolution = target_resolution)
imtr = data_brain_train['images']
gttr = data_brain_train['labels']
orig_data_res_x = data_brain_train['px'][:]
orig_data_res_y = data_brain_train['py'][:]
orig_data_res_z = data_brain_train['pz'][:]
orig_data_siz_x = data_brain_train['nx'][:]
orig_data_siz_y = data_brain_train['ny'][:]
orig_data_siz_z = data_brain_train['nz'][:]
num_train_subjects = orig_data_siz_z.shape[0]
data_brain_val = data_abide.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_abide,
preprocessing_folder = sys_config.preproc_folder_abide,
site_name = 'CALTECH',
idx_start = 10,
idx_end = 15,
protocol = 'T1',
size = image_size,
depth = 256,
target_resolution = target_resolution)
imvl = data_brain_val['images']
gtvl = data_brain_val['labels']
orig_data_siz_z_val = data_brain_val['nz'][:]
num_val_subjects = orig_data_siz_z_val.shape[0]
return (imtr, # 0
gttr, # 1
orig_data_res_x, # 2
orig_data_res_y, # 3
orig_data_res_z, # 4
orig_data_siz_x, # 5
orig_data_siz_y, # 6
orig_data_siz_z, # 7
num_train_subjects, # 8
imvl, # 9
gtvl, # 10
orig_data_siz_z_val, # 11
num_val_subjects) # 12
# ==================================================================
# TEST DATA LOADER
# ==================================================================
def load_testing_data(test_dataset,
cv_fold_num,
image_size,
target_resolution,
image_depth):
# ================================================================
# PROMISE
# ================================================================
if test_dataset in ['UCL', 'BIDMC', 'HK']:
data_pros = data_promise.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_promise,
preprocessing_folder = sys_config.preproc_folder_promise,
size = image_size,
target_resolution = target_resolution,
force_overwrite = False,
sub_dataset = test_dataset,
cv_fold_num = cv_fold_num)
imts = data_pros['images_test']
gtts = data_pros['labels_test']
orig_data_res_x = data_pros['px_test'][:]
orig_data_res_y = data_pros['py_test'][:]
orig_data_res_z = data_pros['pz_test'][:]
orig_data_siz_x = data_pros['nx_test'][:]
orig_data_siz_y = data_pros['ny_test'][:]
orig_data_siz_z = data_pros['nz_test'][:]
name_test_subjects = data_pros['patnames_test']
num_test_subjects = orig_data_siz_z.shape[0]
ids = np.arange(num_test_subjects)
# ================================================================
# USZ
# ================================================================
elif test_dataset == 'USZ':
image_depth = 32
z_resolution = 2.5
idx_start = 0
idx_end = 20
data_pros = data_pirad_erc.load_data(input_folder = sys_config.orig_data_root_pirad_erc,
preproc_folder = sys_config.preproc_folder_pirad_erc,
idx_start = idx_start,
idx_end = idx_end,
size = image_size,
target_resolution = target_resolution,
labeller = 'ek')
imts = data_pros['images']
gtts = data_pros['labels']
orig_data_res_x = data_pros['px'][:]
orig_data_res_y = data_pros['py'][:]
orig_data_res_z = data_pros['pz'][:]
orig_data_siz_x = data_pros['nx'][:]
orig_data_siz_y = data_pros['ny'][:]
orig_data_siz_z = data_pros['nz'][:]
name_test_subjects = data_pros['patnames']
num_test_subjects = 10 # orig_data_siz_z.shape[0]
ids = np.arange(idx_start, idx_end)
# ================================================================
# NCI
# ================================================================
elif test_dataset in ['BMC', 'RUNMC']:
logging.info('Reading ' + test_dataset + ' images...')
logging.info('Data root directory: ' + sys_config.orig_data_root_nci)
data_pros = data_nci.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_nci,
preprocessing_folder = sys_config.preproc_folder_nci,
size = image_size,
target_resolution = target_resolution,
force_overwrite = False,
sub_dataset = test_dataset,
cv_fold_num = cv_fold_num)
imts = data_pros['images_test']
gtts = data_pros['labels_test']
orig_data_res_x = data_pros['px_test'][:]
orig_data_res_y = data_pros['py_test'][:]
orig_data_res_z = data_pros['pz_test'][:]
orig_data_siz_x = data_pros['nx_test'][:]
orig_data_siz_y = data_pros['ny_test'][:]
orig_data_siz_z = data_pros['nz_test'][:]
name_test_subjects = data_pros['patnames_test']
num_test_subjects = orig_data_siz_z.shape[0]
ids = np.arange(num_test_subjects)
# ================================================================
# CARDIAC (MNMS)
# ================================================================
elif test_dataset == 'HVHD' or test_dataset == 'CSF' or test_dataset == 'UHE':
logging.info('Reading MNMS - ' + test_dataset + ' images...')
logging.info('Data root directory: ' + sys_config.orig_data_root_mnms)
data_cardiac = data_mnms.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_mnms,
preprocessing_folder = sys_config.preproc_folder_mnms,
size = image_size,
target_resolution = target_resolution,
force_overwrite = False,
sub_dataset = test_dataset)
imts = data_cardiac['images_test']
gtts = data_cardiac['labels_test']
orig_data_res_x = data_cardiac['px_test'][:]
orig_data_res_y = data_cardiac['py_test'][:]
orig_data_res_z = data_cardiac['pz_test'][:]
orig_data_siz_x = data_cardiac['nx_test'][:]
orig_data_siz_y = data_cardiac['ny_test'][:]
orig_data_siz_z = data_cardiac['nz_test'][:]
name_test_subjects = data_cardiac['patnames_test']
num_test_subjects = orig_data_siz_z.shape[0]
ids = np.arange(num_test_subjects)
# ================================================================
# Brain lesions (WMH)
# ================================================================
elif test_dataset == 'UMC' or test_dataset == 'NUHS':
data_brain_lesions = data_wmh.load_and_maybe_process_data(sys_config.orig_data_root_wmh,
sys_config.preproc_folder_wmh,
image_size,
target_resolution,
force_overwrite=False,
sub_dataset = test_dataset,
cv_fold_number = cv_fold_num,
protocol = 'FLAIR')
imts = data_brain_lesions['images_test']
gtts = data_brain_lesions['labels_test']
orig_data_res_x = data_brain_lesions['px_test'][:]
orig_data_res_y = data_brain_lesions['py_test'][:]
orig_data_res_z = data_brain_lesions['pz_test'][:]
orig_data_siz_x = data_brain_lesions['nx_test'][:]
orig_data_siz_y = data_brain_lesions['ny_test'][:]
orig_data_siz_z = data_brain_lesions['nz_test'][:]
name_test_subjects = data_brain_lesions['patnames_test']
num_test_subjects = orig_data_siz_z.shape[0]
ids = np.arange(num_test_subjects)
# ================================================================
# SPINE
# ================================================================
elif test_dataset == 'site1' or test_dataset == 'site2' or test_dataset == 'site3' or test_dataset == 'site4':
data_spine = data_scgm.load_and_maybe_process_data(sys_config.orig_data_root_scgm,
sys_config.preproc_folder_scgm,
image_size,
target_resolution,
force_overwrite=False,
sub_dataset = test_dataset,
cv_fold_number = cv_fold_num)
imts = data_spine['images_test']
gtts = data_spine['labels_test']
orig_data_res_x = data_spine['px_test'][:]
orig_data_res_y = data_spine['py_test'][:]
orig_data_res_z = data_spine['pz_test'][:]
orig_data_siz_x = data_spine['nx_test'][:]
orig_data_siz_y = data_spine['ny_test'][:]
orig_data_siz_z = data_spine['nz_test'][:]
name_test_subjects = data_spine['patnames_test']
num_test_subjects = orig_data_siz_z.shape[0]
ids = np.arange(num_test_subjects)
# ================================================================
# HCP T1
# ================================================================
elif test_dataset == 'HCPT1':
logging.info('Reading HCPT1 images...')
logging.info('Data root directory: ' + sys_config.orig_data_root_hcp)
idx_start = 50
idx_end = 70
data_brain = data_hcp.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_hcp,
preprocessing_folder = sys_config.preproc_folder_hcp,
idx_start = idx_start,
idx_end = idx_end,
protocol = 'T1',
size = image_size,
depth = image_depth,
target_resolution = target_resolution)
imts = data_brain['images']
gtts = data_brain['labels']
orig_data_res_x = data_brain['px'][:]
orig_data_res_y = data_brain['py'][:]
orig_data_res_z = data_brain['pz'][:]
orig_data_siz_x = data_brain['nx'][:]
orig_data_siz_y = data_brain['ny'][:]
orig_data_siz_z = data_brain['nz'][:]
name_test_subjects = data_brain['patnames']
num_test_subjects = 10 # imts.shape[0] // image_depth
ids = np.arange(idx_start, idx_end)
# ================================================================
# HCP T2
# ================================================================
elif test_dataset == 'HCPT2':
logging.info('Reading HCPT2 images...')
logging.info('Data root directory: ' + sys_config.orig_data_root_hcp)
idx_start = 50
idx_end = 70
data_brain = data_hcp.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_hcp,
preprocessing_folder = sys_config.preproc_folder_hcp,
idx_start = idx_start,
idx_end = idx_end,
protocol = 'T2',
size = image_size,
depth = image_depth,
target_resolution = target_resolution)
imts = data_brain['images']
gtts = data_brain['labels']
orig_data_res_x = data_brain['px'][:]
orig_data_res_y = data_brain['py'][:]
orig_data_res_z = data_brain['pz'][:]
orig_data_siz_x = data_brain['nx'][:]
orig_data_siz_y = data_brain['ny'][:]
orig_data_siz_z = data_brain['nz'][:]
name_test_subjects = data_brain['patnames']
num_test_subjects = 10 # imts.shape[0] // image_depth
ids = np.arange(idx_start, idx_end)
# ================================================================
# ABIDE CALTECH T1
# ================================================================
elif test_dataset == 'CALTECH':
logging.info('Reading CALTECH images...')
logging.info('Data root directory: ' + sys_config.orig_data_root_abide + 'CALTECH/')
idx_start = 16
idx_end = 36
data_brain = data_abide.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_abide,
preprocessing_folder = sys_config.preproc_folder_abide,
site_name = 'CALTECH',
idx_start = idx_start,
idx_end = idx_end,
protocol = 'T1',
size = image_size,
depth = image_depth,
target_resolution = target_resolution)
imts = data_brain['images']
gtts = data_brain['labels']
orig_data_res_x = data_brain['px'][:]
orig_data_res_y = data_brain['py'][:]
orig_data_res_z = data_brain['pz'][:]
orig_data_siz_x = data_brain['nx'][:]
orig_data_siz_y = data_brain['ny'][:]
orig_data_siz_z = data_brain['nz'][:]
name_test_subjects = data_brain['patnames']
num_test_subjects = 10 # imts.shape[0] // image_depth
ids = np.arange(idx_start, idx_end)
# ================================================================
# ABIDE STANFORD T1
# ================================================================
elif test_dataset == 'STANFORD':
logging.info('Reading STANFORD images...')
logging.info('Data root directory: ' + sys_config.orig_data_root_abide + 'STANFORD/')
idx_start = 16
idx_end = 36
data_brain = data_abide.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_abide,
preprocessing_folder = sys_config.preproc_folder_abide,
site_name = 'STANFORD',
idx_start = idx_start,
idx_end = idx_end,
protocol = 'T1',
size = image_size,
depth = image_depth,
target_resolution = target_resolution)
imts = data_brain['images']
gtts = data_brain['labels']
orig_data_res_x = data_brain['px'][:]
orig_data_res_y = data_brain['py'][:]
orig_data_res_z = data_brain['pz'][:]
orig_data_siz_x = data_brain['nx'][:]
orig_data_siz_y = data_brain['ny'][:]
orig_data_siz_z = data_brain['nz'][:]
name_test_subjects = data_brain['patnames']
num_test_subjects = 10 # imts.shape[0] // image_depth
ids = np.arange(idx_start, idx_end)
return (imts, # 0
gtts, # 1
orig_data_res_x, # 2
orig_data_res_y, # 3
orig_data_res_z, # 4
orig_data_siz_x, # 5
orig_data_siz_y, # 6
orig_data_siz_z, # 7
name_test_subjects, # 8
num_test_subjects, # 9
ids) # 10
# ================================================================
# ================================================================
def load_testing_data_wo_preproc(test_dataset_name,
ids,
sub_num,
subject_name,
image_depth):
if test_dataset_name == 'HCPT1':
# image will be normalized to [0,1]
image_orig, labels_orig = data_hcp.load_without_size_preprocessing(input_folder = sys_config.orig_data_root_hcp,
idx = ids[sub_num],
protocol = 'T1',
preprocessing_folder = sys_config.preproc_folder_hcp,
depth = image_depth)
elif test_dataset_name == 'HCPT2':
# image will be normalized to [0,1]
image_orig, labels_orig = data_hcp.load_without_size_preprocessing(input_folder = sys_config.orig_data_root_hcp,
idx = ids[sub_num],
protocol = 'T2',
preprocessing_folder = sys_config.preproc_folder_hcp,
depth = image_depth)
elif test_dataset_name == 'CALTECH':
# image will be normalized to [0,1]
image_orig, labels_orig = data_abide.load_without_size_preprocessing(input_folder = sys_config.orig_data_root_abide,
site_name = 'CALTECH',
idx = ids[sub_num],
depth = image_depth)
elif test_dataset_name == 'STANFORD':
# image will be normalized to [0,1]
image_orig, labels_orig = data_abide.load_without_size_preprocessing(input_folder = sys_config.orig_data_root_abide,
site_name = 'STANFORD',
idx = ids[sub_num],
depth = image_depth)
elif test_dataset_name in ['BMC', 'RUNMC']:
# image will be normalized to [0,1]
image_orig, labels_orig = data_nci.load_without_size_preprocessing(sys_config.orig_data_root_nci,
sys_config.preproc_folder_nci,
test_dataset_name,
cv_fold_num=1,
train_test='test',
idx=ids[sub_num])
elif test_dataset_name == 'USZ':
# image will be normalized to [0,1]
image_orig, labels_orig = data_pirad_erc.load_without_size_preprocessing(sys_config.orig_data_root_pirad_erc,
subject_name,
labeller='ek')
elif test_dataset_name in ['UCL', 'BIDMC', 'HK']:
# image will be normalized to [0,1]
image_orig, labels_orig = data_promise.load_without_size_preprocessing(sys_config.preproc_folder_promise,
subject_name[4:6])
elif test_dataset_name in ['CSF', 'UHE', 'HVHD']:
# image will be normalized to [0,1]
image_orig, labels_orig = data_mnms.load_without_size_preprocessing(sys_config.preproc_folder_mnms,
subject_name)
elif test_dataset_name in ['VU', 'UMC', 'NUHS']:
# image will be normalized to [0,1]
image_orig, labels_orig = data_wmh.load_without_size_preprocessing(sys_config.orig_data_root_wmh,
test_dataset_name,
subject_name,
'FLAIR')
elif test_dataset_name in ['site1', 'site2', 'site3', 'site4']:
# image will be normalized to [0,1]
image_orig, labels_orig = data_scgm.load_without_size_preprocessing(sys_config.orig_data_root_scgm,
sys_config.preproc_folder_scgm,
test_dataset_name,
subject_name)
return image_orig, labels_orig
def load_and_maybe_process_data(input_folder,
preprocessing_folder,
size,
target_resolution,
force_overwrite=False,
sub_dataset = 'RUNMC', # RUNMC / BMC
cv_fold_num = 1):
    size_str = '_'.join([str(i) for i in size])
    res_str = '_'.join([str(i) for i in target_resolution])
    data_file_name = 'data_2d_size_%s_res_%s_cv_fold_%d_%s.hdf5' % (size_str, res_str, cv_fold_num, sub_dataset)
data_file_path = os.path.join(preprocessing_folder, data_file_name)
utils.makefolder(preprocessing_folder)
if not os.path.exists(data_file_path) or force_overwrite:
logging.info('This configuration of mode, size and target resolution has not yet been preprocessed')
logging.info('Preprocessing now!')
prepare_data(input_folder,
preprocessing_folder,
data_file_path,
size,
target_resolution,
sub_dataset,
cv_fold_num)
else:
logging.info('Already preprocessed this configuration. Loading now!')
return h5py.File(data_file_path, 'r')
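# Minimal usage sketch (added for illustration; the size/resolution values are
# assumptions and the sys_config paths come from the project's configuration):
#   data = load_and_maybe_process_data(input_folder=sys_config.orig_data_root_nci,
#                                      preprocessing_folder=sys_config.preproc_folder_nci,
#                                      size=(256, 256),
#                                      target_resolution=(0.625, 0.625),
#                                      sub_dataset='RUNMC',
#                                      cv_fold_num=1)
#   images_train = data['images_train']
# The returned object is an open h5py.File, so datasets are only read when sliced.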
# ===============================================================
# function to read a single subjects image and labels without any pre-processing
# ===============================================================
def load_without_size_preprocessing(input_folder,
preprocessing_folder,
sub_dataset,
cv_fold_num,
train_test,
idx):
# =======================
# =======================
if sub_dataset == 'RUNMC':
image_folder = input_folder + 'Images/Prostate-3T/'
folder_base = 'Prostate3T'
elif sub_dataset == 'BMC':
image_folder = input_folder + 'Images/PROSTATE-DIAGNOSIS/'
folder_base = 'ProstateDx'
# =======================
# =======================
folder_list = get_patient_folders(image_folder, folder_base, sub_dataset, cv_fold_num)
folder = folder_list[train_test][idx]
patname = folder_base + '-' + str(folder.split('-')[-2]) + '-' + str(folder.split('-')[-1])
nifti_img_path = preprocessing_folder + 'Individual_NIFTI/' + patname
# ============
# read the image and normalize the image to be between 0 and 1
# ============
image = utils.load_nii(img_path = nifti_img_path + '_img_n4.nii.gz')[0]
image = utils.normalise_image(image, norm_type='div_by_max')
# ==================
# read the label file
# ==================
label = utils.load_nii(img_path = nifti_img_path + '_lbl.nii.gz')[0]
return image, label
|
python
|
# -*- coding: utf-8 -*-
from sqlalchemy.ext.hybrid import hybrid_property
from . import db, bcrypt
from datetime import datetime
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
username = db.Column(db.String(32), index=True, unique=True)
email = db.Column(db.String(64), unique=True)
_password = db.Column(db.String(64))
reg_time = db.Column(db.DateTime, default=datetime.utcnow)
last_login = db.Column(db.DateTime)
@property
def is_authenticated(self):
return True
@property
def is_active(self):
return True
@property
def is_anonymous(self):
return False
def get_id(self):
try:
return unicode(self.id)
except NameError:
return str(self.id)
@hybrid_property
def password(self):
return self._password
@password.setter
def _set_password(self, plaintext):
self._password = bcrypt.generate_password_hash(plaintext)
def is_correct_password(self, plaintext):
if bcrypt.check_password_hash(self._password, plaintext):
return True
return False
def __repr__(self):
return '<User %r>' % self.username
class Entry(db.Model):
__tablename__ = 'entries'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
good = db.Column(db.String(64))
bad = db.Column(db.String(64))
def __repr__(self):
return '<Entry %r & %r>' % (self.good, self.bad)
class Hobby(db.Model):
__tablename__ = 'hobbies'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
hobby = db.Column(db.String(12))
hb_entries = db.relationship('HBEntry', backref='hobby', lazy='dynamic')
class HBEntry(db.Model):
__tablename__ = 'hbentries'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
hb_id = db.Column(db.Integer, db.ForeignKey('hobbies.id'))
good = db.Column(db.String(64))
bad = db.Column(db.String(64))
def __repr__(self):
return '<Entry %r & %r>' % (self.good, self.bad)
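# Minimal usage sketch (illustrative only; assumes an application context and the
# db/bcrypt instances initialised by this package):
#   user = User(username='alice', email='alice@example.com')
#   user.password = 'secret'             # hashed by the hybrid property setter
#   db.session.add(user)
#   db.session.commit()
#   user.is_correct_password('secret')   # True
#   user.is_correct_password('wrong')    # False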
|
python
|
"""
Stingy OLX ad message forwarder: check for new message(s) and send them to your email
@author [email protected]
"""
from stingy_olx import StingyOLX
import re
import argparse
import smtplib
email_tpl = '''From: {0}\r\nTo: {1}\r\nSubject: {2}\r\nMIME-Version: 1.0\r\nContent-Type: text/html\r\n\r\n
{3}
'''
message_group_tpl = '''
<strong><a href="{}">{}</a></strong>
{}
'''
message_tpl = '''
<div style="padding-bottom:5px">
<em>{} ({})</em>
<div>{}</div>
</div>
'''
def send_email(smtp_config, to, body):
server_ssl = smtplib.SMTP_SSL(smtp_config['server'], smtp_config['port'])
server_ssl.ehlo()
server_ssl.login(smtp_config['username'], smtp_config['password'])
email = email_tpl.format(
smtp_config['from'],
to,
smtp_config['subject'],
body,
)
server_ssl.sendmail(smtp_config['from'], to, email)
server_ssl.close()
print('Email sent')
def build_email(ads):
"""
Build HTML email format based on template and ad messages
"""
email = []
for ad in ads:
html_messages = []
for msg in ad['messages']:
html_messages.append(message_tpl.format(msg['sender'], msg['time'], msg['body']))
email.append(message_group_tpl.format(ad['url'], ad['title'], '\n'.join(html_messages)))
return '\n'.join(email)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("olx_username", help="OLX username")
parser.add_argument("olx_password", help="OLX password")
parser.add_argument("smtp_username", help="SMTP username")
parser.add_argument("smtp_password", help="SMTP password")
parser.add_argument("email_to", help="Email recipient")
parser.add_argument("-s", "--smtp_server", help="SMTP server", default="smtp.gmail.com")
parser.add_argument("-p", "--smtp_port", help="SMTP port", type=int, default=465)
args = parser.parse_args()
smtp_config = {
'username': args.smtp_username,
'password': args.smtp_password,
'server': args.smtp_server,
'port': args.smtp_port,
'from': 'Yohanes Gultom',
'subject': 'Pesan baru di olx.co.id'
}
olx = StingyOLX()
olx.login(args.olx_username, args.olx_password)
ads = olx.check_unread_message()
if ads:
email = build_email(ads)
send_email(smtp_config, args.email_to, email)
olx.logout()
if __name__ == '__main__':
main()
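# Example invocation (the script name and credentials are placeholders; the
# Gmail SMTP defaults are used when -s/-p are omitted):
#   python olx_forwarder.py OLX_USER OLX_PASS GMAIL_USER GMAIL_APP_PASS you@example.com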
|
python
|
# encoding: utf-8
# module pandas._libs.reduction
# from C:\Python27\lib\site-packages\pandas\_libs\reduction.pyd
# by generator 1.147
# no doc
# imports
import __builtin__ as __builtins__ # <module '__builtin__' (built-in)>
import numpy as np # C:\Python27\lib\site-packages\numpy\__init__.pyc
from pandas._libs.lib import maybe_convert_objects
import distutils.version as __distutils_version
# functions
def apply_frame_axis0(*args, **kwargs): # real signature unknown
pass
def reduce(*args, **kwargs): # real signature unknown
"""
Parameters
-----------
arr : NDFrame object
f : function
axis : integer axis
dummy : type of reduced output (series)
labels : Index or None
"""
pass
def __pyx_unpickle_Reducer(*args, **kwargs): # real signature unknown
pass
def __pyx_unpickle_SeriesBinGrouper(*args, **kwargs): # real signature unknown
pass
def __pyx_unpickle_SeriesGrouper(*args, **kwargs): # real signature unknown
pass
def __pyx_unpickle_Slider(*args, **kwargs): # real signature unknown
pass
# classes
class BlockSlider(object):
""" Only capable of sliding on axis=0 """
def move(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __setstate__(self, *args, **kwargs): # real signature unknown
pass
blocks = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
dummy = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
frame = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
idx_slider = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
index = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
nblocks = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__pyx_vtable__ = None # (!) real value is '<capsule object NULL at 0x0000000006A57CC0>'
class InvalidApply(Exception):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__qualname__ = 'InvalidApply'
class LooseVersion(__distutils_version.Version):
"""
Version numbering for anarchists and software realists.
Implements the standard interface for version number classes as
described above. A version number consists of a series of numbers,
separated by either periods or strings of letters. When comparing
version numbers, the numeric components will be compared
numerically, and the alphabetic components lexically. The following
are all valid version numbers, in no particular order:
1.5.1
1.5.2b2
161
3.10a
8.02
3.4j
1996.07.12
3.2.pl0
3.1.1.6
2g6
11g
0.960923
2.2beta29
1.13++
5.5.kw
2.0b1pl0
In fact, there is no such thing as an invalid version number under
this scheme; the rules for comparison are simple and predictable,
but may not always give the results you want (for some definition
of "want").
"""
def parse(self, *args, **kwargs): # real signature unknown
pass
def __cmp__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __repr__(self, *args, **kwargs): # real signature unknown
pass
def __str__(self, *args, **kwargs): # real signature unknown
pass
component_re = None # (!) real value is '<_sre.SRE_Pattern object at 0x0000000003C98470>'
class Reducer(object):
"""
Performs generic reduction operation on a C or Fortran-contiguous ndarray
while avoiding ndarray construction overhead
"""
def get_result(self, *args, **kwargs): # real signature unknown
pass
def _check_dummy(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __setstate__(self, *args, **kwargs): # real signature unknown
pass
class SeriesBinGrouper(object):
""" Performs grouping operation according to bin edges, rather than labels """
def get_result(self, *args, **kwargs): # real signature unknown
pass
def _check_dummy(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __setstate__(self, *args, **kwargs): # real signature unknown
pass
arr = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
bins = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
dummy_arr = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
dummy_index = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
f = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
index = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
ityp = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
typ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
values = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
class SeriesGrouper(object):
"""
Performs generic grouping operation while avoiding ndarray construction
overhead
"""
def get_result(self, *args, **kwargs): # real signature unknown
pass
def _check_dummy(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __setstate__(self, *args, **kwargs): # real signature unknown
pass
arr = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
dummy_arr = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
dummy_index = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
f = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
index = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
ityp = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
labels = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
typ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
values = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
class Slider(object):
""" Only handles contiguous data for now """
def advance(self, *args, **kwargs): # real signature unknown
pass
def reset(self, *args, **kwargs): # real signature unknown
pass
def set_length(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __setstate__(self, *args, **kwargs): # real signature unknown
pass
__pyx_vtable__ = None # (!) real value is '<capsule object NULL at 0x0000000006A57C60>'
# variables with complex values
__test__ = {}
|
python
|
"""
Base threading server class
"""
from threading import Thread
class ThreadServer:
def __init__(self):
self.server_thread = None
self.running = False
def start(self, *args, **kwargs):
if self.running:
return
self.running = True
self.server_thread = Thread(target=self.run, args=args, kwargs=kwargs)
self.server_thread.start()
def stop(self):
self.running = False
def run(self):
"""
Server main function
"""
pass
class StaticServer:
def start(self, *args, **kwargs):
pass
def stop(self):
pass
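# Minimal usage sketch (EchoServer is illustrative and not part of this module):
#
#   class EchoServer(ThreadServer):
#       def run(self, host, port):
#           while self.running:
#               pass  # accept connections / do work until stop() clears the flag
#
#   server = EchoServer()
#   server.start('127.0.0.1', 8000)   # run() executes on a background thread
#   server.stop()                     # cooperative shutdown via self.running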
|
python
|
from dynaconf import FlaskDynaconf
flask_dynaconf = FlaskDynaconf()
def init_app(app, **config):
flask_dynaconf.init_app(app, **config)
app.config.load_extensions()
|
python
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sys
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
class BatchNormalization(tf.layers.BatchNormalization):
"""Fixed default name of BatchNormalization to match TpuBatchNormalization."""
def __init__(self, name='tpu_batch_normalization', **kwargs):
super(BatchNormalization, self).__init__(name=name, **kwargs)
def drop_connect(inputs, is_training, survival_prob):
"""Drop the entire conv with given survival probability."""
# "Deep Networks with Stochastic Depth", https://arxiv.org/pdf/1603.09382.pdf
if not is_training:
return inputs
# Compute tensor.
batch_size = tf.shape(inputs)[0]
random_tensor = survival_prob
random_tensor += tf.random_uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)
binary_tensor = tf.floor(random_tensor)
# Unlike conventional way that multiply survival_prob at test time, here we
# divide survival_prob at training time, such that no addition compute is
# needed at test time.
output = tf.div(inputs, survival_prob) * binary_tensor
return output
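# Worked illustration (added comment, not part of the original code): with
# survival_prob = 0.8, random_tensor is uniform in [0.8, 1.8), so tf.floor()
# yields 1 with probability 0.8 and 0 otherwise. Dividing the kept activations
# by 0.8 keeps the expected value of the output equal to the input, which is
# why no extra rescaling is needed at inference time.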
def get_ema_vars():
"""Get all exponential moving average (ema) variables."""
ema_vars = tf.trainable_variables() + tf.get_collection('moving_vars')
for v in tf.global_variables():
# We maintain mva for batch norm moving mean and variance as well.
if 'moving_mean' in v.name or 'moving_variance' in v.name:
ema_vars.append(v)
return list(set(ema_vars))
class DepthwiseConv2D(tf.keras.layers.DepthwiseConv2D, tf.layers.Layer):
"""Wrap keras DepthwiseConv2D to tf.layers."""
pass
class Conv2D(tf.layers.Conv2D):
"""Wrapper for Conv2D with specialization for fast inference."""
def _bias_activation(self, outputs):
if self.use_bias:
outputs = tf.nn.bias_add(outputs, self.bias, data_format='NCHW')
if self.activation is not None:
return self.activation(outputs)
return outputs
def _can_run_fast_1x1(self, inputs):
batch_size = inputs.shape.as_list()[0]
return (self.data_format == 'channels_first' and
batch_size == 1 and
self.kernel_size == (1, 1))
def _call_fast_1x1(self, inputs):
# Compute the 1x1 convolution as a matmul.
inputs_shape = tf.shape(inputs)
flat_inputs = tf.reshape(inputs, [inputs_shape[1], -1])
flat_outputs = tf.matmul(
tf.squeeze(self.kernel),
flat_inputs,
transpose_a=True)
outputs_shape = tf.concat([[1, self.filters], inputs_shape[2:]], axis=0)
outputs = tf.reshape(flat_outputs, outputs_shape)
# Handle the bias and activation function.
return self._bias_activation(outputs)
def call(self, inputs):
if self._can_run_fast_1x1(inputs):
return self._call_fast_1x1(inputs)
return super(Conv2D, self).call(inputs)
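# Note (added illustration, not part of the original file): for a 1x1 kernel,
# batch size 1 and NCHW data, the convolution is just a matrix product. The
# input [1, C_in, H, W] is reshaped to [C_in, H*W], the kernel is squeezed to
# [C_in, C_out], and kernel^T @ inputs yields [C_out, H*W], which is reshaped
# back to [1, C_out, H, W] before the bias/activation are applied.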
class EvalCkptDriver(object):
"""A driver for running eval inference.
Attributes:
model_name: str. Model name to eval.
batch_size: int. Eval batch size.
image_size: int. Input image size, determined by model name.
num_classes: int. Number of classes, default to 1000 for ImageNet.
include_background_label: whether to include extra background label.
advprop_preprocessing: whether to use advprop preprocessing.
"""
def __init__(self,
model_name,
batch_size=1,
image_size=224,
num_classes=1000,
include_background_label=False,
advprop_preprocessing=False):
"""Initialize internal variables."""
self.model_name = model_name
self.batch_size = batch_size
self.num_classes = num_classes
self.include_background_label = include_background_label
self.image_size = image_size
self.advprop_preprocessing = advprop_preprocessing
def restore_model(self, sess, ckpt_dir, enable_ema=True, export_ckpt=None):
"""Restore variables from checkpoint dir."""
sess.run(tf.global_variables_initializer())
checkpoint = tf.train.latest_checkpoint(ckpt_dir)
if enable_ema:
ema = tf.train.ExponentialMovingAverage(decay=0.0)
ema_vars = get_ema_vars()
var_dict = ema.variables_to_restore(ema_vars)
ema_assign_op = ema.apply(ema_vars)
else:
var_dict = get_ema_vars()
ema_assign_op = None
tf.train.get_or_create_global_step()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(var_dict, max_to_keep=1)
saver.restore(sess, checkpoint)
if export_ckpt:
if ema_assign_op is not None:
sess.run(ema_assign_op)
saver = tf.train.Saver(max_to_keep=1, save_relative_paths=True)
saver.save(sess, export_ckpt)
def build_model(self, features, is_training):
"""Build model with input features."""
del features, is_training
raise ValueError('Must be implemented by subclasses.')
def get_preprocess_fn(self):
    raise ValueError('Must be implemented by subclasses.')
def build_dataset(self, filenames, labels, is_training):
"""Build input dataset."""
batch_drop_remainder = False
if 'condconv' in self.model_name and not is_training:
# CondConv layers can only be called with known batch dimension. Thus, we
# must drop all remaining examples that do not make up one full batch.
# To ensure all examples are evaluated, use a batch size that evenly
# divides the number of files.
batch_drop_remainder = True
num_files = len(filenames)
if num_files % self.batch_size != 0:
tf.logging.warn('Remaining examples in last batch are not being '
'evaluated.')
filenames = tf.constant(filenames)
labels = tf.constant(labels)
dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
def _parse_function(filename, label):
image_string = tf.read_file(filename)
preprocess_fn = self.get_preprocess_fn()
image_decoded = preprocess_fn(
image_string, is_training, image_size=self.image_size)
image = tf.cast(image_decoded, tf.float32)
return image, label
dataset = dataset.map(_parse_function)
dataset = dataset.batch(self.batch_size,
drop_remainder=batch_drop_remainder)
iterator = dataset.make_one_shot_iterator()
images, labels = iterator.get_next()
return images, labels
def run_inference(self,
ckpt_dir,
image_files,
labels,
enable_ema=True,
export_ckpt=None):
"""Build and run inference on the target images and labels."""
label_offset = 1 if self.include_background_label else 0
with tf.Graph().as_default(), tf.Session() as sess:
images, labels = self.build_dataset(image_files, labels, False)
probs = self.build_model(images, is_training=False)
if isinstance(probs, tuple):
probs = probs[0]
self.restore_model(sess, ckpt_dir, enable_ema, export_ckpt)
prediction_idx = []
prediction_prob = []
for _ in range(len(image_files) // self.batch_size):
out_probs = sess.run(probs)
idx = np.argsort(out_probs)[::-1]
prediction_idx.append(idx[:5] - label_offset)
prediction_prob.append([out_probs[pid] for pid in idx[:5]])
# Return the top 5 predictions (idx and prob) for each image.
return prediction_idx, prediction_prob
def eval_example_images(self,
ckpt_dir,
image_files,
labels_map_file,
enable_ema=True,
export_ckpt=None):
"""Eval a list of example images.
Args:
ckpt_dir: str. Checkpoint directory path.
image_files: List[str]. A list of image file paths.
labels_map_file: str. The labels map file path.
      enable_ema: enable exponential moving average.
export_ckpt: export ckpt folder.
Returns:
A tuple (pred_idx, and pred_prob), where pred_idx is the top 5 prediction
index and pred_prob is the top 5 prediction probability.
"""
classes = json.loads(tf.gfile.Open(labels_map_file).read())
pred_idx, pred_prob = self.run_inference(
ckpt_dir, image_files, [0] * len(image_files), enable_ema, export_ckpt)
for i in range(len(image_files)):
print('predicted class for image {}: '.format(image_files[i]))
for j, idx in enumerate(pred_idx[i]):
print(' -> top_{} ({:4.2f}%): {} '.format(j, pred_prob[i][j] * 100,
classes[str(idx)]))
return pred_idx, pred_prob
def eval_imagenet(self, ckpt_dir, imagenet_eval_glob,
imagenet_eval_label, num_images, enable_ema, export_ckpt):
"""Eval ImageNet images and report top1/top5 accuracy.
Args:
ckpt_dir: str. Checkpoint directory path.
imagenet_eval_glob: str. File path glob for all eval images.
imagenet_eval_label: str. File path for eval label.
num_images: int. Number of images to eval: -1 means eval the whole
dataset.
      enable_ema: enable exponential moving average.
export_ckpt: export checkpoint folder.
Returns:
A tuple (top1, top5) for top1 and top5 accuracy.
"""
imagenet_val_labels = [int(i) for i in tf.gfile.GFile(imagenet_eval_label)]
imagenet_filenames = sorted(tf.gfile.Glob(imagenet_eval_glob))
if num_images < 0:
num_images = len(imagenet_filenames)
image_files = imagenet_filenames[:num_images]
labels = imagenet_val_labels[:num_images]
pred_idx, _ = self.run_inference(
ckpt_dir, image_files, labels, enable_ema, export_ckpt)
top1_cnt, top5_cnt = 0.0, 0.0
for i, label in enumerate(labels):
top1_cnt += label in pred_idx[i][:1]
top5_cnt += label in pred_idx[i][:5]
if i % 100 == 0:
print('Step {}: top1_acc = {:4.2f}% top5_acc = {:4.2f}%'.format(
i, 100 * top1_cnt / (i + 1), 100 * top5_cnt / (i + 1)))
sys.stdout.flush()
top1, top5 = 100 * top1_cnt / num_images, 100 * top5_cnt / num_images
print('Final: top1_acc = {:4.2f}% top5_acc = {:4.2f}%'.format(top1, top5))
return top1, top5
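# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal
# EvalCkptDriver subclass showing how the two abstract hooks are typically
# filled in. The decode/resize preprocessing and the single dense layer are
# stand-ins, not the real EfficientNet preprocessing or network.
# ----------------------------------------------------------------------------
class ExampleEvalDriver(EvalCkptDriver):
  """Toy driver that only demonstrates the EvalCkptDriver interface."""

  def get_preprocess_fn(self):
    def _preprocess(image_string, is_training, image_size):
      del is_training  # the toy preprocessing is identical for train/eval
      image = tf.image.decode_jpeg(image_string, channels=3)
      image = tf.image.resize_bilinear(
          tf.expand_dims(image, 0), [image_size, image_size])[0]
      return image
    return _preprocess

  def build_model(self, features, is_training):
    del is_training
    flat = tf.reshape(features, [self.batch_size, -1])
    logits = tf.layers.dense(flat, self.num_classes)
    return tf.nn.softmax(logits)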
|
python
|
from django import forms
from django.http import QueryDict
from django.utils.translation import ugettext_lazy as _
from panoptes.analysis import FilteredSessions
from panoptes.analysis.fields import LensChoiceField, WeekdayChoiceField
from panoptes.core.fields import LocationField
from panoptes.core.models import Session
import datetime
class SessionFilterForm(forms.Form):
"""A form for filtering session data based on user bounds."""
location = LocationField(label=_("location"))
lens = LensChoiceField(label=_("data view"))
start = forms.DateField(label=_("start date"), required=False)
end = forms.DateField(label=_("end date"), required=False)
start_time = forms.TimeField(label=_("start time"), required=False)
end_time = forms.TimeField(label=_("end time"), required=False)
weekdays = WeekdayChoiceField(label=_("weekdays"), required=False)
x_detail = forms.CharField(label=_("x-value detail"), required=False, widget=forms.HiddenInput)
y_detail = forms.CharField(label=_("y-value detail"), required=False, widget=forms.HiddenInput)
def __init__(self, *args, **kwargs):
"""Accept a 'profile' kwarg that provides default data."""
profile = kwargs.pop('profile', None)
if profile:
today = datetime.date.today()
post = QueryDict("")
post = post.copy()
post.update({
'location': profile.default_location.pk,
'lens': profile.default_lens.slug,
'start': self._parsable_date(today - datetime.timedelta(days=profile.default_recent_days)),
'end': self._parsable_date(today)
})
args = (post,)
super(SessionFilterForm, self).__init__(*args, **kwargs)
def _parsable_date(self, date):
"""Return the given date as a parsable string."""
return date.strftime("%m/%d/%Y")
def clean(self):
"""Perform extra validation and resolution of data.
This adds an `x_detail` key to the cleaned data containing the resolved
x-value whose details should be shown, and also makes sure that the dates and
times are coherent.
"""
cleaned_data = self.cleaned_data
today = datetime.date.today()
# If a start date is provided but the end date is left blank, end on the
# current date
        if cleaned_data.get('start', None) and not cleaned_data.get('end', None):
cleaned_data['end'] = today
# If an end date is provided and no start date is given, start at the first
# date on which sessions were recorded, or a year ago, if no sessions exist
if cleaned_data.get('end', None) and not cleaned_data.get('start', None):
cleaned_data['start'] = Session.objects.first_session_date_for_location(cleaned_data['location'])
# If the date bounds are left blank, default to viewing the past week
if not cleaned_data.get('start', None) and not cleaned_data.get('end', None):
cleaned_data['start'] = today - datetime.timedelta(weeks=1)
cleaned_data['end'] = today
# Have empty time filters use the opening or closing time of the location
if not cleaned_data.get('start_time', None):
cleaned_data['start_time'] = cleaned_data['location'].earliest_opening
if not cleaned_data.get('end_time', None):
cleaned_data['end_time'] = cleaned_data['location'].latest_closing
# Make sure that the start and end dates and times are properly ordered
if cleaned_data['start'] > cleaned_data['end']:
raise forms.ValidationError(_("The start must come before the end date"))
if cleaned_data['start_time'] > cleaned_data['end_time']:
raise forms.ValidationError(_("The start time must come before the end time"))
# Resolve the x- and y-value details if possible
if cleaned_data.get('x_detail', None):
x_axis = cleaned_data['lens'].x_axis()
cleaned_data['x_detail'] = x_axis.deserialize_value(cleaned_data['x_detail'])
if cleaned_data.get('y_detail', None):
y_axis = cleaned_data['lens'].y_axis()
cleaned_data['y_detail'] = y_axis.deserialize_value(cleaned_data['y_detail'])
cleaned_data['x_detail'] = cleaned_data['x_detail'] or None
cleaned_data['y_detail'] = cleaned_data['y_detail'] or None
return cleaned_data
def as_filtered_sessions(self):
"""
If the form was successfully validated, return a FilteredSessions
instance built from the form's cleaned data.
"""
data = self.cleaned_data
filtered_sessions = FilteredSessions(
location=data['location'],
start_date=data.get('start', None),
end_date=data.get('end', None),
start_time=data.get('start_time', None),
end_time=data.get('end_time', None),
weekdays=data.get('weekdays', []),
x_detail=data.get('x_detail', None))
lens = data.get('lens', None)
if lens:
filtered_sessions.set_axes(lens.x_axis, lens.y_axis)
return filtered_sessions
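# Illustrative usage sketch (not part of the original module): how calling
# code might run the form and obtain a FilteredSessions. The helper name and
# the idea of passing request.POST data are assumptions for the example; only
# the form behaviour documented above is relied on.
def example_filtered_sessions(post_data, profile=None):
    """Return a FilteredSessions for valid input, or None if validation fails."""
    form = SessionFilterForm(post_data, profile=profile)
    if form.is_valid():
        return form.as_filtered_sessions()
    return None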
|
python
|
import os
from typing import Any
import torch.optim as optim
import yaml
from aim.sdk.utils import generate_run_hash
from deep_compression.losses import (
BatchChannelDecorrelationLoss,
RateDistortionLoss,
)
def create_criterion(conf):
if conf.name == "RateDistortionLoss":
return RateDistortionLoss(
lmbda=conf.lambda_,
target_bpp=conf.get("target_bpp", None),
)
if conf.name == "BatchChannelDecorrelationLoss":
return BatchChannelDecorrelationLoss(
lmbda=conf.lambda_,
lmbda_corr=conf.lambda_corr,
top_k_corr=conf.top_k_corr,
)
raise ValueError("Unknown criterion.")
def configure_optimizers(net, conf):
"""Separate parameters for the main optimizer and the auxiliary optimizer.
Return two optimizers"""
parameters = {
n
for n, p in net.named_parameters()
if not n.endswith(".quantiles") and p.requires_grad
}
aux_parameters = {
n
for n, p in net.named_parameters()
if n.endswith(".quantiles") and p.requires_grad
}
# Make sure we don't have an intersection of parameters
params_dict = dict(net.named_parameters())
inter_params = parameters & aux_parameters
union_params = parameters | aux_parameters
assert len(inter_params) == 0
assert len(union_params) - len(params_dict.keys()) == 0
optimizer = optim.Adam(
(params_dict[n] for n in sorted(parameters)),
lr=conf.learning_rate,
)
aux_optimizer = optim.Adam(
(params_dict[n] for n in sorted(aux_parameters)),
lr=conf.aux_learning_rate,
)
return {"net": optimizer, "aux": aux_optimizer}
def configure_logs(logdir: str) -> dict[str, Any]:
filename = os.path.join(logdir, "info.yaml")
try:
with open(filename) as f:
config = yaml.safe_load(f)
except FileNotFoundError:
config = {}
config["run_hash"] = generate_run_hash()
os.makedirs(logdir, exist_ok=True)
with open(filename, "w") as f:
yaml.safe_dump(config, f)
return config
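# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how the optimizer
# split in configure_optimizers behaves on a toy model. _Bottleneck, _ToyCodec
# and the SimpleNamespace config are made up for the demo; only the
# ".quantiles" suffix convention used above is assumed.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    from types import SimpleNamespace

    import torch
    import torch.nn as nn

    class _Bottleneck(nn.Module):
        def __init__(self):
            super().__init__()
            # Auxiliary parameter routed to the "aux" optimizer.
            self.quantiles = nn.Parameter(torch.zeros(1, 1, 3))

    class _ToyCodec(nn.Module):
        def __init__(self):
            super().__init__()
            self.g_a = nn.Linear(8, 8)  # "main" parameters
            self.entropy_bottleneck = _Bottleneck()

    conf = SimpleNamespace(learning_rate=1e-4, aux_learning_rate=1e-3)
    optimizers = configure_optimizers(_ToyCodec(), conf)
    # g_a.* go to the main optimizer, entropy_bottleneck.quantiles to "aux".
    print({k: len(v.param_groups[0]["params"]) for k, v in optimizers.items()})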
|
python
|
# -*- coding: utf-8 -*-
# maya
import pymel.core as pm
from maya.app.general.mayaMixin import MayaQDockWidget
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
# Built-in
from functools import partial
import os
import sys
import json
import shutil
import subprocess
import imp
import inspect
import traceback
# mbox
from . import naming_rules_ui as name_ui
from . import custom_step_ui as custom_step_ui
from . import root_settings_ui as root_ui
from . import block_settings_ui as block_ui
from . import joint_names_ui as joint_name_ui
from mbox.lego import naming, lib
# mgear
from mgear.core import pyqt, string
from mgear.vendor.Qt import QtCore, QtWidgets, QtGui
from mgear.anim_picker.gui import MAYA_OVERRIDE_COLOR
ROOT_TYPE = "mbox_guide_root"
BLOCK_TYPE = "mbox_guide_block"
class RootMainTabUI(QtWidgets.QDialog, root_ui.Ui_Form):
def __init__(self, parent=None):
super(RootMainTabUI, self).__init__(parent)
self.setupUi(self)
class RootCustomStepTabUI(QtWidgets.QDialog, custom_step_ui.Ui_Form):
def __init__(self, parent=None):
super(RootCustomStepTabUI, self).__init__(parent)
self.setupUi(self)
class RootNameTabUI(QtWidgets.QDialog, name_ui.Ui_Form):
def __init__(self, parent=None):
super(RootNameTabUI, self).__init__(parent)
self.setupUi(self)
class HelperSlots:
def __init__(self):
        self._network = None
def update_host_ui(self, l_edit, target_attr):
guide = lib.get_component_guide(pm.selected(type="transform")[0])
if guide:
network = guide[0].message.outputs(type="network")[0]
l_edit.setText(guide[0].name())
self._network.attr(target_attr).set("{},{}".format(guide[0].name(), network.attr("oid").get()))
else:
if l_edit.text():
l_edit.clear()
self._network.attr(target_attr).set("")
pm.displayWarning("")
def update_line_edit(self, l_edit, target_attr):
name = string.removeInvalidCharacter(l_edit.text())
l_edit.setText(name)
self._network.attr(target_attr).set(name)
def update_line_edit2(self, l_edit, target_attr):
        # normalize the text to be Maya naming compatible
# replace invalid characters with "_"
name = string.normalize2(l_edit.text())
l_edit.setText(name)
self._network.attr(target_attr).set(name)
def update_text_edit(self, l_edit, target_attr):
self._network.attr(target_attr).set(l_edit.toPlainText())
def update_line_edit_path(self, l_edit, target_attr):
self._network.attr(target_attr).set(l_edit.text())
def update_name_rule_line_edit(self, l_edit, target_attr):
        # normalize the text to be Maya naming compatible
# replace invalid characters with "_"
name = naming.normalize_name_rule(l_edit.text())
l_edit.setText(name)
self._network.attr(target_attr).set(name)
self.naming_rule_validator(l_edit)
def naming_rule_validator(self, l_edit, log=True):
Palette = QtGui.QPalette()
if not naming.name_rule_validator(l_edit.text(),
naming.NAMING_RULE_TOKENS,
log=log):
Palette.setBrush(QtGui.QPalette.Text, self.red_brush)
else:
Palette.setBrush(QtGui.QPalette.Text, self.white_down_brush)
l_edit.setPalette(Palette)
def add_item_to_list_widget(self, list_widget, target_attr=None):
items = pm.selected()
items_list = [i.text() for i in list_widget.findItems(
"", QtCore.Qt.MatchContains)]
# Quick clean the first empty item
if items_list and not items_list[0]:
list_widget.takeItem(0)
for item in items:
if len(item.name().split("|")) != 1:
pm.displayWarning("Not valid obj: %s, name is not unique." %
item.name())
continue
if item.name() not in items_list:
if item.hasAttr("is_guide_component") or item.hasAttr("is_guide_root"):
list_widget.addItem(item.name())
else:
pm.displayWarning(
"The object: %s, is not a valid"
" reference, Please select only guide componet"
" roots and guide locators." % item.name())
else:
pm.displayWarning("The object: %s, is already in the list." %
item.name())
if target_attr:
self.update_list_attr(list_widget, target_attr)
def remove_selected_from_list_widget(self, list_widget, target_attr=None):
for item in list_widget.selectedItems():
list_widget.takeItem(list_widget.row(item))
if target_attr:
self.update_list_attr(list_widget, target_attr)
def move_from_list_widget_to_list_widget(self, source_list_widget, target_list_widget,
target_attr_list_widget, target_attr=None):
# Quick clean the first empty item
items_list = [i.text() for i in target_attr_list_widget.findItems(
"", QtCore.Qt.MatchContains)]
if items_list and not items_list[0]:
target_attr_list_widget.takeItem(0)
for item in source_list_widget.selectedItems():
target_list_widget.addItem(item.text())
source_list_widget.takeItem(source_list_widget.row(item))
if target_attr:
self.update_list_attr(target_attr_list_widget, target_attr)
def copy_from_list_widget(self, source_list_widget, target_list_widget,
target_attr=None):
target_list_widget.clear()
items_list = [i.text() for i in source_list_widget.findItems(
"", QtCore.Qt.MatchContains)]
for item in items_list:
target_list_widget.addItem(item)
if target_attr:
self.update_list_attr(source_list_widget, target_attr)
def update_list_attr(self, source_list_widget, target_attr):
"""Update the string attribute with values separated by commas"""
new_value = ",".join([i.text() for i in source_list_widget.findItems(
"", QtCore.Qt.MatchContains)])
self._network.attr(target_attr).set(new_value)
def update_component_name(self):
with pm.UndoChunk():
side_set = ["center", "left", "right"]
line_name = self.main_tab.name_lineEdit.text()
new_name = string.normalize2(line_name)
if line_name != new_name:
self.main_tab.name_lineEdit.setText(new_name)
return
side_index = self.main_tab.side_comboBox.currentIndex()
new_side = side_set[side_index]
index = self.main_tab.componentIndex_spinBox.value()
blueprint = lib.blueprint_from_guide(self._guide.getParent(generations=-1))
block = blueprint.find_block_with_oid(self._network.attr("oid").get())
new_index = blueprint.solve_index(new_name, new_side, index, block)
rename_check = False
if self._network.attr("comp_name").get() != new_name \
or self._network.attr("comp_side").get(asString=True) != new_side \
or self._network.attr("comp_index").get() != new_index:
rename_check = True
if self._network.attr("comp_name").get() == new_name \
and self._network.attr("comp_side").get(asString=True) == new_side \
and self._network.attr("comp_index").get() == index:
return
if rename_check:
block["comp_name"] = new_name
block["comp_side"] = new_side
block["comp_index"] = new_index
block.to_network()
block.update_guide()
if self._network.attr("comp_index").get() != self.main_tab.componentIndex_spinBox.value():
self.main_tab.componentIndex_spinBox.setValue(self._network.attr("comp_index").get())
def update_connector(self, source_widget, items_list, *args):
self._network.attr("connector").set(items_list[source_widget.currentIndex()])
def populate_check(self, target_widget, source_attr, *args):
if self._network.attr(source_attr).get():
target_widget.setCheckState(QtCore.Qt.Checked)
else:
target_widget.setCheckState(QtCore.Qt.Unchecked)
def update_check(self, source_widget, target_attr, *args):
self._network.attr(target_attr).set(source_widget.isChecked())
def update_spin_box(self, source_widget, target_attr, *args):
self._network.attr(target_attr).set(source_widget.value())
return True
def update_slider(self, source_widget, target_attr, *args):
self._network.attr(target_attr).set(float(source_widget.value()) / 100)
def update_combo_box(self, source_widget, target_attr, *args):
self._network.attr(target_attr).set(source_widget.currentIndex())
def update_control_shape(self, source_widget, ctl_list, target_attr, *args):
current_index = source_widget.currentIndex()
self._network.attr(target_attr).set(ctl_list[current_index])
def update_index_color_widgets(
self, source_widget, target_attr, color_widget, *args):
self.update_spin_box(source_widget, target_attr)
self.update_widget_style_sheet(
color_widget,
(i / 255.0 for i in MAYA_OVERRIDE_COLOR[source_widget.value()]))
def update_rgb_color_widgets(self, button_widget, rgb, slider_widget):
self.update_widget_style_sheet(button_widget, rgb)
slider_widget.blockSignals(True)
slider_widget.setValue(sorted(rgb)[2] * 255)
slider_widget.blockSignals(False)
def update_widget_style_sheet(self, source_widget, rgb):
color = ', '.join(str(i * 255) for i in pm.colorManagementConvert(toDisplaySpace=rgb))
source_widget.setStyleSheet(
"* {background-color: rgb(" + color + ")}")
def rgb_slider_value_changed(self, button_widget, target_attr, value):
rgb = self._network.attr(target_attr).get()
hsv_value = sorted(rgb)[2]
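        # Treat the slider as the HSV "value" channel: rescale the stored RGB
        # so its largest component becomes value / 255 while preserving hue.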
if hsv_value:
new_rgb = tuple(i / (hsv_value / 1.0) * (value / 255.0)
for i in rgb)
else:
new_rgb = tuple((1.0 * (value / 255.0), 1.0
* (value / 255.0), 1.0 * (value / 255.0)))
self.update_widget_style_sheet(button_widget, new_rgb)
self._network.attr(target_attr).set(new_rgb)
def rgb_color_editor(self, source_widget, target_attr, slider_widget, *args):
pm.colorEditor(rgb=self._network.attr(target_attr).get())
if pm.colorEditor(query=True, result=True):
rgb = pm.colorEditor(query=True, rgb=True)
self._network.attr(target_attr).set(rgb)
self.update_rgb_color_widgets(source_widget, rgb, slider_widget)
def toggle_rgb_index_widgets(self, check_box, idx_widgets, rgb_widgets, target_attr, checked):
show_widgets, hide_widgets = (
rgb_widgets, idx_widgets) if checked else (
idx_widgets, rgb_widgets)
for widget in show_widgets:
widget.show()
for widget in hide_widgets:
widget.hide()
self.update_check(check_box, target_attr)
def set_profile(self):
pm.select(self._network, r=True)
pm.runtime.GraphEditor()
def get_cs_file_fullpath(self, cs_data):
filepath = cs_data.split("|")[-1][1:]
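        # Stored format is "<name> | <relative path>"; strip the leading space
        # left over after splitting on "|".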
if os.environ.get("MBOX_CUSTOM_STEP_PATH", ""):
fullpath = os.path.join(
os.environ.get(
"MBOX_CUSTOM_STEP_PATH", ""), filepath)
else:
fullpath = filepath
return fullpath
def edit_file(self, widgetList):
try:
cs_data = widgetList.selectedItems()[0].text()
fullpath = self.get_cs_file_fullpath(cs_data)
if fullpath:
if sys.platform.startswith('darwin'):
subprocess.call(('open', fullpath))
elif os.name == 'nt':
os.startfile(fullpath)
elif os.name == 'posix':
subprocess.call(('xdg-open', fullpath))
else:
pm.displayWarning("Please select one item from the list")
except Exception:
pm.displayError("The step can't be find or does't exists")
def format_info(self, data):
data_parts = data.split("|")
cs_name = data_parts[0]
if cs_name.startswith("*"):
cs_status = "Deactivated"
cs_name = cs_name[1:]
else:
cs_status = "Active"
cs_fullpath = self.get_cs_file_fullpath(data)
if "_shared" in data:
cs_shared_owner = self.shared_owner(cs_fullpath)
cs_shared_status = "Shared"
else:
cs_shared_status = "Local"
cs_shared_owner = "None"
info = '<html><head/><body><p><span style=" font-weight:600;">\
{0}</span></p><p>------------------</p><p><span style=" \
font-weight:600;">Status</span>: {1}</p><p><span style=" \
font-weight:600;">Shared Status:</span> {2}</p><p><span \
style=" font-weight:600;">Shared Owner:</span> \
{3}</p><p><span style=" font-weight:600;">Full Path</span>: \
{4}</p></body></html>'.format(cs_name,
cs_status,
cs_shared_status,
cs_shared_owner,
cs_fullpath)
return info
def shared_owner(self, cs_fullpath):
scan_dir = os.path.abspath(os.path.join(cs_fullpath, os.pardir))
while not scan_dir.endswith("_shared"):
scan_dir = os.path.abspath(os.path.join(scan_dir, os.pardir))
# escape infinite loop
if scan_dir == '/':
break
scan_dir = os.path.abspath(os.path.join(scan_dir, os.pardir))
return os.path.split(scan_dir)[1]
@classmethod
def get_steps_dict(self, itemsList):
stepsDict = {}
stepsDict["itemsList"] = itemsList
for item in itemsList:
step = open(item, "r")
data = step.read()
stepsDict[item] = data
step.close()
return stepsDict
@classmethod
def runStep(self, stepPath, customStepDic):
try:
with pm.UndoChunk():
pm.displayInfo(
"EXEC: Executing custom step: %s" % stepPath)
# use forward slash for OS compatibility
if sys.platform.startswith('darwin'):
stepPath = stepPath.replace('\\', '/')
fileName = os.path.split(stepPath)[1].split(".")[0]
                if os.environ.get("MBOX_CUSTOM_STEP_PATH", ""):
                    runPath = os.path.join(
                        os.environ.get(
                            "MBOX_CUSTOM_STEP_PATH", ""), stepPath)
else:
runPath = stepPath
customStep = imp.load_source(fileName, runPath)
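                # imp.load_source executes plain scripts immediately on load;
                # modules that define a CustomShifterStep class are
                # instantiated and run explicitly below.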
if hasattr(customStep, "CustomShifterStep"):
argspec = inspect.getargspec(
customStep.CustomShifterStep.__init__)
if "stored_dict" in argspec.args:
cs = customStep.CustomShifterStep(customStepDic)
cs.setup()
cs.run()
else:
cs = customStep.CustomShifterStep()
cs.run(customStepDic)
customStepDic[cs.name] = cs
pm.displayInfo(
"SUCCEED: Custom Shifter Step Class: %s. "
"Succeed!!" % stepPath)
else:
pm.displayInfo(
"SUCCEED: Custom Step simple script: %s. "
"Succeed!!" % stepPath)
except Exception as ex:
template = "An exception of type {0} occurred. "
"Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
pm.displayError(message)
pm.displayError(traceback.format_exc())
cont = pm.confirmBox(
"FAIL: Custom Step Fail",
"The step:%s has failed. Continue with next step?"
% stepPath
+ "\n\n"
+ message
+ "\n\n"
+ traceback.format_exc(),
"Continue", "Stop Build", "Try Again!")
if cont == "Stop Build":
# stop Build
return True
elif cont == "Try Again!":
try: # just in case there is nothing to undo
pm.undo()
except Exception:
pass
pm.displayInfo("Trying again! : {}".format(stepPath))
inception = self.runStep(stepPath, customStepDic)
if inception: # stops build from the recursion loop.
return True
else:
return False
def run_manual_step(self, widgetList):
selItems = widgetList.selectedItems()
for item in selItems:
self.runStep(item.text().split("|")[-1][1:], customStepDic={})
def close_settings(self):
self.close()
pyqt.deleteInstances(self, MayaQDockWidget)
class RootSettings(MayaQWidgetDockableMixin, QtWidgets.QDialog, HelperSlots):
green_brush = QtGui.QColor(0, 160, 0)
red_brush = QtGui.QColor(180, 0, 0)
white_brush = QtGui.QColor(255, 255, 255)
white_down_brush = QtGui.QColor(160, 160, 160)
orange_brush = QtGui.QColor(240, 160, 0)
def __init__(self):
self.toolName = ROOT_TYPE
        # Delete old instances of the component settings window.
pyqt.deleteInstances(self, MayaQDockWidget)
# super(self.__class__, self).__init__(parent=parent)
super(RootSettings, self).__init__()
        # the inspectSettings function sets the current selection to the
        # component root before opening the settings dialog
self._network = pm.selected(type="transform")[0].message.outputs(type="network")[0]
self.main_tab = RootMainTabUI()
self.custom_step_tab = RootCustomStepTabUI()
self.naming_rule_tab = RootNameTabUI()
self.mayaMainWindow = pyqt.maya_main_window()
self.setObjectName(self.toolName)
self.setWindowFlags(QtCore.Qt.Window)
self.setWindowTitle(ROOT_TYPE)
self.resize(500, 615)
self.create_controls()
self.populate_controls()
self.create_layout()
self.create_connections()
self.setAttribute(QtCore.Qt.WA_DeleteOnClose, True)
# hover info
self.pre_cs = self.custom_step_tab.preCustomStep_listWidget
self.pre_cs.setMouseTracking(True)
self.pre_cs.entered.connect(self.pre_info)
self.post_cs = self.custom_step_tab.postCustomStep_listWidget
self.post_cs.setMouseTracking(True)
self.post_cs.entered.connect(self.post_info)
def pre_info(self, index):
self.hover_info_item_entered(self.pre_cs, index)
def post_info(self, index):
self.hover_info_item_entered(self.post_cs, index)
def hover_info_item_entered(self, view, index):
if index.isValid():
info_data = self.format_info(index.data())
QtWidgets.QToolTip.showText(
QtGui.QCursor.pos(),
info_data,
view.viewport(),
view.visualRect(index))
def create_controls(self):
"""Create the controls for the component base"""
self.tabs = QtWidgets.QTabWidget()
self.tabs.setObjectName("settings_tab")
# Close Button
self.close_button = QtWidgets.QPushButton("Close")
def populate_controls(self):
"""Populate the controls values
from the custom attributes of the component.
"""
# populate tab
self.tabs.insertTab(0, self.main_tab, "Guide Settings")
self.tabs.insertTab(1, self.custom_step_tab, "Custom Steps")
self.tabs.insertTab(2, self.naming_rule_tab, "Naming Rules")
# populate main settings
self.main_tab.rigName_lineEdit.setText(
self._network.attr("name").get())
self.main_tab.mode_comboBox.setCurrentIndex(
self._network.attr("process").get())
self.main_tab.step_comboBox.setCurrentIndex(
self._network.attr("step").get())
# self.populateCheck(
# self.main_tab.proxyChannels_checkBox, "proxyChannels")
self.populate_check(self.main_tab.worldCtl_checkBox, "world_ctl")
self.main_tab.worldCtl_lineEdit.setText(
self._network.attr("world_ctl_name").get())
# self.populateCheck(
# self.main_tab.classicChannelNames_checkBox,
# "classicChannelNames")
# self.populateCheck(
# self.main_tab.attrPrefix_checkBox,
# "attrPrefixName")
# self.populateCheck(
# self.main_tab.importSkin_checkBox, "importSkin")
# self.main_tab.skin_lineEdit.setText(
# self._network.attr("skin").get())
# self.populateCheck(
# self.main_tab.dataCollector_checkBox, "data_collector")
# self.main_tab.dataCollectorPath_lineEdit.setText(
# self._network.attr("data_collector_path").get())
self.populate_check(
self.main_tab.jointRig_checkBox, "joint_rig")
self.populate_check(
self.main_tab.force_uniScale_checkBox, "force_uni_scale")
self.populate_check(
self.main_tab.connect_joints_checkBox, "connect_joints")
# self.populateAvailableSynopticTabs()
# for item in self._network.attr("synoptic").get().split(","):
# self.main_tab.rigTabs_listWidget.addItem(item)
tap = self.main_tab
index_widgets = ((tap.L_color_fk_spinBox,
tap.L_color_fk_label,
"l_color_fk"),
(tap.L_color_ik_spinBox,
tap.L_color_ik_label,
"l_color_ik"),
(tap.C_color_fk_spinBox,
tap.C_color_fk_label,
"c_color_fk"),
(tap.C_color_ik_spinBox,
tap.C_color_ik_label,
"c_color_ik"),
(tap.R_color_fk_spinBox,
tap.R_color_fk_label,
"r_color_fk"),
(tap.R_color_ik_spinBox,
tap.R_color_ik_label,
"r_color_ik"))
rgb_widgets = ((tap.L_RGB_fk_pushButton,
tap.L_RGB_fk_slider,
"l_RGB_fk"),
(tap.L_RGB_ik_pushButton,
tap.L_RGB_ik_slider,
"l_RGB_ik"),
(tap.C_RGB_fk_pushButton,
tap.C_RGB_fk_slider,
"c_RGB_fk"),
(tap.C_RGB_ik_pushButton,
tap.C_RGB_ik_slider,
"c_RGB_ik"),
(tap.R_RGB_fk_pushButton,
tap.R_RGB_fk_slider,
"r_RGB_fk"),
(tap.R_RGB_ik_pushButton,
tap.R_RGB_ik_slider,
"r_RGB_ik"))
for spinBox, label, source_attr in index_widgets:
color_index = self._network.attr(source_attr).get()
spinBox.setValue(color_index)
self.update_widget_style_sheet(
label, [i / 255.0 for i in MAYA_OVERRIDE_COLOR[color_index]])
for button, slider, source_attr in rgb_widgets:
self.update_rgb_color_widgets(
button, self._network.attr(source_attr).get(), slider)
        # forcing the size of the color buttons/label to keep ui clean
for widget in tuple(i[0] for i in rgb_widgets) + tuple(
i[1] for i in index_widgets):
widget.setFixedSize(pyqt.dpi_scale(30), pyqt.dpi_scale(20))
self.populate_check(tap.useRGB_checkBox, "use_RGB_color")
self.toggle_rgb_index_widgets(tap.useRGB_checkBox,
(w for i in index_widgets for w in i[:2]),
(w for i in rgb_widgets for w in i[:2]),
"use_RGB_color",
tap.useRGB_checkBox.checkState())
tap.notes_textEdit.setText(self._network.attr("notes").get())
        # populate custom steps settings
self.populate_check(
self.custom_step_tab.preCustomStep_checkBox, "run_pre_custom_step")
for item in self._network.attr("pre_custom_step").get().split(","):
self.custom_step_tab.preCustomStep_listWidget.addItem(item)
self.refresh_status_color(self.custom_step_tab.preCustomStep_listWidget)
self.populate_check(
self.custom_step_tab.postCustomStep_checkBox, "run_post_custom_step")
for item in self._network.attr("post_custom_step").get().split(","):
self.custom_step_tab.postCustomStep_listWidget.addItem(item)
self.refresh_status_color(self.custom_step_tab.postCustomStep_listWidget)
self.populate_naming_controls()
def populate_naming_controls(self):
# populate name settings
self.naming_rule_tab.ctl_name_rule_lineEdit.setText(
self._network.attr("ctl_name_rule").get())
self.naming_rule_validator(
self.naming_rule_tab.ctl_name_rule_lineEdit)
self.naming_rule_tab.joint_name_rule_lineEdit.setText(
self._network.attr("joint_name_rule").get())
self.naming_rule_validator(
self.naming_rule_tab.joint_name_rule_lineEdit)
self.naming_rule_tab.side_left_name_lineEdit.setText(
self._network.attr("ctl_left_name").get())
self.naming_rule_tab.side_right_name_lineEdit.setText(
self._network.attr("ctl_right_name").get())
self.naming_rule_tab.side_center_name_lineEdit.setText(
self._network.attr("ctl_center_name").get())
self.naming_rule_tab.side_joint_left_name_lineEdit.setText(
self._network.attr("joint_left_name").get())
self.naming_rule_tab.side_joint_right_name_lineEdit.setText(
self._network.attr("joint_right_name").get())
self.naming_rule_tab.side_joint_center_name_lineEdit.setText(
self._network.attr("joint_center_name").get())
self.naming_rule_tab.ctl_name_ext_lineEdit.setText(
self._network.attr("ctl_name_ext").get())
self.naming_rule_tab.joint_name_ext_lineEdit.setText(
self._network.attr("joint_name_ext").get())
self.naming_rule_tab.ctl_des_letter_case_comboBox.setCurrentIndex(
self._network.attr("ctl_description_letter_case").get())
self.naming_rule_tab.joint_des_letter_case_comboBox.setCurrentIndex(
self._network.attr("joint_description_letter_case").get())
self.naming_rule_tab.ctl_padding_spinBox.setValue(
self._network.attr("ctl_index_padding").get())
self.naming_rule_tab.joint_padding_spinBox.setValue(
self._network.attr("joint_index_padding").get())
def create_layout(self):
"""
Create the layout for the component base settings
"""
self.settings_layout = QtWidgets.QVBoxLayout()
self.settings_layout.addWidget(self.tabs)
self.settings_layout.addWidget(self.close_button)
self.setLayout(self.settings_layout)
def create_connections(self):
"""Create the slots connections to the controls functions"""
self.close_button.clicked.connect(self.close_settings)
# Setting Tab
tap = self.main_tab
tap.rigName_lineEdit.editingFinished.connect(
partial(self.update_line_edit,
tap.rigName_lineEdit,
"name"))
tap.mode_comboBox.currentIndexChanged.connect(
partial(self.update_combo_box,
tap.mode_comboBox,
"process"))
tap.step_comboBox.currentIndexChanged.connect(
partial(self.update_combo_box,
tap.step_comboBox,
"step"))
# tap.proxyChannels_checkBox.stateChanged.connect(
# partial(self.update_check,
# tap.proxyChannels_checkBox,
# "proxyChannels"))
tap.worldCtl_checkBox.stateChanged.connect(
partial(self.update_check,
tap.worldCtl_checkBox,
"world_ctl"))
tap.worldCtl_lineEdit.editingFinished.connect(
partial(self.update_line_edit2,
tap.worldCtl_lineEdit,
"world_ctl_name"))
# tap.classicChannelNames_checkBox.stateChanged.connect(
# partial(self.updateCheck,
# tap.classicChannelNames_checkBox,
# "classicChannelNames"))
# tap.attrPrefix_checkBox.stateChanged.connect(
# partial(self.updateCheck,
# tap.attrPrefix_checkBox,
# "attrPrefixName"))
# tap.dataCollector_checkBox.stateChanged.connect(
# partial(self.updateCheck,
# tap.dataCollector_checkBox,
# "data_collector"))
# tap.dataCollectorPath_lineEdit.editingFinished.connect(
# partial(self.updateLineEditPath,
# tap.dataCollectorPath_lineEdit,
# "data_collector_path"))
tap.jointRig_checkBox.stateChanged.connect(
partial(self.update_check,
tap.jointRig_checkBox,
"joint_rig"))
tap.force_uniScale_checkBox.stateChanged.connect(
partial(self.update_check,
tap.force_uniScale_checkBox,
"force_uni_scale"))
tap.connect_joints_checkBox.stateChanged.connect(
partial(self.update_check,
tap.connect_joints_checkBox,
"connect_joints"))
# tap.addTab_pushButton.clicked.connect(
# partial(self.moveFromListWidget2ListWidget,
# tap.available_listWidget,
# tap.rigTabs_listWidget,
# tap.rigTabs_listWidget,
# "synoptic"))
# tap.removeTab_pushButton.clicked.connect(
# partial(self.moveFromListWidget2ListWidget,
# tap.rigTabs_listWidget,
# tap.available_listWidget,
# tap.rigTabs_listWidget,
# "synoptic"))
# tap.loadSkinPath_pushButton.clicked.connect(
# self.skinLoad)
# tap.dataCollectorPath_pushButton.clicked.connect(
# self.data_collector_path)
# tap.rigTabs_listWidget.installEventFilter(self)
# colors connections
index_widgets = ((tap.L_color_fk_spinBox,
tap.L_color_fk_label, "l_color_fk"),
(tap.L_color_ik_spinBox,
tap.L_color_ik_label, "l_color_ik"),
(tap.C_color_fk_spinBox,
tap.C_color_fk_label, "c_color_fk"),
(tap.C_color_ik_spinBox,
tap.C_color_ik_label, "c_color_ik"),
(tap.R_color_fk_spinBox,
tap.R_color_fk_label, "r_color_fk"),
(tap.R_color_ik_spinBox,
tap.R_color_ik_label, "r_color_ik"))
rgb_widgets = ((tap.L_RGB_fk_pushButton,
tap.L_RGB_fk_slider, "l_RGB_fk"),
(tap.L_RGB_ik_pushButton,
tap.L_RGB_ik_slider, "l_RGB_ik"),
(tap.C_RGB_fk_pushButton,
tap.C_RGB_fk_slider, "c_RGB_fk"),
(tap.C_RGB_ik_pushButton,
tap.C_RGB_ik_slider, "c_RGB_ik"),
(tap.R_RGB_fk_pushButton,
tap.R_RGB_fk_slider, "r_RGB_fk"),
(tap.R_RGB_ik_pushButton,
tap.R_RGB_ik_slider, "r_RGB_ik"))
for spinBox, label, source_attr in index_widgets:
spinBox.valueChanged.connect(
partial(self.update_index_color_widgets,
spinBox,
source_attr,
label))
for button, slider, source_attr in rgb_widgets:
button.clicked.connect(
partial(self.rgb_color_editor, button, source_attr, slider))
slider.valueChanged.connect(
partial(self.rgb_slider_value_changed, button, source_attr))
tap.useRGB_checkBox.stateChanged.connect(
partial(self.toggle_rgb_index_widgets,
tap.useRGB_checkBox,
tuple(w for i in index_widgets for w in i[:2]),
tuple(w for i in rgb_widgets for w in i[:2]),
"use_RGB_color"))
tap.notes_textEdit.textChanged.connect(
partial(self.update_text_edit,
tap.notes_textEdit,
"notes"))
# custom Step Tab
csTap = self.custom_step_tab
csTap.preCustomStep_checkBox.stateChanged.connect(
partial(self.update_check,
csTap.preCustomStep_checkBox,
"run_pre_custom_step"))
csTap.preCustomStepAdd_pushButton.clicked.connect(
self.add_custom_step)
csTap.preCustomStepNew_pushButton.clicked.connect(
self.new_custom_step)
csTap.preCustomStepDuplicate_pushButton.clicked.connect(
self.duplicate_custom_step)
csTap.preCustomStepExport_pushButton.clicked.connect(
self.export_custom_step)
csTap.preCustomStepImport_pushButton.clicked.connect(
self.import_custom_step)
csTap.preCustomStepRemove_pushButton.clicked.connect(
partial(self.remove_selected_from_list_widget,
csTap.preCustomStep_listWidget,
"pre_custom_step"))
csTap.preCustomStep_listWidget.installEventFilter(self)
csTap.preCustomStepRun_pushButton.clicked.connect(
partial(self.run_manual_step,
csTap.preCustomStep_listWidget))
csTap.preCustomStepEdit_pushButton.clicked.connect(
partial(self.edit_file,
csTap.preCustomStep_listWidget))
csTap.postCustomStep_checkBox.stateChanged.connect(
partial(self.update_check,
csTap.postCustomStep_checkBox,
"run_post_custom_step"))
csTap.postCustomStepAdd_pushButton.clicked.connect(
partial(self.add_custom_step, False))
csTap.postCustomStepNew_pushButton.clicked.connect(
partial(self.new_custom_step, False))
csTap.postCustomStepDuplicate_pushButton.clicked.connect(
partial(self.duplicate_custom_step, False))
csTap.postCustomStepExport_pushButton.clicked.connect(
partial(self.export_custom_step, False))
csTap.postCustomStepImport_pushButton.clicked.connect(
partial(self.import_custom_step, False))
csTap.postCustomStepRemove_pushButton.clicked.connect(
partial(self.remove_selected_from_list_widget,
csTap.postCustomStep_listWidget,
"post_custom_step"))
csTap.postCustomStep_listWidget.installEventFilter(self)
csTap.postCustomStepRun_pushButton.clicked.connect(
partial(self.run_manual_step,
csTap.postCustomStep_listWidget))
csTap.postCustomStepEdit_pushButton.clicked.connect(
partial(self.edit_file,
csTap.postCustomStep_listWidget))
# right click menus
csTap.preCustomStep_listWidget.setContextMenuPolicy(
QtCore.Qt.CustomContextMenu)
csTap.preCustomStep_listWidget.customContextMenuRequested.connect(
self.pre_custom_step_menu)
csTap.postCustomStep_listWidget.setContextMenuPolicy(
QtCore.Qt.CustomContextMenu)
csTap.postCustomStep_listWidget.customContextMenuRequested.connect(
self.post_custom_step_menu)
        # search highlight
csTap.preSearch_lineEdit.textChanged.connect(
self.pre_highlight_search)
csTap.postSearch_lineEdit.textChanged.connect(
self.post_highlight_search)
# Naming Tab
tap = self.naming_rule_tab
# names rules
tap.ctl_name_rule_lineEdit.editingFinished.connect(
partial(self.update_name_rule_line_edit,
tap.ctl_name_rule_lineEdit,
"ctl_name_rule"))
tap.joint_name_rule_lineEdit.editingFinished.connect(
partial(self.update_name_rule_line_edit,
tap.joint_name_rule_lineEdit,
"joint_name_rule"))
# sides names
tap.side_left_name_lineEdit.editingFinished.connect(
partial(self.update_line_edit2,
tap.side_left_name_lineEdit,
"ctl_left_name"))
tap.side_right_name_lineEdit.editingFinished.connect(
partial(self.update_line_edit2,
tap.side_right_name_lineEdit,
"ctl_right_name"))
tap.side_center_name_lineEdit.editingFinished.connect(
partial(self.update_line_edit2,
tap.side_center_name_lineEdit,
"ctl_center_name"))
tap.side_joint_left_name_lineEdit.editingFinished.connect(
partial(self.update_line_edit2,
tap.side_joint_left_name_lineEdit,
"joint_left_name"))
tap.side_joint_right_name_lineEdit.editingFinished.connect(
partial(self.update_line_edit2,
tap.side_joint_right_name_lineEdit,
"joint_right_name"))
tap.side_joint_center_name_lineEdit.editingFinished.connect(
partial(self.update_line_edit2,
tap.side_joint_center_name_lineEdit,
"joint_center_name"))
# names extensions
tap.ctl_name_ext_lineEdit.editingFinished.connect(
partial(self.update_line_edit2,
tap.ctl_name_ext_lineEdit,
"ctl_name_ext"))
tap.joint_name_ext_lineEdit.editingFinished.connect(
partial(self.update_line_edit2,
tap.joint_name_ext_lineEdit,
"joint_name_ext"))
# description letter case
tap.ctl_des_letter_case_comboBox.currentIndexChanged.connect(
partial(self.update_combo_box,
tap.ctl_des_letter_case_comboBox,
"ctl_description_letter_case"))
tap.joint_des_letter_case_comboBox.currentIndexChanged.connect(
partial(self.update_combo_box,
tap.joint_des_letter_case_comboBox,
"joint_description_letter_case"))
# reset naming rules
tap.reset_ctl_name_rule_pushButton.clicked.connect(
partial(self.reset_naming_rule,
tap.ctl_name_rule_lineEdit,
"ctl_name_rule"))
tap.reset_joint_name_rule_pushButton.clicked.connect(
partial(self.reset_naming_rule,
tap.joint_name_rule_lineEdit,
"joint_name_rule"))
# reset naming sides
tap.reset_side_name_pushButton.clicked.connect(
self.reset_naming_sides)
tap.reset_joint_side_name_pushButton.clicked.connect(
self.reset_joint_naming_sides)
# reset naming extension
tap.reset_name_ext_pushButton.clicked.connect(
self.reset_naming_extension)
# index padding
tap.ctl_padding_spinBox.valueChanged.connect(
partial(self.update_spin_box,
tap.ctl_padding_spinBox,
"ctl_index_padding"))
tap.joint_padding_spinBox.valueChanged.connect(
partial(self.update_spin_box,
tap.joint_padding_spinBox,
"joint_index_padding"))
# import name configuration
tap.load_naming_configuration_pushButton.clicked.connect(
self.import_name_config)
# export name configuration
tap.save_naming_configuration_pushButton.clicked.connect(
self.export_name_config)
def eventFilter(self, sender, event):
if event.type() == QtCore.QEvent.ChildRemoved:
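            # A drag & drop reorder removes and re-inserts the item, so resync
            # the affected list widget into its network attribute.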
# if sender == self.main_tab.rigTabs_listWidget:
# self.updateListAttr(sender, "synoptic")
if sender == self.custom_step_tab.preCustomStep_listWidget:
self.update_list_attr(sender, "pre_custom_step")
elif sender == self.custom_step_tab.postCustomStep_listWidget:
self.update_list_attr(sender, "post_custom_step")
return True
else:
return QtWidgets.QDialog.eventFilter(self, sender, event)
# Slots ########################################################
def export_name_config(self, file_path=None):
        # Set focus to the save button so all values are committed: if the
        # cursor is still in another lineEdit, that edit is not finished yet
        # and its latest value would otherwise be missed.
self.naming_rule_tab.save_naming_configuration_pushButton.setFocus(
QtCore.Qt.MouseFocusReason)
config = dict()
config["ctl_name_rule"] = self._network.attr(
"ctl_name_rule").get()
config["joint_name_rule"] = self._network.attr(
"joint_name_rule").get()
config["ctl_left_name"] = self._network.attr(
"ctl_left_name").get()
config["ctl_right_name"] = self._network.attr(
"ctl_right_name").get()
config["ctl_center_name"] = self._network.attr(
"ctl_center_name").get()
config["joint_left_name"] = self._network.attr(
"joint_left_name").get()
config["joint_right_name"] = self._network.attr(
"joint_right_name").get()
config["joint_center_name"] = self._network.attr(
"joint_center_name").get()
config["ctl_name_ext"] = self._network.attr(
"ctl_name_ext").get()
config["joint_name_ext"] = self._network.attr(
"joint_name_ext").get()
config["ctl_description_letter_case"] = self._network.attr(
"ctl_description_letter_case").get()
config["joint_description_letter_case"] = self._network.attr(
"joint_description_letter_case").get()
config["ctl_index_padding"] = self._network.attr(
"ctl_index_padding").get()
config["joint_index_padding"] = self._network.attr(
"joint_index_padding").get()
if os.environ.get("MBOX_CUSTOM_STEP_PATH", ""):
startDir = os.environ.get("MBOX_CUSTOM_STEP_PATH", "")
else:
startDir = pm.workspace(q=True, rootDirectory=True)
data_string = json.dumps(config, indent=4, sort_keys=True)
if not file_path:
file_path = pm.fileDialog2(
fileMode=0,
startingDirectory=startDir,
fileFilter='Naming Configuration .naming (*%s)' % ".naming")
if not file_path:
return
if not isinstance(file_path, str):
file_path = file_path[0]
f = open(file_path, 'w')
f.write(data_string)
f.close()
def import_name_config(self, file_path=None):
if os.environ.get("MBOX_CUSTOM_STEP_PATH", ""):
startDir = os.environ.get("MBOX_CUSTOM_STEP_PATH", "")
else:
startDir = pm.workspace(q=True, rootDirectory=True)
if not file_path:
file_path = pm.fileDialog2(
fileMode=1,
startingDirectory=startDir,
fileFilter='Naming Configuration .naming (*%s)' % ".naming")
if not file_path:
return
if not isinstance(file_path, str):
file_path = file_path[0]
config = json.load(open(file_path))
for key in config.keys():
self._network.attr(key).set(config[key])
self.populate_naming_controls()
def reset_naming_rule(self, rule_lineEdit, target_attr):
rule_lineEdit.setText(naming.DEFAULT_NAMING_RULE)
self.update_name_rule_line_edit(rule_lineEdit, target_attr)
def reset_naming_sides(self):
self.naming_rule_tab.side_left_name_lineEdit.setText(
naming.DEFAULT_SIDE_L_NAME)
self.naming_rule_tab.side_right_name_lineEdit.setText(
naming.DEFAULT_SIDE_R_NAME)
self.naming_rule_tab.side_center_name_lineEdit.setText(
naming.DEFAULT_SIDE_C_NAME)
self._network.attr("ctl_left_name").set(naming.DEFAULT_SIDE_L_NAME)
self._network.attr("ctl_right_name").set(naming.DEFAULT_SIDE_R_NAME)
self._network.attr("ctl_center_name").set(naming.DEFAULT_SIDE_C_NAME)
def reset_joint_naming_sides(self):
self.naming_rule_tab.side_joint_left_name_lineEdit.setText(
naming.DEFAULT_JOINT_SIDE_L_NAME)
self.naming_rule_tab.side_joint_right_name_lineEdit.setText(
naming.DEFAULT_JOINT_SIDE_R_NAME)
self.naming_rule_tab.side_joint_center_name_lineEdit.setText(
naming.DEFAULT_JOINT_SIDE_C_NAME)
self._network.attr("joint_left_name").set(
naming.DEFAULT_JOINT_SIDE_L_NAME)
self._network.attr("joint_right_name").set(
naming.DEFAULT_JOINT_SIDE_R_NAME)
self._network.attr("joint_center_name").set(
naming.DEFAULT_JOINT_SIDE_C_NAME)
def reset_naming_extension(self):
self.naming_rule_tab.ctl_name_ext_lineEdit.setText(
naming.DEFAULT_CTL_EXT_NAME)
self.naming_rule_tab.joint_name_ext_lineEdit.setText(
naming.DEFAULT_JOINT_EXT_NAME)
self._network.attr("ctl_name_ext").set(naming.DEFAULT_CTL_EXT_NAME)
self._network.attr("joint_name_ext").set(naming.DEFAULT_JOINT_EXT_NAME)
# def populateAvailableSynopticTabs(self):
#
# import mgear.shifter as shifter
# defPath = os.environ.get("MGEAR_SYNOPTIC_PATH", None)
# if not defPath or not os.path.isdir(defPath):
# defPath = shifter.SYNOPTIC_PATH
#
# # Sanity check for folder existence.
# if not os.path.isdir(defPath):
# return
#
# tabsDirectories = [name for name in os.listdir(defPath) if
# os.path.isdir(os.path.join(defPath, name))]
# # Quick clean the first empty item
# if tabsDirectories and not tabsDirectories[0]:
# self.main_tab.available_listWidget.takeItem(0)
#
# itemsList = self._network.attr("synoptic").get().split(",")
# for tab in sorted(tabsDirectories):
# if tab not in itemsList:
# self.main_tab.available_listWidget.addItem(tab)
#
# def skinLoad(self, *args):
# startDir = self._network.attr("skin").get()
# filePath = pm.fileDialog2(
# fileMode=1,
# startingDirectory=startDir,
# okc="Apply",
# fileFilter='mGear skin (*%s)' % skin.FILE_EXT)
# if not filePath:
# return
# if not isinstance(filePath, str):
# filePath = filePath[0]
#
# self._network.attr("skin").set(filePath)
# self.main_tab.skin_lineEdit.setText(filePath)
#
# def _data_collector_path(self, *args):
# ext_filter = 'Shifter Collected data (*{})'.format(DATA_COLLECTOR_EXT)
# filePath = pm.fileDialog2(
# fileMode=0,
# fileFilter=ext_filter)
# if not filePath:
# return
# if not isinstance(filePath, str):
# filePath = filePath[0]
#
# return filePath
#
# def data_collector_path(self, *args):
# filePath = self._data_collector_path()
#
# if filePath:
# self._network.attr("data_collector_path").set(filePath)
# self.main_tab.dataCollectorPath_lineEdit.setText(filePath)
def add_custom_step(self, pre=True, *args):
"""Add a new custom step
Arguments:
pre (bool, optional): If true adds the steps to the pre step list
*args: Maya's Dummy
Returns:
None: None
"""
if pre:
stepAttr = "pre_custom_step"
stepWidget = self.custom_step_tab.preCustomStep_listWidget
else:
stepAttr = "post_custom_step"
stepWidget = self.custom_step_tab.postCustomStep_listWidget
# Check if we have a custom env for the custom steps initial folder
if os.environ.get("MBOX_CUSTOM_STEP_PATH", ""):
startDir = os.environ.get("MBOX_CUSTOM_STEP_PATH", "")
else:
startDir = self._network.attr(stepAttr).get()
filePath = pm.fileDialog2(
fileMode=1,
startingDirectory=startDir,
okc="Add",
fileFilter='Custom Step .py (*.py)')
if not filePath:
return
if not isinstance(filePath, str):
filePath = filePath[0]
# Quick clean the first empty item
itemsList = [i.text() for i in stepWidget.findItems(
"", QtCore.Qt.MatchContains)]
if itemsList and not itemsList[0]:
stepWidget.takeItem(0)
if os.environ.get("MBOX_CUSTOM_STEP_PATH", ""):
filePath = os.path.abspath(filePath)
baseReplace = os.path.abspath(os.environ.get(
"MBOX_CUSTOM_STEP_PATH", ""))
filePath = filePath.replace(baseReplace, "")[1:]
fileName = os.path.split(filePath)[1].split(".")[0]
stepWidget.addItem(fileName + " | " + filePath)
        self.update_list_attr(stepWidget, stepAttr)
self.refresh_status_color(stepWidget)
def new_custom_step(self, pre=True, *args):
"""Creates a new custom step
Arguments:
pre (bool, optional): If true adds the steps to the pre step list
*args: Maya's Dummy
Returns:
None: None
"""
if pre:
stepAttr = "pre_custom_step"
stepWidget = self.custom_step_tab.preCustomStep_listWidget
else:
stepAttr = "post_custom_step"
stepWidget = self.custom_step_tab.postCustomStep_listWidget
# Check if we have a custom env for the custom steps initial folder
if os.environ.get("MBOX_CUSTOM_STEP_PATH", ""):
startDir = os.environ.get("MBOX_CUSTOM_STEP_PATH", "")
else:
startDir = self._network.attr(stepAttr).get()
filePath = pm.fileDialog2(
fileMode=0,
startingDirectory=startDir,
okc="New",
fileFilter='Custom Step .py (*.py)')
if not filePath:
return
if not isinstance(filePath, str):
filePath = filePath[0]
n, e = os.path.splitext(filePath)
stepName = os.path.split(n)[-1]
        # raw custom step string
rawString = r'''import mbox.lego.lib as lib
class CustomStep(lib.{pre_post}):
"""Custom Step description
"""
def process(self):
"""Run method.
Returns:
None: None
"""
return'''.format(pre_post="PreScript" if pre else "PostScript")
f = open(filePath, 'w')
f.write(rawString + "\n")
f.close()
# Quick clean the first empty item
itemsList = [i.text() for i in stepWidget.findItems(
"", QtCore.Qt.MatchContains)]
if itemsList and not itemsList[0]:
stepWidget.takeItem(0)
if os.environ.get("MBOX_CUSTOM_STEP_PATH", ""):
filePath = os.path.abspath(filePath)
baseReplace = os.path.abspath(os.environ.get(
"MBOX_CUSTOM_STEP_PATH", ""))
filePath = filePath.replace(baseReplace, "")[1:]
fileName = os.path.split(filePath)[1].split(".")[0]
stepWidget.addItem(fileName + " | " + filePath)
self.update_list_attr(stepWidget, stepAttr)
self.refresh_status_color(stepWidget)
def duplicate_custom_step(self, pre=True, *args):
"""Duplicate the selected step
Arguments:
pre (bool, optional): If true adds the steps to the pre step list
*args: Maya's Dummy
Returns:
None: None
"""
if pre:
stepAttr = "pre_custom_step"
stepWidget = self.custom_step_tab.preCustomStep_listWidget
else:
stepAttr = "post_custom_step"
stepWidget = self.custom_step_tab.postCustomStep_listWidget
# Check if we have a custom env for the custom steps initial folder
if os.environ.get("MBOX_CUSTOM_STEP_PATH", ""):
startDir = os.environ.get("MBOX_CUSTOM_STEP_PATH", "")
else:
startDir = self._network.attr(stepAttr).get()
if stepWidget.selectedItems():
sourcePath = stepWidget.selectedItems()[0].text().split(
"|")[-1][1:]
filePath = pm.fileDialog2(
fileMode=0,
startingDirectory=startDir,
okc="New",
fileFilter='Custom Step .py (*.py)')
if not filePath:
return
if not isinstance(filePath, str):
filePath = filePath[0]
if os.environ.get("MBOX_CUSTOM_STEP_PATH", ""):
sourcePath = os.path.join(startDir, sourcePath)
shutil.copy(sourcePath, filePath)
# Quick clean the first empty item
itemsList = [i.text() for i in stepWidget.findItems(
"", QtCore.Qt.MatchContains)]
if itemsList and not itemsList[0]:
stepWidget.takeItem(0)
if os.environ.get("MBOX_CUSTOM_STEP_PATH", ""):
filePath = os.path.abspath(filePath)
baseReplace = os.path.abspath(os.environ.get(
"MBOX_CUSTOM_STEP_PATH", ""))
filePath = filePath.replace(baseReplace, "")[1:]
fileName = os.path.split(filePath)[1].split(".")[0]
stepWidget.addItem(fileName + " | " + filePath)
self.update_list_attr(stepWidget, stepAttr)
self.refresh_status_color(stepWidget)
def export_custom_step(self, pre=True, *args):
"""Export custom steps to a json file
Arguments:
pre (bool, optional): If true takes the steps from the
pre step list
*args: Maya's Dummy
Returns:
None: None
"""
if pre:
stepWidget = self.custom_step_tab.preCustomStep_listWidget
else:
stepWidget = self.custom_step_tab.postCustomStep_listWidget
# Quick clean the first empty item
itemsList = [i.text() for i in stepWidget.findItems(
"", QtCore.Qt.MatchContains)]
if itemsList and not itemsList[0]:
stepWidget.takeItem(0)
# Check if we have a custom env for the custom steps initial folder
if os.environ.get("MBOX_CUSTOM_STEP_PATH", ""):
startDir = os.environ.get("MBOX_CUSTOM_STEP_PATH", "")
itemsList = [os.path.join(startDir, i.text().split("|")[-1][1:])
for i in stepWidget.findItems(
"", QtCore.Qt.MatchContains)]
else:
itemsList = [i.text().split("|")[-1][1:]
for i in stepWidget.findItems(
"", QtCore.Qt.MatchContains)]
if itemsList:
startDir = os.path.split(itemsList[-1])[0]
else:
pm.displayWarning("No custom steps to export.")
return
stepsDict = self.get_steps_dict(itemsList)
data_string = json.dumps(stepsDict, indent=4, sort_keys=True)
filePath = pm.fileDialog2(
fileMode=0,
startingDirectory=startDir,
fileFilter='Lego Custom Steps .lcs (*%s)' % ".lcs")
if not filePath:
return
if not isinstance(filePath, str):
filePath = filePath[0]
f = open(filePath, 'w')
f.write(data_string)
f.close()
def import_custom_step(self, pre=True, *args):
"""Import custom steps from a json file
Arguments:
pre (bool, optional): If true import to pre steps list
*args: Maya's Dummy
Returns:
None: None
"""
if pre:
stepAttr = "pre_custom_step"
stepWidget = self.custom_step_tab.preCustomStep_listWidget
else:
stepAttr = "post_custom_step"
stepWidget = self.custom_step_tab.postCustomStep_listWidget
# option import only paths or unpack steps
option = pm.confirmDialog(
title='Lego Custom Step Import Style',
message='Do you want to import only the path or'
' unpack and import?',
button=['Only Path', 'Unpack', 'Cancel'],
defaultButton='Only Path',
cancelButton='Cancel',
dismissString='Cancel')
if option in ['Only Path', 'Unpack']:
if os.environ.get("MBOX_CUSTOM_STEP_PATH", ""):
startDir = os.environ.get("MBOX_CUSTOM_STEP_PATH", "")
else:
startDir = pm.workspace(q=True, rootDirectory=True)
filePath = pm.fileDialog2(
fileMode=1,
startingDirectory=startDir,
                fileFilter='Lego Custom Steps .lcs (*%s)' % ".lcs")
if not filePath:
return
if not isinstance(filePath, str):
filePath = filePath[0]
stepDict = json.load(open(filePath))
stepsList = []
if option == 'Only Path':
for item in stepDict["itemsList"]:
stepsList.append(item)
elif option == 'Unpack':
unPackDir = pm.fileDialog2(
fileMode=2,
startingDirectory=startDir)
                if not unPackDir:
return
if not isinstance(unPackDir, str):
unPackDir = unPackDir[0]
for item in stepDict["itemsList"]:
fileName = os.path.split(item)[1]
fileNewPath = os.path.join(unPackDir, fileName)
stepsList.append(fileNewPath)
f = open(fileNewPath, 'w')
f.write(stepDict[item])
f.close()
if option in ['Only Path', 'Unpack']:
for item in stepsList:
# Quick clean the first empty item
itemsList = [i.text() for i in stepWidget.findItems(
"", QtCore.Qt.MatchContains)]
if itemsList and not itemsList[0]:
stepWidget.takeItem(0)
if os.environ.get("MBOX_CUSTOM_STEP_PATH", ""):
item = os.path.abspath(item)
baseReplace = os.path.abspath(os.environ.get(
"MBOX_CUSTOM_STEP_PATH", ""))
item = item.replace(baseReplace, "")[1:]
fileName = os.path.split(item)[1].split(".")[0]
stepWidget.addItem(fileName + " | " + item)
self.update_list_attr(stepWidget, stepAttr)
def _custom_step_menu(self, cs_listWidget, stepAttr, QPos):
"right click context menu for custom step"
currentSelection = cs_listWidget.currentItem()
if currentSelection is None:
return
self.csMenu = QtWidgets.QMenu()
parentPosition = cs_listWidget.mapToGlobal(QtCore.QPoint(0, 0))
menu_item_01 = self.csMenu.addAction("Toggle Custom Step")
self.csMenu.addSeparator()
menu_item_02 = self.csMenu.addAction("Turn OFF Selected")
menu_item_03 = self.csMenu.addAction("Turn ON Selected")
self.csMenu.addSeparator()
menu_item_04 = self.csMenu.addAction("Turn OFF All")
menu_item_05 = self.csMenu.addAction("Turn ON All")
menu_item_01.triggered.connect(partial(self.toggle_status_custom_step,
cs_listWidget,
stepAttr))
menu_item_02.triggered.connect(partial(self.set_status_custom_step,
cs_listWidget,
stepAttr,
False))
menu_item_03.triggered.connect(partial(self.set_status_custom_step,
cs_listWidget,
stepAttr,
True))
menu_item_04.triggered.connect(partial(self.set_status_custom_step,
cs_listWidget,
stepAttr,
False,
False))
menu_item_05.triggered.connect(partial(self.set_status_custom_step,
cs_listWidget,
stepAttr,
True,
False))
self.csMenu.move(parentPosition + QPos)
self.csMenu.show()
def pre_custom_step_menu(self, QPos):
self._custom_step_menu(self.custom_step_tab.preCustomStep_listWidget,
"pre_custom_step",
QPos)
def post_custom_step_menu(self, QPos):
self._custom_step_menu(self.custom_step_tab.postCustomStep_listWidget,
"post_custom_step",
QPos)
def toggle_status_custom_step(self, cs_listWidget, stepAttr):
items = cs_listWidget.selectedItems()
for item in items:
if item.text().startswith("*"):
item.setText(item.text()[1:])
item.setForeground(self.white_down_brush)
else:
item.setText("*" + item.text())
item.setForeground(self.red_brush)
self.update_list_attr(cs_listWidget, stepAttr)
self.refresh_status_color(cs_listWidget)
def set_status_custom_step(
self, cs_listWidget, stepAttr, status=True, selected=True):
if selected:
items = cs_listWidget.selectedItems()
else:
items = self.get_all_items(cs_listWidget)
for item in items:
off = item.text().startswith("*")
if status and off:
item.setText(item.text()[1:])
elif not status and not off:
item.setText("*" + item.text())
self.set_status_color(item)
self.update_list_attr(cs_listWidget, stepAttr)
self.refresh_status_color(cs_listWidget)
def get_all_items(self, cs_listWidget):
return [cs_listWidget.item(i) for i in range(cs_listWidget.count())]
def set_status_color(self, item):
if item.text().startswith("*"):
item.setForeground(self.red_brush)
elif "_shared" in item.text():
item.setForeground(self.green_brush)
else:
item.setForeground(self.white_down_brush)
def refresh_status_color(self, cs_listWidget):
items = self.get_all_items(cs_listWidget)
for i in items:
self.set_status_color(i)
# Highligter filter
def _highlight_search(self, cs_listWidget, searchText):
items = self.get_all_items(cs_listWidget)
for i in items:
if searchText and searchText.lower() in i.text().lower():
i.setBackground(QtGui.QColor(128, 128, 128, 255))
else:
i.setBackground(QtGui.QColor(255, 255, 255, 0))
def pre_highlight_search(self):
searchText = self.custom_step_tab.preSearch_lineEdit.text()
self._highlight_search(self.custom_step_tab.preCustomStep_listWidget,
searchText)
def post_highlight_search(self):
searchText = self.custom_step_tab.postSearch_lineEdit.text()
self._highlight_search(self.custom_step_tab.postCustomStep_listWidget,
searchText)
class BlockMainTabUI(QtWidgets.QDialog, block_ui.Ui_Form):
def __init__(self):
super(BlockMainTabUI, self).__init__()
self.setupUi(self)
class BlockSettings(QtWidgets.QDialog, HelperSlots):
valueChanged = QtCore.Signal(int)
def __init__(self, parent=None):
super(BlockSettings, self).__init__()
        # the inspectSettings function sets the current selection to the
        # component root before opening the settings dialog
self._guide = lib.get_component_guide(pm.selected(type="transform")[0])[0]
self._network = self._guide.message.outputs(type="network")[0]
self.main_tab = BlockMainTabUI()
self.create_controls()
self.populate_controls()
self.create_layout()
self.create_connections()
self.setAttribute(QtCore.Qt.WA_DeleteOnClose, True)
def create_controls(self):
"""
Create the controls for the component base
"""
self.tabs = QtWidgets.QTabWidget()
self.tabs.setObjectName("block_settings_tab")
# Close Button
self.close_button = QtWidgets.QPushButton("Close")
def populate_controls(self):
"""Populate Controls attribute values
Populate the controls values from the custom attributes
of the component.
"""
# populate tab
self.tabs.insertTab(0, self.main_tab, "Main Settings")
# populate main settings
self.main_tab.name_lineEdit.setText(
self._network.attr("comp_name").get())
sideSet = ["center", "left", "right"]
sideIndex = sideSet.index(self._network.attr("comp_side").get(asString=True))
self.main_tab.side_comboBox.setCurrentIndex(sideIndex)
self.main_tab.componentIndex_spinBox.setValue(
self._network.attr("comp_index").get())
# if self._network.attr("useIndex").get():
# self.main_tab.useJointIndex_checkBox.setCheckState(
# QtCore.Qt.Checked)
# else:
# self.main_tab.useJointIndex_checkBox.setCheckState(
# QtCore.Qt.Unchecked)
# self.main_tab.parentJointIndex_spinBox.setValue(
# self._network.attr("parentJointIndex").get())
self.main_tab.host_lineEdit.setText(
self._network.attr("ui_host").get().split(",")[0])
# self.main_tab.subGroup_lineEdit.setText(
# self._network.attr("ctlGrp").get())
# self.main_tab.joint_offset_x_doubleSpinBox.setValue(
# self._network.attr("joint_rot_offset_x").get())
# self.main_tab.joint_offset_y_doubleSpinBox.setValue(
# self._network.attr("joint_rot_offset_y").get())
# self.main_tab.joint_offset_z_doubleSpinBox.setValue(
# self._network.attr("joint_rot_offset_z").get())
# testing adding custom color per component
self.main_tab.overrideColors_checkBox.setCheckState(
QtCore.Qt.Checked if self._network.attr("override_color").get()
else QtCore.Qt.Unchecked)
self.main_tab.useRGB_checkBox.setCheckState(
QtCore.Qt.Checked if self._network.attr("use_RGB_color").get()
else QtCore.Qt.Unchecked)
tab = self.main_tab
index_widgets = ((tab.color_fk_spinBox,
tab.color_fk_label,
"color_fk"),
(tab.color_ik_spinBox,
tab.color_ik_label,
"color_ik"))
rgb_widgets = ((tab.RGB_fk_pushButton, tab.RGB_fk_slider, "RGB_fk"),
(tab.RGB_ik_pushButton, tab.RGB_ik_slider, "RGB_ik"))
for spinBox, label, source_attr in index_widgets:
color_index = self._network.attr(source_attr).get()
spinBox.setValue(color_index)
self.update_widget_style_sheet(
label, [i / 255.0 for i in MAYA_OVERRIDE_COLOR[color_index]])
for button, slider, source_attr in rgb_widgets:
self.update_rgb_color_widgets(
button, self._network.attr(source_attr).get(), slider)
        # forcing the size of the color buttons/labels to keep the UI clean
for widget in tuple(i[0] for i in rgb_widgets) + tuple(
i[1] for i in index_widgets):
widget.setFixedSize(pyqt.dpi_scale(30), pyqt.dpi_scale(20))
self.toggle_rgb_index_widgets(tab.useRGB_checkBox,
(w for i in index_widgets for w in i[:2]),
(w for i in rgb_widgets for w in i[:2]),
"use_RGB_color",
tab.useRGB_checkBox.checkState())
self.refresh_controls()
def refresh_controls(self):
joint_names = [name.strip() for name in
self._network.attr("joint_names").get().split(",")]
if any(joint_names):
summary = "<b>({0} set)</b>".format(sum(map(bool, joint_names)))
else:
summary = "(None)"
self.main_tab.jointNames_label.setText("Joint Names " + summary)
def create_layout(self):
"""
Create the layout for the component base settings
"""
return
def create_connections(self):
"""
Create the slots connections to the controls functions
"""
self.close_button.clicked.connect(self.close_settings)
self.main_tab.name_lineEdit.editingFinished.connect(
self.update_component_name)
self.main_tab.side_comboBox.currentIndexChanged.connect(
self.update_component_name)
self.main_tab.componentIndex_spinBox.valueChanged.connect(
self.update_component_name)
# self.main_tab.useJointIndex_checkBox.stateChanged.connect(
# partial(self.update_check,
# self.main_tab.useJointIndex_checkBox,
# "useIndex"))
# self.main_tab.parentJointIndex_spinBox.valueChanged.connect(
# partial(self.update_spin_box,
# self.main_tab.parentJointIndex_spinBox,
# "parentJointIndex"))
self.main_tab.host_pushButton.clicked.connect(
partial(self.update_host_ui,
self.main_tab.host_lineEdit,
"ui_host"))
# self.main_tab.subGroup_lineEdit.editingFinished.connect(
# partial(self.update_line_edit,
# self.main_tab.subGroup_lineEdit,
# "ctlGrp"))
self.main_tab.jointNames_pushButton.clicked.connect(
self.joint_names_dialog)
# self.main_tab.joint_offset_x_doubleSpinBox.valueChanged.connect(
# partial(self.update_spin_box,
# self.main_tab.joint_offset_x_doubleSpinBox,
# "joint_rot_offset_x"))
# self.main_tab.joint_offset_y_doubleSpinBox.valueChanged.connect(
# partial(self.update_spin_box,
# self.main_tab.joint_offset_y_doubleSpinBox,
# "joint_rot_offset_y"))
# self.main_tab.joint_offset_z_doubleSpinBox.valueChanged.connect(
# partial(self.update_spin_box,
# self.main_tab.joint_offset_z_doubleSpinBox,
# "joint_rot_offset_z"))
tab = self.main_tab
index_widgets = ((tab.color_fk_spinBox,
tab.color_fk_label,
"color_fk"),
(tab.color_ik_spinBox,
tab.color_ik_label,
"color_ik"))
rgb_widgets = ((tab.RGB_fk_pushButton, tab.RGB_fk_slider, "RGB_fk"),
(tab.RGB_ik_pushButton, tab.RGB_ik_slider, "RGB_ik"))
for spinBox, label, source_attr in index_widgets:
spinBox.valueChanged.connect(
partial(self.update_index_color_widgets,
spinBox,
source_attr,
label))
for button, slider, source_attr in rgb_widgets:
button.clicked.connect(
partial(self.rgb_color_editor, button, source_attr, slider))
slider.valueChanged.connect(
partial(self.rgb_slider_value_changed, button, source_attr))
tab.useRGB_checkBox.stateChanged.connect(
partial(self.toggle_rgb_index_widgets,
tab.useRGB_checkBox,
tuple(w for i in index_widgets for w in i[:2]),
tuple(w for i in rgb_widgets for w in i[:2]),
"use_RGB_color"))
tab.overrideColors_checkBox.stateChanged.connect(
partial(self.update_check,
tab.overrideColors_checkBox,
"override_color"))
def joint_names_dialog(self):
dialog = JointNames(self._network, self)
dialog.setWindowTitle(self.windowTitle())
dialog.attributeChanged.connect(self.refresh_controls)
dialog.show()
class JointNames(QtWidgets.QDialog, joint_name_ui.Ui_Form):
attributeChanged = QtCore.Signal()
def __init__(self, network, parent=None):
super(JointNames, self).__init__(parent)
self._network = network
self.setupUi(self)
self.populate_controls()
self.apply_names()
self.create_connections()
def populate_controls(self):
jointNames = self._network.attr("joint_names").get().split(",")
if jointNames[-1]:
jointNames.append("")
self.jointNamesList.clearContents()
self.jointNamesList.setRowCount(0)
for i, name in enumerate(jointNames):
self.jointNamesList.insertRow(i)
item = QtWidgets.QTableWidgetItem(name.strip())
self.jointNamesList.setItem(i, 0, item)
def create_connections(self):
self.jointNamesList.cellChanged.connect(self.update_name)
self.jointNamesList.itemActivated.connect(self.jointNamesList.editItem)
self.add_pushButton.clicked.connect(self.add)
self.remove_pushButton.clicked.connect(self.remove)
self.removeAll_pushButton.clicked.connect(self.remove_all)
self.moveUp_pushButton.clicked.connect(lambda: self.move(-1))
self.moveDown_pushButton.clicked.connect(lambda: self.move(1))
def apply_names(self):
jointNames = []
for i in range(self.jointNamesList.rowCount()):
item = self.jointNamesList.item(i, 0)
jointNames.append(item.text())
value = ",".join(jointNames[0:-1])
self._network.attr("joint_names").set(value)
self.jointNamesList.setVerticalHeaderLabels(
[str(i) for i in range(len(jointNames))])
self.attributeChanged.emit()
def add(self):
row = max(0, self.jointNamesList.currentRow() or 0)
self.jointNamesList.insertRow(row)
item = QtWidgets.QTableWidgetItem("")
self.jointNamesList.setItem(row, 0, item)
self.jointNamesList.setCurrentCell(row, 0)
self.apply_names()
def remove(self):
row = self.jointNamesList.currentRow()
if row + 1 < self.jointNamesList.rowCount() > 1:
self.jointNamesList.removeRow(row)
self.apply_names()
self.jointNamesList.setCurrentCell(row, 0)
def remove_all(self):
self.jointNamesList.clearContents()
self.jointNamesList.setRowCount(0)
self.jointNamesList.insertRow(0)
self.jointNamesList.setItem(0, 0, QtWidgets.QTableWidgetItem(""))
self.jointNamesList.setCurrentCell(0, 0)
self.apply_names()
def move(self, step):
row = self.jointNamesList.currentRow()
if row + step < 0:
return
item1 = self.jointNamesList.item(row, 0).text()
item2 = self.jointNamesList.item(row + step, 0).text()
self.jointNamesList.item(row, 0).setText(item2)
self.jointNamesList.item(row + step, 0).setText(item1)
self.jointNamesList.setCurrentCell(row + step, 0)
def update_name(self, row, column):
item = self.jointNamesList.item(row, column)
if row == self.jointNamesList.rowCount() - 1 and item.text():
self.jointNamesList.insertRow(row + 1)
self.jointNamesList.setItem(
row + 1, 0, QtWidgets.QTableWidgetItem(""))
self.apply_names()
self.jointNamesList.setCurrentCell(row + 1, 0)
self.jointNamesList.editItem(self.jointNamesList.currentItem())
    def keyPressEvent(self, event):
        pass
|
python
|
from django.shortcuts import render
# Create your views here.
def main(request):
title = 'Travel Freely!'
content = {
'title': title,
}
return render(request, 'mainsite/index.html', context=content)
|
python
|
from empire.python.typings import *
from empire.fs.file_system import FileSystem
from empire.archive.archive_types import ArchiveTypes
from empire.archive.abstract_compression import AbstractCompression
from empire.archive.abstract_archive import AbstractArchive
from empire.archive.zip_ar import Zip
from empire.archive.gzip_ar import GZIP
from empire.archive.lzma_ar import LZMA
from empire.archive.bzip_ar import BZIP
from empire.archive.tarbz_ar import TAR_BZ
from empire.archive.targz_ar import TAR_GZ
from empire.archive.tarxz_ar import TAR_XZ
from empire.util.log import *
COMPRESSION_TYPE_TO_IMPL: Final[Dict[int, Type[AbstractCompression]]] = {
ArchiveTypes.ZIP: Zip,
ArchiveTypes.GZIP: GZIP,
ArchiveTypes.LZMA: LZMA,
ArchiveTypes.BZIP: BZIP
}
TAR_TYPE_TO_IMPL: Final[Dict[int, Type[AbstractCompression]]] = {
ArchiveTypes.TAR_XZ: TAR_XZ,
ArchiveTypes.TAR_BZ: TAR_BZ,
ArchiveTypes.TAR_GZ: TAR_GZ
}
MIME_TYPE_TO_IMPL: Final[Dict[str, Type[AbstractCompression]]] = {
'application/x-bzip2': BZIP,
'application/x-bzip': BZIP,
'application/x-gzip': GZIP,
'application/x-compressed': Zip,
'application/x-zip-compressed': Zip,
'application/zip': Zip,
'application/x-xz': LZMA,
'application/x-lzma': LZMA
}
def get_class_for_file(file_path: str) -> Union[Type[AbstractCompression], Type[AbstractArchive], None]:
if '.tar' in file_path:
compresser: Type[AbstractCompression] = MIME_TYPE_TO_IMPL[FileSystem.get_mime_from_file(file_path)]
if compresser == LZMA:
return TAR_XZ
elif compresser == BZIP:
return TAR_BZ
elif compresser == GZIP:
return TAR_GZ
else:
Log.severe('Unable to determine valid class', __file__, get_function_name(), file=file_path)
return None
else:
return MIME_TYPE_TO_IMPL[FileSystem.get_mime_from_file(file_path)]
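# A hedged usage sketch (not part of the original module); the file names are
# hypothetical and the files must exist on disk, since the mime type is probed from
# them. ".tar.*" paths are routed through their compression mime type to a TAR_*
# class, anything else goes straight through MIME_TYPE_TO_IMPL.
def _example_archive_routing() -> None:
    for example_path in ('backup.tar.gz', 'archive.zip'):
        print(example_path, '->', get_class_for_file(example_path))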
|
python
|
"""API urls."""
from rest_framework import routers
from . import viewsets
router = routers.SimpleRouter()
router.register(r"email-providers", viewsets.EmailProviderViewSet)
router.register(r"migrations", viewsets.MigrationViewSet)
urlpatterns = router.urls
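# A minimal sketch of how these router urls are commonly mounted from a project
# urls.py (the "api/" prefix and the dotted module path are assumptions):
#
#     from django.urls import include, path
#     urlpatterns = [path("api/", include("<app>.api.urls"))]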
|
python
|
'''
Read 3 floating-point values A, B and C and sort them in descending order, so that side A represents the largest of the 3 sides. Then determine which type of triangle these three sides form, based on the following cases, always printing the appropriate message:
- if A ≥ B+C, print the message: NAO FORMA TRIANGULO
- if A² = B² + C², print the message: TRIANGULO RETANGULO
- if A² > B² + C², print the message: TRIANGULO OBTUSANGULO
- if A² < B² + C², print the message: TRIANGULO ACUTANGULO
- if all three sides are equal, print the message: TRIANGULO EQUILATERO
- if only two of the sides are equal, print the message: TRIANGULO ISOSCELES
**Input**
The input contains three double-precision floating-point values A (0 < A), B (0 < B) and C (0 < C).
**Output**
Print all the classifications of the triangle given in the input.
| Input Sample | Output Samples |
| ------------ | ---------------------- |
| 7.0 5.0 7.0 | TRIANGULO ACUTANGULO |
| | TRIANGULO ISOSCELES |
| ------------ | ---------------------- |
| 6.0 6.0 10.0 | TRIANGULO OBTUSANGULO |
| | TRIANGULO ISOSCELES |
| ------------ | ---------------------- |
| 6.0 6.0 6.0 | TRIANGULO ACUTANGULO |
| | TRIANGULO EQUILATERO |
| ------------ | ---------------------- |
| 5.0 7.0 2.0 | NAO FORMA TRIANGULO |
| ------------ | ---------------------- |
| 6.0 8.0 10.0 | TRIANGULO RETANGULO |
'''
triang = input().split()
n1 = float(triang[0])
n2 = float(triang[1])
n3 = float(triang[2])
result = n1, n2, n3
ordem = sorted(result, reverse=True)
a = ordem[0]
b = ordem[1]
c = ordem[2]
'''
#debug
print("n1 = {}".format(n1))
print("n2 = {}".format(n2))
print("n3 = {}".format(n3))
print("result = {}".format(result))
print("ordem = {}".format(ordem))
print("A = {}".format(a))
print("B = {}".format(b))
print("C = {}".format(c))
'''
tag = True
if a >= (b + c):
    tag = False
    print("NAO FORMA TRIANGULO")
if tag and (a ** 2) == (b ** 2) + (c ** 2):
    print("TRIANGULO RETANGULO")
if tag and (a ** 2) > (b ** 2) + (c ** 2):
    print("TRIANGULO OBTUSANGULO")
if tag and (a ** 2) < (b ** 2) + (c ** 2):
    print("TRIANGULO ACUTANGULO")
if tag and a == b == c:
    print("TRIANGULO EQUILATERO")
if tag and (a == b != c or a == c != b or b == c != a):
    print("TRIANGULO ISOSCELES")
|
python
|
from esteira.pipeline.stage import Stage
from pathlib import Path
BASE_DIR = Path(__file__).parent.absolute()
def test_instance():
class TestShell(Stage):
script = [
'echo "hello world"'
]
test = TestShell(BASE_DIR)
test.run()
|
python
|
from __future__ import division
import sys
import math
import random
import time
import webbrowser as wb
import keyboard as kb
import pyautogui
from collections import deque
from pyglet import image
from pyglet.gl import *
from pyglet.graphics import TextureGroup
from pyglet.window import key, mouse
from playsound import playsound
TICKS_PER_SEC = 60
# Size of sectors used to ease block loading.
SECTOR_SIZE = 16
WALKING_SPEED = 3
FLYING_SPEED = 15
GRAVITY = 20.0
MAX_JUMP_HEIGHT = 1.0 # About the height of a block.
# To derive the formula for calculating jump speed, first solve
# v_t = v_0 + a * t
# for the time at which you achieve maximum height, where a is the acceleration
# due to gravity and v_t = 0. This gives:
# t = - v_0 / a
# Use t and the desired MAX_JUMP_HEIGHT to solve for v_0 (jump speed) in
# s = s_0 + v_0 * t + (a * t^2) / 2
JUMP_SPEED = math.sqrt(2 * GRAVITY * MAX_JUMP_HEIGHT)
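# For example, with GRAVITY = 20.0 and MAX_JUMP_HEIGHT = 1.0 the formula above gives
# JUMP_SPEED = sqrt(2 * 20.0 * 1.0) ~= 6.32 blocks per second.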
TERMINAL_VELOCITY = 50
PLAYER_HEIGHT = 2
LIVES = 10
if sys.version_info[0] >= 3:
xrange = range
def cube_vertices(x, y, z, n):
""" Return the vertices of the cube at position x, y, z with size 2*n.
"""
return [
x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top
x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom
x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left
x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right
x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front
x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back
]
def tex_coord(x, y, n=4):
""" Return the bounding vertices of the texture square.
"""
m = 1.0 / n
dx = x * m
dy = y * m
return dx, dy, dx + m, dy, dx + m, dy + m, dx, dy + m
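# For example, tex_coord(1, 0) with the default n=4 picks the tile at column 1, row 0
# of a 4x4 atlas and returns (0.25, 0.0, 0.5, 0.0, 0.5, 0.25, 0.25, 0.25).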
def tex_coords(top, bottom, side):
""" Return a list of the texture squares for the top, bottom and side.
"""
top = tex_coord(*top)
bottom = tex_coord(*bottom)
side = tex_coord(*side)
result = []
result.extend(top)
result.extend(bottom)
result.extend(side * 4)
return result
def crouch():
    # Assign the module-level constant; without `global` this only creates an unused local.
    global WALKING_SPEED
    WALKING_SPEED = 0.5
TEXTURE_PATH = 'texture.png'
GRASS = tex_coords((1, 0), (0, 1), (0, 0))
SAND = tex_coords((1, 1), (1, 1), (1, 1))
BRICK = tex_coords((2, 0), (2, 0), (2, 0))
STONE = tex_coords((2, 1), (2, 1), (2, 1))
DIRT = tex_coords((0, 1), (0, 1), (0, 1))
BOOKSHELF = tex_coords((1, 2), (1, 2), (0, 2))
SNOW = tex_coords((2, 2), (2, 2), (2, 2))
WOOD = tex_coords((3, 0), (3, 0), (3, 1))
LEAVES = tex_coords((3, 2), (3, 2), (3, 2))
FACES = [
( 0, 1, 0),
( 0,-1, 0),
(-1, 0, 0),
( 1, 0, 0),
( 0, 0, 1),
( 0, 0,-1),
]
def normalize(position):
""" Accepts `position` of arbitrary precision and returns the block
containing that position.
Parameters
----------
position : tuple of len 3
Returns
-------
block_position : tuple of ints of len 3
"""
x, y, z = position
x, y, z = (int(round(x)), int(round(y)), int(round(z)))
return (x, y, z)
def sectorize(position):
""" Returns a tuple representing the sector for the given `position`.
Parameters
----------
position : tuple of len 3
Returns
-------
sector : tuple of len 3
"""
x, y, z = normalize(position)
x, y, z = x // SECTOR_SIZE, y // SECTOR_SIZE, z // SECTOR_SIZE
return (x, 0, z)
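# For example, with SECTOR_SIZE = 16 a position of (17.3, 5.0, -3.8) normalizes to the
# block (17, 5, -4) and maps to sector (1, 0, -1); the y component is collapsed to 0 so
# a sector spans a full vertical column of the world.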
class Model(object):
def __init__(self):
# A Batch is a collection of vertex lists for batched rendering.
self.batch = pyglet.graphics.Batch()
# A TextureGroup manages an OpenGL texture.
self.group = TextureGroup(image.load(TEXTURE_PATH).get_texture())
# A mapping from position to the texture of the block at that position.
# This defines all the blocks that are currently in the world.
self.world = {}
# Same mapping as `world` but only contains blocks that are shown.
self.shown = {}
        # Mapping from position to a pyglet `VertexList` for all shown blocks.
self._shown = {}
# Mapping from sector to a list of positions inside that sector.
self.sectors = {}
# Simple function queue implementation. The queue is populated with
# _show_block() and _hide_block() calls
self.queue = deque()
self._initialize()
def _initialize(self):
""" Initialize the world by placing all the blocks.
"""
n = 80 # 1/2 width and height of world
s = 1 # step size
y = 0 # initial y height
for x in xrange(-n, n + 1, s):
for z in xrange(-n, n + 1, s):
                # create layers of grass, dirt and stone everywhere.
self.add_block((x, y - 1, z), GRASS, immediate=False)
self.add_block((x, y - 2, z), DIRT, immediate=False)
self.add_block((x, y - 3, z), DIRT, immediate=False)
self.add_block((x, y - 4, z), DIRT, immediate=False)
self.add_block((x, y - 5, z), STONE, immediate=False)
# generate the hills randomly
o = n - 10
for _ in xrange(120):
a = random.randint(-o, o) # x position of the hill
b = random.randint(-o, o) # z position of the hill
c = -1 # base of the hill
h = random.randint(1, 9) # height of the hill
s = random.randint(4, 8) # 2 * s is the side length of the hill
d = 1 # how quickly to taper off the hills
t = random.choice([STONE])
tz = random.choice([SNOW])
fk = random.choice([DIRT])
for y in xrange(c, c + h):
for x in xrange(a - s, a + s + 1):
for z in xrange(b - s, b + s + 1):
if (x - a) ** 2 + (z - b) ** 2 > (s + 1) ** 2:
continue
if (x - 0) ** 2 + (z - 0) ** 2 < 5 ** 2:
continue
if y == 10:
self.add_block((x, y, z), tz, immediate=False)
if y >= 4:
self.add_block((x, y, z), tz, immediate=False)
elif y < 4:
self.add_block((x, y, z), t, immediate=False)
            s -= d  # decrement side length so hills taper off
# generate trees
X = random.randint(-80, 80)
Z = random.randint(-80, 80)
H = random.randint(4, 8)
B = random.choice([WOOD])
C = 0
w = WOOD
for x in range(0, C):
self.add_block((X, H, Z), w, immediate=False)
if C == H:
continue
def hit_test(self, position, vector, max_distance=8):
""" Line of sight search from current position. If a block is
intersected it is returned, along with the block previously in the line
of sight. If no block is found, return None, None.
Parameters
----------
position : tuple of len 3
The (x, y, z) position to check visibility from.
vector : tuple of len 3
The line of sight vector.
max_distance : int
How many blocks away to search for a hit.
"""
m = 8
x, y, z = position
dx, dy, dz = vector
previous = None
for _ in xrange(max_distance * m):
key = normalize((x, y, z))
if key != previous and key in self.world:
return key, previous
previous = key
x, y, z = x + dx / m, y + dy / m, z + dz / m
return None, None
def exposed(self, position):
""" Returns False is given `position` is surrounded on all 6 sides by
blocks, True otherwise.
"""
x, y, z = position
for dx, dy, dz in FACES:
if (x + dx, y + dy, z + dz) not in self.world:
return True
return False
def add_block(self, position, texture, immediate=True):
""" Add a block with the given `texture` and `position` to the world.
Parameters
----------
position : tuple of len 3
The (x, y, z) position of the block to add.
texture : list of len 3
The coordinates of the texture squares. Use `tex_coords()` to
generate.
immediate : bool
Whether or not to draw the block immediately.
"""
if position in self.world:
self.remove_block(position, immediate)
self.world[position] = texture
self.sectors.setdefault(sectorize(position), []).append(position)
if immediate:
if self.exposed(position):
self.show_block(position)
self.check_neighbors(position)
def remove_block(self, position, immediate=True):
""" Remove the block at the given `position`.
Parameters
----------
position : tuple of len 3
The (x, y, z) position of the block to remove.
immediate : bool
Whether or not to immediately remove block from canvas.
"""
del self.world[position]
self.sectors[sectorize(position)].remove(position)
if immediate:
if position in self.shown:
self.hide_block(position)
self.check_neighbors(position)
def check_neighbors(self, position):
""" Check all blocks surrounding `position` and ensure their visual
state is current. This means hiding blocks that are not exposed and
ensuring that all exposed blocks are shown. Usually used after a block
is added or removed.
"""
x, y, z = position
for dx, dy, dz in FACES:
key = (x + dx, y + dy, z + dz)
if key not in self.world:
continue
if self.exposed(key):
if key not in self.shown:
self.show_block(key)
else:
if key in self.shown:
self.hide_block(key)
def show_block(self, position, immediate=True):
""" Show the block at the given `position`. This method assumes the
block has already been added with add_block()
Parameters
----------
position : tuple of len 3
The (x, y, z) position of the block to show.
immediate : bool
Whether or not to show the block immediately.
"""
texture = self.world[position]
self.shown[position] = texture
if immediate:
self._show_block(position, texture)
else:
self._enqueue(self._show_block, position, texture)
def _show_block(self, position, texture):
""" Private implementation of the `show_block()` method.
Parameters
----------
position : tuple of len 3
The (x, y, z) position of the block to show.
texture : list of len 3
The coordinates of the texture squares. Use `tex_coords()` to
generate.
"""
x, y, z = position
vertex_data = cube_vertices(x, y, z, 0.5)
texture_data = list(texture)
# create vertex list
# FIXME Maybe `add_indexed()` should be used instead
self._shown[position] = self.batch.add(24, GL_QUADS, self.group,
('v3f/static', vertex_data),
('t2f/static', texture_data))
def hide_block(self, position, immediate=True):
""" Hide the block at the given `position`. Hiding does not remove the
block from the world.
Parameters
----------
position : tuple of len 3
The (x, y, z) position of the block to hide.
immediate : bool
Whether or not to immediately remove the block from the canvas.
"""
self.shown.pop(position)
if immediate:
self._hide_block(position)
else:
self._enqueue(self._hide_block, position)
def _hide_block(self, position):
""" Private implementation of the 'hide_block()` method.
"""
self._shown.pop(position).delete()
def show_sector(self, sector):
""" Ensure all blocks in the given sector that should be shown are
drawn to the canvas.
"""
for position in self.sectors.get(sector, []):
if position not in self.shown and self.exposed(position):
self.show_block(position, False)
def hide_sector(self, sector):
""" Ensure all blocks in the given sector that should be hidden are
removed from the canvas.
"""
for position in self.sectors.get(sector, []):
if position in self.shown:
self.hide_block(position, False)
def change_sectors(self, before, after):
""" Move from sector `before` to sector `after`. A sector is a
contiguous x, y sub-region of world. Sectors are used to speed up
world rendering.
"""
before_set = set()
after_set = set()
pad = 4
for dx in xrange(-pad, pad + 1):
for dy in [0]: # xrange(-pad, pad + 1):
for dz in xrange(-pad, pad + 1):
if dx ** 2 + dy ** 2 + dz ** 2 > (pad + 1) ** 2:
continue
if before:
x, y, z = before
before_set.add((x + dx, y + dy, z + dz))
if after:
x, y, z = after
after_set.add((x + dx, y + dy, z + dz))
show = after_set - before_set
hide = before_set - after_set
for sector in show:
self.show_sector(sector)
for sector in hide:
self.hide_sector(sector)
def _enqueue(self, func, *args):
""" Add `func` to the internal queue.
"""
self.queue.append((func, args))
def _dequeue(self):
""" Pop the top function from the internal queue and call it.
"""
func, args = self.queue.popleft()
func(*args)
def process_queue(self):
""" Process the entire queue while taking periodic breaks. This allows
the game loop to run smoothly. The queue contains calls to
_show_block() and _hide_block() so this method should be called if
add_block() or remove_block() was called with immediate=False
"""
start = time.clock()
while self.queue and time.clock() - start < 1.0 / TICKS_PER_SEC:
self._dequeue()
def process_entire_queue(self):
""" Process the entire queue with no breaks.
"""
while self.queue:
self._dequeue()
class Window(pyglet.window.Window):
def __init__(self, *args, **kwargs):
super(Window, self).__init__(*args, **kwargs)
# Whether or not the window exclusively captures the mouse.
self.exclusive = False
# When flying gravity has no effect and speed is increased.
self.flying = False
# Strafing is moving lateral to the direction you are facing,
# e.g. moving to the left or right while continuing to face forward.
#
# First element is -1 when moving forward, 1 when moving back, and 0
# otherwise. The second element is -1 when moving left, 1 when moving
# right, and 0 otherwise.
self.strafe = [0, 0]
# Current (x, y, z) position in the world, specified with floats. Note
# that, perhaps unlike in math class, the y-axis is the vertical axis.
self.position = (0, 0, 0)
# First element is rotation of the player in the x-z plane (ground
# plane) measured from the z-axis down. The second is the rotation
# angle from the ground plane up. Rotation is in degrees.
#
# The vertical plane rotation ranges from -90 (looking straight down) to
# 90 (looking straight up). The horizontal rotation range is unbounded.
self.rotation = (0, 0)
# Which sector the player is currently in.
self.sector = None
# The crosshairs at the center of the screen.
self.reticle = None
# Velocity in the y (upward) direction.
self.dy = 0
# A list of blocks the player can place. Hit num keys to cycle.
self.inventory = [BRICK, GRASS, SAND, BOOKSHELF, WOOD, SNOW, LEAVES, DIRT, STONE]
# The current block the user can place. Hit num keys to cycle.
self.block = self.inventory[0]
# Convenience list of num keys.
self.num_keys = [
key._1, key._2, key._3, key._4, key._5,
key._6, key._7, key._8, key._9, key._0]
# Instance of the model that handles the world.
self.model = Model()
# The label that is displayed in the top left of the canvas.
self.label = pyglet.text.Label('', font_name='Arial', font_size=18,
x=10, y=self.height - 10, anchor_x='left', anchor_y='top',
color=(0, 0, 0, 255))
# This call schedules the `update()` method to be called
# TICKS_PER_SEC. This is the main game event loop.
pyglet.clock.schedule_interval(self.update, 1.0 / TICKS_PER_SEC)
def set_exclusive_mouse(self, exclusive):
""" If `exclusive` is True, the game will capture the mouse, if False
the game will ignore the mouse.
"""
super(Window, self).set_exclusive_mouse(exclusive)
self.exclusive = exclusive
def get_sight_vector(self):
""" Returns the current line of sight vector indicating the direction
the player is looking.
"""
x, y = self.rotation
# y ranges from -90 to 90, or -pi/2 to pi/2, so m ranges from 0 to 1 and
# is 1 when looking ahead parallel to the ground and 0 when looking
# straight up or down.
m = math.cos(math.radians(y))
# dy ranges from -1 to 1 and is -1 when looking straight down and 1 when
# looking straight up.
dy = math.sin(math.radians(y))
dx = math.cos(math.radians(x - 90)) * m
dz = math.sin(math.radians(x - 90)) * m
return (dx, dy, dz)
def get_motion_vector(self):
""" Returns the current motion vector indicating the velocity of the
player.
Returns
-------
vector : tuple of len 3
Tuple containing the velocity in x, y, and z respectively.
"""
if any(self.strafe):
x, y = self.rotation
strafe = math.degrees(math.atan2(*self.strafe))
y_angle = math.radians(y)
x_angle = math.radians(x + strafe)
if self.flying:
m = math.cos(y_angle)
dy = math.sin(y_angle)
if self.strafe[1]:
# Moving left or right.
dy = 0.0
m = 1
if self.strafe[0] > 0:
# Moving backwards.
dy *= -1
# When you are flying up or down, you have less left and right
# motion.
dx = math.cos(x_angle) * m
dz = math.sin(x_angle) * m
else:
dy = 0.0
dx = math.cos(x_angle)
dz = math.sin(x_angle)
else:
dy = 0.0
dx = 0.0
dz = 0.0
return (dx, dy, dz)
def update(self, dt):
""" This method is scheduled to be called repeatedly by the pyglet
clock.
Parameters
----------
dt : float
The change in time since the last call.
"""
self.model.process_queue()
sector = sectorize(self.position)
if sector != self.sector:
self.model.change_sectors(self.sector, sector)
if self.sector is None:
self.model.process_entire_queue()
self.sector = sector
m = 8
dt = min(dt, 0.2)
for _ in xrange(m):
self._update(dt / m)
def _update(self, dt):
""" Private implementation of the `update()` method. This is where most
of the motion logic lives, along with gravity and collision detection.
Parameters
----------
dt : float
The change in time since the last call.
"""
# walking
speed = FLYING_SPEED if self.flying else WALKING_SPEED
d = dt * speed # distance covered this tick.
dx, dy, dz = self.get_motion_vector()
# New position in space, before accounting for gravity.
dx, dy, dz = dx * d, dy * d, dz * d
# gravity
if not self.flying:
# Update your vertical speed: if you are falling, speed up until you
# hit terminal velocity; if you are jumping, slow down until you
# start falling.
self.dy -= dt * GRAVITY
self.dy = max(self.dy, -TERMINAL_VELOCITY)
dy += self.dy * dt
# collisions
x, y, z = self.position
x, y, z = self.collide((x + dx, y + dy, z + dz), PLAYER_HEIGHT)
self.position = (x, y, z)
def collide(self, position, height):
""" Checks to see if the player at the given `position` and `height`
is colliding with any blocks in the world.
Parameters
----------
position : tuple of len 3
The (x, y, z) position to check for collisions at.
height : int or float
The height of the player.
Returns
-------
position : tuple of len 3
The new position of the player taking into account collisions.
"""
# How much overlap with a dimension of a surrounding block you need to
# have to count as a collision. If 0, touching terrain at all counts as
# a collision. If .49, you sink into the ground, as if walking through
# tall grass. If >= .5, you'll fall through the ground.
pad = 0.25
p = list(position)
np = normalize(position)
for face in FACES: # check all surrounding blocks
for i in xrange(3): # check each dimension independently
if not face[i]:
continue
# How much overlap you have with this dimension.
d = (p[i] - np[i]) * face[i]
if d < pad:
continue
for dy in xrange(height): # check each height
op = list(np)
op[1] -= dy
op[i] += face[i]
if tuple(op) not in self.model.world:
continue
p[i] -= (d - pad) * face[i]
if face == (0, -1, 0) or face == (0, 1, 0):
# You are colliding with the ground or ceiling, so stop
# falling / rising.
self.dy = 0
break
return tuple(p)
def on_mouse_press(self, x, y, button, modifiers):
""" Called when a mouse button is pressed. See pyglet docs for button
        and modifier mappings.
Parameters
----------
x, y : int
The coordinates of the mouse click. Always center of the screen if
the mouse is captured.
button : int
Number representing mouse button that was clicked. 1 = left button,
4 = right button.
modifiers : int
Number representing any modifying keys that were pressed when the
mouse button was clicked.
"""
if self.exclusive:
vector = self.get_sight_vector()
block, previous = self.model.hit_test(self.position, vector)
if (button == mouse.RIGHT) or \
((button == mouse.LEFT) and (modifiers & key.MOD_CTRL)):
# ON OSX, control + left click = right click.
if previous:
self.model.add_block(previous, self.block)
            elif button == pyglet.window.mouse.LEFT and block:
                texture = self.model.world[block]
                # left click removes the targeted block; stone stays unbreakable
                if texture != STONE:
                    self.model.remove_block(block)
else:
self.set_exclusive_mouse(True)
def on_mouse_motion(self, x, y, dx, dy):
""" Called when the player moves the mouse.
Parameters
----------
x, y : int
The coordinates of the mouse click. Always center of the screen if
the mouse is captured.
dx, dy : float
The movement of the mouse.
"""
if self.exclusive:
m = 0.15
x, y = self.rotation
x, y = x + dx * m, y + dy * m
y = max(-90, min(90, y))
self.rotation = (x, y)
def on_key_press(self, symbol, modifiers):
""" Called when the player presses a key. See pyglet docs for key
mappings.
Parameters
----------
symbol : int
Number representing the key that was pressed.
modifiers : int
Number representing any modifying keys that were pressed.
"""
if symbol == key.W:
self.strafe[0] -= 1
elif symbol == key.S:
self.strafe[0] += 1
elif symbol == key.A:
self.strafe[1] -= 1
elif symbol == key.D:
self.strafe[1] += 1
elif symbol == key.L:
crouch()
elif symbol == key.SPACE:
if self.dy == 0:
self.dy = JUMP_SPEED
elif symbol == key.ESCAPE:
self.set_exclusive_mouse(False)
elif symbol == key.TAB:
self.flying = not self.flying
elif symbol in self.num_keys:
index = (symbol - self.num_keys[0]) % len(self.inventory)
self.block = self.inventory[index]
def on_key_release(self, symbol, modifiers):
""" Called when the player releases a key. See pyglet docs for key
mappings.
Parameters
----------
symbol : int
Number representing the key that was pressed.
modifiers : int
Number representing any modifying keys that were pressed.
"""
if symbol == key.W:
self.strafe[0] += 1
elif symbol == key.S:
self.strafe[0] -= 1
elif symbol == key.A:
self.strafe[1] += 1
elif symbol == key.D:
self.strafe[1] -= 1
def on_resize(self, width, height):
""" Called when the window is resized to a new `width` and `height`.
"""
# label
self.label.y = height - 10
# reticle
if self.reticle:
self.reticle.delete()
x, y = self.width // 2, self.height // 2
n = 10
self.reticle = pyglet.graphics.vertex_list(4,
('v2i', (x - n, y, x + n, y, x, y - n, x, y + n))
)
def set_2d(self):
""" Configure OpenGL to draw in 2d.
"""
width, height = self.get_size()
glDisable(GL_DEPTH_TEST)
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, width, 0, height, -1, 1)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def set_3d(self):
""" Configure OpenGL to draw in 3d.
"""
width, height = self.get_size()
glEnable(GL_DEPTH_TEST)
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(65.0, width / float(height), 0.1, 60.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
x, y = self.rotation
glRotatef(x, 0, 1, 0)
glRotatef(-y, math.cos(math.radians(x)), 0, math.sin(math.radians(x)))
x, y, z = self.position
glTranslatef(-x, -y, -z)
def on_draw(self):
""" Called by pyglet to draw the canvas.
"""
self.clear()
self.set_3d()
glColor3d(1, 1, 1)
self.model.batch.draw()
self.draw_focused_block()
self.set_2d()
self.draw_label()
self.draw_reticle()
def draw_focused_block(self):
""" Draw black edges around the block that is currently under the
crosshairs.
"""
vector = self.get_sight_vector()
block = self.model.hit_test(self.position, vector)[0]
if block:
x, y, z = block
vertex_data = cube_vertices(x, y, z, 0.51)
glColor3d(0, 0, 0)
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
pyglet.graphics.draw(24, GL_QUADS, ('v3f/static', vertex_data))
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
def draw_label(self):
""" Draw the label in the top left of the screen.
"""
x, y, z = self.position
self.label.text = '%02d (%.2f, %.2f, %.2f) %d / %d' % (
pyglet.clock.get_fps(), x, y, z,
len(self.model._shown), len(self.model.world))
self.label.draw()
self.label.text = 'JetAdven 0.04. Work in progress'
def draw_reticle(self):
""" Draw the crosshairs in the center of the screen.
"""
glColor3d(0, 0, 0)
self.reticle.draw(GL_LINES)
def setup_fog():
""" Configure the OpenGL fog properties.
"""
# Enable fog. Fog "blends a fog color with each rasterized pixel fragment's
# post-texturing color."
glEnable(GL_FOG)
# Set the fog color.
glFogfv(GL_FOG_COLOR, (GLfloat * 4)(0.5, 0.69, 1.0, 1))
# Say we have no preference between rendering speed and quality.
glHint(GL_FOG_HINT, GL_DONT_CARE)
# Specify the equation used to compute the blending factor.
glFogi(GL_FOG_MODE, GL_LINEAR)
# How close and far away fog starts and ends. The closer the start and end,
# the denser the fog in the fog range.
glFogf(GL_FOG_START, 20.0)
glFogf(GL_FOG_END, 60.0)
def music():
music = pyglet.resource.media('gamemusic.mp3')
music.play()
def setup():
""" Basic OpenGL configuration.
"""
# Set the color of "clear", i.e. the sky, in rgba. (will add day night cycle)
glClearColor(0.5, 0.69, 1.0, 1)
#def daynight():
#time.sleep(100)
#glClearColor(0.2, 0.2, 0.2, 1)
#time.sleep(100)
#d#/aynight()
# Enable culling (not rendering) of back-facing facets -- facets that aren't
# visible to you.
glEnable(GL_CULL_FACE)
# Set the texture minification/magnification function to GL_NEAREST (nearest
# in Manhattan distance) to the specified texture coordinates. GL_NEAREST
# "is generally faster than GL_LINEAR, but it can produce textured images
# with sharper edges because the transition between texture elements is not
# as smooth."
# glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
# glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
# setup_fog()
def main():
window = Window(width=2500*2, height=1500*2, caption='JetAdven 0.04. not a clone of minecraft :)', resizable=True)
# Hide the mouse cursor and prevent the mouse from leaving the window.
window.set_exclusive_mouse(True)
setup()
pyglet.app.run()
music()
if __name__ == '__main__':
main()
|
python
|
from logging import getLogger
from hornet import models
from .common import ClientCommand
logger = getLogger(__name__)
class Command(ClientCommand):
def add_arguments(self, parser):
parser.add_argument("member_id", type=int)
def handle(self, member_id, *args, **kwargs):
try:
member = models.Member.objects.get(pk=member_id)
except models.Member.DoesNotExist:
self.stderr.write("Unknown member")
return
result = self.client.list_message(member)
for message in result:
print(" ", message)
self.stderr.write("Total messages: %s" % len(result))
|
python
|
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import logging
from base64 import urlsafe_b64encode
from collections import namedtuple
from datetime import datetime, tzinfo, timedelta
try:
from itertools import ifilterfalse as filterfalse
except ImportError:
from itertools import filterfalse
from adelphi.anonymize import anonymize_keyspace
from adelphi.exceptions import KeyspaceSelectionException
from adelphi.store import build_keyspace_objects
log = logging.getLogger('adelphi')
try:
from datetime import timezone
utc = timezone.utc
except ImportError:
class UTC(tzinfo):
def utcoffset(self, dt):
return timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return timedelta(0)
utc = UTC()
KsTuple = namedtuple('KsTuple',['ks_id', 'ks_obj'])
class BaseExporter:
# unique_everseen from itertools recipes
def __unique(self, iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def build_keyspace_id(self, ks):
m = hashlib.sha256()
m.update(ks.name.encode("utf-8"))
# Leverage the urlsafe base64 encoding defined in RFC 4648, section 5 to provide an ID which can
# safely be used for filenames as well
return urlsafe_b64encode(m.digest()).decode('ascii')
def get_keyspaces(self, cluster, props):
keyspace_names = props["keyspace-names"]
metadata = cluster.metadata
keyspaces = build_keyspace_objects(keyspace_names, metadata)
if len(keyspaces) == 0:
raise KeyspaceSelectionException("Unable to select a keyspace from specified keyspace names")
log.info("Processing the following keyspaces: %s", ','.join((ks.name for ks in keyspaces)))
# anonymize_keyspace mutates keyspace state so we must trap keyspace_id before we (possibly) call it
ids = {ks.name : self.build_keyspace_id(ks) for ks in keyspaces}
# Create a tuple to represent this keyspace. Note that we must perform anonymization as part of this
# operation because we need the keyspace name before anonymization to access the correct ID from the
# dict above.
def make_tuple(ks):
orig_name = ks.name
if props['anonymize']:
anonymize_keyspace(ks)
return KsTuple(ids[orig_name], ks)
return {t.ks_obj.name : t for t in [make_tuple(ks) for ks in keyspaces]}
def get_cluster_metadata(self, cluster):
hosts = cluster.metadata.all_hosts()
unique_dcs = self.__unique((host.datacenter for host in hosts))
unique_cass_vers = self.__unique((host.release_version for host in hosts))
return {"host_count": len(hosts), "dc_count": sum(1 for _ in unique_dcs), "cassandra_versions": ",".join(unique_cass_vers)}
def get_common_metadata(self, cluster, props):
metadata = {k : props[k] for k in ["purpose", "maturity"] if k in props}
metadata.update(self.get_cluster_metadata(cluster))
metadata["creation_timestamp"] = datetime.now(utc).isoformat()
return metadata
# Remaining methods in this class represent default impls of methods for subclasses
def export_all(self):
return self.export_schema()
# Note assumption of keyspace and keyspace_id as attrs
def each_keyspace(self, ks_fn):
ks_fn(self.keyspace, self.keyspace_id)
# Functions below assume self.metadata as a dict
def export_metadata_dict(self):
return {k : self.metadata[k] for k in self.metadata.keys() if self.metadata[k]}
def add_metadata(self, k, v):
"""Note that this function sets a metadata value for the entire exporter. If you
need something keyspace-specific you're probably better off just adding it to the
exported metadata directory."""
self.metadata[k] = v
|
python
|
import uuid
import hashlib
import prettytable
from keystoneclient import exceptions
# Decorator for cli-args
def arg(*args, **kwargs):
def _decorator(func):
        # Because of the semantics of decorator composition, if we just append
        # to the options list, positional options will appear to be backwards.
func.__dict__.setdefault('arguments', []).insert(0, (args, kwargs))
return func
return _decorator
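# A hedged usage sketch (do_user_create below is hypothetical, not part of this module):
# stacking @arg decorators builds up func.arguments in top-to-bottom order thanks to the
# insert(0, ...) above, ready to be fed to an argparse subparser's add_argument().
#
#     @arg('--tenant', metavar='<tenant-id>', help='Tenant ID.')
#     @arg('name', metavar='<name>', help='New user name.')
#     def do_user_create(kc, args):
#         ...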
def pretty_choice_list(l):
return ', '.join("'%s'" % i for i in l)
def print_list(objs, fields, formatters={}):
pt = prettytable.PrettyTable([f for f in fields], caching=False)
pt.aligns = ['l' for f in fields]
for o in objs:
row = []
for field in fields:
if field in formatters:
row.append(formatters[field](o))
else:
field_name = field.lower().replace(' ', '_')
data = getattr(o, field_name, '')
if data is None:
data = ''
row.append(data)
pt.add_row(row)
print pt.get_string(sortby=fields[0])
def _word_wrap(string, max_length=0):
"""wrap long strings to be no longer then max_length"""
if max_length <= 0:
return string
return '\n'.join([string[i:i + max_length] for i in
range(0, len(string), max_length)])
def print_dict(d, wrap=0):
"""pretty table prints dictionaries.
Wrap values to max_length wrap if wrap>0
"""
pt = prettytable.PrettyTable(['Property', 'Value'], caching=False)
pt.aligns = ['l', 'l']
for (prop, value) in d.iteritems():
if value is None:
value = ''
value = _word_wrap(value, max_length=wrap)
pt.add_row([prop, value])
print pt.get_string(sortby='Property')
def find_resource(manager, name_or_id):
"""Helper for the _find_* methods."""
# first try to get entity as integer id
try:
if isinstance(name_or_id, int) or name_or_id.isdigit():
return manager.get(int(name_or_id))
except exceptions.NotFound:
pass
# now try to get entity as uuid
try:
uuid.UUID(str(name_or_id))
return manager.get(name_or_id)
except (ValueError, exceptions.NotFound):
pass
# finally try to find entity by name
try:
return manager.find(name=name_or_id)
except exceptions.NotFound:
msg = ("No %s with a name or ID of '%s' exists." %
(manager.resource_class.__name__.lower(), name_or_id))
raise exceptions.CommandError(msg)
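# Illustrative only: find_resource(keystone.users, '42') is resolved as an integer id
# first, a value that parses as a UUID is fetched by id, and anything else (for example
# find_resource(keystone.tenants, 'demo')) falls through to the lookup by name.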
def unauthenticated(f):
"""Adds 'unauthenticated' attribute to decorated function.
Usage::
@unauthenticated
def mymethod(f):
...
"""
f.unauthenticated = True
return f
def isunauthenticated(f):
"""
Checks to see if the function is marked as not requiring authentication
with the @unauthenticated decorator. Returns True if decorator is
set to True, False otherwise.
"""
return getattr(f, 'unauthenticated', False)
def string_to_bool(arg):
if isinstance(arg, bool):
return arg
return arg.strip().lower() in ('t', 'true', 'yes', '1')
def hash_signed_token(signed_text):
hash_ = hashlib.md5()
hash_.update(signed_text)
return hash_.hexdigest()
|
python
|
import multiprocessing
print(multiprocessing.cpu_count(), "cores")  # counts the number of cores available on the system
# sequential processing
import threading  # module used to build threads
import urllib.request  # module used to request the url
import time  # module used to handle timing
# function created to download the images
def downloadImangens(imagepath, fileName):
    print("Downloading......", imagepath)
    urllib.request.urlretrieve(imagepath, fileName)  # performs the request to the web page
t0 = time.time()  # stores the start time of the run
for i in range(10):
    imageName = "imagens/image-" + str(i) + ".jpg"  # sets the name of each downloaded image
    downloadImangens("http://lorempixel.com.br/400/200/sports", imageName)  # performs the image download
t1 = time.time()  # end time after the run
totalTime = t1 - t0  # time difference between the start and the end of the run
print("Total execution time {}".format(totalTime))